/*
 * arch/cris/mm/fault.c
 *
 * Copyright (C) 2000-2010 Axis Communications AB
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <asm/uaccess.h>
#include <arch/system.h>

extern int find_fixup_code(struct pt_regs *);
extern void die_if_kernel(const char *, struct pt_regs *, long);
extern void show_registers(struct pt_regs *regs);

/* debug of low-level TLB reload */
#undef DEBUG

#ifdef DEBUG
#define D(x) x
#else
#define D(x)
#endif

/* debug of higher-level faults */
#define DPG(x)

/* current active page directory */

DEFINE_PER_CPU(pgd_t *, current_pgd);
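
/*
 * Page reserved for the signal-return trampolines when stack
 * execution is disabled; faults on it are handled like vmalloc
 * faults (see below).
 */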
unsigned long cris_signal_return_page;

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * Notice that the address we're given is aligned to the page the fault
 * occurred in, since we only get the PFN in R_MMU_CAUSE, not the
 * complete address.
 *
 * error_code (as reconstructed for die_if_kernel below):
 *      bit 0 == 0 means no page found, 1 means protection fault
 *      bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it sends a SIGSEGV or SIGBUS
 * for user-mode faults; kernel-mode faults are handled through the
 * exception fixup tables or end in an oops.
 */

asmlinkage void
do_page_fault(unsigned long address, struct pt_regs *regs,
              int protection, int writeaccess)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        siginfo_t info;
        int fault;

        D(printk(KERN_DEBUG
                 "Page fault for %lX on %X at %lX, prot %d write %d\n",
                 address, smp_processor_id(), instruction_pointer(regs),
                 protection, writeaccess));

        tsk = current;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * NOTE2: This is done so that, when updating the vmalloc
         * mappings, we don't have to walk all processes' pgdirs and
         * add the high mappings all at once. Instead we do it as they
         * are used. However, vmalloc'ed page entries have the
         * PAGE_GLOBAL bit set, so sometimes the TLB can use a
         * lingering entry.
         *
         * This verifies that the fault happens in kernel space
         * and that the fault was not a protection error
         * (i.e. !protection below).
         */

        if (address >= VMALLOC_START &&
            !protection &&
            !user_mode(regs))
                goto vmalloc_fault;

        /* When stack execution is not allowed we store the signal
         * trampolines in the reserved cris_signal_return_page.
         * Handle this in the exact same way as vmalloc (we know
         * that the mapping is there and is valid so no need to
         * call handle_mm_fault).
         */
        if (cris_signal_return_page &&
            address == cris_signal_return_page &&
            !protection && user_mode(regs))
                goto vmalloc_fault;

        /* we can and should enable interrupts at this point */
        local_irq_enable();

        mm = tsk->mm;
        info.si_code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or "atomic" operation or have no
         * user context, we must not take the fault.
         */

        if (in_atomic() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (user_mode(regs)) {
                /*
                 * Accessing the stack below usp is always a bug.
                 * We only get page-aligned addresses, so we can only
                 * check that we're within a page of usp, but that is
                 * enough to catch at least the most blatant errors.
                 */
                if (address + PAGE_SIZE < rdusp())
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */

 good_area:
        info.si_code = SEGV_ACCERR;

        /* first do some preliminary protection checks */

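        /*
         * writeaccess is 2 for an instruction fetch (execute fault),
         * 1 for a write and 0 for a read; match it against the
         * corresponding VM_* permission on the vma.
         */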
        if (writeaccess == 2) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else if (writeaccess == 1) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */

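        /*
         * handle_mm_fault() returns a bitmask of VM_FAULT_* flags:
         * the error bits are checked first, then VM_FAULT_MAJOR
         * decides which fault counter to bump.
         */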
        fault = handle_mm_fault(mm, vma, address,
                                (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */

 bad_area:
        up_read(&mm->mmap_sem);

 bad_area_nosemaphore:
        DPG(show_registers(regs));

        /* User mode accesses just cause a SIGSEGV */

        if (user_mode(regs)) {
                printk(KERN_NOTICE "%s (pid %d) segfaults for page "
                        "address %08lx at pc %08lx\n",
                        tsk->comm, tsk->pid,
                        address, instruction_pointer(regs));

                /* With DPG on, we've already dumped registers above. */
                DPG(if (0))
                        show_registers(regs);

#ifdef CONFIG_NO_SEGFAULT_TERMINATION
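                /*
                 * Instead of terminating the task, park it on a wait
                 * queue whose condition can never become true, so it
                 * can still be inspected instead of disappearing.
                 */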
                DECLARE_WAIT_QUEUE_HEAD(wq);
                wait_event_interruptible(wq, 0 == 1);
#else
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void *)address;
                force_sig_info(SIGSEGV, &info, tsk);
#endif
                return;
        }

 no_context:

        /* Are we prepared to handle this kernel fault?
         *
         * (The kernel has valid exception points in the source where
         * it accesses user memory. When a fault hits one of those
         * points, we find the address in a table and jump to some
         * fixup code that loads an appropriate error code.)
         */

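        /*
         * A rough sketch, assuming the classic two-word layout that
         * matches the fixup->fixup use in find_fixup_code() below:
         *
         *      struct exception_table_entry {
         *              unsigned long insn;     (faulting address)
         *              unsigned long fixup;    (resume address)
         *      };
         */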
        if (find_fixup_code(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */

        if (!oops_in_progress) {
                oops_in_progress = 1;
                if ((unsigned long)address < PAGE_SIZE)
                        printk(KERN_ALERT "Unable to handle kernel NULL "
                                "pointer dereference\n");
                else
                        printk(KERN_ALERT "Unable to handle kernel access"
                                " at virtual address %08lx\n", address);

                die_if_kernel("Oops", regs, (writeaccess << 1) | protection);
                oops_in_progress = 0;
        }

        do_exit(SIGKILL);

        /*
         * We ran out of memory, or some other thing happened to us
         * that made us unable to handle the page fault gracefully.
         */

 out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
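        /* Defer to the generic OOM handling rather than killing
         * this particular task right here. */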
        pagefault_out_of_memory();
        return;

 do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a SIGBUS, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
        return;

 vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Use current_pgd instead of tsk->active_mm->pgd,
                 * since the latter might be unavailable if this code
                 * is executed in an inopportunely timed irq (e.g.
                 * inside schedule(), between switch_mm and
                 * switch_to...).
                 */

                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                pgd = (pgd_t *)per_cpu(current_pgd, smp_processor_id()) + offset;
                pgd_k = init_mm.pgd + offset;
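
                /*
                 * pgd and pgd_k now point at the same slot in the
                 * per-CPU page directory and in the reference
                 * (init_mm) page directory, respectively.
                 */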

                /* Since we're two-level, we don't need to do both
                 * set_pgd and set_pmd (they do the same thing). If
                 * we go three-level at some point, do the right thing
                 * with pgd_present and set_pgd here.
                 *
                 * Also, since the vmalloc area is global, we don't
                 * need to copy individual PTEs; it is enough to
                 * copy the pgd pointer into the pte page of the
                 * root task. If that is there, we'll find our pte if
                 * it exists.
                 */

                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);
                if (!pud_present(*pud_k))
                        goto no_context;

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);

                if (!pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;

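                /*
                 * Copy the reference entry; on the two-level CRIS
                 * page tables this is all that is needed to make the
                 * vmalloc mapping visible to this task.
                 */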
                set_pmd(pmd, *pmd_k);

                /* Make sure the actual PTE exists as well to
                 * catch kernel vmalloc-area accesses to non-mapped
                 * addresses. If we don't do this, this will just
                 * silently loop forever.
                 */

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;

                return;
        }
}

/* Find fixup code. */
int
find_fixup_code(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;
        /* In case of a delay-slot fault (v32), the low bit of the
         * instruction pointer is set; mask it off before the lookup. */
        unsigned long ip = (instruction_pointer(regs) & ~0x1);

        fixup = search_exception_tables(ip);
        if (fixup != NULL) {
                /* Adjust the instruction pointer in the stackframe. */
                instruction_pointer(regs) = fixup->fixup;
                arch_fixup(regs);
                return 1;
        }

        return 0;
}