/*
 * fault.c: Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

extern int prom_node_root;

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

int num_segmaps, num_contexts;
int invalid_segment;

/* Various Virtual Address Cache parameters we find at boot time... */

int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;

/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
        unsigned long total = 0;
        int i;

        for (i = 0; sp_banks[i].num_bytes; i++)
                total += sp_banks[i].num_bytes;

        return total;
}

extern void sun4c_complete_all_stores(void);

/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
                                unsigned long svaddr, unsigned long aerr,
                                unsigned long avaddr)
{
        sun4c_complete_all_stores();
        printk("FAULT: NMI received\n");
        printk("SREGS: Synchronous Error %08lx\n", serr);
        printk("       Synchronous Vaddr %08lx\n", svaddr);
        printk("      Asynchronous Error %08lx\n", aerr);
        printk("      Asynchronous Vaddr %08lx\n", avaddr);
        if (sun4c_memerr_reg)
                printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
        printk("REGISTER DUMP:\n");
        show_regs(regs);
        prom_halt();
}

static void unhandled_fault(unsigned long, struct task_struct *,
                            struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
                            struct pt_regs *regs)
{
        if ((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT
                       "Unable to handle kernel NULL pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request "
                       "at virtual address %08lx\n", address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
               (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
               (tsk->mm ? (unsigned long) tsk->mm->pgd :
                          (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
                            unsigned long address)
{
        struct pt_regs regs;
        unsigned long g2;
        unsigned int insn;
        int i;

        i = search_extables_range(ret_pc, &g2);
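        /* search_extables_range() returns the small codes handled below
         * for faults inside the special copy-routine ranges; values
         * above 10 are real fixup addresses (see the no_context path
         * in do_sparc_fault below).
         */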
        switch (i) {
        case 3:
                /* load & store will be handled by fixup */
                return 3;

        case 1:
                /* store will be handled by fixup, load will bump out */
                /* for _to_ macros */
                insn = *((unsigned int *) pc);
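                /* Format-3 ld/st encoding: op3 bit 2 (insn bit 21) is
                 * set for stores and clear for loads.
                 */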
                if ((insn >> 21) & 1)
                        return 1;
                break;

        case 2:
                /* load will be handled by fixup, store will bump out */
                /* for _from_ macros */
                insn = *((unsigned int *) pc);
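                /* Loads have op3 bit 2 clear; op3 == 0x0f (swap) both
                 * loads and stores, so let the fixup take it as well.
                 */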
                if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
                        return 2;
                break;

        default:
                break;
        }

        memset(&regs, 0, sizeof(regs));
        regs.pc = pc;
        regs.npc = pc + 4;
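        /* Capture the current %psr for the synthesized register dump. */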
        __asm__ __volatile__(
                "rd %%psr, %0\n\t"
                "nop\n\t"
                "nop\n\t"
                "nop\n" : "=r" (regs.psr));
        unhandled_fault(address, current, &regs);

        /* Not reached */
        return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, sig))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
               (void *)regs->u_regs[UREG_FP], code);

        print_vma_addr(KERN_CONT " in ", regs->pc);

        printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                               unsigned long addr)
{
        siginfo_t info;

        info.si_signo = sig;
        info.si_code = code;
        info.si_errno = 0;
        info.si_addr = (void __user *) addr;
        info.si_trapno = 0;

        if (unlikely(show_unhandled_signals))
                show_signal_msg(regs, sig, info.si_code,
                                addr, current);

        force_sig_info(sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
                                                    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
        unsigned int insn;

        if (text_fault)
                return regs->pc;

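        /* For a data fault, decode the faulting instruction to find the
         * effective address.  A supervisor-mode pc can be read directly;
         * a user pc must be fetched with __get_user().
         */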
        if (regs->psr & PSR_PS) {
                insn = *(unsigned int *) regs->pc;
        } else {
                __get_user(insn, (unsigned int *) regs->pc);
        }

        return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                                      int text_fault)
{
        unsigned long addr = compute_si_addr(regs, text_fault);

        __do_fault_siginfo(code, sig, regs, addr);
}

asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
                               unsigned long address)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        unsigned int fixup;
        unsigned long g2;
        int from_user = !(regs->psr & PSR_PS);
        int fault, code;
        unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                              (write ? FAULT_FLAG_WRITE : 0));

        if (text_fault)
                address = regs->pc;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        code = SEGV_MAPERR;
        if (!ARCH_SUN4C && address >= TASK_SIZE)
                goto vmalloc_fault;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
        down_read(&mm->mmap_sem);

        /*
         * The kernel referencing a bad kernel pointer can lock up
         * a sun4c machine completely, so we must attempt recovery.
         */
        if (!from_user && address >= PAGE_OFFSET)
                goto bad_area;

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        current->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
                                      1, regs, address);
                } else {
                        current->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
                                      1, regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;

                        /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (from_user) {
                do_fault_siginfo(code, SIGSEGV, regs, text_fault);
                return;
        }

        /* Is this in ex_table? */
no_context:
        g2 = regs->u_regs[UREG_G2];
        if (!from_user) {
                fixup = search_extables_range(regs->pc, &g2);
                if (fixup > 10) { /* Values below are reserved for other things */
                        extern const unsigned __memset_start[];
                        extern const unsigned __memset_end[];
                        extern const unsigned __csum_partial_copy_start[];
                        extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
                        printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
                        printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
                               regs->pc, fixup, g2);
#endif
                        if ((regs->pc >= (unsigned long)__memset_start &&
                             regs->pc < (unsigned long)__memset_end) ||
                            (regs->pc >= (unsigned long)__csum_partial_copy_start &&
                             regs->pc < (unsigned long)__csum_partial_copy_end)) {
                                regs->u_regs[UREG_I4] = address;
                                regs->u_regs[UREG_I5] = regs->pc;
                        }
                        regs->u_regs[UREG_G2] = g2;
                        regs->pc = fixup;
                        regs->npc = regs->pc + 4;
                        return;
                }
        }

        unhandled_fault(address, tsk, regs);
        do_exit(SIGKILL);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (from_user) {
                pagefault_out_of_memory();
                return;
        }
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
        if (!from_user)
                goto no_context;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;

                pgd = tsk->active_mm->pgd + offset;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        pgd_val(*pgd) = pgd_val(*pgd_k);
                        return;
                }

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);

                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                *pmd = *pmd_k;
                return;
        }
}

asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
                               unsigned long address)
{
        extern void sun4c_update_mmu_cache(struct vm_area_struct *,
                                           unsigned long, pte_t *);
        extern pte_t *sun4c_pte_offset_kernel(pmd_t *, unsigned long);
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        pgd_t *pgdp;
        pte_t *ptep;

        if (text_fault) {
                address = regs->pc;
        } else if (!write &&
                   !(regs->psr & PSR_PS)) {
                unsigned int insn, __user *ip;

                ip = (unsigned int __user *)regs->pc;
                if (!get_user(insn, ip)) {
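                        /* The atomic read-modify-write instructions
                         * (ldstub/swap and their alternate-space forms)
                         * also store, yet can fault with 'write' clear;
                         * match their op/op3 bit pattern and treat the
                         * fault as a write.
                         */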
                        if ((insn & 0xc1680000) == 0xc0680000)
                                write = 1;
                }
        }

        if (!mm) {
                /* We are oopsing. */
                do_sparc_fault(regs, text_fault, write, address);
                BUG();  /* P3 Oops already, you bitch */
        }

        pgdp = pgd_offset(mm, address);
        ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

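        /* Fast path: if the software PTE already permits this access,
         * just set the referenced/modified bits and drop the PTE
         * straight into the MMU, bypassing the generic fault handler.
         */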
        if (pgd_val(*pgdp)) {
                if (write) {
                        if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
                                           == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
                                unsigned long flags;

                                *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
                                              _SUN4C_PAGE_MODIFIED |
                                              _SUN4C_PAGE_VALID |
                                              _SUN4C_PAGE_DIRTY);

                                local_irq_save(flags);
                                if (sun4c_get_segmap(address) != invalid_segment) {
                                        sun4c_put_pte(address, pte_val(*ptep));
                                        local_irq_restore(flags);
                                        return;
                                }
                                local_irq_restore(flags);
                        }
                } else {
                        if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
                                           == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
                                unsigned long flags;

                                *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
                                              _SUN4C_PAGE_VALID);

                                local_irq_save(flags);
                                if (sun4c_get_segmap(address) != invalid_segment) {
                                        sun4c_put_pte(address, pte_val(*ptep));
                                        local_irq_restore(flags);
                                        return;
                                }
                                local_irq_restore(flags);
                        }
                }
        }

        /* This conditional is 'interesting'. */
        if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
            && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
                /* Note: It is safe to not grab the MMAP semaphore here because
                 * we know that update_mmu_cache() will not sleep for
                 * any reason (at least not in the current implementation)
                 * and therefore there is no danger of another thread getting
                 * on the CPU and doing a shrink_mmap() on this vma.
                 */
                sun4c_update_mmu_cache(find_vma(current->mm, address), address,
                                       ptep);
        else
                do_sparc_fault(regs, text_fault, write, address);
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int code;

        code = SEGV_MAPERR;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
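        /* No retry flags here: both an OOM and a SIGBUS result from
         * handle_mm_fault() are reported to the task as SIGBUS.
         */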
        switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
        case VM_FAULT_SIGBUS:
        case VM_FAULT_OOM:
                goto do_sigbus;
        }
        up_read(&mm->mmap_sem);
        return;
bad_area:
        up_read(&mm->mmap_sem);
        __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
        return;

do_sigbus:
        up_read(&mm->mmap_sem);
        __do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
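        /* Window save/restore traps need a doubleword-aligned stack
         * pointer; anything else earns the task a SIGILL.
         */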
        if (sp & 0x7UL)
                force_sig(SIGILL, current);
}

void window_overflow_fault(void)
{
        unsigned long sp;

        sp = current_thread_info()->rwbuf_stkptrs[0];
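        /* A register window save area is 16 words (64 bytes), so sp + 0x38
         * is its last doubleword.  If the window straddles a page boundary,
         * both pages must be faulted in.  The underflow and window-return
         * handlers below use the same check.
         */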
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 1);
        force_user_fault(sp, 1);

        check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);

        check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
        unsigned long sp;

        sp = regs->u_regs[UREG_FP];
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);

        check_stack_aligned(sp);
}