/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>

extern void die (char *, struct pt_regs *, long);

/*
 * This routine is analogous to expand_stack() but instead grows the
 * register backing store (which grows towards higher addresses).
 * Since the register backing store is accessed sequentially, we
 * disallow growing the RBS by more than a page at a time.  Note that
 * the VM_GROWSUP flag can be set on any VM area, but that's fine
 * because the total process size is still limited by RLIMIT_STACK and
 * RLIMIT_AS.
 */
static inline long
expand_backing_store (struct vm_area_struct *vma, unsigned long address)
{
	unsigned long grow;

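	/*
	 * Growth is accounted in pages, so PAGE_SIZE >> PAGE_SHIFT works out to
	 * exactly one page.  The check below enforces RLIMIT_STACK on the size of
	 * the backing store area and RLIMIT_AS on the total address-space size.
	 */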
	grow = PAGE_SIZE >> PAGE_SHIFT;
	if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur
	    || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur))
		return -ENOMEM;
	vma->vm_end += PAGE_SIZE;
	vma->vm_mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow;
	return 0;
}

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;

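	/*
	 * Walk the kernel page table top-down (pgd, then pmd, then pte) and bail
	 * out as soon as any level is missing or bad.
	 */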
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

void
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (the VALID_PAGE macro is called during mmap).  There
	 * should be no vma for region 5 addresses anyway, so skip getting the
	 * semaphore and go directly to the code that handles a bad area.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;

	down_read(&mm->mmap_sem);

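	/*
	 * find_vma_prev() is used instead of find_vma() because prev_vma is needed
	 * at check_expansion below to recognize a fault just past the end of an
	 * upward-growing register backing store area.
	 */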
	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma)
		goto bad_area;

	/* find_vma_prev() returns vma such that address < vma->vm_end or NULL */
	if (address < vma->vm_start)
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

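	/*
	 * Build the set of access rights this fault requires: the ISR's execute,
	 * write, and read bits are repacked at the VM_EXEC, VM_WRITE, and VM_READ
	 * bit positions so the result can be compared against vma->vm_flags directly.
	 */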
	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
		| (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

  survive:
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
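	/*
	 * handle_mm_fault() reports how the fault was resolved: 1 is a minor fault
	 * and 2 a major fault (we only bump the respective counter), 0 means the
	 * fault could not be handled (we raise SIGBUS), and any other value sends
	 * us down the out-of-memory path.
	 */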
	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
	      case 1:
		++current->min_flt;
		break;
	      case 2:
		++current->maj_flt;
		break;
	      case 0:
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		signal = SIGBUS;
		goto bad_area;
	      default:
		goto out_of_memory;
	}
	up_read(&mm->mmap_sem);
	return;

  check_expansion:
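	/*
	 * The faulting address lies below the vma that find_vma_prev() returned.
	 * Two cases are acceptable: the address sits right at the end of an
	 * upward-growing register backing store area (prev_vma, VM_GROWSUP), or
	 * it falls under a downward-growing stack vma (VM_GROWSDOWN).  Either way
	 * it must stay within the vma's region and below RGN_MAP_LIMIT.
	 */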
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (rgn_index(address) != rgn_index(vma->vm_start)
		    || rgn_offset(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (rgn_index(address) != rgn_index(vma->vm_start)
		    || rgn_offset(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_backing_store(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
  bad_area_no_up:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, and the lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
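	/*
	 * For a user-mode fault, deliver the signal together with the faulting
	 * address, the signal code, and the ISR value.
	 */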
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

  no_context:
	if (isr & IA64_ISR_SP) {
		/*
		 * This fault was due to a speculative load; set the "ed" bit in the psr
		 * to ensure forward progress (the target register will get a NaT).
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale.  If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem.  So we check whether the translation is
	 * valid and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

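	/*
	 * If the faulting instruction has an entry in the exception table (as the
	 * user-access helpers do), done_with_exception() applies the fixup and we
	 * are finished.
	 */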
	if (done_with_exception(regs))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate
	 * things with extreme prejudice.
	 */
	bust_spinlocks(1);

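	/* A fault below PAGE_SIZE is almost certainly a NULL (or near-NULL) pointer dereference. */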
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	die("Oops", regs, isr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
	return;

  out_of_memory:
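	/* Never kill init: let it yield the CPU and then retry the fault. */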
	if (current->pid == 1) {
		yield();
		goto survive;
	}
	up_read(&mm->mmap_sem);
	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
}