/* $Id: fault.c,v 1.1.1.1.2.3 2002/10/24 05:52:58 mrbrown Exp $
 *
 *  linux/arch/sh/mm/fault.c
 *  Copyright (C) 1999  Niibe Yutaka
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>

#if defined(CONFIG_SH_KGDB)
#include <asm/kgdb.h>
#endif

extern void die(const char *, struct pt_regs *, long);
/*
 * Ugly, ugly, but the gotos result in better assembly..
 */
int __verify_write(const void * addr, unsigned long size)
{
	struct vm_area_struct * vma;
	unsigned long start = (unsigned long) addr;

	if (!size)
		return 1;

	vma = find_vma(current->mm, start);
	if (!vma)
		goto bad_area;
	if (vma->vm_start > start)
		goto check_stack;

good_area:
	if (!(vma->vm_flags & VM_WRITE))
		goto bad_area;
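	/*
	 * Convert the byte range into a page count: after folding in
	 * the offset of 'start' within its first page, 'size' counts
	 * the pages that remain beyond the first one.
	 */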
	size--;
	size += start & ~PAGE_MASK;
	size >>= PAGE_SHIFT;
	start &= PAGE_MASK;

	for (;;) {
		if (handle_mm_fault(current->mm, vma, start, 1) <= 0)
			goto bad_area;
		if (!size)
			break;
		size--;
		start += PAGE_SIZE;
		if (start < vma->vm_end)
			continue;
		vma = vma->vm_next;
		if (!vma || vma->vm_start != start)
			goto bad_area;
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}
	return 1;

check_stack:
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, start) == 0)
		goto good_area;

bad_area:
	return 0;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long page;
	unsigned long fixup;

#if defined(CONFIG_SH_KGDB)
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
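	/*
	 * In this kernel, handle_mm_fault() returns 1 for a minor
	 * fault, 2 for a major fault, 0 when the fault could not be
	 * handled (send SIGBUS), and a negative value when we are
	 * out of memory.
	 */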
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case 1:
		tsk->min_flt++;
		break;
	case 2:
		tsk->maj_flt++;
		break;
	case 0:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		force_sig(SIGSEGV, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_table(regs->pc);
	if (fixup != 0) {
		regs->pc = fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
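	/*
	 * Read the page table base from the MMU's TTB register and
	 * walk the two-level page table by hand to dump the pde/pte
	 * entries for the faulting address.
	 */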
	asm volatile("mov.l	%1, %0"
		     : "=r" (page)
		     : "m" (__m(MMU_TTB)));
	if (page) {
		page = ((unsigned long *) page)[address >> 22];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
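	/* Don't kill init: give it a chance to free memory and retry. */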
	if (current->pid == 1) {
		yield();
		goto survive;
	}
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

/*
 * Called with interrupts disabled.
 */
asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			       unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

#if defined(CONFIG_SH_KGDB)
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif
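	/*
	 * Pick the page table to walk: P3 is the kernel's TLB-mapped
	 * segment, so it is resolved from the kernel page table;
	 * other addresses above TASK_SIZE cannot be refilled here,
	 * and user addresses use the faulting process's page table.
	 */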
	if (address >= P3SEG && address < P4SEG)
		dir = pgd_offset_k(address);
	else if (address >= TASK_SIZE)
		return 1;
	else if (!current->mm)
		return 1;
	else
		dir = pgd_offset(current->mm, address);

	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd))
		return 1;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 1;
	}
	pte = pte_offset(pmd, address);
	entry = *pte;
	if (pte_none(entry) || pte_not_present(entry)
	    || (writeaccess && !pte_write(entry)))
		return 1;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);
#if defined(__SH4__)
	/*
	 * The ITLB is not affected by the "ldtlb" instruction,
	 * so we must flush the entry ourselves.
	 */
	__flush_tlb_page(get_asid(), address & PAGE_MASK);
#endif
	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
	return 0;
}

void update_mmu_cache(struct vm_area_struct * vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned long pteval;
	unsigned long vpn;
#if defined(__SH4__)
	struct page *page;
	unsigned long ptea;
#endif

	/* Ptrace may call this routine. */
	if (vma && current->active_mm != vma->vm_mm)
		return;

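	/*
	 * On SH-4 the caches are virtually indexed and can alias, so
	 * write back the kernel's P1 view of the page before its
	 * first mapping goes in; PG_mapped records that this has
	 * been done.
	 */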
#if defined(__SH4__)
	page = pte_page(pte);
	if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
		__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
		__set_bit(PG_mapped, &page->flags);
	}
#endif

	save_and_cli(flags);

	/* Set PTEH register */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	ctrl_outl(vpn, MMU_PTEH);

	pteval = pte_val(pte);
#if defined(__SH4__)
	/* Set PTEA register */
	/* TODO: make this look less hacky */
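	/*
	 * PTEA carries the extra space attribute (SA/TC) bits used
	 * for PCMCIA accesses; they are kept in spare bits of the
	 * pte value and repacked here.
	 */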
	ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1);
	ctrl_outl(ptea, MMU_PTEA);
#endif

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
	/* conveniently, we want all the software flags to be 0 anyway */
	ctrl_outl(pteval, MMU_PTEL);

	/* Load the TLB */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	restore_flags(flags);
}

void __flush_tlb_page(unsigned long asid, unsigned long page)
{
	unsigned long addr, data;

	/*
	 * NOTE: PTEH.ASID should be set to this MM
	 *       _AND_ we need to write ASID to the array.
	 *
	 * It would be simple if we didn't need to set PTEH.ASID...
	 */
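	/*
	 * Writing the TLB address array with the associativity bit
	 * set makes the hardware look up a matching entry and
	 * rewrite it; since the data written has the VALID bit off,
	 * a matching entry is simply invalidated.
	 */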
#if defined(__sh3__)
	addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000) | MMU_PAGE_ASSOC_BIT;
	data = (page & 0xfffe0000) | asid; /* VALID bit is off */
	ctrl_outl(data, addr);
#elif defined(__SH4__)
	addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
	data = page | asid; /* VALID bit is off */
	jump_to_P2();
	ctrl_outl(data, addr);
	back_to_P1();
#endif
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

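		/*
		 * The associative TLB write matches on PTEH.ASID, so
		 * temporarily switch to the target mm's ASID when
		 * flushing on behalf of another process.
		 */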
		save_and_cli(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}
		__flush_tlb_page(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		restore_flags(flags);
	}
}

void flush_tlb_range(struct mm_struct *mm, unsigned long start,
		     unsigned long end)
{
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

		save_and_cli(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
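		/*
		 * Flushing page by page only pays off for small
		 * ranges; beyond a quarter of the TLB it is cheaper
		 * to drop the whole context and take a fresh ASID.
		 */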
		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
			unsigned long saved_asid = MMU_NO_ASID;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}
			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		restore_flags(flags);
	}
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Invalidate all TLB entries of this process.  Instead of
	 * invalidating each entry, we get a new MMU context.
	 */
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;

		save_and_cli(flags);
		mm->context = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		restore_flags(flags);
	}
}

void flush_tlb_all(void)
{
	unsigned long flags, status;

	/*
	 * Flush the whole TLB.
	 *
	 * Write to the MMU control register's flush bit:
	 *	the TF bit for SH-3, the TI bit for SH-4.
	 *	It's the same position, bit #2.
	 */
	save_and_cli(flags);
	status = ctrl_inl(MMUCR);
	status |= 0x04;
	ctrl_outl(status, MMUCR);
	restore_flags(flags);
}