/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/tlbmiss.c
 *
 * Original code from fault.c
 * Copyright (C) 2000, 2001  Paolo Alberelli
 *
 * Fast PTE->TLB refill path
 * Copyright (C) 2003 Richard.Curnow@superh.com
 *
 * IMPORTANT NOTES:
 * The do_fast_page_fault function is called from a context in entry.S where
 * very few registers have been saved.  In particular, the code in this file
 * must be compiled not to use ANY caller-save registers that are not part of
 * the restricted save set.  It also must not call functions elsewhere in the
 * kernel, or else the excepting context will see corruption in its caller-save
 * registers.  Furthermore, the entry.S save area is non-reentrant, so this
 * code has to run with SR.BL==1, i.e. no interrupts taken inside it, and
 * panic on any exception.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/registers.h>		/* required by inline asm statements */

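#ifdef CONFIG_SH64_PROC_TLB
/* TLB statistics counter incremented below; assumed to be defined
   (non-static) in fault.c when CONFIG_SH64_PROC_TLB is enabled. */
extern unsigned long long calls_to_do_fast_page_fault;
#endif
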
/* Callable from fault.c, so not static */
__inline__ void __do_tlb_refill(unsigned long address,
				unsigned long long is_text_not_data, pte_t *pte)
{
	unsigned long long ptel;
	unsigned long long pteh = 0;
	struct tlb_info *tlbp;
	unsigned long long next;

	/* Get PTEL first */
	ptel = pte_val(*pte);

	/*
	 * Set PTEH register
	 */
	pteh = address & MMU_VPN_MASK;

	/* Sign extend based on neff. */
#if (NEFF == 32)
	/* Faster sign extension */
	pteh = (unsigned long long)(signed long long)(signed long)pteh;
#else
	/* General case */
	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
#endif
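	/* Worked example: with NEFF == 32, a VPN of 0x80000000 sign-extends
	   to 0xffffffff80000000. */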

	/* Set the ASID. */
	pteh |= get_asid() << PTEH_ASID_SHIFT;

	pteh |= PTEH_VALID;

	/* Set PTEL register; set_pte has already performed the sign extension. */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
	ptel |= _PAGE_FLAGS_HARDWARE_DEFAULT; /* add default flags */

	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
	next = tlbp->next;
	__flush_tlb_slot(next);
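	/* PTEL goes into config offset 1 first; PTEH (offset 0) carries the
	   valid bit, so writing it last ensures the entry only becomes live
	   once both halves are in place. */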
	asm volatile ("putcfg %0,1,%2\n\t"
		      "putcfg %0,0,%1\n"
		      : : "r" (next), "r" (pteh), "r" (ptel));

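	/* Simple round-robin replacement: advance to the next slot, wrapping
	   back to the first entry once the last one has been used. */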
	next += TLB_STEP;
	if (next > tlbp->last) next = tlbp->first;
	tlbp->next = next;
}

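/* Service a miss against the kernel's vmalloc/ioremap region by walking the
   kernel page tables (pgd_offset_k, i.e. swapper_pg_dir).  Only reached for
   kernel-mode accesses; see do_fast_page_fault below. */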
static int handle_vmalloc_fault(struct mm_struct *mm,
				unsigned long long protection_flags,
				unsigned long long textaccess,
				unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset_k(address);
	pmd = pmd_offset(dir, address);

	if (pmd_none(*pmd)) {
		return 0;
	}
	if (pmd_bad(*pmd)) {
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry)) {
		return 0;
	}

	if ((pte_val(entry) & protection_flags) != protection_flags) {
		return 0;
	}

	__do_tlb_refill(address, textaccess, pte);

	return 1;
}

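/* Walk the faulting process's page tables and refill the TLB from the PTE if
   one with sufficient protection bits is present.  Returns 1 on success, 0 to
   punt to the generic fault handler. */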
static int handle_tlbmiss(struct mm_struct *mm,
			  unsigned long long protection_flags,
			  unsigned long long textaccess,
			  unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* NB. The PGD currently only contains a single entry - there is no
	   page table tree stored for the top half of the address space, since
	   virtual pages in that region should never be mapped in user mode.
	   (In kernel mode, the only things in that region are the 512Mb super
	   page (locked in) and vmalloc (modules) + I/O device pages (handled
	   by handle_vmalloc_fault), so no PGD for the upper half is required
	   by kernel mode either.)

	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
	   the next test is necessary.  - RPC */
	if (address >= (unsigned long) TASK_SIZE) {
		/* upper half - never has page table entries. */
		return 0;
	}
	dir = pgd_offset(mm, address);
	if (pgd_none(*dir)) {
		return 0;
	}
	if (!pgd_present(*dir)) {
		return 0;
	}

	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd)) {
		return 0;
	}
	if (!pmd_present(*pmd)) {
		return 0;
	}
	pte = pte_offset(pmd, address);
	entry = *pte;
	if (pte_none(entry)) {
		return 0;
	}
	if (!pte_present(entry)) {
		return 0;
	}

	/* If the page doesn't have sufficient protection bits set to service
	   the kind of fault being handled, there's not much point doing the
	   TLB refill.  Punt the fault to the general handler. */
	if ((pte_val(entry) & protection_flags) != protection_flags) {
		return 0;
	}

	__do_tlb_refill(address, textaccess, pte);

	return 1;
}

/* Put all this information into one structure so that everything is just
   arithmetic relative to a single base address.  This reduces the number of
   movi/shori pairs needed just to load addresses of static data. */
struct expevt_lookup {
	unsigned short protection_flags[8];
	unsigned char  is_text_access[8];
	unsigned char  is_write_access[8];
};

#define PRU (1<<9)
#define PRW (1<<8)
#define PRX (1<<7)
#define PRR (1<<6)

#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
#define YOUNG (_PAGE_ACCESSED)

/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against
   whether the fault happened in user mode or privileged mode. */
static struct expevt_lookup expevt_lookup_table = {
	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
};
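/* Only slots 1, 4 and 6 are ever selected by the EXPEVT hash below: 1 for
   ITLBMISS (instruction fetch, PRX), 4 for RTLBMISS (data read, PRR) and 6
   for WTLBMISS (data write, PRW). */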

/*
   This routine handles page faults that can be serviced just by refilling a
   TLB entry from an existing page table entry.  (This case represents a very
   large majority of page faults.)  Return 1 if the fault was successfully
   handled.  Return 0 if the fault could not be handled.  (This leads into the
   general fault handling in fault.c, which deals with mapping file-backed
   pages, stack growth, segmentation faults, swapping etc.)
 */
asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
				  unsigned long long expevt,
				  unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long long textaccess;
	unsigned long long protection_flags;
	unsigned long long index;
	unsigned long long expevt4;

	/* The next few lines implement a way of hashing EXPEVT into a small
	   array index which can be used to look up parameters specific to the
	   type of TLBMISS being handled.  Note:
	   ITLBMISS has EXPEVT==0xa40
	   RTLBMISS has EXPEVT==0x040
	   WTLBMISS has EXPEVT==0x060
	*/
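	/* Worked through: 0xa40 >> 4 = 0xa4, 0xa4 ^ (0xa4 >> 5) = 0xa1,
	   0xa1 & 7 = 1; similarly 0x040 yields index 4 and 0x060 yields
	   index 6, matching the populated slots of expevt_lookup_table. */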

	expevt4 = (expevt >> 4);
	/* TODO : xor ssr_md into this expression too.  Then we can check that
	   PRU is set when it needs to be. */
	index = expevt4 ^ (expevt4 >> 5);
	index &= 7;
	protection_flags = expevt_lookup_table.protection_flags[index];
	textaccess       = expevt_lookup_table.is_text_access[index];

#ifdef CONFIG_SH64_PROC_TLB
	++calls_to_do_fast_page_fault;
#endif

	/* SIM
	 * Note this is now called with interrupts still disabled.
	 * This is to cope with being called for a missing I/O port
	 * address with interrupts disabled.  This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try to debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
	    (address >= IOBASE_VADDR  && address < IOBASE_END)) {
		if (ssr_md) {
			/* Process contexts can never have this address range
			   mapped */
			if (handle_vmalloc_fault(mm, protection_flags,
						 textaccess, address)) {
				return 1;
			}
		}
	} else if (!in_interrupt() && mm) {
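		/* Only a process context with a valid mm can have its miss
		   refilled from its page tables here; interrupt context and
		   kernel threads fall through to the slow path. */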
		if (handle_tlbmiss(mm, protection_flags, textaccess, address)) {
			return 1;
		}
	}

	return 0;
}