/*
 *  linux/arch/cris/mm/tlb.c
 *
 *  Copyright (C) 2000, 2001  Axis Communications AB
 *
 *  Authors:   Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/svinto.h>
#include <asm/mmu_context.h>

#define D(x)

/* CRIS in Etrax100LX TLB */

#define NUM_TLB_ENTRIES 64
#define NUM_PAGEID 64
#define INVALID_PAGEID 63
#define NO_CONTEXT -1

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is held in R_MMU_CONTEXT, and each TLB entry
 * contains a page_id that has to match it to give a hit. In page_id_map,
 * we keep track of which page_id each mm has been assigned, so that we
 * know when to invalidate TLB entries.
 *
 * The last page_id is never used as a running context - it is reserved
 * as an invalid page_id so we can write TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush, leaving the TLB in an inconsistent state.
 */
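
/* Example of the mechanism: when switch_mm() below switches to an mm
 * that was assigned context 5, R_MMU_CONTEXT is loaded with page_id 5,
 * and from then on only TLB entries tagged with page_id 5 (or entries
 * marked global) can hit.
 */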

struct mm_struct *page_id_map[NUM_PAGEID];

static int map_replace_ptr = 1;  /* which page_id_map entry to replace next */

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* the vpn of i & 0xf is used so we don't write identical TLB
	 * entries into the same 4-way entry group.
	 */

	save_and_cli(flags); /* flush needs to be atomic */
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
		*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
			      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

		*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no  ) |
			      IO_STATE(R_TLB_LO, valid,  no  ) |
			      IO_STATE(R_TLB_LO, kernel, no  ) |
			      IO_STATE(R_TLB_LO, we,     no  ) |
			      IO_FIELD(R_TLB_LO, pfn,    0   ) );
	}
	restore_flags(flags);
	D(printk("tlb: flushed all\n"));
}
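
/* flush_tlb_all() is also used by tlb_init() at the end of this file to
 * bring the TLB into a known, all-invalid state at boot.
 */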

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	/* mark the TLB entries that match the page_id as invalid.
	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages. is it worth the extra I/O?
	 */

	save_and_cli(flags);  /* flush needs to be atomic */
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no  ) |
				      IO_STATE(R_TLB_LO, valid,  no  ) |
				      IO_STATE(R_TLB_LO, kernel, no  ) |
				      IO_STATE(R_TLB_LO, we,     no  ) |
				      IO_FIELD(R_TLB_LO, pfn,    0   ) );
		}
	}
	restore_flags(flags);
}

/* invalidate a single page */

void
flush_tlb_page(struct vm_area_struct *vma,
	       unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page 0x%lx in context %d (%p)\n", addr, page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */

	save_and_cli(flags);  /* flush needs to be atomic */
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				addr; /* reusing the same vpn as before works */

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no  ) |
				      IO_STATE(R_TLB_LO, valid,  no  ) |
				      IO_STATE(R_TLB_LO, kernel, no  ) |
				      IO_STATE(R_TLB_LO, we,     no  ) |
				      IO_FIELD(R_TLB_LO, pfn,    0   ) );
		}
	}
	restore_flags(flags);
}

/* invalidate a page range */

void
flush_tlb_range(struct mm_struct *mm,
		unsigned long start,
		unsigned long end)
{
	int page_id = mm->context;
	int i;
	unsigned long flags;

	D(printk("tlb: flush range 0x%lx<->0x%lx in context %d (%p)\n",
		 start, end, page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	start &= PAGE_MASK;  /* probably not necessary */
	end &= PAGE_MASK;    /* ditto */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address range
	 */

	save_and_cli(flags);  /* flush needs to be atomic */
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi, vpn;
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		vpn = tlb_hi & PAGE_MASK;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    vpn >= start && vpn < end) {
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no  ) |
				      IO_STATE(R_TLB_LO, valid,  no  ) |
				      IO_STATE(R_TLB_LO, kernel, no  ) |
				      IO_STATE(R_TLB_LO, we,     no  ) |
				      IO_FIELD(R_TLB_LO, pfn,    0   ) );
		}
	}
	restore_flags(flags);
}

/* dump the entire TLB for debug purposes */

#if 0
void
dump_tlb_all(void)
{
	int i;
	unsigned long flags;

	printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");

	save_and_cli(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
		printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
		       i, *R_TLB_HI, *R_TLB_LO);
	}
	restore_flags(flags);
}
#endif

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}
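
/* Note that the context is assigned lazily: a new mm starts out with
 * NO_CONTEXT and only gets a page_id from alloc_context() below the
 * first time switch_mm() runs on it.
 */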

/* the following functions are similar to those used in the PPC port */

static inline void
alloc_context(struct mm_struct *mm)
{
	struct mm_struct *old_mm;

	D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm));

	/* did we replace an mm? */

	old_mm = page_id_map[map_replace_ptr];

	if (old_mm) {
		/* throw out any TLB entries belonging to the mm we replace
		 * in the map
		 */
		flush_tlb_mm(old_mm);

		old_mm->context = NO_CONTEXT;
	}

	/* insert it into the page_id_map */

	mm->context = map_replace_ptr;
	page_id_map[map_replace_ptr] = mm;

	map_replace_ptr++;

	if (map_replace_ptr == INVALID_PAGEID)
		map_replace_ptr = 0;  /* wrap around; page_id 63 stays reserved */
}
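
/* With 63 allocatable page_ids (0..62), allocating a context for a 64th
 * live mm simply steals the id at map_replace_ptr, after flushing the old
 * owner's TLB entries as above.
 */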

/*
 * if needed, get a new MMU context for the mm. otherwise nothing is done.
 */

void
get_mmu_context(struct mm_struct *mm)
{
	if (mm->context == NO_CONTEXT)
		alloc_context(mm);
}

/* called in schedule() just before actually doing the switch_to */

void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk, int cpu)
{
	/* make sure we have a context */

	get_mmu_context(next);

	/* remember the pgd for the fault handlers.
	 * this is similar to the pgd register in some other CPUs.
	 * we need our own copy of it because current and active_mm
	 * might be invalid at points where we still need to dereference
	 * the pgd.
	 */

	current_pgd = next->pgd;

	/* switch context in the MMU */

	D(printk("switching mmu_context to %d (%p)\n", next->context, next));

	*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context);
}
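
/* Note that no TLB flush is needed on a context switch: entries are
 * tagged with their page_id, so entries belonging to other contexts
 * simply stop hitting once R_MMU_CONTEXT changes.
 */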

/* called by __exit_mm to destroy the used MMU context if any before
 * destroying the mm itself. this is only called when the last user of the mm
 * drops it.
 *
 * the only thing we really need to do here is mark the used PID slot
 * as empty.
 */

void
destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		D(printk("destroy_context %d (%p)\n", mm->context, mm));
		flush_tlb_mm(mm);  /* TODO: this might be redundant? */
		page_id_map[mm->context] = NULL;
		/* mm->context = NO_CONTEXT; redundant - mm will be freed */
	}
}

/* called once during VM initialization, from init.c */

void __init
tlb_init(void)
{
	int i;

	/* clear the page_id map; slot 0 is skipped since it is claimed
	 * by init_mm below
	 */

	for (i = 1; i < sizeof(page_id_map) / sizeof(page_id_map[0]); i++)
		page_id_map[i] = NULL;

	/* invalidate the entire TLB */

	flush_tlb_all();

	/* the init_mm has context 0 from the boot */

	page_id_map[0] = &init_mm;
}