/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/naca.h>
#include <asm/pmc.h>
#include <asm/cputable.h>

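/*
 * Overview: effective addresses are divided into 256MB segments
 * (assuming this era's SID_SHIFT of 28, GET_ESID(ea) is essentially
 * ea >> 28).  For example, with KERNELBASE == 0xC000000000000000:
 *
 *	esid = GET_ESID(KERNELBASE) = 0xC00000000
 *
 * Each ESID must be mapped to a VSID, either through a software-loaded
 * segment table (STAB, e.g. on POWER3-class CPUs) or through the
 * segment lookaside buffer (SLB) on CPUs with CPU_FTR_SLB (e.g. POWER4).
 */
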
inline int make_ste(unsigned long stab,
		    unsigned long esid, unsigned long vsid);
inline void make_slbe(unsigned long esid, unsigned long vsid,
		      int large);

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long esid, vsid;

	esid = GET_ESID(KERNELBASE);
	vsid = get_kernel_vsid(esid << SID_SHIFT);

	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
		/* Invalidate the entire SLB and all the ERATs */
		__asm__ __volatile__("isync" : : : "memory");
#ifndef CONFIG_PPC_ISERIES
		__asm__ __volatile__("slbmte  %0,%0"
				     : : "r" (0) : "memory");
		__asm__ __volatile__("isync; slbia; isync":::"memory");
		make_slbe(esid, vsid, 0);
#else
		__asm__ __volatile__("isync; slbia; isync":::"memory");
#endif
	} else {
		__asm__ __volatile__("isync; slbia; isync":::"memory");
		make_ste(stab, esid, vsid);
	}
}

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
inline int
make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	STE *ste, *castout_ste;
	unsigned char kp = 1;

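	/*
	 * Kp is the problem-state (user) storage key bit of the entry.
	 * Normal segments are created with Kp = 1; the shared-memory-
	 * addressing case below clears it, presumably to change the
	 * protection checking applied to user access of the shared region.
	 */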
#ifdef CONFIG_SHARED_MEMORY_ADDRESSING
	if(((esid >> SMALLOC_ESID_SHIFT) ==
	    (SMALLOC_START >> SMALLOC_EA_SHIFT)) &&
	   (current->thread.flags & PPC_FLAG_SHARED)) {
		kp = 0;
	}
#endif

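	/*
	 * The segment table is one 4KB page: 32 groups of 8 STEs,
	 * 16 bytes each.  The primary group is selected by the low
	 * five bits of the ESID (<< 7 == * 128 bytes per group), the
	 * secondary group by their ones' complement.
	 */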
	/* Search the primary group first. */
	global_entry = (esid & 0x1f) << 3;
	ste = (STE *)(stab | ((esid & 0x1f) << 7));

	/*
	 * Find an empty entry, if one exists.
	 */
	for(group = 0; group < 2; group++) {
		for(entry = 0; entry < 8; entry++, ste++) {
			if(!(ste->dw0.dw0.v)) {
				ste->dw1.dw1.vsid = vsid;
				/* Order VSID update */
				__asm__ __volatile__ ("eieio" : : : "memory");
				ste->dw0.dw0.esid = esid;
				ste->dw0.dw0.v  = 1;
				ste->dw0.dw0.kp = kp;
				/* Order update     */
				__asm__ __volatile__ ("sync" : : : "memory");

				return(global_entry | entry);
			}
		}
		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
	}

	/*
	 * Could not find an empty entry, so pick one with a round-robin
	 * selection.  Search all entries in the two groups.  Note that the
	 * first time we get here, we start with entry 1 so the initializer
	 * can be common with the SLB castout code.
	 */

	/* This assumes we never cast out when initializing the stab. */
	PMC_SW_PROCESSOR(stab_capacity_castouts);

	castout_entry = get_paca()->xStab_data.next_round_robin;
	for(i = 0; i < 16; i++) {
		if(castout_entry < 8) {
			global_entry = (esid & 0x1f) << 3;
			ste = (STE *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

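		/*
		 * The top four bits of the 36-bit ESID are the region ID,
		 * so an entry may be evicted if it maps a user address
		 * (upper bits zero) or any kernel segment other than the
		 * bolted one at KERNELBASE (whose ESID has all of its low
		 * 32 bits clear).
		 */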
		if((((castout_ste->dw0.dw0.esid) >> 32) == 0) ||
		   (((castout_ste->dw0.dw0.esid) & 0xffffffff) > 0)) {
			/* Found an entry to cast out.  It is either a user */
			/* region, or a secondary kernel segment.           */
			break;
		}

		castout_entry = (castout_entry + 1) & 0xf;
	}

	get_paca()->xStab_data.next_round_robin = (castout_entry + 1) & 0xf;

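	/*
	 * Replace the victim entry carefully: invalidate it first, sync
	 * so no new access is translated through the stale entry, write
	 * the new VSID, then (ordered by eieio) the new ESID with the
	 * valid bit, and finally slbie the old EA so any copy cached in
	 * the SLB/ERAT is discarded.
	 */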
	/* Modify the old entry to the new value. */

	/* Force previous translations to complete. DRENG */
	__asm__ __volatile__ ("isync" : : : "memory");

	castout_ste->dw0.dw0.v = 0;
	__asm__ __volatile__ ("sync" : : : "memory");    /* Order update */
	castout_ste->dw1.dw1.vsid = vsid;
	__asm__ __volatile__ ("eieio" : : : "memory");   /* Order update */
	old_esid = castout_ste->dw0.dw0.esid;
	castout_ste->dw0.dw0.esid = esid;
	castout_ste->dw0.dw0.v  = 1;
	castout_ste->dw0.dw0.kp = kp;
	__asm__ __volatile__ ("slbie  %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	__asm__ __volatile__ ("sync" : : : "memory");

	return(global_entry | (castout_entry & 0x7));
}

/*
 * Create a segment lookaside buffer (SLB) entry for the given
 * esid/vsid pair.
 */
inline void make_slbe(unsigned long esid, unsigned long vsid, int large)
{
	unsigned long entry, castout_entry;
	union {
		unsigned long word0;
		slb_dword0    data;
	} esid_data;
	union {
		unsigned long word0;
		slb_dword1    data;
	} vsid_data;
	unsigned char kp = 1;

#ifdef CONFIG_SHARED_MEMORY_ADDRESSING
	if(((esid >> SMALLOC_ESID_SHIFT) ==
	    (SMALLOC_START >> SMALLOC_EA_SHIFT)) &&
	   (current->thread.flags & PPC_FLAG_SHARED)) {
		kp = 0;
	}
#endif

	/*
	 * Find an empty entry, if one exists.
	 */
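	/*
	 * naca->slb_size is the number of SLB entries on this CPU
	 * (e.g. 64 on POWER4).  slbmfee reads back the ESID half of an
	 * SLB entry, including its valid bit, so a clear valid bit
	 * marks a free slot.
	 */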
	for(entry = 0; entry < naca->slb_size; entry++) {
		__asm__ __volatile__("slbmfee  %0,%1"
				     : "=r" (esid_data) : "r" (entry));
		if(!esid_data.data.v) {
			/*
			 * Write the new SLB entry.
			 */
			vsid_data.word0 = 0;
			vsid_data.data.vsid = vsid;
			vsid_data.data.kp = kp;
			if (large)
				vsid_data.data.l = 1;

			esid_data.word0 = 0;
			esid_data.data.esid = esid;
			esid_data.data.v = 1;
			esid_data.data.index = entry;

			/* slbie not needed as no previous mapping existed. */
			/* Order update  */
			__asm__ __volatile__ ("isync" : : : "memory");
			__asm__ __volatile__ ("slbmte  %0,%1"
					      : : "r" (vsid_data),
					      "r" (esid_data));
			/* Order update  */
			__asm__ __volatile__ ("isync" : : : "memory");
			return;
		}
	}

	/*
	 * Could not find an empty entry, so pick one with a round-robin
	 * selection.
	 */

	PMC_SW_PROCESSOR(stab_capacity_castouts);

	/*
	 * Never cast out the segment for our own stack. Since we
	 * don't invalidate the ERAT we could have a valid translation
	 * for our stack during the first part of exception exit,
	 * which gets invalidated due to a tlbie from another cpu at a
	 * non-recoverable point (after setting srr0/1) - Anton
	 */

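	/*
	 * The wrap below goes back to entry 1, never 0, so the bolted
	 * kernel segment installed in SLB entry 0 by stab_initialize()
	 * is never a castout victim.
	 */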
	castout_entry = get_paca()->xStab_data.next_round_robin;
	do {
		entry = castout_entry;
		castout_entry++;
		if (castout_entry >= naca->slb_size)
			castout_entry = 1;
		asm volatile("slbmfee  %0,%1" : "=r" (esid_data) : "r" (entry));
	} while (esid_data.data.esid == GET_ESID((unsigned long)_get_SP()));

	get_paca()->xStab_data.next_round_robin = castout_entry;

	/* We're executing this code on the interrupt stack, so the
	 * above code might pick the kernel stack segment as the victim.
	 *
	 * Because of this, we need to invalidate the old entry. We need
	 * to do this since it'll otherwise be in the ERAT and might come
	 * back and haunt us if it gets thrown out of there at the wrong
	 * time (i.e. similar to throwing out our own stack above).
	 */

	esid_data.data.v = 0;
	__asm__ __volatile__("slbie  %0" : : "r" (esid_data));

	/*
	 * Write the new SLB entry.
	 */
	vsid_data.word0 = 0;
	vsid_data.data.vsid = vsid;
	vsid_data.data.kp = kp;
	if (large)
		vsid_data.data.l = 1;

	esid_data.word0 = 0;
	esid_data.data.esid = esid;
	esid_data.data.v = 1;
	esid_data.data.index = entry;

	__asm__ __volatile__ ("isync" : : : "memory");   /* Order update */
	__asm__ __volatile__ ("slbmte  %0,%1"
			      : : "r" (vsid_data), "r" (esid_data));
	__asm__ __volatile__ ("isync" : : : "memory");   /* Order update */
}

/*
 * Allocate a segment table entry for the given ea.
 */
int ste_allocate(unsigned long ea, unsigned long trap)
{
	unsigned long vsid, esid;
	int kernel_segment = 0;

	PMC_SW_PROCESSOR(stab_faults);

	/* Check for invalid effective addresses. */
	if (!IS_VALID_EA(ea)) {
		return 1;
	}

	/* Kernel or user address? */
	if (REGION_ID(ea)) {
		kernel_segment = 1;
		vsid = get_kernel_vsid(ea);
	} else {
		struct mm_struct *mm = current->mm;
		if (mm) {
			vsid = get_vsid(mm->context, ea);
		} else {
			return 1;
		}
	}

#ifdef CONFIG_SHARED_MEMORY_ADDRESSING
	/* Shared segments might be mapped into a user task's address
	 * space, so we need to add them to the list of entries to flush.
	 */
	if ((ea >> SMALLOC_EA_SHIFT) == (SMALLOC_START >> SMALLOC_EA_SHIFT)) {
		kernel_segment = 0;
	}
#endif

	esid = GET_ESID(ea);
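	/*
	 * Traps 0x380 and 0x480 are the data and instruction segment
	 * (SLB miss) exception vectors, so on those we load the SLB;
	 * anything else goes through the segment table.
	 */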
	if (trap == 0x380 || trap == 0x480) {
#ifndef CONFIG_PPC_ISERIES
		if (REGION_ID(ea) == KERNEL_REGION_ID)
			make_slbe(esid, vsid, 1);
		else
#endif
			make_slbe(esid, vsid, 0);
	} else {
		unsigned char top_entry, stab_entry, *segments;

		stab_entry = make_ste(get_paca()->xStab_data.virt, esid, vsid);
		PMC_SW_PROCESSOR_A(stab_entry_use, stab_entry & 0xf);

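		/*
		 * Remember user STE indexes in the per-CPU xSegments
		 * cache so flush_stab() can invalidate just those
		 * entries: segments[0] is the count, segments[1..n] the
		 * STE indexes, and 0xff in segments[0] means the cache
		 * overflowed and the whole table must be scanned.
		 */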
		segments = get_paca()->xSegments;
		top_entry = segments[0];
		if(!kernel_segment && top_entry < (STAB_CACHE_SIZE - 1)) {
			top_entry++;
			segments[top_entry] = stab_entry;
			if(top_entry == STAB_CACHE_SIZE - 1) top_entry = 0xff;
			segments[0] = top_entry;
		}
	}

	return(0);
}

/*
 * Flush all entries from the segment table of the current processor.
 * Kernel and bolted entries are not removed, as we cannot tolerate
 * faults on those addresses.
 */

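/*
 * STAB_PRESSURE looks like a compile-time stress knob: setting it to 1
 * makes every flush invalidate all non-bolted entries rather than just
 * the ones recorded in the xSegments cache.
 */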
#define STAB_PRESSURE 0

void flush_stab(void)
{
	STE *stab = (STE *) get_paca()->xStab_data.virt;
	unsigned char *segments = get_paca()->xSegments;
	unsigned long flags, i;

	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
		PMC_SW_PROCESSOR(stab_invalidations);

		__save_and_cli(flags);
		__asm__ __volatile__("isync; slbia; isync":::"memory");
		__restore_flags(flags);
	} else {
		unsigned long entry;
		STE *ste;

		/* Force previous translations to complete. DRENG */
		__asm__ __volatile__ ("isync" : : : "memory");

		__save_and_cli(flags);
		if(segments[0] != 0xff && !STAB_PRESSURE) {
			for(i = 1; i <= segments[0]; i++) {
				ste = stab + segments[i];
				ste->dw0.dw0.v = 0;
				PMC_SW_PROCESSOR(stab_invalidations);
			}
		} else {
			/* Invalidate all entries. */
			ste = stab;

			/* Never flush the first entry. */
			ste += 1;
			for(entry = 1;
			    entry < (PAGE_SIZE / sizeof(STE));
			    entry++, ste++) {
				unsigned long ea;
				ea = ste->dw0.dw0.esid << SID_SHIFT;
				if (STAB_PRESSURE || (!REGION_ID(ea)) ||
				    (REGION_ID(ea) == VMALLOC_REGION_ID)) {
					ste->dw0.dw0.v = 0;
					PMC_SW_PROCESSOR(stab_invalidations);
				}
			}
		}

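		/*
		 * One zero word resets the cache count in segments[0]
		 * (and clears the first seven cached indexes with it).
		 */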
		*((unsigned long *)segments) = 0;
		__restore_flags(flags);

		/* Invalidate the SLB. */
		/* Force invals to complete. */
		__asm__ __volatile__ ("sync" : : : "memory");
		/* Flush the SLB.            */
		__asm__ __volatile__ ("slbia" : : : "memory");
		/* Force flush to complete.  */
		__asm__ __volatile__ ("sync" : : : "memory");
	}
}