/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/mipsmtregs.h>

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
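
/*
 * Illustrative only: an index-type cache op does not touch the data at the
 * address, it only uses the low bits to select a cache line, so something
 * like
 *
 *	cache_op(Index_Writeback_Inv_D, INDEX_BASE + offset);
 *
 * (offset being a placeholder for the way/index bits) always operates on a
 * valid, sign-extended KSEG0 address no matter whether the CPU indexes by
 * virtual or physical address.
 */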

#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
37 	"	.set	mips3\n\t				\n"	\
38 	"	cache	%0, %1					\n"	\
39 	"	.set	pop					\n"	\
40 	:								\
41 	: "i" (op), "R" (*(unsigned char *)(addr)))
42 
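/*
 * Illustrative use only: writing back and invalidating the primary dcache
 * line that holds the byte at addr is
 *
 *	cache_op(Hit_Writeback_Inv_D, addr);
 *
 * The "R" constraint passes the byte at addr as a memory operand, so gcc
 * emits a single offset(base) addressing mode for the cache instruction.
 */
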
#ifdef CONFIG_MIPS_MT
/*
 * Temporary hacks for SMTC debug. Optionally force single-threaded
 * execution during I-cache flushes.
 */

#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protiflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_IPROT \
	if (mt_protiflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}

#define BEGIN_MT_DPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protdflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_DPROT \
	if (mt_protdflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

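/*
 * The *_prologue / *_epilogue pairs below open and close a block around each
 * flush routine.  Under CONFIG_MIPS_MT the prologue starts a loop repeating
 * the flush mt_n_iflushes / mt_n_dflushes times and, with
 * PROTECT_CACHE_FLUSHES, optionally forces single-threaded execution via
 * BEGIN_MT_*PROT; the matching epilogue closes it.  Without CONFIG_MIPS_MT
 * they degenerate to plain braces.
 */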
#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	BEGIN_MT_IPROT							\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	END_MT_IPROT							\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	BEGIN_MT_DPROT							\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	END_MT_DPROT	 \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	cache_op(Hit_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips3			\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

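/*
 * The __ex_table entry above maps a fault at label 1 to the fixup at
 * label 2, so if the cache op faults (for instance on an unmapped user
 * address) the exception handler resumes at label 2 and the op is
 * silently skipped instead of oopsing.
 */
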
/*
 * The following are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	protected_cache_op(Hit_Invalidate_I, addr);
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  The cost is one cache line needlessly getting invalidated, so the
 * penalty is small.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

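/*
 * Each cacheNN_unroll32(base, op) macro below issues 32 cache instructions
 * at NN-byte (line size) strides, so one invocation covers NN * 32 bytes
 * starting at base.  The blast_*() generators further down step through a
 * whole way or page in chunks of lsize * 32 bytes accordingly.
 */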
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

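/*
 * In the indexed loops below, ws iterates over the cache ways: waybit is
 * set up by the cache probe to log2 of the way size, so ws_inc = 1 << waybit
 * selects the next way and ws_end = ways << waybit is one past the last way.
 * OR-ing ws into the KSEG0 address picks the (way, index) target of each
 * indexed cache op.
 */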
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize(void)			\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
	                       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void blast_##pfx##cache##lsize##_page(unsigned long page)	\
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
	                       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)

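/*
 * Example of what the instantiations above generate (illustrative only):
 * blast_dcache32() writes back and invalidates the whole 32-byte-line
 * primary dcache, blast_dcache32_page(page) handles one page by hit-type
 * ops on its virtual address, and blast_dcache32_page_indexed(page) does
 * the same by index across all ways.  Callers pick the variant matching
 * the probed line size.
 */
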
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
/* blast_inv_dcache_range, blast_inv_scache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )

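/*
 * Illustrative only: protected_blast_icache_range(start, end) invalidates
 * every icache line from start rounded down to a line boundary up to and
 * including the line holding end - 1, silently skipping any line whose
 * address faults.  The unprotected variants do the same without the
 * exception fixup.
 */
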
#endif /* _ASM_R4KCACHE_H */