/* $Id: cache-sh4.c,v 1.1.1.1.2.8 2003/07/09 09:59:30 trent Exp $
 *
 *  linux/arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

#define CCR		 0xff00001c	/* Address of Cache Control Register */

#define CCR_CACHE_OCE	0x0001	/* Operand Cache Enable */
#define CCR_CACHE_WT	0x0002	/* Write-Through (for P0,U0,P3) (else write-back) */
#define CCR_CACHE_CB	0x0004	/* Copy-Back (for P1) (else write-through) */
#define CCR_CACHE_OCI	0x0008	/* OC Invalidate */
#define CCR_CACHE_ORA	0x0020	/* OC RAM Mode */
#define CCR_CACHE_OIX	0x0080	/* OC Index Enable */
#define CCR_CACHE_ICE	0x0100	/* Instruction Cache Enable */
#define CCR_CACHE_ICI	0x0800	/* IC Invalidate */
#define CCR_CACHE_IIX	0x8000	/* IC Index Enable */

#if defined(CONFIG_SH_CACHE_ASSOC)
#define CCR_CACHE_EMODE 0x80000000
/* CCR setup for associative mode: 16k+32k 2-way, P1 copy-back, enable */
#define CCR_CACHE_VAL	(CCR_CACHE_EMODE|CCR_CACHE_ENABLE|CCR_CACHE_CB)
#else
/* Default CCR setup: 8k+16k-byte cache, P1 copy-back, enable */
#define CCR_CACHE_VAL	(CCR_CACHE_ENABLE|CCR_CACHE_CB)
#endif

#define CCR_CACHE_INIT	(CCR_CACHE_VAL|CCR_CACHE_OCI|CCR_CACHE_ICI)
#define CCR_CACHE_ENABLE (CCR_CACHE_OCE|CCR_CACHE_ICE)

#define CACHE_IC_ADDRESS_ARRAY 0xf0000000
#define CACHE_OC_ADDRESS_ARRAY 0xf4000000
#define CACHE_VALID	  1
#define CACHE_UPDATED	  2
#define CACHE_ASSOC	  8

#define CACHE_OC_WAY_SHIFT       14
#define CACHE_IC_WAY_SHIFT       13
#define CACHE_OC_ENTRY_SHIFT      5
#define CACHE_IC_ENTRY_SHIFT      5
#define CACHE_OC_ENTRY_MASK		0x3fe0
#define CACHE_OC_ENTRY_PHYS_MASK	0x0fe0
#define CACHE_IC_ENTRY_MASK		0x1fe0
#define CACHE_IC_NUM_ENTRIES	256
#define CACHE_OC_NUM_ENTRIES	512

#define CACHE_NUM_WAYS 2
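
/*
 * Derived from the constants above: each operand-cache way is
 * 512 entries * 32 bytes/line = 16KB, and each instruction-cache way is
 * 256 entries * 32 bytes = 8KB.  The 2-way (EMODE) configuration therefore
 * gives the 16k IC + 32k OC mentioned above, and the default single-way
 * setup gives 8k + 16k.
 */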

static void __init
detect_cpu_and_cache_system(void)
{
#ifdef CONFIG_CPU_SUBTYPE_ST40
	cpu_data->type = CPU_ST40;
#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751)
	cpu_data->type = CPU_SH7750;
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
	cpu_data->type = CPU_SH4202;
#else
#error Unknown SH4 CPU type
#endif
}

void __init cache_init(void)
{
	unsigned long ccr;

	detect_cpu_and_cache_system();

	jump_to_P2();
	ccr = ctrl_inl(CCR);
	if (ccr & CCR_CACHE_ENABLE) {
		/*
		 * XXX: Should check RA here.
		 * If RA was 1, we only need to flush half of the caches.
		 */
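		/*
		 * The CCR_CACHE_INIT write further down invalidates both
		 * caches (OCI|ICI) without writing dirty data back, so any
		 * dirty operand-cache lines left behind (e.g. by the boot
		 * loader) are written back first by clearing their U bit
		 * through the memory-mapped address array.
		 */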
		unsigned long addr, data;

#if defined(CONFIG_SH_CACHE_ASSOC)
		unsigned long way;

		for (way = 0; way < CACHE_NUM_WAYS; ++way) {
			unsigned long waybit = way << CACHE_OC_WAY_SHIFT;

			for (addr = CACHE_OC_ADDRESS_ARRAY + waybit;
			     addr < (CACHE_OC_ADDRESS_ARRAY + waybit +
				     (CACHE_OC_NUM_ENTRIES <<
				      CACHE_OC_ENTRY_SHIFT));
			     addr += (1 << CACHE_OC_ENTRY_SHIFT)) {

				data = ctrl_inl(addr);

				if ((data & (CACHE_UPDATED|CACHE_VALID))
				    == (CACHE_UPDATED|CACHE_VALID))
					ctrl_outl(data & ~CACHE_UPDATED, addr);
			}
		}
#else
		for (addr = CACHE_OC_ADDRESS_ARRAY;
		     addr < (CACHE_OC_ADDRESS_ARRAY+
			     (CACHE_OC_NUM_ENTRIES << CACHE_OC_ENTRY_SHIFT));
		     addr += (1 << CACHE_OC_ENTRY_SHIFT)) {
			data = ctrl_inl(addr);
			if ((data & (CACHE_UPDATED|CACHE_VALID))
			    == (CACHE_UPDATED|CACHE_VALID))
				ctrl_outl(data & ~CACHE_UPDATED, addr);
		}
#endif
	}

	ctrl_outl(CCR_CACHE_INIT, CCR);
	back_to_P1();
}

/*
 * SH-4 has a virtually indexed, physically tagged cache.
 */
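
/*
 * In the default configuration, 4KB pages and a 16KB direct-mapped operand
 * cache mean that virtual address bits 12-13 select the cache "colour", so
 * the same physical page can occupy up to four different sets of lines
 * depending on the virtual address it is mapped at (this is what the
 * CACHE_ALIAS mask used below expresses).  For example, a U0 address of
 * 0x00402000 and its P1 mirror 0x8c000000 differ in those bits and would
 * index different lines for the same data.
 */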

static struct semaphore p3map_sem[4];

void __init p3_cache_init(void)
{
	/* In ioremap.c */
	extern int remap_area_pages(unsigned long address,
				    unsigned long phys_addr,
				    unsigned long size, unsigned long flags);

	if (remap_area_pages(P3SEG, 0, PAGE_SIZE*4, _PAGE_CACHABLE))
		panic("%s failed.", __FUNCTION__);
	sema_init (&p3map_sem[0], 1);
	sema_init (&p3map_sem[1], 1);
	sema_init (&p3map_sem[2], 1);
	sema_init (&p3map_sem[3], 1);
}
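
/*
 * p3_cache_init() above sets up four page-table entries at P3SEG, one per
 * cache colour.  clear_user_page() and copy_user_page() point the matching
 * entry at the target physical page so the kernel can touch it through a
 * virtual address with the same colour as the user mapping; p3map_sem[n]
 * serialises use of the window for colour n.
 */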

/*
 * Write back the dirty D-cache lines, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Invalidate the D-cache lines of the region; no write-back.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
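
/*
 * Typical use of the three region primitives above (illustrative sketch,
 * not taken from this file): before a device DMAs *from* a kernel buffer,
 * write the dirty lines out with __flush_wback_region(buf, len); before the
 * CPU reads data a device has DMA'd *into* a buffer, drop any stale lines
 * with __flush_invalidate_region(buf, len); __flush_purge_region() does
 * both at once for buffers that are handed back and forth.
 */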

void __flush_icache_all(void)
{
	unsigned long flags;

	save_and_cli(flags);
	jump_to_P2();
	ctrl_outl(CCR_CACHE_VAL|CCR_CACHE_ICI, CCR);
	back_to_P1();
	restore_flags(flags);
}

/*
 * Write back the D-cache range, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for the signal trampoline.
 */
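/*
 * Note: only a single cache line is handled here; presumably the trampoline
 * (a couple of instructions written by the signal setup code) fits within
 * one 32-byte line at ADDR.
 */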
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY | (v & CACHE_IC_ENTRY_MASK);
	save_and_cli(flags);
	jump_to_P2();
	ctrl_outl(0, index);	/* Clear out Valid-bit */

#if defined(CONFIG_SH_CACHE_ASSOC)
	/* Must invalidate both ways for associative cache */
	ctrl_outl(0, index | (1 << CACHE_IC_WAY_SHIFT));
#endif

	back_to_P1();
	restore_flags(flags);
}

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags;
	extern void __flush_cache_4096(unsigned long addr, unsigned long phys,
				       unsigned long exec_offset);

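	/*
	 * __flush_cache_4096 (assembly, defined outside this file) walks one
	 * 4KB slice of the cache through the address array in associative
	 * mode (CACHE_ASSOC is set in the address).  The 0x20000000
	 * exec_offset below is presumably the P1->P2 displacement, so that
	 * the flush loop executes from the uncached P2 mirror on CPUs which
	 * must issue cache operations from P2.
	 */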
#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_ST40) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
	if (start >= CACHE_OC_ADDRESS_ARRAY) {
		/*
		 * SH7751 and ST40 have no restriction on where cache
		 * operations are issued from (the SH7750 must do them
		 * from the P2 area).
		 */
		__flush_cache_4096(start | CACHE_ASSOC, phys | 0x80000000, 0);
	} else
#endif
	{
		save_and_cli(flags);
		__flush_cache_4096(start | CACHE_ASSOC, phys | 0x80000000, 0x20000000);
		restore_flags(flags);
	}
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));

		/* Flush every alias colour, i.e. the whole D-cache */
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY,          phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys);
	}
}

static inline void flush_icache_all(void)
{
	unsigned long flags;

	save_and_cli(flags);
	jump_to_P2();
	/* Flush I-cache */
	ctrl_outl(CCR_CACHE_VAL|CCR_CACHE_ICI, CCR);
	back_to_P1();
	restore_flags(flags);
}

void flush_cache_all(void)
{
	extern void __flush_dcache_all(void);

	__flush_dcache_all();
	flush_icache_all();
}

void flush_cache_mm(struct mm_struct *mm)
{
	/* Is there any good way? */
	/* XXX: possibly call flush_cache_range for each vm area */
	/*
	 * FIXME: Really, the optimal solution here would be to flush out
	 * individual lines created by the specified context, but this isn't
	 * feasible for a number of architectures (such as MIPS, and some
	 * SPARC) .. is this possible for SuperH?
	 *
	 * In the meantime, we'll just flush all of the caches.. this
	 * seems to be the simplest way to avoid at least a few wasted
	 * cache flushes. -Lethal
	 */
	flush_cache_all();
}

static void __flush_cache_page(struct vm_area_struct *vma,
			       unsigned long address,
			       unsigned long phys)
{
	/* We only need to flush the D-cache when there is an alias */
	if ((address^phys) & CACHE_ALIAS) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS),
			phys);
	}

	if (vma->vm_flags & VM_EXEC)
		/* Loop 4K (half) of the I-cache */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & 0x1000),
			phys);
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end)
{
	extern void flush_cache_4096_all(unsigned long start);

	unsigned long p = start & PAGE_MASK;
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	unsigned long phys;
	unsigned long d = 0;
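
	/*
	 * d collects which of the four cache colours need flushing: while
	 * walking the page tables for [start, end), bit k is set when either
	 * the user address or the physical address of an aliasing page has
	 * colour k (page offset k * 0x1000).  Once all four bits are set
	 * (0x0f) nothing more can be learnt, so the walk exits early.
	 */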

	dir = pgd_offset(mm, p);
	pmd = pmd_offset(dir, p);

	do {
		if (pmd_none(*pmd) || pmd_bad(*pmd)) {
			p &= ~((1 << PMD_SHIFT) -1);
			p += (1 << PMD_SHIFT);
			pmd++;
			continue;
		}
		pte = pte_offset(pmd, p);
		do {
			entry = *pte;
			if ((pte_val(entry) & _PAGE_PRESENT)) {
				phys = pte_val(entry)&PTE_PHYS_MASK;
				if ((p^phys) & CACHE_ALIAS) {
					d |= 1 << ((p & CACHE_ALIAS)>>12);
					d |= 1 << ((phys & CACHE_ALIAS)>>12);
					if (d == 0x0f)
						goto loop_exit;
				}
			}
			pte++;
			p += PAGE_SIZE;
		} while (p < end && (unsigned long)pte & PAGE_MASK);
		pmd++;
	} while (p < end);
 loop_exit:
	if (d & 1)
		flush_cache_4096_all(0);
	if (d & 2)
		flush_cache_4096_all(0x1000);
	if (d & 4)
		flush_cache_4096_all(0x2000);
	if (d & 8)
		flush_cache_4096_all(0x3000);
	flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	unsigned long phys;

	dir = pgd_offset(vma->vm_mm, address);
	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;
	pte = pte_offset(pmd, address);
	entry = *pte;
	if (!(pte_val(entry) & _PAGE_PRESENT))
		return;

	phys = pte_val(entry)&PTE_PHYS_MASK;
	__flush_cache_page(vma, address, phys);
}

/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 */
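/*
 * If the kernel's P1 view of the page and the user's U0 mapping share a
 * cache colour, the page can be cleared directly through TO.  Otherwise the
 * page is temporarily mapped at the P3 window whose colour matches ADDRESS,
 * so the clear goes through the same cache lines the user mapping will hit
 * and no stale alias is left behind.
 */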
void clear_user_page(void *to, unsigned long address)
{
	struct page *page = virt_to_page(to);

	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED |
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = mk_pte_phys(phys_addr, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		save_and_cli(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		restore_flags(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__clear_user_page((void *)p3_addr, to);
		pte_clear(pte);
		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
	}
}

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 */
void copy_user_page(void *to, void *from, unsigned long address)
{
	struct page *page = virt_to_page(to);

	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED |
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = mk_pte_phys(phys_addr, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		save_and_cli(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		restore_flags(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(pte);
		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
	}
}


/****************************************************************************/

#if defined(CONFIG_SH_CACHE_ASSOC)
/*
 * It is not possible to use the approach implemented in clear_page.S when we
 * are in 2-way set associative mode, as it would only clear half the cache
 * in general.  For the moment we simply implement it as an iteration through
 * the cache, flushing both ways; this in itself is not optimal, as the
 * interrupt latency is probably longer than necessary!
 *
 * benedict.gaster.superh.com
 */
void __flush_dcache_all(void)
{
	unsigned long flags;
	unsigned long addr;
	unsigned long way;

	save_and_cli(flags);
#if !defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
	jump_to_P2();
#endif
	/* Clear the U and V bits for each line and each way. On SH-4, this
	 * causes write-back if both U and V are set before the address write.
	 */
	for (way = 0; way <= 1; ++way) {
		unsigned long waybit = way << CACHE_OC_WAY_SHIFT;

		/* Loop all the D-cache */
		for (addr = CACHE_OC_ADDRESS_ARRAY + waybit;
		     addr < (CACHE_OC_ADDRESS_ARRAY + waybit
			     + (CACHE_OC_NUM_ENTRIES << CACHE_OC_ENTRY_SHIFT));
		     addr += (1 << CACHE_OC_ENTRY_SHIFT)) {
			ctrl_outl(0, addr);
		}
	}

#if !defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
	back_to_P1();
#endif
	restore_flags(flags);
}

void flush_cache_4096_all(unsigned long start)
{
	unsigned long phys = PHYSADDR(start);

	/* Loop all the D-cache */
	flush_cache_4096(CACHE_OC_ADDRESS_ARRAY, phys);
}
#endif