1 /*
2  * r2300.c: R2000 and R3000 specific mmu/cache code.
3  *
4  * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
5  *
6  * with a lot of changes to make this thing work for R3000s
7  * Tx39XX R4k style caches added. HK
8  * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
9  * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
10  */
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/mm.h>
15 
16 #include <asm/cacheops.h>
17 #include <asm/page.h>
18 #include <asm/pgtable.h>
19 #include <asm/mmu_context.h>
20 #include <asm/system.h>
21 #include <asm/isadep.h>
22 #include <asm/io.h>
23 #include <asm/bootinfo.h>
24 #include <asm/cpu.h>
25 
/* For R3000 cores with R4000 style caches */
static unsigned long icache_size, dcache_size;		/* Size in bytes */

/* Pulled in after the size variables so the blast helpers can see them. */
#include <asm/r4kcache.h>

extern int r3k_have_wired_reg;	/* in r3k-tlb.c */
32 
/*
 * This sequence is required to ensure icache is disabled immediately.
 * The taken branch (with noreorder so nothing is scheduled around it)
 * forces instruction fetch to restart after the CONF write above it,
 * discarding anything still streaming from the just-disabled icache.
 */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
	".set    push\n\t" \
	".set    noreorder\n\t" \
	"b       1f\n\t" \
	"nop\n\t" \
	"1:\n\t" \
	".set pop" \
	)
43 
44 /* TX39H-style cache flush routines. */
tx39h_flush_icache_all(void)45 static void tx39h_flush_icache_all(void)
46 {
47 	unsigned long start = KSEG0;
48 	unsigned long end = (start + icache_size);
49 	unsigned long flags, config;
50 
51 	/* disable icache (set ICE#) */
52 	local_irq_save(flags);
53 	config = read_c0_conf();
54 	write_c0_conf(config & ~TX39_CONF_ICE);
55 	TX39_STOP_STREAMING();
56 
57 	/* invalidate icache */
58 	while (start < end) {
59 		cache16_unroll32(start, Index_Invalidate_I);
60 		start += 0x200;
61 	}
62 
63 	write_c0_conf(config);
64 	local_irq_restore(flags);
65 }
66 
tx39h_dma_cache_wback_inv(unsigned long addr,unsigned long size)67 static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
68 {
69 	unsigned long end, a;
70 	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
71 
72 	/* Catch bad driver code */
73 	BUG_ON(size == 0);
74 
75 	iob();
76 	a = addr & ~(dc_lsize - 1);
77 	end = (addr + size - 1) & ~(dc_lsize - 1);
78 	while (1) {
79 		invalidate_dcache_line(a); /* Hit_Invalidate_D */
80 		if (a == end) break;
81 		a += dc_lsize;
82 	}
83 }
84 
85 
86 /* TX39H2,TX39H3 */
tx39_blast_dcache_page(unsigned long addr)87 static inline void tx39_blast_dcache_page(unsigned long addr)
88 {
89 	if (current_cpu_data.cputype != CPU_TX3912)
90 		blast_dcache16_page(addr);
91 }
92 
/* Index-flush one page worth of dcache lines (no TLB translation needed). */
static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
	blast_dcache16_page_indexed(addr);
}
97 
/* Writeback and invalidate the entire dcache by index. */
static inline void tx39_blast_dcache(void)
{
	blast_dcache16();
}
102 
/*
 * Blast one page out of the icache.  The icache must be disabled
 * (CONF.ICE cleared) around the cache ops, and irqs held off so no
 * interrupt runs while the icache is off.
 */
static inline void tx39_blast_icache_page(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page(addr);
	/* restore CONF to re-enable the icache */
	write_c0_conf(config);
	local_irq_restore(flags);
}
115 
/*
 * Index-flush one page worth of icache lines.  Same ICE-off / irq-off
 * discipline as tx39_blast_icache_page.
 */
static inline void tx39_blast_icache_page_indexed(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page_indexed(addr);
	/* restore CONF to re-enable the icache */
	write_c0_conf(config);
	local_irq_restore(flags);
}
128 
/*
 * Invalidate the entire icache by index, with the icache disabled and
 * irqs off for the duration.
 */
static inline void tx39_blast_icache(void)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16();
	/* restore CONF to re-enable the icache */
	write_c0_conf(config);
	local_irq_restore(flags);
}
141 
tx39_flush_cache_all(void)142 static inline void tx39_flush_cache_all(void)
143 {
144 	if (!cpu_has_dc_aliases)
145 		return;
146 
147 	tx39_blast_dcache();
148 	tx39_blast_icache();
149 }
150 
/* Unconditionally flush both caches (no alias check, unlike
 * tx39_flush_cache_all). */
static inline void tx39___flush_cache_all(void)
{
	tx39_blast_dcache();
	tx39_blast_icache();
}
156 
tx39_flush_cache_mm(struct mm_struct * mm)157 static void tx39_flush_cache_mm(struct mm_struct *mm)
158 {
159 	if (!cpu_has_dc_aliases)
160 		return;
161 
162 	if (cpu_context(smp_processor_id(), mm) != 0) {
163 		tx39_flush_cache_all();
164 	}
165 }
166 
tx39_flush_cache_range(struct mm_struct * mm,unsigned long start,unsigned long end)167 static void tx39_flush_cache_range(struct mm_struct *mm,
168 				    unsigned long start,
169 				    unsigned long end)
170 {
171 	if (!cpu_has_dc_aliases)
172 		return;
173 
174 	if (cpu_context(smp_processor_id(), mm) != 0) {
175 		tx39_blast_dcache();
176 		tx39_blast_icache();
177 	}
178 }
179 
/*
 * Flush a single user page from the caches.  Walks the page tables to
 * check the page is actually present, then uses hit-type (virtual)
 * flushes when the page belongs to the current address space, falling
 * back to indexed flushes otherwise.
 */
static void tx39_flush_cache_page(struct vm_area_struct *vma,
				   unsigned long page)
{
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have
	 * gotten this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		/* dcache flush needed for aliases, or to push out code
		 * before the icache flush below. */
		if (cpu_has_dc_aliases || exec)
			tx39_blast_dcache_page(page);
		if (exec)
			tx39_blast_icache_page(page);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.  Map the user address onto the matching
	 * KSEG0 cache index.
	 */
	page = (KSEG0 + (page & (dcache_size - 1)));
	if (cpu_has_dc_aliases || exec)
		tx39_blast_dcache_page_indexed(page);
	if (exec)
		tx39_blast_icache_page_indexed(page);
}
233 
/* Flush one page of data cache at a kernel virtual address. */
static void tx39_flush_data_cache_page(unsigned long addr)
{
	tx39_blast_dcache_page(addr);
}
238 
tx39_flush_icache_range(unsigned long start,unsigned long end)239 static void tx39_flush_icache_range(unsigned long start, unsigned long end)
240 {
241 	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
242 	unsigned long addr, aend;
243 
244 	if (end - start > dcache_size)
245 		tx39_blast_dcache();
246 	else {
247 		addr = start & ~(dc_lsize - 1);
248 		aend = (end - 1) & ~(dc_lsize - 1);
249 
250 		while (1) {
251 			/* Hit_Writeback_Inv_D */
252 			protected_writeback_dcache_line(addr);
253 			if (addr == aend)
254 				break;
255 			addr += dc_lsize;
256 		}
257 	}
258 
259 	if (end - start > icache_size)
260 		tx39_blast_icache();
261 	else {
262 		unsigned long flags, config;
263 		addr = start & ~(dc_lsize - 1);
264 		aend = (end - 1) & ~(dc_lsize - 1);
265 		/* disable icache (set ICE#) */
266 		local_irq_save(flags);
267 		config = read_c0_conf();
268 		write_c0_conf(config & ~TX39_CONF_ICE);
269 		TX39_STOP_STREAMING();
270 		while (1) {
271 			/* Hit_Invalidate_I */
272 			protected_flush_icache_line(addr);
273 			if (addr == aend)
274 				break;
275 			addr += dc_lsize;
276 		}
277 		write_c0_conf(config);
278 		local_irq_restore(flags);
279 	}
280 }
281 
282 /*
283  * Ok, this seriously sucks.  We use them to flush a user page but don't
284  * know the virtual address, so we have to blast away the whole icache
285  * which is significantly more expensive than the real thing.  Otoh we at
286  * least know the kernel address of the page so we can flush it
287  * selectivly.
288  */
tx39_flush_icache_page(struct vm_area_struct * vma,struct page * page)289 static void tx39_flush_icache_page(struct vm_area_struct *vma, struct page *page)
290 {
291 	unsigned long addr;
292 	/*
293 	 * If there's no context yet, or the page isn't executable, no icache
294 	 * flush is needed.
295 	 */
296 	if (!(vma->vm_flags & VM_EXEC))
297 		return;
298 
299 	addr = (unsigned long) page_address(page);
300 	tx39_blast_dcache_page(addr);
301 
302 	/*
303 	 * We're not sure of the virtual address(es) involved here, so
304 	 * we have to flush the entire I-cache.
305 	 */
306 	tx39_blast_icache();
307 }
308 
tx39_dma_cache_wback_inv(unsigned long addr,unsigned long size)309 static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
310 {
311 	unsigned long end, a;
312 
313 	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
314 		end = addr + size;
315 		do {
316 			tx39_blast_dcache_page(addr);
317 			addr += PAGE_SIZE;
318 		} while(addr != end);
319 	} else if (size > dcache_size) {
320 		tx39_blast_dcache();
321 	} else {
322 		unsigned long dc_lsize = current_cpu_data.dcache.linesz;
323 		a = addr & ~(dc_lsize - 1);
324 		end = (addr + size - 1) & ~(dc_lsize - 1);
325 		while (1) {
326 			flush_dcache_line(a); /* Hit_Writeback_Inv_D */
327 			if (a == end) break;
328 			a += dc_lsize;
329 		}
330 	}
331 }
332 
tx39_dma_cache_inv(unsigned long addr,unsigned long size)333 static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
334 {
335 	unsigned long end, a;
336 
337 	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
338 		end = addr + size;
339 		do {
340 			tx39_blast_dcache_page(addr);
341 			addr += PAGE_SIZE;
342 		} while(addr != end);
343 	} else if (size > dcache_size) {
344 		tx39_blast_dcache();
345 	} else {
346 		unsigned long dc_lsize = current_cpu_data.dcache.linesz;
347 		a = addr & ~(dc_lsize - 1);
348 		end = (addr + size - 1) & ~(dc_lsize - 1);
349 		while (1) {
350 			invalidate_dcache_line(a); /* Hit_Invalidate_D */
351 			if (a == end) break;
352 			a += dc_lsize;
353 		}
354 	}
355 }
356 
/*
 * Flush the single cache line holding a signal trampoline: write the
 * dcache line back so the instructions reach memory, then invalidate
 * the matching icache line under the usual ICE-off / irq-off sequence.
 */
static void tx39_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long config;
	unsigned long flags;

	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));

	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	/* restore CONF to re-enable the icache */
	write_c0_conf(config);
	local_irq_restore(flags);
}
375 
tx39_probe_cache(void)376 static __init void tx39_probe_cache(void)
377 {
378 	unsigned long config;
379 
380 	config = read_c0_conf();
381 
382 	icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
383 				  TX39_CONF_ICS_SHIFT));
384 	dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
385 				  TX39_CONF_DCS_SHIFT));
386 
387 	current_cpu_data.icache.linesz = 16;
388 	switch (current_cpu_data.cputype) {
389 	case CPU_TX3912:
390 		current_cpu_data.icache.ways = 1;
391 		current_cpu_data.dcache.ways = 1;
392 		current_cpu_data.dcache.linesz = 4;
393 		break;
394 
395 	case CPU_TX3927:
396 		current_cpu_data.icache.ways = 2;
397 		current_cpu_data.dcache.ways = 2;
398 		current_cpu_data.dcache.linesz = 16;
399 		break;
400 
401 	case CPU_TX3922:
402 	default:
403 		current_cpu_data.icache.ways = 1;
404 		current_cpu_data.dcache.ways = 1;
405 		current_cpu_data.dcache.linesz = 16;
406 		break;
407 	}
408 }
409 
/*
 * TX39 cache setup entry point: probe the cache geometry, then wire up
 * the global cache-flush function pointers for the detected core.
 * The (void *) casts install tx39h_flush_icache_all for hooks with
 * different signatures; its extra arguments are simply ignored.
 */
void __init ld_mmu_tx39(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	unsigned long config;

	/* Start with the write buffer disabled; boards may re-enable. */
	config = read_c0_conf();
	config &= ~TX39_CONF_WBON;
	write_c0_conf(config);

	tx39_probe_cache();

	switch (current_cpu_data.cputype) {
	case CPU_TX3912:
		/* TX39/H core (writethru direct-map cache) */
		_flush_cache_all	= tx39h_flush_icache_all;
		___flush_cache_all	= tx39h_flush_icache_all;
		_flush_cache_mm		= (void *) tx39h_flush_icache_all;
		_flush_cache_range	= (void *) tx39h_flush_icache_all;
		_flush_cache_page	= (void *) tx39h_flush_icache_all;
		_flush_icache_page	= (void *) tx39h_flush_icache_all;
		_flush_icache_range	= (void *) tx39h_flush_icache_all;

		_flush_cache_sigtramp	= (void *) tx39h_flush_icache_all;
		_flush_data_cache_page	= (void *) tx39h_flush_icache_all;

		_dma_cache_wback_inv	= tx39h_dma_cache_wback_inv;

		shm_align_mask		= PAGE_SIZE - 1;

		break;

	case CPU_TX3922:
	case CPU_TX3927:
	default:
		/* TX39/H2,H3 core (writeback 2way-set-associative cache) */
		r3k_have_wired_reg = 1;
		write_c0_wired(0);	/* set 8 on reset... */
		/* board-dependent init code may set WBON */

		_flush_cache_all = tx39_flush_cache_all;
		___flush_cache_all = tx39___flush_cache_all;
		_flush_cache_mm = tx39_flush_cache_mm;
		_flush_cache_range = tx39_flush_cache_range;
		_flush_cache_page = tx39_flush_cache_page;
		_flush_icache_page = tx39_flush_icache_page;
		_flush_icache_range = tx39_flush_icache_range;

		_flush_cache_sigtramp = tx39_flush_cache_sigtramp;
		_flush_data_cache_page = tx39_flush_data_cache_page;

		_dma_cache_wback_inv = tx39_dma_cache_wback_inv;
		_dma_cache_wback = tx39_dma_cache_wback_inv;
		_dma_cache_inv = tx39_dma_cache_inv;

		/* Align shared mappings to the dcache way size so VIPT
		 * aliases land on the same cache lines. */
		shm_align_mask = max_t(unsigned long,
		                       (dcache_size / current_cpu_data.dcache.ways) - 1,
		                       PAGE_SIZE - 1);

		break;
	}

	/* Derive per-way sizes and set counts from the probed totals. */
	current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
	current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;

	current_cpu_data.icache.sets =
		current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
	current_cpu_data.dcache.sets =
		current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;

	/* A way larger than a page means virtual aliases are possible. */
	if (current_cpu_data.dcache.waysize > PAGE_SIZE)
		current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;

	current_cpu_data.icache.waybit = 0;
	current_cpu_data.dcache.waybit = 0;

	printk("Primary instruction cache %ldkB, linesize %d bytes\n",
		icache_size >> 10, current_cpu_data.icache.linesz);
	printk("Primary data cache %ldkB, linesize %d bytes\n",
		dcache_size >> 10, current_cpu_data.dcache.linesz);

	build_clear_page();
	build_copy_page();
}
494