/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
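/* Note: the purge_tlb_start()/purge_tlb_end() helpers used throughout this
 * file are assumed to take pa_tlb_lock (see asm/tlbflush.h); that is what
 * actually serializes the PxTLB broadcasts described above. */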

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

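/* flush_dcache_page() defers the flush for pages without user mappings by
 * setting PG_dcache_dirty (see below).  When such a page is finally mapped
 * into user space, update_mmu_cache() performs the deferred flush. */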
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct page *page = pte_page(*ptep);

	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {

		flush_kernel_dcache_page(page);
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page(page);
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

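/* Query cache and TLB geometry from PDC firmware, decide whether the
 * I- and D-TLBs must be flushed separately, and derive the strides used
 * by the assembly flush loops in pacache.S. */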
void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

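/* Space-register hashing mixes space-ID bits into the cache/TLB index.
 * It is turned off here (with a per-CPU-family instruction sequence) so
 * that congruent virtual addresses always index the same cache lines,
 * which the aliasing scheme in this file relies on. */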
void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	/* Only trust space_bits if the firmware call actually filled it in. */
	if (retval == 0 && space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

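/* Flush the page's data-cache lines through the given user alias; for
 * executable mappings the instruction cache must be flushed as well. */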
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
}

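/* For page-cache pages with no user mappings, just record that the kernel
 * dirtied them (PG_dcache_dirty) and let update_mmu_cache() flush lazily.
 * Otherwise flush the kernel mapping and one user alias per SHMLBA-sized
 * congruence class. */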
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course). */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
					old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

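/* Wrapper that holds the TLB purge lock across the assembly helper,
 * presumably because __clear_user_page_asm() installs and removes a
 * temporary TLB entry for its clearing alias. */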
void clear_user_page_asm(void *page, unsigned long vaddr)
{
	unsigned long flags;
	/* This function is implemented in assembly in pacache.S */
	extern void __clear_user_page_asm(void *page, unsigned long vaddr);

	purge_tlb_start(flags);
	__clear_user_page_asm(page, vaddr);
	purge_tlb_end(flags);
}

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

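/* Time a whole-cache flush against a range flush of the kernel text using
 * the cr16 interval timer, then set parisc_cache_flush_threshold to the
 * measured break-even size (clamped to the D-cache size). */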
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
		parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page(unsigned long);
extern void clear_user_page_asm(void *page, unsigned long vaddr);

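/* Clear a user page via a congruent kernel alias.  The kernel mapping's
 * cache lines and TLB entry are purged first so the CPU cannot write back
 * or speculatively reload stale data through that alias. */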
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	unsigned long flags;

	purge_kernel_dcache_page((unsigned long)page);
	purge_tlb_start(flags);
	pdtlb_kernel(page);
	purge_tlb_end(flags);
	clear_user_page_asm(page, vaddr);
}
EXPORT_SYMBOL(clear_user_page);

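/* Flush a page at a kernel virtual address and purge its kernel TLB entry;
 * without a TLB entry the CPU may not speculatively pull the page back
 * into the cache (see the coherence note in flush_dcache_page()). */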
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

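/* Copy a user page through its kernel mapping.  On CPUs where
 * parisc_requires_coherency() is true the flush happens later in
 * kunmap_parisc(); otherwise the destination's kernel-mapping lines are
 * flushed here so congruent user aliases do not see stale data. */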
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	/* no coherency needed (all in kmap/kunmap) */
	copy_user_page_asm(vto, vfrom);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	/* Note: purge_tlb_entries can be called at startup with
	   no context.  */

	/* Disable preemption while we play with %sr1.  */
	preempt_disable();
	mtsp(mm->context, 1);
	purge_tlb_start(flags);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
	preempt_enable();
}
EXPORT_SYMBOL(purge_tlb_entries);

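/* Purge a user address range from both TLBs.  The pdtlb/pitlb purges
 * address the target space through %sr1 (set via mtsp()); above 512
 * pages a full flush_tlb_all() is taken to be cheaper. */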
void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		mtsp(sid, 1);
		purge_tlb_start(flags);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

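/* No attempt is made to flush only the lines belonging to one address
 * space; the whole cache is flushed instead (on all CPUs when SMP). */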
void flush_cache_mm(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	flush_cache_all();
#else
	flush_cache_all_local();
#endif
}

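/* For small ranges a loop over cache lines is cheaper; past the measured
 * parisc_cache_flush_threshold it is faster to flush the whole cache. */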
void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

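/* User-range flushes only work on the space currently loaded in %sr3, so
 * fall back to a full flush when the VMA belongs to another space. */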
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	BUG_ON(!vma->vm_mm->context);

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start, end);
		flush_user_icache_range(start, end);
	} else {
		flush_cache_all();
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	flush_tlb_page(vma, vmaddr);
	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}