/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);
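/* The lock is normally taken through the purge_tlb_start()/purge_tlb_end()
 * helpers (see <asm/tlbflush.h>); when pa_serialize_tlb_flushes is clear,
 * those helpers are expected to just disable interrupts locally instead.
 */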

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}


/* Kernel virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
__update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	/* We don't have pte special. As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with the FireGL card in the C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
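	/*
	 * flush_dcache_page() defers the flush for file pages that have
	 * no user mappings by setting PG_dcache_dirty; finish that
	 * deferred flush here, now that a translation is being set up.
	 */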
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		cache_info.dc_conf.cc_alias
	);
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

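	/* A clear tc_sh bit means separate I and D TLBs, which must be
	   purged individually with pdtlb/pitlb; tc_sh == 2 is treated
	   the same way, conservatively. */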
	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
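	/* Sanity example with made-up PDC values: cc_line = 2, cc_block = 3,
	 * cc_shift = 1 gives 2 << (3 + 3 + 1) = 256 bytes, matching Jim
	 * Hull's form (1 << (3 - 1)) * (2 << (4 + 1)) = 4 * 64 = 256. */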

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far. setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	vmaddr &= PAGE_MASK;

	preempt_disable();

	/* Set context for flush */
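	/*
	 * CR8 holds a protection ID and CR25 the physical address of the
	 * page directory (a Linux/parisc convention); both are reloaded by
	 * switch_mm_irqs_off(), so save them along with the user space
	 * register and restore them once the flush is done.
	 */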
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
	local_irq_restore(flags);

	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* Restore previous context */
	local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	preempt_enable();
}

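/*
 * Lockless walk to the pte for a user address; this can race with
 * concurrent page-table updates, which callers are expected to
 * tolerate (see flush_cache_page_if_present() below).
 */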
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

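/*
 * Only pages that are present and referenced, and are not mapped
 * uncacheable (_PAGE_NO_CACHE), can be sitting in the cache and
 * therefore need a flush.
 */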
static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	unsigned long count = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page_addr(page_address(page));

	if (!mapping)
		return;

	pgoff = page->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;
		if (parisc_requires_coherency()) {
			pte_t *ptep;

			ptep = get_ptep(mpnt->vm_mm, addr);
			if (ptep && pte_needs_flush(*ptep))
				flush_user_cache_page(mpnt, addr);
		} else {
			/*
			 * The TLB is the engine of coherence on parisc:
			 * The CPU is entitled to speculate any page
			 * with a TLB mapping, so here we kill the
			 * mapping then flush the page along a special
			 * flush-only alias mapping. This guarantees that
			 * the page is no longer in the cache for any
			 * process and nor may it be speculatively read
			 * in (until the user or kernel specifically
			 * accesses it, of course).
			 */
			flush_tlb_page(mpnt, addr);
			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
				__flush_cache_page(mpnt, addr, page_to_phys(page));
				/*
				 * Software is allowed to have any number
				 * of private mappings to a page.
				 */
				if (!(mpnt->vm_flags & VM_SHARED))
					continue;
				if (old_addr)
					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
						old_addr, addr, mpnt->vm_file);
				old_addr = addr;
			}
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;
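/* Start effectively infinite so ranges are purged page by page until
 * parisc_setup_cache_timing() computes a real threshold below. */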

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

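	/* mfctl(16) reads CR16, the free-running PA-RISC interval timer,
	   so the deltas below are measured in processor cycles. */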
	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measurement of kernel text, which
	 * has been mapped with huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
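	/* Also drop the kernel TLB entry: without a translation the CPU
	   cannot speculatively pull the line back into the cache. */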
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);

	/*
	 * The pte check is racy and sometimes the flush will trigger
	 * a non-access TLB miss. Hopefully, the page has already been
	 * flushed.
	 */
	if (ptep && pte_needs_flush(*ptep))
		flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

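	/* Flush any user mapping of the source page first so that the
	   kernel mapping used for the copy below sees current data. */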
	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions. These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs. */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr, pfn;
	pte_t *ptep;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		/*
		 * The vma can contain pages that aren't present. Although
		 * the pte search is expensive, we need the pte to find the
		 * page pfn and to check whether the page should be flushed.
		 */
		ptep = get_ptep(vma->vm_mm, addr);
		if (ptep && pte_needs_flush(*ptep)) {
			if (parisc_requires_coherency()) {
				flush_user_cache_page(vma, addr);
			} else {
				pfn = pte_pfn(*ptep);
				if (WARN_ON(!pfn_valid(pfn)))
					return;
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    || mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	flush_cache_pages(vma, start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (WARN_ON(!pfn_valid(pfn)))
		return;
	if (parisc_requires_coherency())
		flush_user_cache_page(vma, vmaddr);
	else
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	if (parisc_requires_coherency()) {
		if (vma->vm_flags & VM_SHARED)
			flush_data_cache();
		else
			flush_user_cache_page(vma, vmaddr);
		return;
	}

	flush_tlb_page(vma, vmaddr);
	preempt_disable();
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
	preempt_enable();
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	/* Ensure DMA is complete */
	asm_syncdma();

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);