/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 * - flush_cache_all() flushes the entire cache
 * - flush_cache_mm(mm) flushes the specified mm context's cache lines
 * - flush_cache_dup_mm(mm) handles cache flushing when forking
 * - flush_cache_page(vma, vmaddr, pfn) flushes a single page
 * - flush_cache_range(vma, start, end) flushes a range of pages
 * - flush_icache_range(start, end) flushes a range of instructions
 * - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *
 * MIPS specific flush operations:
 *
 * - flush_icache_all() flushes the entire instruction cache
 * - flush_data_cache_page() flushes a page from the data cache
 * - __flush_icache_user_range(start, end) flushes a range of user instructions
 */
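
/*
 * Illustrative sketch, not part of this header: code that writes
 * instructions (a JIT, module loader or text patcher) must make them
 * visible to the icache before jumping to them.  The buffer name below
 * is made up:
 *
 *	memcpy(code_buf, insns, len);
 *	flush_icache_range((unsigned long)code_buf,
 *			   (unsigned long)code_buf + len);
 *	((void (*)(void))code_buf)();
 */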

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

#define folio_test_dcache_dirty(folio)		\
	test_bit(PG_dcache_dirty, &(folio)->flags)
#define folio_set_dcache_dirty(folio)		\
	set_bit(PG_dcache_dirty, &(folio)->flags)
#define folio_clear_dcache_dirty(folio)		\
	clear_bit(PG_dcache_dirty, &(folio)->flags)
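
/*
 * Illustrative sketch, not part of this header: the deferred-flush
 * pattern these bits support.  A store through the kernel mapping may
 * merely mark the folio, and a later consumer writes it back lazily:
 *
 *	if (folio_test_dcache_dirty(folio)) {
 *		__flush_dcache_pages(&folio->page, folio_nr_pages(folio));
 *		folio_clear_dcache_dirty(folio);
 *	}
 */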

extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_pages(struct page *page, unsigned int nr);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_folio(struct folio *folio)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_pages(&folio->page, folio_nr_pages(folio));
	else if (!cpu_has_ic_fills_f_dc)
		folio_set_dcache_dirty(folio);
}
#define flush_dcache_folio flush_dcache_folio

static inline void flush_dcache_page(struct page *page)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_pages(page, 1);
	else if (!cpu_has_ic_fills_f_dc)
		folio_set_dcache_dirty(page_folio(page));
}
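
/*
 * Illustrative sketch, not part of this header: a filesystem that fills
 * a page cache page through the kernel mapping flushes it so user space
 * cannot see stale data through a differently colored user mapping:
 *
 *	vaddr = kmap_local_page(page);
 *	memcpy(vaddr, data, len);
 *	kunmap_local(vaddr);
 *	flush_dcache_page(page);
 */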

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}
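
/*
 * Illustrative sketch, not part of this header: before reading an
 * anonymous page grabbed from another task (as the get_user_pages()
 * paths do), the user's dirty cache lines must be made visible at the
 * kernel address:
 *
 *	flush_anon_page(vma, page, vmaddr);
 *	maddr = kmap_local_page(page);
 *	memcpy(buf, maddr + offset, bytes);
 *	kunmap_local(maddr);
 */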

extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_icache_user_range)(unsigned long start,
					 unsigned long end);
extern void (*__local_flush_icache_user_range)(unsigned long start,
					       unsigned long end);

extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}
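
/*
 * Illustrative note, not part of this header: drivers do not call this
 * hook directly; the core mm runs it once the page table entries behind
 * e.g. vmap() are in place, roughly:
 *
 *	ptr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	(the core mm has already issued flush_cache_vmap() for the range)
 */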

#define flush_cache_vmap_early(start, end)	do { } while (0)

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vunmap();
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
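
/*
 * Illustrative sketch, not part of this header: how the generic
 * access_process_vm()/ptrace path uses these helpers, simplified from
 * mm/memory.c:
 *
 *	maddr = kmap_local_page(page);
 *	if (write)
 *		copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	else
 *		copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes);
 *	kunmap_local(maddr);
 */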

extern void (*flush_icache_all)(void);
extern void (*flush_data_cache_page)(unsigned long addr);

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);
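
/*
 * Illustrative sketch, not part of this header: a cache probing routine
 * can be run through an uncached alias of itself so that its own loads
 * and stores do not disturb the cache under test.  probe_scache() is a
 * made-up name:
 *
 *	unsigned long config = run_uncached(&probe_scache);
 */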

extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}
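
/*
 * Illustrative sketch, not part of this header, in the spirit of this
 * architecture's copy_user_highpage(): kmap_coherent() maps a user page
 * at a kernel address with the same cache color as the user mapping, so
 * the copy creates no dcache aliases:
 *
 *	vfrom = kmap_coherent(page, vaddr);
 *	copy_page(vto, vfrom);
 *	kunmap_coherent();
 */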

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}
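
/*
 * Illustrative sketch, not part of this header: I/O on a vmalloc()ed
 * buffer is bracketed with these helpers, matching the generic
 * documentation of the hooks:
 *
 *	flush_kernel_vmap_range(buf, size);	 (device about to read buf)
 *	... hardware performs the transfer ...
 *	invalidate_kernel_vmap_range(buf, size); (device wrote buf; the CPU
 *						  is about to read it)
 */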

#endif /* _ASM_CACHEFLUSH_H */