/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H

struct mm_struct;
struct vm_area_struct;
struct page;
struct address_space;

/*
 * The cache doesn't need to be flushed when TLB entries change if the
 * cache is mapped to physical memory rather than virtual memory, so
 * the default implementations below are all no-ops.
 */
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif

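/*
 * Flush all user cache lines for an entire address space; called for
 * whole-address-space page table operations such as exit and exec.
 */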
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

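/*
 * Like flush_cache_mm(), but called when an address space is duplicated
 * at fork time; kept separate so architectures with VIPT caches can
 * optimize it.
 */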
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

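/*
 * Flush user cache lines for the given virtual address range of @vma
 * before the page table entries covering that range are changed.
 */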
#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

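/*
 * Flush a single user page from the cache before a per-page operation
 * such as COW breaking or unmapping changes its mapping; @pfn identifies
 * the backing page frame.
 */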
#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

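/*
 * Keep the kernel's view of a page cache page coherent with any user
 * space mappings of it.  Architectures with aliasing D-caches provide a
 * real implementation and define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE to 1.
 */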
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif

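/*
 * Taken around updates and walks of a mapping's i_mmap tree so that an
 * architecture's flush_dcache_page() can safely look up user mappings
 * of the page.
 */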
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

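/*
 * Make the I-cache coherent with the D-cache after the kernel writes
 * instructions into memory it will execute from (module loading, kprobes
 * and the like).  flush_icache_user_range() is the user-address variant
 * and defaults to the same operation.
 */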
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif

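/*
 * Hook called when a page is mapped into a user address range; most
 * architectures do the equivalent work in flush_dcache_page() and
 * update_mmu_cache() instead, so the default is a no-op.
 */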
#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
#endif

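/*
 * Flush the I-cache for a user page after the kernel has written
 * instructions into it, e.g. when ptrace or uprobes plants a breakpoint;
 * used by copy_to_user_page() below.
 */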
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
#endif

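/*
 * Flush the kernel cache before a new kernel virtual mapping becomes
 * visible (vmap) and before an existing one is torn down (vunmap).
 */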
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif

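/*
 * Copy data into or out of a user page through its kernel mapping while
 * keeping the caches coherent; used by access_process_vm() for ptrace.
 * The defaults are a plain memcpy() plus an I-cache flush on the write
 * side.
 */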
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif

#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#endif

#endif /* _ASM_GENERIC_CACHEFLUSH_H */