#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */
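
/*
 * Illustrative sketch (an assumption for documentation, not part of this
 * header's API): the usual pairing for a short, non-sleeping access to a
 * possibly-highmem page.  "buf" and "len" are hypothetical names:
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(buf, vaddr, len);	<- must not sleep while mapped
 *	kunmap_atomic(vaddr);
 *
 * kmap(), by contrast, may sleep and suits longer-lived mappings; balance
 * it with kunmap(page).
 */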

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
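
/*
 * Rough sketch of how architecture code typically consumes the per-CPU
 * index stack above (modelled loosely on a fixmap-based kmap_atomic();
 * kmap_pte and the fixmap layout are arch-specific assumptions here):
 *
 *	type = kmap_atomic_idx_push();
 *	idx = type + KM_TYPE_NR * smp_processor_id();
 *	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, prot));
 *	...
 *	kmap_atomic_idx_pop();
 */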

/*
 * NOTE:
 * kmap_atomic() and kunmap_atomic() with two arguments are deprecated.
 * We keep them only for backward compatibility; any use of them now
 * triggers a deprecation warning.
 */

#define PASTE(a, b) a ## b
#define PASTE2(a, b) PASTE(a, b)

#define NARG_(_2, _1, n, ...) n
#define NARG(...) NARG_(__VA_ARGS__, 2, 1, :)

static inline void __deprecated *kmap_atomic_deprecated(struct page *page,
							enum km_type km)
{
	return kmap_atomic(page);
}

#define kmap_atomic1(...) kmap_atomic(__VA_ARGS__)
#define kmap_atomic2(...) kmap_atomic_deprecated(__VA_ARGS__)
#define kmap_atomic(...) PASTE2(kmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))

static inline void __deprecated __kunmap_atomic_deprecated(void *addr,
							    enum km_type km)
{
	__kunmap_atomic(addr);
}

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic_deprecated(addr, km)			\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic_deprecated(addr, km);			\
} while (0)

#define kunmap_atomic_withcheck(addr)				\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)

#define kunmap_atomic1(...) kunmap_atomic_withcheck(__VA_ARGS__)
#define kunmap_atomic2(...) kunmap_atomic_deprecated(__VA_ARGS__)
#define kunmap_atomic(...) PASTE2(kunmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
/**** End of C pre-processor tricks for deprecated macros ****/
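
/*
 * Expansion sketch for the NARG() argument-counting trick above:
 *
 *	kmap_atomic(page)
 *		-> PASTE2(kmap_atomic, 1(page))
 *		-> kmap_atomic1(page) -> kmap_atomic(page)	(the real one)
 *	kmap_atomic(page, KM_USER0)
 *		-> PASTE2(kmap_atomic, 2(page, KM_USER0))
 *		-> kmap_atomic2(page, KM_USER0)
 *		-> kmap_atomic_deprecated(page, KM_USER0)	(__deprecated)
 */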

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
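
/*
 * Usage sketch (a hypothetical fault-handler fragment, modelled on the
 * anonymous-fault path):
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */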

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
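
/*
 * Illustrative use (the "from" offset is a hypothetical name): filesystems
 * commonly zero the tail of a partially written page like so:
 *
 *	zero_user_segment(page, from, PAGE_SIZE);
 */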

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}
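
/*
 * Sketch: page migration, for one, copies the old page's contents into its
 * replacement with copy_highpage() (cf. migrate_page_copy()):
 *
 *	copy_highpage(newpage, page);
 */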

#endif /* _LINUX_HIGHMEM_H */