/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page:	Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);
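
/*
 * Example (illustrative sketch only, not part of this header): a hypothetical
 * helper that keeps a page mapped across an operation that may sleep, which
 * is why kmap() rather than kmap_local_page() is used here.
 * my_long_running_io() is a stand-in for any function that can block.
 *
 *	static int my_process_page(struct page *page)
 *	{
 *		void *vaddr = kmap(page);	// may sleep, task context only
 *		int ret;
 *
 *		ret = my_long_running_io(vaddr, PAGE_SIZE);
 *		kunmap(page);			// takes the page, not the address
 *		return ret;
 *	}
 */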

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr:	The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
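
/*
 * Example (illustrative sketch only, not part of this header): a hypothetical
 * helper that zeroes a byte range of a page through a short-lived, CPU-local
 * mapping. The pointer is only valid in the calling context and must be
 * released with kunmap_local() before returning.
 *
 *	static void my_zero_range(struct page *page, size_t off, size_t len)
 *	{
 *		char *vaddr = kmap_local_page(page);
 *
 *		memset(vaddr + off, 0, len);
 *		kunmap_local(vaddr);		// takes the address, not the page
 *	}
 */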

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_folio(folio1, offset1);
 *   addr2 = kmap_local_folio(folio2, offset2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);
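
/*
 * Example (illustrative sketch only, not part of this header): reading one
 * byte out of a (possibly large) folio. @offset is relative to the start of
 * the folio and the returned address points at that byte, not at the start
 * of the containing page.
 *
 *	static u8 my_read_byte(struct folio *folio, size_t offset)
 *	{
 *		u8 *vaddr = kmap_local_folio(folio, offset);
 *		u8 val = *vaddr;
 *
 *		kunmap_local(vaddr);
 *		return val;
 *	}
 */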

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache.  The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *   // Find the page of interest.
 *   struct page *page = find_get_page(mapping, offset);
 *
 *   // Gain access to the contents of that page.
 *   void *vaddr = kmap_atomic(page);
 *
 *   // Do something to the contents of that page.
 *   memset(vaddr, 0, PAGE_SIZE);
 *
 *   // Unmap that page.
 *   kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like:
 *
 * vaddr1 = kmap_atomic(page1);
 * vaddr2 = kmap_atomic(page2);
 *
 * memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 * kunmap_atomic(vaddr2);
 * kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);
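
/*
 * Conversion sketch (illustrative only): in most cases the deprecated
 * pattern translates directly to kmap_local_page(). If the old code relied
 * on the implicit pagefault or preemption disable, make that explicit, e.g.
 * with pagefault_disable()/pagefault_enable() or
 * preempt_disable()/preempt_enable().
 *
 *	// Deprecated:
 *	vaddr = kmap_atomic(page);
 *	...
 *	kunmap_atomic(vaddr);
 *
 *	// Preferred:
 *	vaddr = kmap_local_page(page);
 *	...
 *	kunmap_local(vaddr);
 */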

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * Returns: The allocated and zeroed HIGHMEM page
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
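
/*
 * Example (illustrative sketch only, not part of this header): a hypothetical
 * fault-handling path that allocates a zeroed, movable page for a userspace
 * mapping. my_install_pte() is a stand-in for the code that actually maps
 * the page into the VMA.
 *
 *	static vm_fault_t my_anon_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page;
 *
 *		page = alloc_zeroed_user_highpage_movable(vmf->vma,
 *							  vmf->address);
 *		if (!page)
 *			return VM_FAULT_OOM;
 *
 *		return my_install_pte(vmf, page);
 *	}
 */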

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	u8 tag;

	tag = page_kasan_tag(page);
	page_kasan_tag_reset(page);
	clear_highpage(page);
	page_kasan_tag_set(page, tag);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}
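
/*
 * Example (illustrative sketch only, not part of this header): copying a
 * small record into and out of a page without open-coding the map/unmap and
 * dcache flush. struct my_rec is a hypothetical on-disk record; the offsets
 * must stay within PAGE_SIZE.
 *
 *	static void my_store_rec(struct page *page, size_t off,
 *				 const struct my_rec *rec)
 *	{
 *		memcpy_to_page(page, off, (const char *)rec, sizeof(*rec));
 *	}
 *
 *	static void my_load_rec(struct my_rec *rec, struct page *page,
 *				size_t off)
 *	{
 *		memcpy_from_page((char *)rec, page, off, sizeof(*rec));
 *	}
 */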

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}
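
/*
 * Example (illustrative sketch only, not part of this header): zeroing the
 * part of a folio that lies beyond end-of-file, as a filesystem might do
 * after a truncate. "isize" is the new file size; the folio is assumed to
 * straddle it.
 *
 *	static void my_zero_tail(struct folio *folio, loff_t isize)
 *	{
 *		size_t offset = offset_in_folio(folio, isize);
 *
 *		folio_zero_range(folio, offset, folio_size(folio) - offset);
 *	}
 */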

#endif /* _LINUX_HIGHMEM_H */