#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}
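
/*
 * Illustrative usage sketch (not part of this header; "page" is assumed
 * to come from e.g. alloc_page()).  kmap() may sleep, so it is only
 * valid in process context, and kunmap() takes the page rather than the
 * mapped address:
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 */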

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);	/* valid slots are 0..KM_TYPE_NR-1 */
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}
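
/*
 * Illustrative sketch (not part of this header) of how an architecture's
 * kmap_atomic() implementation typically uses the index helpers above;
 * the fixmap step is a stand-in for arch-specific details, not a real
 * API:
 *
 *	void *arch_kmap_atomic(struct page *page)
 *	{
 *		int type, idx;
 *
 *		pagefault_disable();
 *		if (!PageHighMem(page))
 *			return page_address(page);
 *
 *		type = kmap_atomic_idx_push();
 *		idx = type + KM_TYPE_NR * smp_processor_id();
 *		... install a fixmap PTE for slot idx, return its address ...
 *	}
 *
 * The matching kunmap_atomic() clears that PTE and calls
 * kmap_atomic_idx_pop(); the slots form a strict per-CPU LIFO stack.
 */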

#endif

/*
 * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
 */
#define kmap_atomic(page, args...) __kmap_atomic(page)

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap().
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr, args...)				\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)
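
/*
 * Illustrative usage sketch (not part of this header): pagefaults are
 * disabled between the calls, so nothing that can sleep may run in
 * between, and kunmap_atomic() takes the returned address, not the
 * page.  "page", "buf", "offset" and "len" are assumed locals:
 *
 *	char *vaddr = kmap_atomic(page);
 *	memcpy(buf, vaddr + offset, len);
 *	kunmap_atomic(vaddr);
 *
 * Passing the struct page * instead trips the BUILD_BUG_ON() above at
 * compile time.
 */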

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
			unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
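
/*
 * Illustrative usage sketch (not part of this header), modelled on an
 * anonymous-fault path; "vma" and "address" are assumed fault-handler
 * locals:
 *
 *	struct page *page;
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	... map the zeroed page into the process at address ...
 */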

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
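
/*
 * Illustrative usage sketch (not part of this header): zeroing the tail
 * of the last, partially used page of a file, as a filesystem might do;
 * "page" and "isize" (the file size) are assumed locals:
 *
 *	unsigned offset = isize & (PAGE_SIZE - 1);
 *
 *	if (offset)
 *		zero_user_segment(page, offset, PAGE_SIZE);
 */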

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}
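
/*
 * Illustrative usage sketch (not part of this header): a copy-on-write
 * fault copies the shared page into a fresh one before mapping it
 * writable; "old_page", "vma" and "address" are assumed locals:
 *
 *	struct page *new_page;
 *
 *	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (new_page)
 *		copy_user_highpage(new_page, old_page, address, vma);
 *
 * Note the LIFO kunmap_atomic() order above: the mapping taken last
 * (KM_USER1) is released first.
 */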

#endif /* _LINUX_HIGHMEM_H */