/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
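
/*
 * Worked example (illustrative only; the real values depend on the
 * architecture and configured page size): on x86-64 with 4KiB pages,
 * PAGE_SHIFT = 12, PMD_SHIFT = 21 and PUD_SHIFT = 30, which gives:
 *
 *	HPAGE_PMD_SIZE  = 1UL << 21 = 2MiB
 *	HPAGE_PMD_ORDER = 21 - 12   = 9
 *	HPAGE_PMD_NR    = 1 << 9    = 512 base pages per PMD hugepage
 *	HPAGE_PUD_SIZE  = 1UL << 30 = 1GiB
 */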

extern unsigned long transparent_hugepage_flags;

#define hugepage_flags_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |		       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define hugepage_flags_always()				\
	(transparent_hugepage_flags &			\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
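
/*
 * Informational note: these two helpers test the bits toggled via
 * /sys/kernel/mm/transparent_hugepage/enabled.
 * hugepage_flags_always() is true when the global policy is "always",
 * while hugepage_flags_enabled() is true for "always" or "madvise".
 */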

/*
 * Perform the following checks:
 *   - For a file-backed VMA, check that the linear page offset of the
 *     VMA is HPAGE_PMD_NR-aligned within the file.  The hugepage is
 *     guaranteed to be hugepage-aligned within the file, but we must
 *     also check that the PMD-aligned addresses in the VMA map to
 *     PMD-aligned offsets within the file; otherwise the hugepage will
 *     not be PMD-mappable.
 *   - For all VMAs, check that haddr falls within an aligned
 *     HPAGE_PMD_SIZE area of the VMA.
 */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	haddr = addr & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
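
/*
 * Worked example for the pgoff check above (hypothetical VMA values,
 * assuming 2MiB PMD hugepages, i.e. HPAGE_PMD_NR == 512): a file VMA
 * with vm_start == 0x200000 and vm_pgoff == 0x200 passes, since
 * (0x200000 >> PAGE_SHIFT) - 0x200 == 0, which is 512-aligned.  The
 * same VMA with vm_pgoff == 0x201 fails: PMD-aligned virtual addresses
 * would then map to file offsets that are not PMD-aligned.
 */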

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       (vma->vm_flags & VM_EXEC) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
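
/*
 * Illustrative case (assuming CONFIG_READ_ONLY_THP_FOR_FS=y): the check
 * above passes for something like the executable text mapping of a
 * running binary backed by a regular file that nobody has open for
 * writing; it fails for writable-open files and non-VM_EXEC mappings.
 */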

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs);

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void folio_prep_large_rmappable(struct folio *folio);
bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_folio(struct folio *folio);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)
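
/*
 * Usage sketch (an illustrative caller, not kernel code): a page-table
 * walker that must proceed PTE-by-PTE can first demote a huge PMD:
 *
 *	if (pmd_trans_huge(*pmd))
 *		split_huge_pmd(vma, pmd, addr);
 *
 * Note that the macro re-checks the PMD itself, so invoking it on an
 * entry that is neither swap, trans-huge nor devmap is a no-op.
 */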


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
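
/*
 * Typical locking pattern (a sketch; "ptl" is just a local name chosen
 * here, and the mmap_lock calls only illustrate the rule above):
 *
 *	spinlock_t *ptl;
 *
 *	mmap_read_lock(mm);
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... *pmd is a stable huge/swap/devmap entry here ...
 *		spin_unlock(ptl);
 *	}
 *	mmap_read_unlock(mm);
 */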
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 *
 * Return: true if the folio is of at least PMD-mappable order.
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
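
/*
 * Reference pairing sketch (hypothetical caller): a successful get pins
 * the shared huge zero page for this mm and must eventually be balanced
 * by a put:
 *
 *	struct page *zpage = mm_get_huge_zero_page(mm);
 *
 *	if (zpage)
 *		... map zpage read-only into the faulting PMD ...
 *
 * and later, typically at mm teardown:
 *
 *	mm_put_huge_zero_page(mm);
 */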

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	return false;
}

static inline bool hugepage_vma_check(struct vm_area_struct *vma,
				      unsigned long vm_flags, bool smaps,
				      bool in_pf, bool enforce_sysfs)
{
	return false;
}

static inline void folio_prep_large_rmappable(struct folio *folio) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}

static inline int split_folio(struct folio *folio)
{
	return split_folio_to_list(folio, NULL);
}

/*
 * Architectures that select ARCH_WANTS_THP_SWAP but cannot support THP
 * swapping due to implementation limitations (e.g. arm64 MTE) can
 * override this to false.
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif
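
/*
 * Example override (a sketch modelled on arm64, which cannot swap THPs
 * when MTE is in use; see arch/arm64/include/asm/pgtable.h):
 *
 *	#define arch_thp_swp_supported arch_thp_swp_supported
 *	static inline bool arch_thp_swp_supported(void)
 *	{
 *		return !system_supports_mte();
 *	}
 */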

#endif /* _LINUX_HUGE_MM_H */