/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * A HugeTLB page has more metadata to save than fits in the head struct
 * page, so we reuse the struct pages of its tail pages to store it.  To
 * avoid conflicts with subsequent users of tail struct pages, the
 * discrete indexes of the tail struct pages used are gathered here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
#ifdef CONFIG_MEMORY_FAILURE
	SUBPAGE_INDEX_HWPOISON,
#endif
	__NR_USED_SUBPAGE,
};
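
/*
 * For example, the subpool pointer lives in page->private of the first
 * tail page; an illustrative read-back, matching the
 * hugetlb_page_subpool() helper defined later in this header:
 *
 *	struct hugepage_subpool *spool =
 *		(void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 */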

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represents
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
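/*
 * Regions on the list are kept disjoint and coalesced; reserving pages
 * 0-1 and page 3 of a mapping, for instance, would leave two
 * file_regions on the list, [0, 2) and [3, 4).
 */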
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
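
/*
 * Illustrative use: walk every registered huge page size, e.g. to
 * report per-pool counts:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */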

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
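
/*
 * A hugetlbfs mount with size=/min_size= limits owns one subpool.  An
 * illustrative setup/teardown sketch, mirroring what hugetlbfs does at
 * mount time (error handling elided):
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */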

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep,
				bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
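
/*
 * Illustrative use of the fault mutex table (the pattern the fault and
 * hole-punch paths use to serialize work on one mapping/index pair):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */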

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
				 int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
				unsigned long address, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep,
						bool wp_copy)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
	return -EBUSY;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory.  If an arch supports
 * hugepages at pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable  - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it holds the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by the raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)		\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)		\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
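
/*
 * For example, HPAGEFLAG(Migratable, migratable) above expands into
 * HPageMigratable(), SetHPageMigratable() and ClearHPageMigratable(),
 * which test/set/clear the HPG_migratable bit in page->private of the
 * head page, e.g.:
 *
 *	if (HPageMigratable(page))
 *		...
 */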

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
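
/*
 * For example, mmap(MAP_HUGETLB) passes the log2 page size encoded in
 * its flags down as page_size_log; a value of 21 would select the 2 MB
 * hstate (on an arch that provides it) via size_to_hstate(1UL << 21),
 * while 0 selects the default huge page size.
 */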

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
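
/*
 * Worked example (x86-64 with 4 KB base pages): the 2 MB hstate has
 * order == 9, so huge_page_size() == 4 KB << 9 == 2 MB,
 * huge_page_shift() == 21, pages_per_huge_page() == 512 and
 * blocks_per_huge_page() == 2 MB / 512 == 4096 sectors.
 */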

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void hugetlb_clear_page_hwpoison(struct page *hpage);
#else
static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * a movable zone.  Movability matters only if the huge page
 * size supports migration at all: there is no reason for a
 * huge page to be movable if it is not migratable to start
 * with.  The huge page must also be large enough to be placed
 * in a movable zone and still be feasible to migrate; mere
 * presence in a movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable, they should not be movable, because it is not
 * feasible to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}
#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time.  Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
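
/*
 * Illustrative start/commit sequence for changing the protection of one
 * huge PTE (this mirrors what hugetlb_change_protection() does):
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */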

#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
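
/*
 * Illustrative use: take the PTE-level lock before examining or
 * modifying a huge PTE, then drop it with spin_unlock():
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	... examine or update the entry ptep points at ...
 *	spin_unlock(ptl);
 */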

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */