/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page.
 * But the head struct page cannot meet our needs, so we have to abuse
 * other tail struct pages to store the metadata. In order to avoid
 * conflicts caused by subsequent use of more tail struct pages, we gather
 * these discrete indexes of tail struct pages here.
 */
enum {
        SUBPAGE_INDEX_SUBPOOL = 1,      /* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
        SUBPAGE_INDEX_CGROUP,           /* reuse page->private */
        SUBPAGE_INDEX_CGROUP_RSVD,      /* reuse page->private */
        __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
        __NR_USED_SUBPAGE,
};

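/*
 * Illustrative sketch (not part of the kernel API): each index above is
 * an offset from the head page, so the metadata lives in the ->private
 * field of the corresponding tail struct page. A huge page's subpool
 * pointer, for instance, is read roughly like this:
 *
 *      struct hugepage_subpool *spool;
 *
 *      spool = (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 *
 * hugetlb_page_subpool()/hugetlb_set_page_subpool() further down wrap
 * exactly this pattern.
 */
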
struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4 (to) - 0 (from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map. These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};

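/*
 * Example (illustrative): a mapping with reservations covering huge page
 * indices 0-3 and 6-7 is described by two file_regions on the resv_map's
 * region list:
 *
 *      [from = 0, to = 4)      -> 4 - 0 = 4 huge pages
 *      [from = 6, to = 8)      -> 8 - 6 = 2 huge pages
 *
 * Regions are kept sorted and non-overlapping, and adjacent compatible
 * regions are coalesced.
 */
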
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

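/*
 * Typical usage (sketch): walk every registered huge page size, e.g. to
 * report per-hstate counters:
 *
 *      struct hstate *h;
 *
 *      for_each_hstate(h)
 *              pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */
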
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                              long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
                             struct vm_area_struct *new_vma,
                             unsigned long old_addr, unsigned long new_addr,
                             unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
                            struct vm_area_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *,
                          zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                             struct vm_area_struct *dst_vma,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             enum mcopy_atomic_mode mode,
                             struct page **pagep,
                             bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                           struct vm_area_struct *vma,
                           vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                             long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

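/*
 * Sketch of how callers serialize hugetlb faults (illustrative,
 * simplified): faults on the same (mapping, index) pair hash to the same
 * mutex in hugetlb_fault_mutex_table.
 *
 *      u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *      mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *      ... look up or instantiate the page at idx ...
 *      mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */
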
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                     unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                             pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot,
                unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
                struct page *hpage)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
                                   unsigned long *addr, pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
                        struct vm_area_struct *vma, struct page **pages,
                        struct vm_area_struct **vmas, unsigned long *position,
                        unsigned long *nr_pages, long i, unsigned int flags,
                        int *nonblocking)
{
        BUG();
        return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
                                        unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                                          struct mm_struct *src,
                                          struct vm_area_struct *dst_vma,
                                          struct vm_area_struct *src_vma)
{
        BUG();
        return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
                                           struct vm_area_struct *new_vma,
                                           unsigned long old_addr,
                                           unsigned long new_addr,
                                           unsigned long len)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
        return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
                                unsigned long address, hugepd_t hpd, int flags,
                                int pdshift)
{
        return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
                                unsigned long address, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
                                unsigned long address, pud_t *pud, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
                                unsigned long address, pgd_t *pgd, int flags)
{
        return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                        unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
{
        BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                                           pte_t *dst_pte,
                                           struct vm_area_struct *dst_vma,
                                           unsigned long dst_addr,
                                           unsigned long src_addr,
                                           enum mcopy_atomic_mode mode,
                                           struct page **pagep,
                                           bool wp_copy)
{
        BUG();
        return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                     unsigned long sz)
{
        return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
        return -EBUSY;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
        return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
        return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
                                      struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot,
                        unsigned long cp_flags)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page,
                        zap_flags_t zap_flags)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If the arch supports hugepages
 * at the pgd level, it needs to define these.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as an shm file, so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                int creat_flags, int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                  unsigned long len, unsigned long pgoff,
                                  unsigned long flags);

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *      allocation time. Cleared when page is fully instantiated. Free
 *      routine checks flag to restore a reservation on error paths.
 *      Synchronization: Examined or modified by code that knows it has
 *      the only reference to page. i.e. After allocation but before use
 *      or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *      cache and/or page tables. Indicates the page is a candidate for
 *      migration.
 *      Synchronization: Initially set after new page allocation with no
 *      locking. When examined and modified during migration processing
 *      (isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *      allocator. Typically used for migration target pages when no pages
 *      are available in the pool. The hugetlb free page path will
 *      immediately free pages with this flag set to the buddy allocator.
 *      Synchronization: Can be set after huge page allocation from buddy when
 *      code knows it has the only reference. All other examinations and
 *      modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *      Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 */
enum hugetlb_page_flags {
        HPG_restore_reserve = 0,
        HPG_migratable,
        HPG_temporary,
        HPG_freed,
        HPG_vmemmap_optimized,
        __NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)               \
        { return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)           \
        { set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)         \
        { clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)               \
        { return 0; }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)           \
        { }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)         \
        { }
#endif

#define HPAGEFLAG(uname, flname)                                \
        TESTHPAGEFLAG(uname, flname)                            \
        SETHPAGEFLAG(uname, flname)                             \
        CLEARHPAGEFLAG(uname, flname)                           \

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)

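/*
 * For example, HPAGEFLAG(Freed, freed) above expands (under
 * CONFIG_HUGETLB_PAGE) to HPageFreed(), SetHPageFreed() and
 * ClearHPageFreed(), all operating on bit HPG_freed of the head page's
 * page->private. Usage sketch:
 *
 *      if (HPageFreed(page))           // page sits on a free list?
 *              ...
 *      SetHPageFreed(page);
 */
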
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        struct mutex resize_lock;
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned int demote_order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int max_huge_pages_node[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
        unsigned int optimize_vmemmap_pages;
#endif
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files_dfl[8];
        struct cftype cgroup_files_legacy[10];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
                             unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                      nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                 unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                           pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                              unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
        return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
                                        struct hugepage_subpool *subpool)
{
        set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}

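/*
 * Example (illustrative): page_size_log typically comes from the
 * MAP_HUGE_* bits of an mmap() or shmget() flags argument. A log of 0
 * selects the default huge page size; on x86-64, for instance,
 * hstate_sizelog(21) returns the 2MB hstate and hstate_sizelog(30) the
 * 1GB hstate, provided those sizes are configured (NULL otherwise).
 */
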
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

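/*
 * Usage sketch (illustrative): huge_page_mask() is commonly used to round
 * a user address down to a huge page boundary:
 *
 *      unsigned long haddr = address & huge_page_mask(h);
 *
 * For a 2MB hstate this clears the low 21 bits of the address.
 */
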
static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
                                       vm_flags_t flags)
{
        return pte_mkhuge(entry);
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
                (huge_page_shift(h) == PUD_SHIFT) ||
                        (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is distinct from the migration check: it decides
 * whether a huge page should be placed in a movable zone. Movability
 * only matters for huge page sizes that support migration in the first
 * place; there is no reason to make a page movable if it cannot be
 * migrated at all. The page must also be small enough that migrating it
 * out of a movable zone remains feasible; mere presence in a movable
 * zone does not by itself make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they are not treated as movable, because it is not
 * feasible to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;
        return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_movable_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        gfp_t modified_mask = htlb_alloc_mask(h);

        /* Some callers might want to enforce node */
        modified_mask |= (gfp_mask & __GFP_THISNODE);

        modified_mask |= (gfp_mask & __GFP_NOWARN);

        return modified_mask;
}

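/*
 * Example (illustrative): a migration caller that must stay on one node
 * might build its mask like this before calling alloc_huge_page_nodemask():
 *
 *      gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE);
 *
 * Note that only __GFP_THISNODE and __GFP_NOWARN are honoured from the
 * caller's mask; everything else comes from htlb_alloc_mask().
 */
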
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
        atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
        set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
        return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
                                                struct list_head *list)
{
        return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                         nodemask_t *nmask, gfp_t gfp_mask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h, int nid)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
{
        return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}

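/*
 * Typical usage (sketch): take the page table lock covering a huge page
 * table entry, operate on the entry, then drop the lock.
 *
 *      spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *      ... examine or modify the entry at ptep ...
 *      spin_unlock(ptl);
 */
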
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */