1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <linux/io.h>
28 
29 #include <linux/hugetlb.h>
30 #include <linux/node.h>
31 #include "internal.h"
32 
33 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
34 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35 unsigned long hugepages_treat_as_movable;
36 
37 static int max_hstate;
38 unsigned int default_hstate_idx;
39 struct hstate hstates[HUGE_MAX_HSTATE];
40 
41 __initdata LIST_HEAD(huge_boot_pages);
42 
43 /* for command line parsing */
44 static struct hstate * __initdata parsed_hstate;
45 static unsigned long __initdata default_hstate_max_huge_pages;
46 static unsigned long __initdata default_hstate_size;
47 
48 #define for_each_hstate(h) \
49 	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
50 
51 /*
52  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
53  */
54 static DEFINE_SPINLOCK(hugetlb_lock);
55 
56 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
57 {
58 	bool free = (spool->count == 0) && (spool->used_hpages == 0);
59 
60 	spin_unlock(&spool->lock);
61 
62 	/* If no pages are used, and no other handles to the subpool
63 	 * remain, free the subpool. */
64 	if (free)
65 		kfree(spool);
66 }
67 
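/*
 * Allocate a subpool limiting a hugetlbfs mount to nr_blocks huge pages.
 * The caller receives the initial reference, which is dropped with
 * hugepage_put_subpool().
 */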
68 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
69 {
70 	struct hugepage_subpool *spool;
71 
72 	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
73 	if (!spool)
74 		return NULL;
75 
76 	spin_lock_init(&spool->lock);
77 	spool->count = 1;
78 	spool->max_hpages = nr_blocks;
79 	spool->used_hpages = 0;
80 
81 	return spool;
82 }
83 
84 void hugepage_put_subpool(struct hugepage_subpool *spool)
85 {
86 	spin_lock(&spool->lock);
87 	BUG_ON(!spool->count);
88 	spool->count--;
89 	unlock_or_release_subpool(spool);
90 }
91 
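/*
 * Charge 'delta' huge pages against the subpool limit.  Returns 0 on
 * success, -ENOMEM if the limit would be exceeded.  A NULL subpool
 * means no limit applies.
 */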
92 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
93 				      long delta)
94 {
95 	int ret = 0;
96 
97 	if (!spool)
98 		return 0;
99 
100 	spin_lock(&spool->lock);
101 	if ((spool->used_hpages + delta) <= spool->max_hpages) {
102 		spool->used_hpages += delta;
103 	} else {
104 		ret = -ENOMEM;
105 	}
106 	spin_unlock(&spool->lock);
107 
108 	return ret;
109 }
110 
111 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
112 				       long delta)
113 {
114 	if (!spool)
115 		return;
116 
117 	spin_lock(&spool->lock);
118 	spool->used_hpages -= delta;
119 	/* If hugetlbfs_put_super couldn't free spool due to
120 	 * an outstanding quota reference, free it now. */
121 	unlock_or_release_subpool(spool);
122 }
123 
124 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
125 {
126 	return HUGETLBFS_SB(inode->i_sb)->spool;
127 }
128 
129 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
130 {
131 	return subpool_inode(vma->vm_file->f_dentry->d_inode);
132 }
133 
134 /*
135  * Region tracking -- allows tracking of reservations and instantiated pages
136  *                    across the pages in a mapping.
137  *
138  * The region data structures are protected by a combination of the mmap_sem
139  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
140  * must either hold the mmap_sem for write, or the mmap_sem for read and
141  * the hugetlb_instantiation mutex:
142  *
143  *	down_write(&mm->mmap_sem);
144  * or
145  *	down_read(&mm->mmap_sem);
146  *	mutex_lock(&hugetlb_instantiation_mutex);
147  */
148 struct file_region {
149 	struct list_head link;
150 	long from;
151 	long to;
152 };
153 
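/*
 * Add the range [f, t) to the reservation map, merging it with any
 * regions it overlaps.  Relies on a prior region_chg() call having
 * ensured that an entry at or before 'f' already exists, so this
 * cannot fail.  Returns 0.
 */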
154 static long region_add(struct list_head *head, long f, long t)
155 {
156 	struct file_region *rg, *nrg, *trg;
157 
158 	/* Locate the region we are either in or before. */
159 	list_for_each_entry(rg, head, link)
160 		if (f <= rg->to)
161 			break;
162 
163 	/* Round our left edge to the current segment if it encloses us. */
164 	if (f > rg->from)
165 		f = rg->from;
166 
167 	/* Check for and consume any regions we now overlap with. */
168 	nrg = rg;
169 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
170 		if (&rg->link == head)
171 			break;
172 		if (rg->from > t)
173 			break;
174 
175 		/* If this area reaches higher than us, extend our area to
176 		 * include it completely.  If this is not the first area
177 		 * which we intend to reuse, free it. */
178 		if (rg->to > t)
179 			t = rg->to;
180 		if (rg != nrg) {
181 			list_del(&rg->link);
182 			kfree(rg);
183 		}
184 	}
185 	nrg->from = f;
186 	nrg->to = t;
187 	return 0;
188 }
189 
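/*
 * Work out how many huge pages a reservation of [f, t) would add over
 * what the map already covers.  When no suitable entry exists, a
 * zero-sized placeholder is inserted so that a following region_add()
 * is guaranteed to succeed.  Returns the required charge, or -ENOMEM.
 */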
190 static long region_chg(struct list_head *head, long f, long t)
191 {
192 	struct file_region *rg, *nrg;
193 	long chg = 0;
194 
195 	/* Locate the region we are before or in. */
196 	list_for_each_entry(rg, head, link)
197 		if (f <= rg->to)
198 			break;
199 
200 	/* If we are below the current region then a new region is required.
201 	 * Subtle, allocate a new region at the position but make it zero
202 	 * size such that we can guarantee to record the reservation. */
203 	if (&rg->link == head || t < rg->from) {
204 		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
205 		if (!nrg)
206 			return -ENOMEM;
207 		nrg->from = f;
208 		nrg->to   = f;
209 		INIT_LIST_HEAD(&nrg->link);
210 		list_add(&nrg->link, rg->link.prev);
211 
212 		return t - f;
213 	}
214 
215 	/* Round our left edge to the current segment if it encloses us. */
216 	if (f > rg->from)
217 		f = rg->from;
218 	chg = t - f;
219 
220 	/* Check for and consume any regions we now overlap with. */
221 	list_for_each_entry(rg, rg->link.prev, link) {
222 		if (&rg->link == head)
223 			break;
224 		if (rg->from > t)
225 			return chg;
226 
227 		/* We overlap with this area, if it extends further than
228 		 * us then we must extend ourselves.  Account for its
229 		 * existing reservation. */
230 		if (rg->to > t) {
231 			chg += rg->to - t;
232 			t = rg->to;
233 		}
234 		chg -= rg->to - rg->from;
235 	}
236 	return chg;
237 }
238 
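/*
 * Truncate the reservation map at 'end', trimming or dropping every
 * region at or beyond that offset.  Returns the number of huge pages
 * removed from the map.
 */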
239 static long region_truncate(struct list_head *head, long end)
240 {
241 	struct file_region *rg, *trg;
242 	long chg = 0;
243 
244 	/* Locate the region we are either in or before. */
245 	list_for_each_entry(rg, head, link)
246 		if (end <= rg->to)
247 			break;
248 	if (&rg->link == head)
249 		return 0;
250 
251 	/* If we are in the middle of a region then adjust it. */
252 	if (end > rg->from) {
253 		chg = rg->to - end;
254 		rg->to = end;
255 		rg = list_entry(rg->link.next, typeof(*rg), link);
256 	}
257 
258 	/* Drop any remaining regions. */
259 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
260 		if (&rg->link == head)
261 			break;
262 		chg += rg->to - rg->from;
263 		list_del(&rg->link);
264 		kfree(rg);
265 	}
266 	return chg;
267 }
268 
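/*
 * Count how many huge pages in the reservation map overlap the
 * range [f, t).
 */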
269 static long region_count(struct list_head *head, long f, long t)
270 {
271 	struct file_region *rg;
272 	long chg = 0;
273 
274 	/* Locate each segment we overlap with, and count that overlap. */
275 	list_for_each_entry(rg, head, link) {
276 		long seg_from;
277 		long seg_to;
278 
279 		if (rg->to <= f)
280 			continue;
281 		if (rg->from >= t)
282 			break;
283 
284 		seg_from = max(rg->from, f);
285 		seg_to = min(rg->to, t);
286 
287 		chg += seg_to - seg_from;
288 	}
289 
290 	return chg;
291 }
292 
293 /*
294  * Convert the address within this vma to the page offset within
295  * the mapping, in pagecache page units; huge pages here.
296  */
297 static pgoff_t vma_hugecache_offset(struct hstate *h,
298 			struct vm_area_struct *vma, unsigned long address)
299 {
300 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
301 			(vma->vm_pgoff >> huge_page_order(h));
302 }
303 
304 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
305 				     unsigned long address)
306 {
307 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
308 }
309 
310 /*
311  * Return the size of the pages allocated when backing a VMA. In the majority
312  * of cases this will be the same size as that used by the page table entries.
313  */
314 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
315 {
316 	struct hstate *hstate;
317 
318 	if (!is_vm_hugetlb_page(vma))
319 		return PAGE_SIZE;
320 
321 	hstate = hstate_vma(vma);
322 
323 	return 1UL << (hstate->order + PAGE_SHIFT);
324 }
325 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
326 
327 /*
328  * Return the page size being used by the MMU to back a VMA. In the majority
329  * of cases, the page size used by the kernel matches the MMU size. On
330  * architectures where it differs, an architecture-specific version of this
331  * function is required.
332  */
333 #ifndef vma_mmu_pagesize
334 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
335 {
336 	return vma_kernel_pagesize(vma);
337 }
338 #endif
339 
340 /*
341  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
342  * bits of the reservation map pointer, which are always clear due to
343  * alignment.
344  */
345 #define HPAGE_RESV_OWNER    (1UL << 0)
346 #define HPAGE_RESV_UNMAPPED (1UL << 1)
347 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
348 
349 /*
350  * These helpers are used to track how many pages are reserved for
351  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
352  * is guaranteed to have its future faults succeed.
353  *
354  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
355  * the reserve counters are updated with the hugetlb_lock held. It is safe
356  * to reset the VMA at fork() time as it is not in use yet and there is no
357  * chance of the global counters getting corrupted as a result of the values.
358  *
359  * The private mapping reservation is represented in a subtly different
360  * manner to a shared mapping.  A shared mapping has a region map associated
361  * with the underlying file, this region map represents the backing file
362  * pages which have ever had a reservation assigned, and this persists even
363  * after the page is instantiated.  A private mapping has a region map
364  * associated with the original mmap which is attached to all VMAs which
365  * reference it, this region map represents those offsets which have consumed
366  * reservations, i.e. where pages have been instantiated.
367  */
368 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
369 {
370 	return (unsigned long)vma->vm_private_data;
371 }
372 
373 static void set_vma_private_data(struct vm_area_struct *vma,
374 							unsigned long value)
375 {
376 	vma->vm_private_data = (void *)value;
377 }
378 
379 struct resv_map {
380 	struct kref refs;
381 	struct list_head regions;
382 };
383 
384 static struct resv_map *resv_map_alloc(void)
385 {
386 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
387 	if (!resv_map)
388 		return NULL;
389 
390 	kref_init(&resv_map->refs);
391 	INIT_LIST_HEAD(&resv_map->regions);
392 
393 	return resv_map;
394 }
395 
396 static void resv_map_release(struct kref *ref)
397 {
398 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
399 
400 	/* Clear out any active regions before we release the map. */
401 	region_truncate(&resv_map->regions, 0);
402 	kfree(resv_map);
403 }
404 
405 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
406 {
407 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
408 	if (!(vma->vm_flags & VM_MAYSHARE))
409 		return (struct resv_map *)(get_vma_private_data(vma) &
410 							~HPAGE_RESV_MASK);
411 	return NULL;
412 }
413 
414 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
415 {
416 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
417 	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
418 
419 	set_vma_private_data(vma, (get_vma_private_data(vma) &
420 				HPAGE_RESV_MASK) | (unsigned long)map);
421 }
422 
423 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
424 {
425 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
426 	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
427 
428 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
429 }
430 
431 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
432 {
433 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
434 
435 	return (get_vma_private_data(vma) & flag) != 0;
436 }
437 
438 /* Decrement the reserved pages in the hugepage pool by one */
439 static void decrement_hugepage_resv_vma(struct hstate *h,
440 			struct vm_area_struct *vma)
441 {
442 	if (vma->vm_flags & VM_NORESERVE)
443 		return;
444 
445 	if (vma->vm_flags & VM_MAYSHARE) {
446 		/* Shared mappings always use reserves */
447 		h->resv_huge_pages--;
448 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
449 		/*
450 		 * Only the process that called mmap() has reserves for
451 		 * private mappings.
452 		 */
453 		h->resv_huge_pages--;
454 	}
455 }
456 
457 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
458 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
459 {
460 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
461 	if (!(vma->vm_flags & VM_MAYSHARE))
462 		vma->vm_private_data = (void *)0;
463 }
464 
465 /* Returns true if the VMA has associated reserve pages */
466 static int vma_has_reserves(struct vm_area_struct *vma)
467 {
468 	if (vma->vm_flags & VM_MAYSHARE)
469 		return 1;
470 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
471 		return 1;
472 	return 0;
473 }
474 
475 static void copy_gigantic_page(struct page *dst, struct page *src)
476 {
477 	int i;
478 	struct hstate *h = page_hstate(src);
479 	struct page *dst_base = dst;
480 	struct page *src_base = src;
481 
482 	for (i = 0; i < pages_per_huge_page(h); ) {
483 		cond_resched();
484 		copy_highpage(dst, src);
485 
486 		i++;
487 		dst = mem_map_next(dst, dst_base, i);
488 		src = mem_map_next(src, src_base, i);
489 	}
490 }
491 
492 void copy_huge_page(struct page *dst, struct page *src)
493 {
494 	int i;
495 	struct hstate *h = page_hstate(src);
496 
497 	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
498 		copy_gigantic_page(dst, src);
499 		return;
500 	}
501 
502 	might_sleep();
503 	for (i = 0; i < pages_per_huge_page(h); i++) {
504 		cond_resched();
505 		copy_highpage(dst + i, src + i);
506 	}
507 }
508 
509 static void enqueue_huge_page(struct hstate *h, struct page *page)
510 {
511 	int nid = page_to_nid(page);
512 	list_add(&page->lru, &h->hugepage_freelists[nid]);
513 	h->free_huge_pages++;
514 	h->free_huge_pages_node[nid]++;
515 }
516 
517 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
518 {
519 	struct page *page;
520 
521 	if (list_empty(&h->hugepage_freelists[nid]))
522 		return NULL;
523 	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
524 	list_del(&page->lru);
525 	set_page_refcounted(page);
526 	h->free_huge_pages--;
527 	h->free_huge_pages_node[nid]--;
528 	return page;
529 }
530 
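/*
 * Pick a free huge page for a fault at 'address' in 'vma', walking the
 * zonelist chosen by the task's mempolicy and skipping zones outside
 * its cpuset.  A reserve is consumed unless 'avoid_reserve' is set.
 */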
531 static struct page *dequeue_huge_page_vma(struct hstate *h,
532 				struct vm_area_struct *vma,
533 				unsigned long address, int avoid_reserve)
534 {
535 	struct page *page = NULL;
536 	struct mempolicy *mpol;
537 	nodemask_t *nodemask;
538 	struct zonelist *zonelist;
539 	struct zone *zone;
540 	struct zoneref *z;
541 	unsigned int cpuset_mems_cookie;
542 
543 retry_cpuset:
544 	cpuset_mems_cookie = get_mems_allowed();
545 	zonelist = huge_zonelist(vma, address,
546 					htlb_alloc_mask, &mpol, &nodemask);
547 	/*
548 	 * A child process with MAP_PRIVATE mappings created by its parent
549 	 * has no page reserves. This check ensures that reservations are
550 	 * not "stolen". The child may still get SIGKILLed.
551 	 */
552 	if (!vma_has_reserves(vma) &&
553 			h->free_huge_pages - h->resv_huge_pages == 0)
554 		goto err;
555 
556 	/* If reserves cannot be used, ensure enough pages are in the pool */
557 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
558 		goto err;
559 
560 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
561 						MAX_NR_ZONES - 1, nodemask) {
562 		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
563 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
564 			if (page) {
565 				if (!avoid_reserve)
566 					decrement_hugepage_resv_vma(h, vma);
567 				break;
568 			}
569 		}
570 	}
571 
572 	mpol_cond_put(mpol);
573 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
574 		goto retry_cpuset;
575 	return page;
576 
577 err:
578 	mpol_cond_put(mpol);
579 	return NULL;
580 }
581 
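/*
 * Take a huge page out of the hstate accounting, clear the page flags
 * left over from its time in the pool and return it to the buddy
 * allocator.  Must not be called for gigantic (boot-allocated) pages.
 */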
582 static void update_and_free_page(struct hstate *h, struct page *page)
583 {
584 	int i;
585 
586 	VM_BUG_ON(h->order >= MAX_ORDER);
587 
588 	h->nr_huge_pages--;
589 	h->nr_huge_pages_node[page_to_nid(page)]--;
590 	for (i = 0; i < pages_per_huge_page(h); i++) {
591 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
592 				1 << PG_referenced | 1 << PG_dirty |
593 				1 << PG_active | 1 << PG_reserved |
594 				1 << PG_private | 1 << PG_writeback);
595 	}
596 	set_compound_page_dtor(page, NULL);
597 	set_page_refcounted(page);
598 	arch_release_hugepage(page);
599 	__free_pages(page, huge_page_order(h));
600 }
601 
602 struct hstate *size_to_hstate(unsigned long size)
603 {
604 	struct hstate *h;
605 
606 	for_each_hstate(h) {
607 		if (huge_page_size(h) == size)
608 			return h;
609 	}
610 	return NULL;
611 }
612 
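/*
 * Compound page destructor for huge pages, run when the last reference
 * is dropped.  Surplus pages (below MAX_ORDER) are handed back to the
 * buddy allocator; everything else goes back on the free list.  The
 * page is also credited back to its subpool.
 */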
613 static void free_huge_page(struct page *page)
614 {
615 	/*
616 	 * Can't pass hstate in here because it is called from the
617 	 * compound page destructor.
618 	 */
619 	struct hstate *h = page_hstate(page);
620 	int nid = page_to_nid(page);
621 	struct hugepage_subpool *spool =
622 		(struct hugepage_subpool *)page_private(page);
623 
624 	set_page_private(page, 0);
625 	page->mapping = NULL;
626 	BUG_ON(page_count(page));
627 	BUG_ON(page_mapcount(page));
628 	INIT_LIST_HEAD(&page->lru);
629 
630 	spin_lock(&hugetlb_lock);
631 	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
632 		update_and_free_page(h, page);
633 		h->surplus_huge_pages--;
634 		h->surplus_huge_pages_node[nid]--;
635 	} else {
636 		enqueue_huge_page(h, page);
637 	}
638 	spin_unlock(&hugetlb_lock);
639 	hugepage_subpool_put_pages(spool, 1);
640 }
641 
642 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
643 {
644 	set_compound_page_dtor(page, free_huge_page);
645 	spin_lock(&hugetlb_lock);
646 	h->nr_huge_pages++;
647 	h->nr_huge_pages_node[nid]++;
648 	spin_unlock(&hugetlb_lock);
649 	put_page(page); /* free it into the hugepage allocator */
650 }
651 
652 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
653 {
654 	int i;
655 	int nr_pages = 1 << order;
656 	struct page *p = page + 1;
657 
658 	/* we rely on prep_new_huge_page to set the destructor */
659 	set_compound_order(page, order);
660 	__SetPageHead(page);
661 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
662 		__SetPageTail(p);
663 		set_page_count(p, 0);
664 		p->first_page = page;
665 	}
666 }
667 
668 int PageHuge(struct page *page)
669 {
670 	compound_page_dtor *dtor;
671 
672 	if (!PageCompound(page))
673 		return 0;
674 
675 	page = compound_head(page);
676 	dtor = get_compound_page_dtor(page);
677 
678 	return dtor == free_huge_page;
679 }
680 EXPORT_SYMBOL_GPL(PageHuge);
681 
682 /*
683  * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
684  * normal or transparent huge pages.
685  */
686 int PageHeadHuge(struct page *page_head)
687 {
688 	compound_page_dtor *dtor;
689 
690 	if (!PageHead(page_head))
691 		return 0;
692 
693 	dtor = get_compound_page_dtor(page_head);
694 
695 	return dtor == free_huge_page;
696 }
697 EXPORT_SYMBOL_GPL(PageHeadHuge);
698 
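/*
 * Return the index of a page within its mapping in base (PAGE_SIZE)
 * units, handling tail pages of compound/huge pages.
 */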
699 pgoff_t __basepage_index(struct page *page)
700 {
701 	struct page *page_head = compound_head(page);
702 	pgoff_t index = page_index(page_head);
703 	unsigned long compound_idx;
704 
705 	if (!PageHuge(page_head))
706 		return page_index(page);
707 
708 	if (compound_order(page_head) >= MAX_ORDER)
709 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
710 	else
711 		compound_idx = page - page_head;
712 
713 	return (index << compound_order(page_head)) + compound_idx;
714 }
715 
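/*
 * Allocate a fresh huge page for the pool from the buddy allocator on
 * the given node.  Returns NULL for gigantic orders or on failure.
 */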
716 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
717 {
718 	struct page *page;
719 
720 	if (h->order >= MAX_ORDER)
721 		return NULL;
722 
723 	page = alloc_pages_exact_node(nid,
724 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
725 						__GFP_REPEAT|__GFP_NOWARN,
726 		huge_page_order(h));
727 	if (page) {
728 		if (arch_prepare_hugepage(page)) {
729 			__free_pages(page, huge_page_order(h));
730 			return NULL;
731 		}
732 		prep_new_huge_page(h, page, nid);
733 	}
734 
735 	return page;
736 }
737 
738 /*
739  * common helper functions for hstate_next_node_to_{alloc|free}.
740  * We may have allocated or freed a huge page based on a different
741  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
742  * be outside of *nodes_allowed.  Ensure that we use an allowed
743  * node for alloc or free.
744  */
745 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
746 {
747 	nid = next_node(nid, *nodes_allowed);
748 	if (nid == MAX_NUMNODES)
749 		nid = first_node(*nodes_allowed);
750 	VM_BUG_ON(nid >= MAX_NUMNODES);
751 
752 	return nid;
753 }
754 
755 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
756 {
757 	if (!node_isset(nid, *nodes_allowed))
758 		nid = next_node_allowed(nid, nodes_allowed);
759 	return nid;
760 }
761 
762 /*
763  * returns the previously saved node ["this node"] from which to
764  * allocate a persistent huge page for the pool and advance the
765  * next node from which to allocate, handling wrap at end of node
766  * mask.
767  */
768 static int hstate_next_node_to_alloc(struct hstate *h,
769 					nodemask_t *nodes_allowed)
770 {
771 	int nid;
772 
773 	VM_BUG_ON(!nodes_allowed);
774 
775 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
776 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
777 
778 	return nid;
779 }
780 
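/*
 * Allocate one fresh huge page, trying the allowed nodes round-robin
 * starting from the hstate's saved next node.  Returns 1 on success,
 * 0 if no node could satisfy the allocation.
 */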
781 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
782 {
783 	struct page *page;
784 	int start_nid;
785 	int next_nid;
786 	int ret = 0;
787 
788 	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
789 	next_nid = start_nid;
790 
791 	do {
792 		page = alloc_fresh_huge_page_node(h, next_nid);
793 		if (page) {
794 			ret = 1;
795 			break;
796 		}
797 		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
798 	} while (next_nid != start_nid);
799 
800 	if (ret)
801 		count_vm_event(HTLB_BUDDY_PGALLOC);
802 	else
803 		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
804 
805 	return ret;
806 }
807 
808 /*
809  * helper for free_pool_huge_page() - return the previously saved
810  * node ["this node"] from which to free a huge page.  Advance the
811  * next node id whether or not we find a free huge page to free so
812  * that the next attempt to free addresses the next node.
813  */
814 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
815 {
816 	int nid;
817 
818 	VM_BUG_ON(!nodes_allowed);
819 
820 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
821 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
822 
823 	return nid;
824 }
825 
826 /*
827  * Free huge page from pool from next node to free.
828  * Attempt to keep persistent huge pages more or less
829  * balanced over allowed nodes.
830  * Called with hugetlb_lock locked.
831  */
832 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
833 							 bool acct_surplus)
834 {
835 	int start_nid;
836 	int next_nid;
837 	int ret = 0;
838 
839 	start_nid = hstate_next_node_to_free(h, nodes_allowed);
840 	next_nid = start_nid;
841 
842 	do {
843 		/*
844 		 * If we're returning unused surplus pages, only examine
845 		 * nodes with surplus pages.
846 		 */
847 		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
848 		    !list_empty(&h->hugepage_freelists[next_nid])) {
849 			struct page *page =
850 				list_entry(h->hugepage_freelists[next_nid].next,
851 					  struct page, lru);
852 			list_del(&page->lru);
853 			h->free_huge_pages--;
854 			h->free_huge_pages_node[next_nid]--;
855 			if (acct_surplus) {
856 				h->surplus_huge_pages--;
857 				h->surplus_huge_pages_node[next_nid]--;
858 			}
859 			update_and_free_page(h, page);
860 			ret = 1;
861 			break;
862 		}
863 		next_nid = hstate_next_node_to_free(h, nodes_allowed);
864 	} while (next_nid != start_nid);
865 
866 	return ret;
867 }
868 
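/*
 * Allocate a surplus huge page straight from the buddy allocator,
 * subject to nr_overcommit_huge_pages.  Used when the persistent pool
 * cannot satisfy a request.
 */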
869 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
870 {
871 	struct page *page;
872 	unsigned int r_nid;
873 
874 	if (h->order >= MAX_ORDER)
875 		return NULL;
876 
877 	/*
878 	 * Assume we will successfully allocate the surplus page to
879 	 * prevent racing processes from causing the surplus to exceed
880 	 * overcommit
881 	 *
882 	 * This however introduces a different race, where a process B
883 	 * tries to grow the static hugepage pool while alloc_pages() is
884 	 * called by process A. B will only examine the per-node
885 	 * counters in determining if surplus huge pages can be
886 	 * converted to normal huge pages in adjust_pool_surplus(). A
887 	 * won't be able to increment the per-node counter, until the
888 	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
889 	 * no more huge pages can be converted from surplus to normal
890 	 * state (and doesn't try to convert again). Thus, we have a
891 	 * case where a surplus huge page exists, the pool is grown, and
892 	 * the surplus huge page still exists after, even though it
893 	 * should just have been converted to a normal huge page. This
894 	 * does not leak memory, though, as the hugepage will be freed
895 	 * once it is out of use. It also does not allow the counters to
896 	 * go out of whack in adjust_pool_surplus() as we don't modify
897 	 * the node values until we've gotten the hugepage and only the
898 	 * per-node value is checked there.
899 	 */
900 	spin_lock(&hugetlb_lock);
901 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
902 		spin_unlock(&hugetlb_lock);
903 		return NULL;
904 	} else {
905 		h->nr_huge_pages++;
906 		h->surplus_huge_pages++;
907 	}
908 	spin_unlock(&hugetlb_lock);
909 
910 	if (nid == NUMA_NO_NODE)
911 		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
912 				   __GFP_REPEAT|__GFP_NOWARN,
913 				   huge_page_order(h));
914 	else
915 		page = alloc_pages_exact_node(nid,
916 			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
917 			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
918 
919 	if (page && arch_prepare_hugepage(page)) {
920 		__free_pages(page, huge_page_order(h));
921 		page = NULL;
922 	}
923 
924 	spin_lock(&hugetlb_lock);
925 	if (page) {
926 		r_nid = page_to_nid(page);
927 		set_compound_page_dtor(page, free_huge_page);
928 		/*
929 		 * We incremented the global counters already
930 		 */
931 		h->nr_huge_pages_node[r_nid]++;
932 		h->surplus_huge_pages_node[r_nid]++;
933 		__count_vm_event(HTLB_BUDDY_PGALLOC);
934 	} else {
935 		h->nr_huge_pages--;
936 		h->surplus_huge_pages--;
937 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
938 	}
939 	spin_unlock(&hugetlb_lock);
940 
941 	return page;
942 }
943 
944 /*
945  * This allocation function is useful in the context where vma is irrelevant.
946  * E.g. soft-offlining uses this function because it only cares about the
947  * physical address of the error page.
948  */
949 struct page *alloc_huge_page_node(struct hstate *h, int nid)
950 {
951 	struct page *page;
952 
953 	spin_lock(&hugetlb_lock);
954 	page = dequeue_huge_page_node(h, nid);
955 	spin_unlock(&hugetlb_lock);
956 
957 	if (!page)
958 		page = alloc_buddy_huge_page(h, nid);
959 
960 	return page;
961 }
962 
963 /*
964  * Increase the hugetlb pool such that it can accommodate a reservation
965  * of size 'delta'.
966  */
967 static int gather_surplus_pages(struct hstate *h, int delta)
968 {
969 	struct list_head surplus_list;
970 	struct page *page, *tmp;
971 	int ret, i;
972 	int needed, allocated;
973 	bool alloc_ok = true;
974 
975 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
976 	if (needed <= 0) {
977 		h->resv_huge_pages += delta;
978 		return 0;
979 	}
980 
981 	allocated = 0;
982 	INIT_LIST_HEAD(&surplus_list);
983 
984 	ret = -ENOMEM;
985 retry:
986 	spin_unlock(&hugetlb_lock);
987 	for (i = 0; i < needed; i++) {
988 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
989 		if (!page) {
990 			alloc_ok = false;
991 			break;
992 		}
993 		list_add(&page->lru, &surplus_list);
994 	}
995 	allocated += i;
996 
997 	/*
998 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
999 	 * because either resv_huge_pages or free_huge_pages may have changed.
1000 	 */
1001 	spin_lock(&hugetlb_lock);
1002 	needed = (h->resv_huge_pages + delta) -
1003 			(h->free_huge_pages + allocated);
1004 	if (needed > 0) {
1005 		if (alloc_ok)
1006 			goto retry;
1007 		/*
1008 		 * We were not able to allocate enough pages to
1009 		 * satisfy the entire reservation so we free what
1010 		 * we've allocated so far.
1011 		 */
1012 		goto free;
1013 	}
1014 	/*
1015 	 * The surplus_list now contains _at_least_ the number of extra pages
1016 	 * needed to accommodate the reservation.  Add the appropriate number
1017 	 * of pages to the hugetlb pool and free the extras back to the buddy
1018 	 * allocator.  Commit the entire reservation here to prevent another
1019 	 * process from stealing the pages as they are added to the pool but
1020 	 * before they are reserved.
1021 	 */
1022 	needed += allocated;
1023 	h->resv_huge_pages += delta;
1024 	ret = 0;
1025 
1026 	/* Free the needed pages to the hugetlb pool */
1027 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1028 		if ((--needed) < 0)
1029 			break;
1030 		list_del(&page->lru);
1031 		/*
1032 		 * This page is now managed by the hugetlb allocator and has
1033 		 * no users -- drop the buddy allocator's reference.
1034 		 */
1035 		put_page_testzero(page);
1036 		VM_BUG_ON(page_count(page));
1037 		enqueue_huge_page(h, page);
1038 	}
1039 free:
1040 	spin_unlock(&hugetlb_lock);
1041 
1042 	/* Free unnecessary surplus pages to the buddy allocator */
1043 	if (!list_empty(&surplus_list)) {
1044 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1045 			list_del(&page->lru);
1046 			put_page(page);
1047 		}
1048 	}
1049 	spin_lock(&hugetlb_lock);
1050 
1051 	return ret;
1052 }
1053 
1054 /*
1055  * When releasing a hugetlb pool reservation, any surplus pages that were
1056  * allocated to satisfy the reservation must be explicitly freed if they were
1057  * never used.
1058  * Called with hugetlb_lock held.
1059  */
1060 static void return_unused_surplus_pages(struct hstate *h,
1061 					unsigned long unused_resv_pages)
1062 {
1063 	unsigned long nr_pages;
1064 
1065 	/* Uncommit the reservation */
1066 	h->resv_huge_pages -= unused_resv_pages;
1067 
1068 	/* Cannot return gigantic pages currently */
1069 	if (h->order >= MAX_ORDER)
1070 		return;
1071 
1072 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1073 
1074 	/*
1075 	 * We want to release as many surplus pages as possible, spread
1076 	 * evenly across all nodes with memory. Iterate across these nodes
1077 	 * until we can no longer free unreserved surplus pages. This occurs
1078 	 * when the nodes with surplus pages have no free pages.
1079 	 * free_pool_huge_page() will balance the freed pages across the
1080 	 * on-line nodes with memory and will handle the hstate accounting.
1081 	 */
1082 	while (nr_pages--) {
1083 		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
1084 			break;
1085 		cond_resched_lock(&hugetlb_lock);
1086 	}
1087 }
1088 
1089 /*
1090  * Determine if the huge page at addr within the vma has an associated
1091  * reservation.  Where it does not we will need to logically increase
1092  * reservation and actually increase subpool usage before an allocation
1093  * can occur.  Where any new reservation would be required the
1094  * reservation change is prepared, but not committed.  Once the page
1095  * has been allocated from the subpool and instantiated the change should
1096  * be committed via vma_commit_reservation.  No action is required on
1097  * failure.
1098  */
1099 static long vma_needs_reservation(struct hstate *h,
1100 			struct vm_area_struct *vma, unsigned long addr)
1101 {
1102 	struct address_space *mapping = vma->vm_file->f_mapping;
1103 	struct inode *inode = mapping->host;
1104 
1105 	if (vma->vm_flags & VM_MAYSHARE) {
1106 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1107 		return region_chg(&inode->i_mapping->private_list,
1108 							idx, idx + 1);
1109 
1110 	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1111 		return 1;
1112 
1113 	} else  {
1114 		long err;
1115 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1116 		struct resv_map *reservations = vma_resv_map(vma);
1117 
1118 		err = region_chg(&reservations->regions, idx, idx + 1);
1119 		if (err < 0)
1120 			return err;
1121 		return 0;
1122 	}
1123 }
1124 static void vma_commit_reservation(struct hstate *h,
1125 			struct vm_area_struct *vma, unsigned long addr)
1126 {
1127 	struct address_space *mapping = vma->vm_file->f_mapping;
1128 	struct inode *inode = mapping->host;
1129 
1130 	if (vma->vm_flags & VM_MAYSHARE) {
1131 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1132 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
1133 
1134 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1135 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1136 		struct resv_map *reservations = vma_resv_map(vma);
1137 
1138 		/* Mark this page used in the map. */
1139 		region_add(&reservations->regions, idx, idx + 1);
1140 	}
1141 }
1142 
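/*
 * Allocate a huge page for a fault at 'addr' in 'vma'.  Charges the
 * subpool when a new reservation is needed and commits the reservation
 * on success.  Returns an ERR_PTR encoding a VM_FAULT_* code on failure.
 */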
1143 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1144 				    unsigned long addr, int avoid_reserve)
1145 {
1146 	struct hugepage_subpool *spool = subpool_vma(vma);
1147 	struct hstate *h = hstate_vma(vma);
1148 	struct page *page;
1149 	long chg;
1150 
1151 	/*
1152 	 * Processes that did not create the mapping will have no
1153 	 * reserves and will not have accounted against the subpool
1154 	 * limit. Check that the subpool limit can be met before
1155 	 * satisfying the allocation. MAP_NORESERVE mappings may also
1156 	 * need pages and subpool limit allocated if no reserve
1157 	 * mapping overlaps.
1158 	 */
1159 	chg = vma_needs_reservation(h, vma, addr);
1160 	if (chg < 0)
1161 		return ERR_PTR(-VM_FAULT_OOM);
1162 	if (chg)
1163 		if (hugepage_subpool_get_pages(spool, chg))
1164 			return ERR_PTR(-VM_FAULT_SIGBUS);
1165 
1166 	spin_lock(&hugetlb_lock);
1167 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1168 	spin_unlock(&hugetlb_lock);
1169 
1170 	if (!page) {
1171 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1172 		if (!page) {
1173 			hugepage_subpool_put_pages(spool, chg);
1174 			return ERR_PTR(-VM_FAULT_SIGBUS);
1175 		}
1176 	}
1177 
1178 	set_page_private(page, (unsigned long)spool);
1179 
1180 	vma_commit_reservation(h, vma, addr);
1181 
1182 	return page;
1183 }
1184 
1185 int __weak alloc_bootmem_huge_page(struct hstate *h)
1186 {
1187 	struct huge_bootmem_page *m;
1188 	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1189 
1190 	while (nr_nodes) {
1191 		void *addr;
1192 
1193 		addr = __alloc_bootmem_node_nopanic(
1194 				NODE_DATA(hstate_next_node_to_alloc(h,
1195 						&node_states[N_HIGH_MEMORY])),
1196 				huge_page_size(h), huge_page_size(h), 0);
1197 
1198 		if (addr) {
1199 			/*
1200 			 * Use the beginning of the huge page to store the
1201 			 * huge_bootmem_page struct (until gather_bootmem
1202 			 * puts them into the mem_map).
1203 			 */
1204 			m = addr;
1205 			goto found;
1206 		}
1207 		nr_nodes--;
1208 	}
1209 	return 0;
1210 
1211 found:
1212 	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1213 	/* Put them into a private list first because mem_map is not up yet */
1214 	list_add(&m->list, &huge_boot_pages);
1215 	m->hstate = h;
1216 	return 1;
1217 }
1218 
1219 static void prep_compound_huge_page(struct page *page, int order)
1220 {
1221 	if (unlikely(order > (MAX_ORDER - 1)))
1222 		prep_compound_gigantic_page(page, order);
1223 	else
1224 		prep_compound_page(page, order);
1225 }
1226 
1227 /* Put bootmem huge pages into the standard lists after mem_map is up */
1228 static void __init gather_bootmem_prealloc(void)
1229 {
1230 	struct huge_bootmem_page *m;
1231 
1232 	list_for_each_entry(m, &huge_boot_pages, list) {
1233 		struct hstate *h = m->hstate;
1234 		struct page *page;
1235 
1236 #ifdef CONFIG_HIGHMEM
1237 		page = pfn_to_page(m->phys >> PAGE_SHIFT);
1238 		free_bootmem_late((unsigned long)m,
1239 				  sizeof(struct huge_bootmem_page));
1240 #else
1241 		page = virt_to_page(m);
1242 #endif
1243 		__ClearPageReserved(page);
1244 		WARN_ON(page_count(page) != 1);
1245 		prep_compound_huge_page(page, h->order);
1246 		prep_new_huge_page(h, page, page_to_nid(page));
1247 		/*
1248 		 * If we had gigantic hugepages allocated at boot time, we need
1249 		 * to restore the 'stolen' pages to totalram_pages in order to
1250 		 * fix confusing memory reports from free(1) and other
1251 		 * side-effects, like CommitLimit going negative.
1252 		 */
1253 		if (h->order > (MAX_ORDER - 1))
1254 			totalram_pages += 1 << h->order;
1255 	}
1256 }
1257 
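/*
 * Boot-time preallocation of max_huge_pages pages for an hstate:
 * gigantic orders come from the bootmem allocator, everything else from
 * the buddy allocator.  max_huge_pages is clamped to what was obtained.
 */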
1258 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1259 {
1260 	unsigned long i;
1261 
1262 	for (i = 0; i < h->max_huge_pages; ++i) {
1263 		if (h->order >= MAX_ORDER) {
1264 			if (!alloc_bootmem_huge_page(h))
1265 				break;
1266 		} else if (!alloc_fresh_huge_page(h,
1267 					 &node_states[N_HIGH_MEMORY]))
1268 			break;
1269 	}
1270 	h->max_huge_pages = i;
1271 }
1272 
1273 static void __init hugetlb_init_hstates(void)
1274 {
1275 	struct hstate *h;
1276 
1277 	for_each_hstate(h) {
1278 		/* oversize hugepages were init'ed in early boot */
1279 		if (h->order < MAX_ORDER)
1280 			hugetlb_hstate_alloc_pages(h);
1281 	}
1282 }
1283 
1284 static char * __init memfmt(char *buf, unsigned long n)
1285 {
1286 	if (n >= (1UL << 30))
1287 		sprintf(buf, "%lu GB", n >> 30);
1288 	else if (n >= (1UL << 20))
1289 		sprintf(buf, "%lu MB", n >> 20);
1290 	else
1291 		sprintf(buf, "%lu KB", n >> 10);
1292 	return buf;
1293 }
1294 
1295 static void __init report_hugepages(void)
1296 {
1297 	struct hstate *h;
1298 
1299 	for_each_hstate(h) {
1300 		char buf[32];
1301 		printk(KERN_INFO "HugeTLB registered %s page size, "
1302 				 "pre-allocated %ld pages\n",
1303 			memfmt(buf, huge_page_size(h)),
1304 			h->free_huge_pages);
1305 	}
1306 }
1307 
1308 #ifdef CONFIG_HIGHMEM
1309 static void try_to_free_low(struct hstate *h, unsigned long count,
1310 						nodemask_t *nodes_allowed)
1311 {
1312 	int i;
1313 
1314 	if (h->order >= MAX_ORDER)
1315 		return;
1316 
1317 	for_each_node_mask(i, *nodes_allowed) {
1318 		struct page *page, *next;
1319 		struct list_head *freel = &h->hugepage_freelists[i];
1320 		list_for_each_entry_safe(page, next, freel, lru) {
1321 			if (count >= h->nr_huge_pages)
1322 				return;
1323 			if (PageHighMem(page))
1324 				continue;
1325 			list_del(&page->lru);
1326 			update_and_free_page(h, page);
1327 			h->free_huge_pages--;
1328 			h->free_huge_pages_node[page_to_nid(page)]--;
1329 		}
1330 	}
1331 }
1332 #else
1333 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1334 						nodemask_t *nodes_allowed)
1335 {
1336 }
1337 #endif
1338 
1339 /*
1340  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1341  * balanced by operating on them in a round-robin fashion.
1342  * Returns 1 if an adjustment was made.
1343  */
1344 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1345 				int delta)
1346 {
1347 	int start_nid, next_nid;
1348 	int ret = 0;
1349 
1350 	VM_BUG_ON(delta != -1 && delta != 1);
1351 
1352 	if (delta < 0)
1353 		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1354 	else
1355 		start_nid = hstate_next_node_to_free(h, nodes_allowed);
1356 	next_nid = start_nid;
1357 
1358 	do {
1359 		int nid = next_nid;
1360 		if (delta < 0)  {
1361 			/*
1362 			 * To shrink on this node, there must be a surplus page
1363 			 */
1364 			if (!h->surplus_huge_pages_node[nid]) {
1365 				next_nid = hstate_next_node_to_alloc(h,
1366 								nodes_allowed);
1367 				continue;
1368 			}
1369 		}
1370 		if (delta > 0) {
1371 			/*
1372 			 * Surplus cannot exceed the total number of pages
1373 			 */
1374 			if (h->surplus_huge_pages_node[nid] >=
1375 						h->nr_huge_pages_node[nid]) {
1376 				next_nid = hstate_next_node_to_free(h,
1377 								nodes_allowed);
1378 				continue;
1379 			}
1380 		}
1381 
1382 		h->surplus_huge_pages += delta;
1383 		h->surplus_huge_pages_node[nid] += delta;
1384 		ret = 1;
1385 		break;
1386 	} while (next_nid != start_nid);
1387 
1388 	return ret;
1389 }
1390 
1391 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
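/*
 * Resize the persistent huge page pool to 'count' pages, allocating and
 * freeing only on the nodes in 'nodes_allowed'.  Returns the resulting
 * number of persistent (non-surplus) huge pages.
 */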
1392 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1393 						nodemask_t *nodes_allowed)
1394 {
1395 	unsigned long min_count, ret;
1396 
1397 	if (h->order >= MAX_ORDER)
1398 		return h->max_huge_pages;
1399 
1400 	/*
1401 	 * Increase the pool size
1402 	 * First take pages out of surplus state.  Then make up the
1403 	 * remaining difference by allocating fresh huge pages.
1404 	 *
1405 	 * We might race with alloc_buddy_huge_page() here and be unable
1406 	 * to convert a surplus huge page to a normal huge page. That is
1407 	 * not critical, though, it just means the overall size of the
1408 	 * pool might be one hugepage larger than it needs to be, but
1409 	 * within all the constraints specified by the sysctls.
1410 	 */
1411 	spin_lock(&hugetlb_lock);
1412 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1413 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
1414 			break;
1415 	}
1416 
1417 	while (count > persistent_huge_pages(h)) {
1418 		/*
1419 		 * If this allocation races such that we no longer need the
1420 		 * page, free_huge_page will handle it by freeing the page
1421 		 * and reducing the surplus.
1422 		 */
1423 		spin_unlock(&hugetlb_lock);
1424 		ret = alloc_fresh_huge_page(h, nodes_allowed);
1425 		spin_lock(&hugetlb_lock);
1426 		if (!ret)
1427 			goto out;
1428 
1429 		/* Bail for signals. Probably ctrl-c from user */
1430 		if (signal_pending(current))
1431 			goto out;
1432 	}
1433 
1434 	/*
1435 	 * Decrease the pool size
1436 	 * First return free pages to the buddy allocator (being careful
1437 	 * to keep enough around to satisfy reservations).  Then place
1438 	 * pages into surplus state as needed so the pool will shrink
1439 	 * to the desired size as pages become free.
1440 	 *
1441 	 * By placing pages into the surplus state independent of the
1442 	 * overcommit value, we are allowing the surplus pool size to
1443 	 * exceed overcommit. There are few sane options here. Since
1444 	 * alloc_buddy_huge_page() is checking the global counter,
1445 	 * though, we'll note that we're not allowed to exceed surplus
1446 	 * and won't grow the pool anywhere else. Not until one of the
1447 	 * sysctls are changed, or the surplus pages go out of use.
1448 	 */
1449 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1450 	min_count = max(count, min_count);
1451 	try_to_free_low(h, min_count, nodes_allowed);
1452 	while (min_count < persistent_huge_pages(h)) {
1453 		if (!free_pool_huge_page(h, nodes_allowed, 0))
1454 			break;
1455 		cond_resched_lock(&hugetlb_lock);
1456 	}
1457 	while (count < persistent_huge_pages(h)) {
1458 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
1459 			break;
1460 	}
1461 out:
1462 	ret = persistent_huge_pages(h);
1463 	spin_unlock(&hugetlb_lock);
1464 	return ret;
1465 }
1466 
1467 #define HSTATE_ATTR_RO(_name) \
1468 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1469 
1470 #define HSTATE_ATTR(_name) \
1471 	static struct kobj_attribute _name##_attr = \
1472 		__ATTR(_name, 0644, _name##_show, _name##_store)
1473 
1474 static struct kobject *hugepages_kobj;
1475 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1476 
1477 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1478 
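/*
 * Map a sysfs kobject back to its hstate.  Global attributes return
 * NUMA_NO_NODE via nidp; per-node attributes return the node id.
 */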
1479 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1480 {
1481 	int i;
1482 
1483 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
1484 		if (hstate_kobjs[i] == kobj) {
1485 			if (nidp)
1486 				*nidp = NUMA_NO_NODE;
1487 			return &hstates[i];
1488 		}
1489 
1490 	return kobj_to_node_hstate(kobj, nidp);
1491 }
1492 
1493 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1494 					struct kobj_attribute *attr, char *buf)
1495 {
1496 	struct hstate *h;
1497 	unsigned long nr_huge_pages;
1498 	int nid;
1499 
1500 	h = kobj_to_hstate(kobj, &nid);
1501 	if (nid == NUMA_NO_NODE)
1502 		nr_huge_pages = h->nr_huge_pages;
1503 	else
1504 		nr_huge_pages = h->nr_huge_pages_node[nid];
1505 
1506 	return sprintf(buf, "%lu\n", nr_huge_pages);
1507 }
1508 
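/*
 * Common store handler for nr_hugepages and nr_hugepages_mempolicy,
 * global and per-node.  Builds the allowed nodemask and resizes the
 * pool via set_max_huge_pages().
 */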
1509 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1510 			struct kobject *kobj, struct kobj_attribute *attr,
1511 			const char *buf, size_t len)
1512 {
1513 	int err;
1514 	int nid;
1515 	unsigned long count;
1516 	struct hstate *h;
1517 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1518 
1519 	err = strict_strtoul(buf, 10, &count);
1520 	if (err)
1521 		goto out;
1522 
1523 	h = kobj_to_hstate(kobj, &nid);
1524 	if (h->order >= MAX_ORDER) {
1525 		err = -EINVAL;
1526 		goto out;
1527 	}
1528 
1529 	if (nid == NUMA_NO_NODE) {
1530 		/*
1531 		 * global hstate attribute
1532 		 */
1533 		if (!(obey_mempolicy &&
1534 				init_nodemask_of_mempolicy(nodes_allowed))) {
1535 			NODEMASK_FREE(nodes_allowed);
1536 			nodes_allowed = &node_states[N_HIGH_MEMORY];
1537 		}
1538 	} else if (nodes_allowed) {
1539 		/*
1540 		 * per node hstate attribute: adjust count to global,
1541 		 * but restrict alloc/free to the specified node.
1542 		 */
1543 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1544 		init_nodemask_of_node(nodes_allowed, nid);
1545 	} else
1546 		nodes_allowed = &node_states[N_HIGH_MEMORY];
1547 
1548 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1549 
1550 	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1551 		NODEMASK_FREE(nodes_allowed);
1552 
1553 	return len;
1554 out:
1555 	NODEMASK_FREE(nodes_allowed);
1556 	return err;
1557 }
1558 
1559 static ssize_t nr_hugepages_show(struct kobject *kobj,
1560 				       struct kobj_attribute *attr, char *buf)
1561 {
1562 	return nr_hugepages_show_common(kobj, attr, buf);
1563 }
1564 
1565 static ssize_t nr_hugepages_store(struct kobject *kobj,
1566 	       struct kobj_attribute *attr, const char *buf, size_t len)
1567 {
1568 	return nr_hugepages_store_common(false, kobj, attr, buf, len);
1569 }
1570 HSTATE_ATTR(nr_hugepages);
1571 
1572 #ifdef CONFIG_NUMA
1573 
1574 /*
1575  * hstate attribute for optionally mempolicy-based constraint on persistent
1576  * huge page alloc/free.
1577  */
1578 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1579 				       struct kobj_attribute *attr, char *buf)
1580 {
1581 	return nr_hugepages_show_common(kobj, attr, buf);
1582 }
1583 
1584 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1585 	       struct kobj_attribute *attr, const char *buf, size_t len)
1586 {
1587 	return nr_hugepages_store_common(true, kobj, attr, buf, len);
1588 }
1589 HSTATE_ATTR(nr_hugepages_mempolicy);
1590 #endif
1591 
1592 
1593 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1594 					struct kobj_attribute *attr, char *buf)
1595 {
1596 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1597 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1598 }
1599 
1600 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1601 		struct kobj_attribute *attr, const char *buf, size_t count)
1602 {
1603 	int err;
1604 	unsigned long input;
1605 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1606 
1607 	if (h->order >= MAX_ORDER)
1608 		return -EINVAL;
1609 
1610 	err = strict_strtoul(buf, 10, &input);
1611 	if (err)
1612 		return err;
1613 
1614 	spin_lock(&hugetlb_lock);
1615 	h->nr_overcommit_huge_pages = input;
1616 	spin_unlock(&hugetlb_lock);
1617 
1618 	return count;
1619 }
1620 HSTATE_ATTR(nr_overcommit_hugepages);
1621 
1622 static ssize_t free_hugepages_show(struct kobject *kobj,
1623 					struct kobj_attribute *attr, char *buf)
1624 {
1625 	struct hstate *h;
1626 	unsigned long free_huge_pages;
1627 	int nid;
1628 
1629 	h = kobj_to_hstate(kobj, &nid);
1630 	if (nid == NUMA_NO_NODE)
1631 		free_huge_pages = h->free_huge_pages;
1632 	else
1633 		free_huge_pages = h->free_huge_pages_node[nid];
1634 
1635 	return sprintf(buf, "%lu\n", free_huge_pages);
1636 }
1637 HSTATE_ATTR_RO(free_hugepages);
1638 
1639 static ssize_t resv_hugepages_show(struct kobject *kobj,
1640 					struct kobj_attribute *attr, char *buf)
1641 {
1642 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1643 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
1644 }
1645 HSTATE_ATTR_RO(resv_hugepages);
1646 
1647 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1648 					struct kobj_attribute *attr, char *buf)
1649 {
1650 	struct hstate *h;
1651 	unsigned long surplus_huge_pages;
1652 	int nid;
1653 
1654 	h = kobj_to_hstate(kobj, &nid);
1655 	if (nid == NUMA_NO_NODE)
1656 		surplus_huge_pages = h->surplus_huge_pages;
1657 	else
1658 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
1659 
1660 	return sprintf(buf, "%lu\n", surplus_huge_pages);
1661 }
1662 HSTATE_ATTR_RO(surplus_hugepages);
1663 
1664 static struct attribute *hstate_attrs[] = {
1665 	&nr_hugepages_attr.attr,
1666 	&nr_overcommit_hugepages_attr.attr,
1667 	&free_hugepages_attr.attr,
1668 	&resv_hugepages_attr.attr,
1669 	&surplus_hugepages_attr.attr,
1670 #ifdef CONFIG_NUMA
1671 	&nr_hugepages_mempolicy_attr.attr,
1672 #endif
1673 	NULL,
1674 };
1675 
1676 static struct attribute_group hstate_attr_group = {
1677 	.attrs = hstate_attrs,
1678 };
1679 
1680 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1681 				    struct kobject **hstate_kobjs,
1682 				    struct attribute_group *hstate_attr_group)
1683 {
1684 	int retval;
1685 	int hi = h - hstates;
1686 
1687 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1688 	if (!hstate_kobjs[hi])
1689 		return -ENOMEM;
1690 
1691 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1692 	if (retval)
1693 		kobject_put(hstate_kobjs[hi]);
1694 
1695 	return retval;
1696 }
1697 
1698 static void __init hugetlb_sysfs_init(void)
1699 {
1700 	struct hstate *h;
1701 	int err;
1702 
1703 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1704 	if (!hugepages_kobj)
1705 		return;
1706 
1707 	for_each_hstate(h) {
1708 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1709 					 hstate_kobjs, &hstate_attr_group);
1710 		if (err)
1711 			printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
1712 								h->name);
1713 	}
1714 }
1715 
1716 #ifdef CONFIG_NUMA
1717 
1718 /*
1719  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1720  * with node devices in node_devices[] using a parallel array.  The array
1721  * index of a node device or _hstate == node id.
1722  * This is here to avoid any static dependency of the node device driver, in
1723  * the base kernel, on the hugetlb module.
1724  */
1725 struct node_hstate {
1726 	struct kobject		*hugepages_kobj;
1727 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
1728 };
1729 struct node_hstate node_hstates[MAX_NUMNODES];
1730 
1731 /*
1732  * A subset of global hstate attributes for node devices
1733  */
1734 static struct attribute *per_node_hstate_attrs[] = {
1735 	&nr_hugepages_attr.attr,
1736 	&free_hugepages_attr.attr,
1737 	&surplus_hugepages_attr.attr,
1738 	NULL,
1739 };
1740 
1741 static struct attribute_group per_node_hstate_attr_group = {
1742 	.attrs = per_node_hstate_attrs,
1743 };
1744 
1745 /*
1746  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1747  * Returns node id via non-NULL nidp.
1748  */
1749 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1750 {
1751 	int nid;
1752 
1753 	for (nid = 0; nid < nr_node_ids; nid++) {
1754 		struct node_hstate *nhs = &node_hstates[nid];
1755 		int i;
1756 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
1757 			if (nhs->hstate_kobjs[i] == kobj) {
1758 				if (nidp)
1759 					*nidp = nid;
1760 				return &hstates[i];
1761 			}
1762 	}
1763 
1764 	BUG();
1765 	return NULL;
1766 }
1767 
1768 /*
1769  * Unregister hstate attributes from a single node device.
1770  * No-op if no hstate attributes attached.
1771  */
1772 void hugetlb_unregister_node(struct node *node)
1773 {
1774 	struct hstate *h;
1775 	struct node_hstate *nhs = &node_hstates[node->dev.id];
1776 
1777 	if (!nhs->hugepages_kobj)
1778 		return;		/* no hstate attributes */
1779 
1780 	for_each_hstate(h)
1781 		if (nhs->hstate_kobjs[h - hstates]) {
1782 			kobject_put(nhs->hstate_kobjs[h - hstates]);
1783 			nhs->hstate_kobjs[h - hstates] = NULL;
1784 		}
1785 
1786 	kobject_put(nhs->hugepages_kobj);
1787 	nhs->hugepages_kobj = NULL;
1788 }
1789 
1790 /*
1791  * hugetlb module exit:  unregister hstate attributes from node devices
1792  * that have them.
1793  */
1794 static void hugetlb_unregister_all_nodes(void)
1795 {
1796 	int nid;
1797 
1798 	/*
1799 	 * disable node device registrations.
1800 	 */
1801 	register_hugetlbfs_with_node(NULL, NULL);
1802 
1803 	/*
1804 	 * remove hstate attributes from any nodes that have them.
1805 	 */
1806 	for (nid = 0; nid < nr_node_ids; nid++)
1807 		hugetlb_unregister_node(&node_devices[nid]);
1808 }
1809 
1810 /*
1811  * Register hstate attributes for a single node device.
1812  * No-op if attributes already registered.
1813  */
1814 void hugetlb_register_node(struct node *node)
1815 {
1816 	struct hstate *h;
1817 	struct node_hstate *nhs = &node_hstates[node->dev.id];
1818 	int err;
1819 
1820 	if (nhs->hugepages_kobj)
1821 		return;		/* already allocated */
1822 
1823 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1824 							&node->dev.kobj);
1825 	if (!nhs->hugepages_kobj)
1826 		return;
1827 
1828 	for_each_hstate(h) {
1829 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1830 						nhs->hstate_kobjs,
1831 						&per_node_hstate_attr_group);
1832 		if (err) {
1833 			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1834 					" for node %d\n",
1835 						h->name, node->dev.id);
1836 			hugetlb_unregister_node(node);
1837 			break;
1838 		}
1839 	}
1840 }
1841 
1842 /*
1843  * hugetlb init time:  register hstate attributes for all registered node
1844  * devices of nodes that have memory.  All on-line nodes should have
1845  * registered their associated device by this time.
1846  */
1847 static void hugetlb_register_all_nodes(void)
1848 {
1849 	int nid;
1850 
1851 	for_each_node_state(nid, N_HIGH_MEMORY) {
1852 		struct node *node = &node_devices[nid];
1853 		if (node->dev.id == nid)
1854 			hugetlb_register_node(node);
1855 	}
1856 
1857 	/*
1858 	 * Let the node device driver know we're here so it can
1859 	 * [un]register hstate attributes on node hotplug.
1860 	 */
1861 	register_hugetlbfs_with_node(hugetlb_register_node,
1862 				     hugetlb_unregister_node);
1863 }
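/*
 * Example (userspace sketch, not part of this file): the per-node attribute
 * group registered above hangs off each node device's kobject.  On a typical
 * sysfs layout that is /sys/devices/system/node/node<N>/hugepages/
 * hugepages-<size>kB/ (the path is an assumption derived from the node
 * device location, not defined in this file).
 */
#if 0	/* illustrative userspace code only */
#include <stdio.h>

int main(void)
{
	char path[256], buf[64];
	int nid = 0;	/* node id to query */

	snprintf(path, sizeof(path),
		 "/sys/devices/system/node/node%d/hugepages/hugepages-2048kB/free_hugepages",
		 nid);
	FILE *f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("node %d free 2MB huge pages: %s", nid, buf);
	if (f)
		fclose(f);
	return 0;
}
#endif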
1864 #else	/* !CONFIG_NUMA */
1865 
1866 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1867 {
1868 	BUG();
1869 	if (nidp)
1870 		*nidp = -1;
1871 	return NULL;
1872 }
1873 
1874 static void hugetlb_unregister_all_nodes(void) { }
1875 
1876 static void hugetlb_register_all_nodes(void) { }
1877 
1878 #endif
1879 
1880 static void __exit hugetlb_exit(void)
1881 {
1882 	struct hstate *h;
1883 
1884 	hugetlb_unregister_all_nodes();
1885 
1886 	for_each_hstate(h) {
1887 		kobject_put(hstate_kobjs[h - hstates]);
1888 	}
1889 
1890 	kobject_put(hugepages_kobj);
1891 }
1892 module_exit(hugetlb_exit);
1893 
1894 static int __init hugetlb_init(void)
1895 {
1896 	/* Some platforms decide whether they support huge pages at boot
1897 	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1898 	 * there is no such support.
1899 	 */
1900 	if (HPAGE_SHIFT == 0)
1901 		return 0;
1902 
1903 	if (!size_to_hstate(default_hstate_size)) {
1904 		default_hstate_size = HPAGE_SIZE;
1905 		if (!size_to_hstate(default_hstate_size))
1906 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1907 	}
1908 	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1909 	if (default_hstate_max_huge_pages)
1910 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1911 
1912 	hugetlb_init_hstates();
1913 
1914 	gather_bootmem_prealloc();
1915 
1916 	report_hugepages();
1917 
1918 	hugetlb_sysfs_init();
1919 
1920 	hugetlb_register_all_nodes();
1921 
1922 	return 0;
1923 }
1924 module_init(hugetlb_init);
1925 
1926 /* Should be called on processing a hugepagesz=... option */
1927 void __init hugetlb_add_hstate(unsigned order)
1928 {
1929 	struct hstate *h;
1930 	unsigned long i;
1931 
1932 	if (size_to_hstate(PAGE_SIZE << order)) {
1933 		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1934 		return;
1935 	}
1936 	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1937 	BUG_ON(order == 0);
1938 	h = &hstates[max_hstate++];
1939 	h->order = order;
1940 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1941 	h->nr_huge_pages = 0;
1942 	h->free_huge_pages = 0;
1943 	for (i = 0; i < MAX_NUMNODES; ++i)
1944 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1945 	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1946 	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1947 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1948 					huge_page_size(h)/1024);
1949 
1950 	parsed_hstate = h;
1951 }
1952 
1953 static int __init hugetlb_nrpages_setup(char *s)
1954 {
1955 	unsigned long *mhp;
1956 	static unsigned long *last_mhp;
1957 
1958 	/*
1959 	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1960 	 * so this hugepages= parameter goes to the "default hstate".
1961 	 */
1962 	if (!max_hstate)
1963 		mhp = &default_hstate_max_huge_pages;
1964 	else
1965 		mhp = &parsed_hstate->max_huge_pages;
1966 
1967 	if (mhp == last_mhp) {
1968 		printk(KERN_WARNING "hugepages= specified twice without "
1969 			"interleaving hugepagesz=, ignoring\n");
1970 		return 1;
1971 	}
1972 
1973 	if (sscanf(s, "%lu", mhp) <= 0)
1974 		*mhp = 0;
1975 
1976 	/*
1977 	 * Global state is always initialized later in hugetlb_init.
1978 	 * But we need to allocate >= MAX_ORDER hstates here early to still
1979 	 * use the bootmem allocator.
1980 	 */
1981 	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1982 		hugetlb_hstate_alloc_pages(parsed_hstate);
1983 
1984 	last_mhp = mhp;
1985 
1986 	return 1;
1987 }
1988 __setup("hugepages=", hugetlb_nrpages_setup);
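/*
 * Example boot command lines for the parameters handled above (a usage note,
 * not code): "hugepages=512" on its own sizes the default hstate, while
 * "hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512" applies each
 * hugepages= count to the hstate named by the preceding hugepagesz= (which
 * reaches hugetlb_add_hstate() above and sets parsed_hstate).  This is also
 * why a repeated hugepages= without an interleaving hugepagesz= is rejected
 * by hugetlb_nrpages_setup().  The sizes shown are examples only.
 */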
1989 
1990 static int __init hugetlb_default_setup(char *s)
1991 {
1992 	default_hstate_size = memparse(s, &s);
1993 	return 1;
1994 }
1995 __setup("default_hugepagesz=", hugetlb_default_setup);
1996 
1997 static unsigned int cpuset_mems_nr(unsigned int *array)
1998 {
1999 	int node;
2000 	unsigned int nr = 0;
2001 
2002 	for_each_node_mask(node, cpuset_current_mems_allowed)
2003 		nr += array[node];
2004 
2005 	return nr;
2006 }
2007 
2008 #ifdef CONFIG_SYSCTL
2009 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2010 			 struct ctl_table *table, int write,
2011 			 void __user *buffer, size_t *length, loff_t *ppos)
2012 {
2013 	struct hstate *h = &default_hstate;
2014 	unsigned long tmp;
2015 	int ret;
2016 
2017 	tmp = h->max_huge_pages;
2018 
2019 	if (write && h->order >= MAX_ORDER)
2020 		return -EINVAL;
2021 
2022 	table->data = &tmp;
2023 	table->maxlen = sizeof(unsigned long);
2024 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2025 	if (ret)
2026 		goto out;
2027 
2028 	if (write) {
2029 		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
2030 						GFP_KERNEL | __GFP_NORETRY);
2031 		if (!(obey_mempolicy &&
2032 			       init_nodemask_of_mempolicy(nodes_allowed))) {
2033 			NODEMASK_FREE(nodes_allowed);
2034 			nodes_allowed = &node_states[N_HIGH_MEMORY];
2035 		}
2036 		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
2037 
2038 		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
2039 			NODEMASK_FREE(nodes_allowed);
2040 	}
2041 out:
2042 	return ret;
2043 }
2044 
2045 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2046 			  void __user *buffer, size_t *length, loff_t *ppos)
2047 {
2048 
2049 	return hugetlb_sysctl_handler_common(false, table, write,
2050 							buffer, length, ppos);
2051 }
2052 
2053 #ifdef CONFIG_NUMA
2054 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2055 			  void __user *buffer, size_t *length, loff_t *ppos)
2056 {
2057 	return hugetlb_sysctl_handler_common(true, table, write,
2058 							buffer, length, ppos);
2059 }
2060 #endif /* CONFIG_NUMA */
2061 
2062 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
2063 			void __user *buffer,
2064 			size_t *length, loff_t *ppos)
2065 {
2066 	proc_dointvec(table, write, buffer, length, ppos);
2067 	if (hugepages_treat_as_movable)
2068 		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
2069 	else
2070 		htlb_alloc_mask = GFP_HIGHUSER;
2071 	return 0;
2072 }
2073 
2074 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2075 			void __user *buffer,
2076 			size_t *length, loff_t *ppos)
2077 {
2078 	struct hstate *h = &default_hstate;
2079 	unsigned long tmp;
2080 	int ret;
2081 
2082 	tmp = h->nr_overcommit_huge_pages;
2083 
2084 	if (write && h->order >= MAX_ORDER)
2085 		return -EINVAL;
2086 
2087 	table->data = &tmp;
2088 	table->maxlen = sizeof(unsigned long);
2089 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2090 	if (ret)
2091 		goto out;
2092 
2093 	if (write) {
2094 		spin_lock(&hugetlb_lock);
2095 		h->nr_overcommit_huge_pages = tmp;
2096 		spin_unlock(&hugetlb_lock);
2097 	}
2098 out:
2099 	return ret;
2100 }
2101 
2102 #endif /* CONFIG_SYSCTL */
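/*
 * Example (userspace sketch, not part of this file): the handlers above are
 * normally wired up to vm.* sysctls; assuming the usual /proc/sys/vm/ names
 * (an assumption, since the table is registered elsewhere), the overcommit
 * limit served by hugetlb_overcommit_handler() can be tuned like this:
 */
#if 0	/* illustrative userspace code only */
#include <stdio.h>

int main(void)
{
	/* Allow up to 64 surplus huge pages of the default size. */
	FILE *f = fopen("/proc/sys/vm/nr_overcommit_hugepages", "w");

	if (!f) {
		perror("nr_overcommit_hugepages");
		return 1;
	}
	fprintf(f, "64\n");
	fclose(f);
	return 0;
}
#endif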
2103 
2104 void hugetlb_report_meminfo(struct seq_file *m)
2105 {
2106 	struct hstate *h = &default_hstate;
2107 	seq_printf(m,
2108 			"HugePages_Total:   %5lu\n"
2109 			"HugePages_Free:    %5lu\n"
2110 			"HugePages_Rsvd:    %5lu\n"
2111 			"HugePages_Surp:    %5lu\n"
2112 			"Hugepagesize:   %8lu kB\n",
2113 			h->nr_huge_pages,
2114 			h->free_huge_pages,
2115 			h->resv_huge_pages,
2116 			h->surplus_huge_pages,
2117 			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2118 }
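/*
 * Example (userspace sketch, not part of this file): the fields printed
 * above surface in /proc/meminfo.  A minimal parser for two of the counters,
 * assuming the format emitted by hugetlb_report_meminfo():
 */
#if 0	/* illustrative userspace code only */
#include <stdio.h>

int main(void)
{
	char line[128];
	unsigned long val;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "HugePages_Total: %lu", &val) == 1)
			printf("total huge pages: %lu\n", val);
		else if (sscanf(line, "HugePages_Free: %lu", &val) == 1)
			printf("free huge pages:  %lu\n", val);
	}
	fclose(f);
	return 0;
}
#endif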
2119 
2120 int hugetlb_report_node_meminfo(int nid, char *buf)
2121 {
2122 	struct hstate *h = &default_hstate;
2123 	return sprintf(buf,
2124 		"Node %d HugePages_Total: %5u\n"
2125 		"Node %d HugePages_Free:  %5u\n"
2126 		"Node %d HugePages_Surp:  %5u\n",
2127 		nid, h->nr_huge_pages_node[nid],
2128 		nid, h->free_huge_pages_node[nid],
2129 		nid, h->surplus_huge_pages_node[nid]);
2130 }
2131 
2132 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2133 unsigned long hugetlb_total_pages(void)
2134 {
2135 	struct hstate *h;
2136 	unsigned long nr_total_pages = 0;
2137 
2138 	for_each_hstate(h)
2139 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2140 	return nr_total_pages;
2141 }
2142 
2143 static int hugetlb_acct_memory(struct hstate *h, long delta)
2144 {
2145 	int ret = -ENOMEM;
2146 
2147 	spin_lock(&hugetlb_lock);
2148 	/*
2149 	 * When cpuset is configured, it breaks the strict hugetlb page
2150 	 * reservation as the accounting is done on a global variable. Such
2151 	 * reservation is completely rubbish in the presence of cpuset because
2152 	 * the reservation is not checked against page availability for the
2153 	 * current cpuset. An application can still be OOM'ed by the kernel
2154 	 * for lack of free htlb pages in the cpuset that the task is in.
2155 	 * Attempting to enforce strict accounting with cpusets is almost
2156 	 * impossible (or too ugly) because cpusets are so fluid that a
2157 	 * task or memory node can be dynamically moved between cpusets.
2158 	 *
2159 	 * The change of semantics for shared hugetlb mapping with cpuset is
2160 	 * undesirable. However, in order to preserve some of the semantics,
2161 	 * we fall back to check against current free page availability as
2162 	 * a best attempt and hopefully to minimize the impact of changing
2163 	 * semantics that cpuset has.
2164 	 */
2165 	if (delta > 0) {
2166 		if (gather_surplus_pages(h, delta) < 0)
2167 			goto out;
2168 
2169 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2170 			return_unused_surplus_pages(h, delta);
2171 			goto out;
2172 		}
2173 	}
2174 
2175 	ret = 0;
2176 	if (delta < 0)
2177 		return_unused_surplus_pages(h, (unsigned long) -delta);
2178 
2179 out:
2180 	spin_unlock(&hugetlb_lock);
2181 	return ret;
2182 }
2183 
2184 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2185 {
2186 	struct resv_map *reservations = vma_resv_map(vma);
2187 
2188 	/*
2189 	 * This new VMA should share its sibling's reservation map if present.
2190 	 * The VMA will only ever have a valid reservation map pointer where
2191 	 * it is being copied for another still existing VMA.  As that VMA
2192 	 * has a reference to the reservation map it cannot disappear until
2193 	 * after this open call completes.  It is therefore safe to take a
2194 	 * new reference here without additional locking.
2195 	 */
2196 	if (reservations)
2197 		kref_get(&reservations->refs);
2198 }
2199 
2200 static void resv_map_put(struct vm_area_struct *vma)
2201 {
2202 	struct resv_map *reservations = vma_resv_map(vma);
2203 
2204 	if (!reservations)
2205 		return;
2206 	kref_put(&reservations->refs, resv_map_release);
2207 }
2208 
2209 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2210 {
2211 	struct hstate *h = hstate_vma(vma);
2212 	struct resv_map *reservations = vma_resv_map(vma);
2213 	struct hugepage_subpool *spool = subpool_vma(vma);
2214 	unsigned long reserve;
2215 	unsigned long start;
2216 	unsigned long end;
2217 
2218 	if (reservations) {
2219 		start = vma_hugecache_offset(h, vma, vma->vm_start);
2220 		end = vma_hugecache_offset(h, vma, vma->vm_end);
2221 
2222 		reserve = (end - start) -
2223 			region_count(&reservations->regions, start, end);
2224 
2225 		resv_map_put(vma);
2226 
2227 		if (reserve) {
2228 			hugetlb_acct_memory(h, -reserve);
2229 			hugepage_subpool_put_pages(spool, reserve);
2230 		}
2231 	}
2232 }
2233 
2234 /*
2235  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2236  * handle_mm_fault() to try to instantiate regular-sized pages in the
2237  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2238  * this far.
2239  */
2240 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2241 {
2242 	BUG();
2243 	return 0;
2244 }
2245 
2246 const struct vm_operations_struct hugetlb_vm_ops = {
2247 	.fault = hugetlb_vm_op_fault,
2248 	.open = hugetlb_vm_op_open,
2249 	.close = hugetlb_vm_op_close,
2250 };
2251 
2252 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2253 				int writable)
2254 {
2255 	pte_t entry;
2256 
2257 	if (writable) {
2258 		entry =
2259 		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2260 	} else {
2261 		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2262 	}
2263 	entry = pte_mkyoung(entry);
2264 	entry = pte_mkhuge(entry);
2265 
2266 	return entry;
2267 }
2268 
2269 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2270 				   unsigned long address, pte_t *ptep)
2271 {
2272 	pte_t entry;
2273 
2274 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2275 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2276 		update_mmu_cache(vma, address, ptep);
2277 }
2278 
2279 static int is_hugetlb_entry_migration(pte_t pte)
2280 {
2281 	swp_entry_t swp;
2282 
2283 	if (huge_pte_none(pte) || pte_present(pte))
2284 		return 0;
2285 	swp = pte_to_swp_entry(pte);
2286 	if (non_swap_entry(swp) && is_migration_entry(swp))
2287 		return 1;
2288 	else
2289 		return 0;
2290 }
2291 
2292 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2293 {
2294 	swp_entry_t swp;
2295 
2296 	if (huge_pte_none(pte) || pte_present(pte))
2297 		return 0;
2298 	swp = pte_to_swp_entry(pte);
2299 	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2300 		return 1;
2301 	else
2302 		return 0;
2303 }
2304 
2305 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2306 			    struct vm_area_struct *vma)
2307 {
2308 	pte_t *src_pte, *dst_pte, entry;
2309 	struct page *ptepage;
2310 	unsigned long addr;
2311 	int cow;
2312 	struct hstate *h = hstate_vma(vma);
2313 	unsigned long sz = huge_page_size(h);
2314 
2315 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2316 
2317 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2318 		src_pte = huge_pte_offset(src, addr);
2319 		if (!src_pte)
2320 			continue;
2321 		dst_pte = huge_pte_alloc(dst, addr, sz);
2322 		if (!dst_pte)
2323 			goto nomem;
2324 
2325 		/* If the pagetables are shared don't copy or take references */
2326 		if (dst_pte == src_pte)
2327 			continue;
2328 
2329 		spin_lock(&dst->page_table_lock);
2330 		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2331 		entry = huge_ptep_get(src_pte);
2332 		if (huge_pte_none(entry)) { /* skip none entry */
2333 			;
2334 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
2335 				    is_hugetlb_entry_hwpoisoned(entry))) {
2336 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
2337 
2338 			if (is_write_migration_entry(swp_entry) && cow) {
2339 				/*
2340 				 * COW mappings require pages in both
2341 				 * parent and child to be set to read.
2342 				 */
2343 				make_migration_entry_read(&swp_entry);
2344 				entry = swp_entry_to_pte(swp_entry);
2345 				set_huge_pte_at(src, addr, src_pte, entry);
2346 			}
2347 			set_huge_pte_at(dst, addr, dst_pte, entry);
2348 		} else {
2349 			if (cow)
2350 				huge_ptep_set_wrprotect(src, addr, src_pte);
2351 			ptepage = pte_page(entry);
2352 			get_page(ptepage);
2353 			page_dup_rmap(ptepage);
2354 			set_huge_pte_at(dst, addr, dst_pte, entry);
2355 		}
2356 		spin_unlock(&src->page_table_lock);
2357 		spin_unlock(&dst->page_table_lock);
2358 	}
2359 	return 0;
2360 
2361 nomem:
2362 	return -ENOMEM;
2363 }
2364 
2365 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2366 			    unsigned long end, struct page *ref_page)
2367 {
2368 	struct mm_struct *mm = vma->vm_mm;
2369 	unsigned long address;
2370 	pte_t *ptep;
2371 	pte_t pte;
2372 	struct page *page;
2373 	struct page *tmp;
2374 	struct hstate *h = hstate_vma(vma);
2375 	unsigned long sz = huge_page_size(h);
2376 
2377 	/*
2378 	 * A page gathering list, protected by the per-file i_mmap_mutex. The
2379 	 * lock is used to avoid list corruption from multiple unmappings
2380 	 * of the same page since we are using page->lru.
2381 	 */
2382 	LIST_HEAD(page_list);
2383 
2384 	WARN_ON(!is_vm_hugetlb_page(vma));
2385 	BUG_ON(start & ~huge_page_mask(h));
2386 	BUG_ON(end & ~huge_page_mask(h));
2387 
2388 	mmu_notifier_invalidate_range_start(mm, start, end);
2389 	spin_lock(&mm->page_table_lock);
2390 	for (address = start; address < end; address += sz) {
2391 		ptep = huge_pte_offset(mm, address);
2392 		if (!ptep)
2393 			continue;
2394 
2395 		if (huge_pmd_unshare(mm, &address, ptep))
2396 			continue;
2397 
2398 		pte = huge_ptep_get(ptep);
2399 		if (huge_pte_none(pte))
2400 			continue;
2401 
2402 		/*
2403 		 * HWPoisoned hugepage is already unmapped and dropped reference
2404 		 */
2405 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2406 			continue;
2407 
2408 		page = pte_page(pte);
2409 		/*
2410 		 * If a reference page is supplied, it is because a specific
2411 		 * page is being unmapped, not a range. Ensure the page we
2412 		 * are about to unmap is the actual page of interest.
2413 		 */
2414 		if (ref_page) {
2415 			if (page != ref_page)
2416 				continue;
2417 
2418 			/*
2419 			 * Mark the VMA as having unmapped its page so that
2420 			 * future faults in this VMA will fail rather than
2421 			 * looking like data was lost
2422 			 */
2423 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2424 		}
2425 
2426 		pte = huge_ptep_get_and_clear(mm, address, ptep);
2427 		if (pte_dirty(pte))
2428 			set_page_dirty(page);
2429 		list_add(&page->lru, &page_list);
2430 
2431 		/* Bail out after unmapping reference page if supplied */
2432 		if (ref_page)
2433 			break;
2434 	}
2435 	flush_tlb_range(vma, start, end);
2436 	spin_unlock(&mm->page_table_lock);
2437 	mmu_notifier_invalidate_range_end(mm, start, end);
2438 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
2439 		page_remove_rmap(page);
2440 		list_del(&page->lru);
2441 		put_page(page);
2442 	}
2443 }
2444 
2445 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2446 			  unsigned long end, struct page *ref_page)
2447 {
2448 	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2449 	__unmap_hugepage_range(vma, start, end, ref_page);
2450 	/*
2451 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
2452 	 * test will fail on a vma being torn down, and not grab a page table
2453 	 * on its way out.  We're lucky that the flag has such an appropriate
2454 	 * name, and can in fact be safely cleared here. We could clear it
2455 	 * before the __unmap_hugepage_range above, but all that's necessary
2456 	 * is to clear it before releasing the i_mmap_mutex below.
2457 	 *
2458 	 * This works because in the contexts this is called, the VMA is
2459 	 * going to be destroyed. It is not vulnerable to madvise(DONTNEED)
2460 	 * because madvise is not supported on hugetlbfs. The same applies
2461 	 * for direct IO. unmap_hugepage_range() is only being called just
2462 	 * before free_pgtables() so clearing VM_MAYSHARE will not cause
2463 	 * surprises later.
2464 	 */
2465 	vma->vm_flags &= ~VM_MAYSHARE;
2466 	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2467 }
2468 
2469 /*
2470  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2471  * mapping it owns the reserve page for. The intention is to unmap the page
2472  * from other VMAs and let the children be SIGKILLed if they are faulting the
2473  * same region.
2474  */
2475 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2476 				struct page *page, unsigned long address)
2477 {
2478 	struct hstate *h = hstate_vma(vma);
2479 	struct vm_area_struct *iter_vma;
2480 	struct address_space *mapping;
2481 	struct prio_tree_iter iter;
2482 	pgoff_t pgoff;
2483 
2484 	/*
2485 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2486 	 * from page cache lookup which is in HPAGE_SIZE units.
2487 	 */
2488 	address = address & huge_page_mask(h);
2489 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2490 			vma->vm_pgoff;
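	/*
	 * Worked example (assuming 4kB base pages and 2MB huge pages): a
	 * fault at vma->vm_start + 4MB gives address - vma->vm_start =
	 * 0x400000, so pgoff = vma->vm_pgoff + (0x400000 >> 12) =
	 * vma->vm_pgoff + 1024, i.e. the index advances by 512 base-page
	 * slots per 2MB huge page into the file.
	 */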
2491 	mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
2492 
2493 	/*
2494 	 * Take the mapping lock for the duration of the table walk. As
2495 	 * this mapping should be shared between all the VMAs,
2496 	 * __unmap_hugepage_range() is called as the lock is already held
2497 	 */
2498 	mutex_lock(&mapping->i_mmap_mutex);
2499 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2500 		/* Do not unmap the current VMA */
2501 		if (iter_vma == vma)
2502 			continue;
2503 
2504 		/*
2505 		 * Unmap the page from other VMAs without their own reserves.
2506 		 * They get marked to be SIGKILLed if they fault in these
2507 		 * areas. This is because a future no-page fault on this VMA
2508 		 * could insert a zeroed page instead of the data existing
2509 		 * from the time of fork. This would look like data corruption
2510 		 */
2511 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2512 			__unmap_hugepage_range(iter_vma,
2513 				address, address + huge_page_size(h),
2514 				page);
2515 	}
2516 	mutex_unlock(&mapping->i_mmap_mutex);
2517 
2518 	return 1;
2519 }
2520 
2521 /*
2522  * Hugetlb_cow() should be called with page lock of the original hugepage held.
2523  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2524  * cannot race with other handlers or page migration.
2525  * Keep the pte_same checks anyway to make transition from the mutex easier.
2526  */
2527 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2528 			unsigned long address, pte_t *ptep, pte_t pte,
2529 			struct page *pagecache_page)
2530 {
2531 	struct hstate *h = hstate_vma(vma);
2532 	struct page *old_page, *new_page;
2533 	int avoidcopy;
2534 	int outside_reserve = 0;
2535 
2536 	old_page = pte_page(pte);
2537 
2538 retry_avoidcopy:
2539 	/* If no-one else is actually using this page, avoid the copy
2540 	 * and just make the page writable */
2541 	avoidcopy = (page_mapcount(old_page) == 1);
2542 	if (avoidcopy) {
2543 		if (PageAnon(old_page))
2544 			page_move_anon_rmap(old_page, vma, address);
2545 		set_huge_ptep_writable(vma, address, ptep);
2546 		return 0;
2547 	}
2548 
2549 	/*
2550 	 * If the process that created a MAP_PRIVATE mapping is about to
2551 	 * perform a COW due to a shared page count, attempt to satisfy
2552 	 * the allocation without using the existing reserves. The pagecache
2553 	 * page is used to determine if the reserve at this address was
2554 	 * consumed or not. If reserves were used, a partial faulted mapping
2555 	 * at the time of fork() could consume its reserves on COW instead
2556 	 * of the full address range.
2557 	 */
2558 	if (!(vma->vm_flags & VM_MAYSHARE) &&
2559 			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2560 			old_page != pagecache_page)
2561 		outside_reserve = 1;
2562 
2563 	page_cache_get(old_page);
2564 
2565 	/* Drop page_table_lock as buddy allocator may be called */
2566 	spin_unlock(&mm->page_table_lock);
2567 	new_page = alloc_huge_page(vma, address, outside_reserve);
2568 
2569 	if (IS_ERR(new_page)) {
2570 		page_cache_release(old_page);
2571 
2572 		/*
2573 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
2574 		 * it is due to references held by a child and an insufficient
2575 		 * huge page pool. To guarantee the original mapper's
2576 		 * reliability, unmap the page from child processes. The child
2577 		 * may get SIGKILLed if it later faults.
2578 		 */
2579 		if (outside_reserve) {
2580 			BUG_ON(huge_pte_none(pte));
2581 			if (unmap_ref_private(mm, vma, old_page, address)) {
2582 				BUG_ON(huge_pte_none(pte));
2583 				spin_lock(&mm->page_table_lock);
2584 				ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2585 				if (likely(pte_same(huge_ptep_get(ptep), pte)))
2586 					goto retry_avoidcopy;
2587 				/*
2588 				 * race occurs while re-acquiring page_table_lock, and
2589 				 * our job is done.
2590 				 */
2591 				return 0;
2592 			}
2593 			WARN_ON_ONCE(1);
2594 		}
2595 
2596 		/* Caller expects lock to be held */
2597 		spin_lock(&mm->page_table_lock);
2598 		return -PTR_ERR(new_page);
2599 	}
2600 
2601 	/*
2602 	 * When the original hugepage is a shared one, it does not have
2603 	 * an anon_vma prepared.
2604 	 */
2605 	if (unlikely(anon_vma_prepare(vma))) {
2606 		page_cache_release(new_page);
2607 		page_cache_release(old_page);
2608 		/* Caller expects lock to be held */
2609 		spin_lock(&mm->page_table_lock);
2610 		return VM_FAULT_OOM;
2611 	}
2612 
2613 	copy_user_huge_page(new_page, old_page, address, vma,
2614 			    pages_per_huge_page(h));
2615 	__SetPageUptodate(new_page);
2616 
2617 	/*
2618 	 * Retake the page_table_lock to check for racing updates
2619 	 * before the page tables are altered
2620 	 */
2621 	spin_lock(&mm->page_table_lock);
2622 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2623 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2624 		/* Break COW */
2625 		mmu_notifier_invalidate_range_start(mm,
2626 			address & huge_page_mask(h),
2627 			(address & huge_page_mask(h)) + huge_page_size(h));
2628 		huge_ptep_clear_flush(vma, address, ptep);
2629 		set_huge_pte_at(mm, address, ptep,
2630 				make_huge_pte(vma, new_page, 1));
2631 		page_remove_rmap(old_page);
2632 		hugepage_add_new_anon_rmap(new_page, vma, address);
2633 		/* Make the old page be freed below */
2634 		new_page = old_page;
2635 		mmu_notifier_invalidate_range_end(mm,
2636 			address & huge_page_mask(h),
2637 			(address & huge_page_mask(h)) + huge_page_size(h));
2638 	}
2639 	page_cache_release(new_page);
2640 	page_cache_release(old_page);
2641 	return 0;
2642 }
2643 
2644 /* Return the pagecache page at a given address within a VMA */
2645 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2646 			struct vm_area_struct *vma, unsigned long address)
2647 {
2648 	struct address_space *mapping;
2649 	pgoff_t idx;
2650 
2651 	mapping = vma->vm_file->f_mapping;
2652 	idx = vma_hugecache_offset(h, vma, address);
2653 
2654 	return find_lock_page(mapping, idx);
2655 }
2656 
2657 /*
2658  * Return whether there is a pagecache page to back given address within VMA.
2659  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2660  */
2661 static bool hugetlbfs_pagecache_present(struct hstate *h,
2662 			struct vm_area_struct *vma, unsigned long address)
2663 {
2664 	struct address_space *mapping;
2665 	pgoff_t idx;
2666 	struct page *page;
2667 
2668 	mapping = vma->vm_file->f_mapping;
2669 	idx = vma_hugecache_offset(h, vma, address);
2670 
2671 	page = find_get_page(mapping, idx);
2672 	if (page)
2673 		put_page(page);
2674 	return page != NULL;
2675 }
2676 
2677 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2678 			unsigned long address, pte_t *ptep, unsigned int flags)
2679 {
2680 	struct hstate *h = hstate_vma(vma);
2681 	int ret = VM_FAULT_SIGBUS;
2682 	int anon_rmap = 0;
2683 	pgoff_t idx;
2684 	unsigned long size;
2685 	struct page *page;
2686 	struct address_space *mapping;
2687 	pte_t new_pte;
2688 
2689 	/*
2690 	 * Currently, we are forced to kill the process in the event the
2691 	 * original mapper has unmapped pages from the child due to a failed
2692 	 * COW. Warn that such a situation has occurred as it may not be obvious
2693 	 */
2694 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2695 		printk(KERN_WARNING
2696 			"PID %d killed due to inadequate hugepage pool\n",
2697 			current->pid);
2698 		return ret;
2699 	}
2700 
2701 	mapping = vma->vm_file->f_mapping;
2702 	idx = vma_hugecache_offset(h, vma, address);
2703 
2704 	/*
2705 	 * Use page lock to guard against racing truncation
2706 	 * before we get page_table_lock.
2707 	 */
2708 retry:
2709 	page = find_lock_page(mapping, idx);
2710 	if (!page) {
2711 		size = i_size_read(mapping->host) >> huge_page_shift(h);
2712 		if (idx >= size)
2713 			goto out;
2714 		page = alloc_huge_page(vma, address, 0);
2715 		if (IS_ERR(page)) {
2716 			ret = -PTR_ERR(page);
2717 			goto out;
2718 		}
2719 		clear_huge_page(page, address, pages_per_huge_page(h));
2720 		__SetPageUptodate(page);
2721 
2722 		if (vma->vm_flags & VM_MAYSHARE) {
2723 			int err;
2724 			struct inode *inode = mapping->host;
2725 
2726 			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2727 			if (err) {
2728 				put_page(page);
2729 				if (err == -EEXIST)
2730 					goto retry;
2731 				goto out;
2732 			}
2733 
2734 			spin_lock(&inode->i_lock);
2735 			inode->i_blocks += blocks_per_huge_page(h);
2736 			spin_unlock(&inode->i_lock);
2737 		} else {
2738 			lock_page(page);
2739 			if (unlikely(anon_vma_prepare(vma))) {
2740 				ret = VM_FAULT_OOM;
2741 				goto backout_unlocked;
2742 			}
2743 			anon_rmap = 1;
2744 		}
2745 	} else {
2746 		/*
2747 		 * If a memory error occurs between mmap() and fault, some processes
2748 		 * don't have a hwpoisoned swap entry for the errored virtual address.
2749 		 * So we need to block hugepage faults with a PG_hwpoison bit check.
2750 		 */
2751 		if (unlikely(PageHWPoison(page))) {
2752 			ret = VM_FAULT_HWPOISON |
2753 			      VM_FAULT_SET_HINDEX(h - hstates);
2754 			goto backout_unlocked;
2755 		}
2756 	}
2757 
2758 	/*
2759 	 * If we are going to COW a private mapping later, we examine the
2760 	 * pending reservations for this page now. This will ensure that
2761 	 * any allocations necessary to record that reservation occur outside
2762 	 * the spinlock.
2763 	 */
2764 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2765 		if (vma_needs_reservation(h, vma, address) < 0) {
2766 			ret = VM_FAULT_OOM;
2767 			goto backout_unlocked;
2768 		}
2769 
2770 	spin_lock(&mm->page_table_lock);
2771 	size = i_size_read(mapping->host) >> huge_page_shift(h);
2772 	if (idx >= size)
2773 		goto backout;
2774 
2775 	ret = 0;
2776 	if (!huge_pte_none(huge_ptep_get(ptep)))
2777 		goto backout;
2778 
2779 	if (anon_rmap)
2780 		hugepage_add_new_anon_rmap(page, vma, address);
2781 	else
2782 		page_dup_rmap(page);
2783 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2784 				&& (vma->vm_flags & VM_SHARED)));
2785 	set_huge_pte_at(mm, address, ptep, new_pte);
2786 
2787 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2788 		/* Optimization, do the COW without a second fault */
2789 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2790 	}
2791 
2792 	spin_unlock(&mm->page_table_lock);
2793 	unlock_page(page);
2794 out:
2795 	return ret;
2796 
2797 backout:
2798 	spin_unlock(&mm->page_table_lock);
2799 backout_unlocked:
2800 	unlock_page(page);
2801 	put_page(page);
2802 	goto out;
2803 }
2804 
2805 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2806 			unsigned long address, unsigned int flags)
2807 {
2808 	pte_t *ptep;
2809 	pte_t entry;
2810 	int ret;
2811 	struct page *page = NULL;
2812 	struct page *pagecache_page = NULL;
2813 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2814 	struct hstate *h = hstate_vma(vma);
2815 
2816 	address &= huge_page_mask(h);
2817 
2818 	ptep = huge_pte_offset(mm, address);
2819 	if (ptep) {
2820 		entry = huge_ptep_get(ptep);
2821 		if (unlikely(is_hugetlb_entry_migration(entry))) {
2822 			migration_entry_wait_huge(mm, ptep);
2823 			return 0;
2824 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2825 			return VM_FAULT_HWPOISON_LARGE |
2826 			       VM_FAULT_SET_HINDEX(h - hstates);
2827 	}
2828 
2829 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2830 	if (!ptep)
2831 		return VM_FAULT_OOM;
2832 
2833 	/*
2834 	 * Serialize hugepage allocation and instantiation, so that we don't
2835 	 * get spurious allocation failures if two CPUs race to instantiate
2836 	 * the same page in the page cache.
2837 	 */
2838 	mutex_lock(&hugetlb_instantiation_mutex);
2839 	entry = huge_ptep_get(ptep);
2840 	if (huge_pte_none(entry)) {
2841 		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2842 		goto out_mutex;
2843 	}
2844 
2845 	ret = 0;
2846 
2847 	/*
2848 	 * If we are going to COW the mapping later, we examine the pending
2849 	 * reservations for this page now. This will ensure that any
2850 	 * allocations necessary to record that reservation occur outside the
2851 	 * spinlock. For private mappings, we also lookup the pagecache
2852 	 * page now as it is used to determine if a reservation has been
2853 	 * consumed.
2854 	 */
2855 	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2856 		if (vma_needs_reservation(h, vma, address) < 0) {
2857 			ret = VM_FAULT_OOM;
2858 			goto out_mutex;
2859 		}
2860 
2861 		if (!(vma->vm_flags & VM_MAYSHARE))
2862 			pagecache_page = hugetlbfs_pagecache_page(h,
2863 								vma, address);
2864 	}
2865 
2866 	/*
2867 	 * hugetlb_cow() requires page locks of pte_page(entry) and
2868 	 * pagecache_page, so here we need to take the former one
2869 	 * when page != pagecache_page or !pagecache_page.
2870 	 * Note that locking order is always pagecache_page -> page,
2871 	 * so no worry about deadlock.
2872 	 */
2873 	page = pte_page(entry);
2874 	get_page(page);
2875 	if (page != pagecache_page)
2876 		lock_page(page);
2877 
2878 	spin_lock(&mm->page_table_lock);
2879 	/* Check for a racing update before calling hugetlb_cow */
2880 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2881 		goto out_page_table_lock;
2882 
2883 
2884 	if (flags & FAULT_FLAG_WRITE) {
2885 		if (!pte_write(entry)) {
2886 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
2887 							pagecache_page);
2888 			goto out_page_table_lock;
2889 		}
2890 		entry = pte_mkdirty(entry);
2891 	}
2892 	entry = pte_mkyoung(entry);
2893 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2894 						flags & FAULT_FLAG_WRITE))
2895 		update_mmu_cache(vma, address, ptep);
2896 
2897 out_page_table_lock:
2898 	spin_unlock(&mm->page_table_lock);
2899 
2900 	if (pagecache_page) {
2901 		unlock_page(pagecache_page);
2902 		put_page(pagecache_page);
2903 	}
2904 	if (page != pagecache_page)
2905 		unlock_page(page);
2906 	put_page(page);
2907 
2908 out_mutex:
2909 	mutex_unlock(&hugetlb_instantiation_mutex);
2910 
2911 	return ret;
2912 }
2913 
2914 /* Can be overridden by architectures */
2915 __attribute__((weak)) struct page *
2916 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2917 	       pud_t *pud, int write)
2918 {
2919 	BUG();
2920 	return NULL;
2921 }
2922 
2923 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2924 			struct page **pages, struct vm_area_struct **vmas,
2925 			unsigned long *position, int *length, int i,
2926 			unsigned int flags)
2927 {
2928 	unsigned long pfn_offset;
2929 	unsigned long vaddr = *position;
2930 	int remainder = *length;
2931 	struct hstate *h = hstate_vma(vma);
2932 
2933 	spin_lock(&mm->page_table_lock);
2934 	while (vaddr < vma->vm_end && remainder) {
2935 		pte_t *pte;
2936 		int absent;
2937 		struct page *page;
2938 
2939 		/*
2940 		 * Some archs (sparc64, sh*) have multiple pte_ts to
2941 		 * each hugepage.  We have to make sure we get the
2942 		 * first, for the page indexing below to work.
2943 		 */
2944 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2945 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
2946 
2947 		/*
2948 		 * When coredumping, it suits get_dump_page if we just return
2949 		 * an error where there's an empty slot with no huge pagecache
2950 		 * to back it.  This way, we avoid allocating a hugepage, and
2951 		 * the sparse dumpfile avoids allocating disk blocks, but its
2952 		 * huge holes still show up with zeroes where they need to be.
2953 		 */
2954 		if (absent && (flags & FOLL_DUMP) &&
2955 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2956 			remainder = 0;
2957 			break;
2958 		}
2959 
2960 		/*
2961 		 * We need to call hugetlb_fault for both hugepages under migration
2962 		 * (in which case hugetlb_fault waits for the migration) and
2963 		 * hwpoisoned hugepages (in which case we need to prevent the
2964 		 * caller from accessing them). To do this, we use is_swap_pte
2965 		 * here instead of is_hugetlb_entry_migration and
2966 		 * is_hugetlb_entry_hwpoisoned, because it simply covers
2967 		 * both cases, and because we can't follow correct pages
2968 		 * directly from any kind of swap entry.
2969 		 */
2970 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
2971 		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2972 			int ret;
2973 
2974 			spin_unlock(&mm->page_table_lock);
2975 			ret = hugetlb_fault(mm, vma, vaddr,
2976 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2977 			spin_lock(&mm->page_table_lock);
2978 			if (!(ret & VM_FAULT_ERROR))
2979 				continue;
2980 
2981 			remainder = 0;
2982 			break;
2983 		}
2984 
2985 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2986 		page = pte_page(huge_ptep_get(pte));
2987 same_page:
2988 		if (pages) {
2989 			pages[i] = mem_map_offset(page, pfn_offset);
2990 			get_page(pages[i]);
2991 		}
2992 
2993 		if (vmas)
2994 			vmas[i] = vma;
2995 
2996 		vaddr += PAGE_SIZE;
2997 		++pfn_offset;
2998 		--remainder;
2999 		++i;
3000 		if (vaddr < vma->vm_end && remainder &&
3001 				pfn_offset < pages_per_huge_page(h)) {
3002 			/*
3003 			 * We use pfn_offset to avoid touching the pageframes
3004 			 * of this compound page.
3005 			 */
3006 			goto same_page;
3007 		}
3008 	}
3009 	spin_unlock(&mm->page_table_lock);
3010 	*length = remainder;
3011 	*position = vaddr;
3012 
3013 	return i ? i : -EFAULT;
3014 }
3015 
3016 void hugetlb_change_protection(struct vm_area_struct *vma,
3017 		unsigned long address, unsigned long end, pgprot_t newprot)
3018 {
3019 	struct mm_struct *mm = vma->vm_mm;
3020 	unsigned long start = address;
3021 	pte_t *ptep;
3022 	pte_t pte;
3023 	struct hstate *h = hstate_vma(vma);
3024 
3025 	BUG_ON(address >= end);
3026 	flush_cache_range(vma, address, end);
3027 
3028 	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
3029 	spin_lock(&mm->page_table_lock);
3030 	for (; address < end; address += huge_page_size(h)) {
3031 		ptep = huge_pte_offset(mm, address);
3032 		if (!ptep)
3033 			continue;
3034 		if (huge_pmd_unshare(mm, &address, ptep))
3035 			continue;
3036 		if (!huge_pte_none(huge_ptep_get(ptep))) {
3037 			pte = huge_ptep_get_and_clear(mm, address, ptep);
3038 			pte = pte_mkhuge(pte_modify(pte, newprot));
3039 			set_huge_pte_at(mm, address, ptep, pte);
3040 		}
3041 	}
3042 	spin_unlock(&mm->page_table_lock);
3043 	/*
3044 	 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
3045 	 * may have cleared our pud entry and done put_page on the page table:
3046 	 * once we release i_mmap_mutex, another task can do the final put_page
3047 	 * and that page table be reused and filled with junk.
3048 	 */
3049 	flush_tlb_range(vma, start, end);
3050 	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3051 }
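/*
 * Example (userspace sketch, not part of this file): hugetlb_change_protection()
 * is what ultimately services mprotect() on a hugetlb VMA.  A minimal
 * illustration, assuming a kernel with MAP_HUGETLB support, a 2MB default
 * huge page size, and at least one free huge page:
 */
#if 0	/* illustrative userspace code only */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* assume 2MB default huge page size */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Write-protect the whole huge page mapping. */
	if (mprotect(p, len, PROT_READ))
		perror("mprotect");
	munmap(p, len);
	return 0;
}
#endif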
3052 
3053 int hugetlb_reserve_pages(struct inode *inode,
3054 					long from, long to,
3055 					struct vm_area_struct *vma,
3056 					vm_flags_t vm_flags)
3057 {
3058 	long ret, chg;
3059 	struct hstate *h = hstate_inode(inode);
3060 	struct hugepage_subpool *spool = subpool_inode(inode);
3061 
3062 	/*
3063 	 * Only apply hugepage reservation if asked. At fault time, an
3064 	 * attempt will be made for VM_NORESERVE to allocate a page
3065 	 * without using reserves
3066 	 */
3067 	if (vm_flags & VM_NORESERVE)
3068 		return 0;
3069 
3070 	/*
3071 	 * Shared mappings base their reservation on the number of pages that
3072 	 * are already allocated on behalf of the file. Private mappings need
3073 	 * to reserve the full area even if read-only as mprotect() may be
3074 	 * called to make the mapping read-write. Assume !vma is a shm mapping
3075 	 */
3076 	if (!vma || vma->vm_flags & VM_MAYSHARE)
3077 		chg = region_chg(&inode->i_mapping->private_list, from, to);
3078 	else {
3079 		struct resv_map *resv_map = resv_map_alloc();
3080 		if (!resv_map)
3081 			return -ENOMEM;
3082 
3083 		chg = to - from;
3084 
3085 		set_vma_resv_map(vma, resv_map);
3086 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3087 	}
3088 
3089 	if (chg < 0) {
3090 		ret = chg;
3091 		goto out_err;
3092 	}
3093 
3094 	/* There must be enough pages in the subpool for the mapping */
3095 	if (hugepage_subpool_get_pages(spool, chg)) {
3096 		ret = -ENOSPC;
3097 		goto out_err;
3098 	}
3099 
3100 	/*
3101 	 * Check that enough hugepages are available for the reservation.
3102 	 * Hand the pages back to the subpool if there are not
3103 	 */
3104 	ret = hugetlb_acct_memory(h, chg);
3105 	if (ret < 0) {
3106 		hugepage_subpool_put_pages(spool, chg);
3107 		goto out_err;
3108 	}
3109 
3110 	/*
3111 	 * Account for the reservations made. Shared mappings record regions
3112 	 * that have reservations as they are shared by multiple VMAs.
3113 	 * When the last VMA disappears, the region map says how much
3114 	 * the reservation was and the page cache tells how much of
3115 	 * the reservation was consumed. Private mappings are per-VMA and
3116 	 * only the consumed reservations are tracked. When the VMA
3117 	 * disappears, the original reservation is the VMA size and the
3118 	 * consumed reservations are stored in the map. Hence, nothing
3119 	 * else has to be done for private mappings here
3120 	 */
3121 	if (!vma || vma->vm_flags & VM_MAYSHARE)
3122 		region_add(&inode->i_mapping->private_list, from, to);
3123 	return 0;
3124 out_err:
3125 	if (vma)
3126 		resv_map_put(vma);
3127 	return ret;
3128 }
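/*
 * Example (userspace sketch, not part of this file): hugetlb_reserve_pages()
 * runs at mmap() time, so a mapping either gets its huge pages reserved up
 * front or the mmap() fails (e.g. -ENOMEM/-ENOSPC) rather than the process
 * faulting later.  Sketch assuming a hugetlbfs mount at /dev/hugepages (the
 * mount point is an assumption, not defined in this file):
 */
#if 0	/* illustrative userspace code only */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4 * 2 * 1024 * 1024;	/* four 2MB huge pages */
	int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The reservation for [0, len) is charged here, at mmap() time. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		perror("mmap");	/* e.g. not enough huge pages to reserve */
	else
		munmap(p, len);
	close(fd);
	unlink("/dev/hugepages/example");
	return 0;
}
#endif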
3129 
3130 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3131 {
3132 	struct hstate *h = hstate_inode(inode);
3133 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
3134 	struct hugepage_subpool *spool = subpool_inode(inode);
3135 
3136 	spin_lock(&inode->i_lock);
3137 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3138 	spin_unlock(&inode->i_lock);
3139 
3140 	hugepage_subpool_put_pages(spool, (chg - freed));
3141 	hugetlb_acct_memory(h, -(chg - freed));
3142 }
3143 
3144 #ifdef CONFIG_MEMORY_FAILURE
3145 
3146 /* Should be called in hugetlb_lock */
3147 static int is_hugepage_on_freelist(struct page *hpage)
3148 {
3149 	struct page *page;
3150 	struct page *tmp;
3151 	struct hstate *h = page_hstate(hpage);
3152 	int nid = page_to_nid(hpage);
3153 
3154 	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3155 		if (page == hpage)
3156 			return 1;
3157 	return 0;
3158 }
3159 
3160 /*
3161  * This function is called from memory failure code.
3162  * Assume the caller holds page lock of the head page.
3163  */
3164 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3165 {
3166 	struct hstate *h = page_hstate(hpage);
3167 	int nid = page_to_nid(hpage);
3168 	int ret = -EBUSY;
3169 
3170 	spin_lock(&hugetlb_lock);
3171 	if (is_hugepage_on_freelist(hpage)) {
3172 		list_del(&hpage->lru);
3173 		set_page_refcounted(hpage);
3174 		h->free_huge_pages--;
3175 		h->free_huge_pages_node[nid]--;
3176 		ret = 0;
3177 	}
3178 	spin_unlock(&hugetlb_lock);
3179 	return ret;
3180 }
3181 #endif
3182