1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/io.h>
28 
29 #include <linux/hugetlb.h>
30 #include <linux/node.h>
31 #include "internal.h"
32 
33 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
34 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35 unsigned long hugepages_treat_as_movable;
36 
37 static int max_hstate;
38 unsigned int default_hstate_idx;
39 struct hstate hstates[HUGE_MAX_HSTATE];
40 
41 __initdata LIST_HEAD(huge_boot_pages);
42 
43 /* for command line parsing */
44 static struct hstate * __initdata parsed_hstate;
45 static unsigned long __initdata default_hstate_max_huge_pages;
46 static unsigned long __initdata default_hstate_size;
47 
48 #define for_each_hstate(h) \
49 	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
50 
51 /*
52  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
53  */
54 static DEFINE_SPINLOCK(hugetlb_lock);
55 
56 /*
57  * Region tracking -- allows tracking of reservations and instantiated pages
58  *                    across the pages in a mapping.
59  *
60  * The region data structures are protected by a combination of the mmap_sem
61  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
62  * must either hold the mmap_sem for write, or the mmap_sem for read and
63  * the hugetlb_instantiation mutex:
64  *
65  * 	down_write(&mm->mmap_sem);
66  * or
67  * 	down_read(&mm->mmap_sem);
68  * 	mutex_lock(&hugetlb_instantiation_mutex);
69  */
70 struct file_region {
71 	struct list_head link;
72 	long from;
73 	long to;
74 };
75 
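/*
 * Add the huge page range [f, t) to the reserve map.  An existing
 * region is expanded to cover the range and any regions it now
 * overlaps are merged into it, e.g. adding [2, 5) to a map holding
 * [0, 3) and [4, 8) leaves a single region [0, 8).  The caller is
 * expected to have made room beforehand via region_chg(), so this
 * cannot fail.
 */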
76 static long region_add(struct list_head *head, long f, long t)
77 {
78 	struct file_region *rg, *nrg, *trg;
79 
80 	/* Locate the region we are either in or before. */
81 	list_for_each_entry(rg, head, link)
82 		if (f <= rg->to)
83 			break;
84 
85 	/* Round our left edge to the current segment if it encloses us. */
86 	if (f > rg->from)
87 		f = rg->from;
88 
89 	/* Check for and consume any regions we now overlap with. */
90 	nrg = rg;
91 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
92 		if (&rg->link == head)
93 			break;
94 		if (rg->from > t)
95 			break;
96 
97 		/* If this area reaches higher, extend our area to
98 		 * include it completely.  If this is not the first area
99 		 * which we intend to reuse, free it. */
100 		if (rg->to > t)
101 			t = rg->to;
102 		if (rg != nrg) {
103 			list_del(&rg->link);
104 			kfree(rg);
105 		}
106 	}
107 	nrg->from = f;
108 	nrg->to = t;
109 	return 0;
110 }
111 
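/*
 * Determine how many huge pages in the range [f, t) are not yet
 * covered by the reserve map.  If a completely new region would be
 * needed, a zero-sized placeholder is inserted now so that a later
 * region_add() for the same range cannot fail.  Returns the number
 * of pages that still need a reservation, or -ENOMEM.
 */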
112 static long region_chg(struct list_head *head, long f, long t)
113 {
114 	struct file_region *rg, *nrg;
115 	long chg = 0;
116 
117 	/* Locate the region we are before or in. */
118 	list_for_each_entry(rg, head, link)
119 		if (f <= rg->to)
120 			break;
121 
122 	/* If we are below the current region then a new region is required.
123 	 * Subtle: allocate a new region at the position but make it zero
124 	 * size such that we can guarantee to record the reservation. */
125 	if (&rg->link == head || t < rg->from) {
126 		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
127 		if (!nrg)
128 			return -ENOMEM;
129 		nrg->from = f;
130 		nrg->to   = f;
131 		INIT_LIST_HEAD(&nrg->link);
132 		list_add(&nrg->link, rg->link.prev);
133 
134 		return t - f;
135 	}
136 
137 	/* Round our left edge to the current segment if it encloses us. */
138 	if (f > rg->from)
139 		f = rg->from;
140 	chg = t - f;
141 
142 	/* Check for and consume any regions we now overlap with. */
143 	list_for_each_entry(rg, rg->link.prev, link) {
144 		if (&rg->link == head)
145 			break;
146 		if (rg->from > t)
147 			return chg;
148 
149 		/* We overlap with this area; if it extends further than
150 		 * us then we must extend ourselves.  Account for its
151 		 * existing reservation. */
152 		if (rg->to > t) {
153 			chg += rg->to - t;
154 			t = rg->to;
155 		}
156 		chg -= rg->to - rg->from;
157 	}
158 	return chg;
159 }
160 
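/*
 * Truncate the reserve map at offset 'end': trim any region that
 * straddles 'end' and drop every region beyond it.  Returns the
 * number of huge pages removed from the map.
 */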
161 static long region_truncate(struct list_head *head, long end)
162 {
163 	struct file_region *rg, *trg;
164 	long chg = 0;
165 
166 	/* Locate the region we are either in or before. */
167 	list_for_each_entry(rg, head, link)
168 		if (end <= rg->to)
169 			break;
170 	if (&rg->link == head)
171 		return 0;
172 
173 	/* If we are in the middle of a region then adjust it. */
174 	if (end > rg->from) {
175 		chg = rg->to - end;
176 		rg->to = end;
177 		rg = list_entry(rg->link.next, typeof(*rg), link);
178 	}
179 
180 	/* Drop any remaining regions. */
181 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
182 		if (&rg->link == head)
183 			break;
184 		chg += rg->to - rg->from;
185 		list_del(&rg->link);
186 		kfree(rg);
187 	}
188 	return chg;
189 }
190 
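/*
 * Count how many huge pages in the range [f, t) are currently
 * represented in the reserve map.
 */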
191 static long region_count(struct list_head *head, long f, long t)
192 {
193 	struct file_region *rg;
194 	long chg = 0;
195 
196 	/* Locate each segment we overlap with, and count that overlap. */
197 	list_for_each_entry(rg, head, link) {
198 		long seg_from;
199 		long seg_to;
200 
201 		if (rg->to <= f)
202 			continue;
203 		if (rg->from >= t)
204 			break;
205 
206 		seg_from = max(rg->from, f);
207 		seg_to = min(rg->to, t);
208 
209 		chg += seg_to - seg_from;
210 	}
211 
212 	return chg;
213 }
214 
215 /*
216  * Convert the address within this vma to the page offset within
217  * the mapping, in pagecache page units; huge pages here.
218  */
219 static pgoff_t vma_hugecache_offset(struct hstate *h,
220 			struct vm_area_struct *vma, unsigned long address)
221 {
222 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
223 			(vma->vm_pgoff >> huge_page_order(h));
224 }
225 
226 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
227 				     unsigned long address)
228 {
229 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
230 }
231 
232 /*
233  * Return the size of the pages allocated when backing a VMA. In the majority
234  * of cases this will be the same size as used by the page table entries.
235  */
236 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
237 {
238 	struct hstate *hstate;
239 
240 	if (!is_vm_hugetlb_page(vma))
241 		return PAGE_SIZE;
242 
243 	hstate = hstate_vma(vma);
244 
245 	return 1UL << (hstate->order + PAGE_SHIFT);
246 }
247 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
248 
249 /*
250  * Return the page size being used by the MMU to back a VMA. In the majority
251  * of cases, the page size used by the kernel matches the MMU size. On
252  * architectures where it differs, an architecture-specific version of this
253  * function is required.
254  */
255 #ifndef vma_mmu_pagesize
256 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
257 {
258 	return vma_kernel_pagesize(vma);
259 }
260 #endif
261 
262 /*
263  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
264  * bits of the reservation map pointer, which are always clear due to
265  * alignment.
266  */
267 #define HPAGE_RESV_OWNER    (1UL << 0)
268 #define HPAGE_RESV_UNMAPPED (1UL << 1)
269 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
270 
271 /*
272  * These helpers are used to track how many pages are reserved for
273  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
274  * is guaranteed to have their future faults succeed.
275  *
276  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
277  * the reserve counters are updated with the hugetlb_lock held. It is safe
278  * to reset the VMA at fork() time as it is not in use yet and there is no
279  * chance of the global counters getting corrupted as a result of the values.
280  *
281  * The private mapping reservation is represented in a subtly different
282  * manner to a shared mapping.  A shared mapping has a region map associated
283  * with the underlying file; this region map represents the backing file
284  * pages which have ever had a reservation assigned, and it persists even
285  * after the page is instantiated.  A private mapping has a region map
286  * associated with the original mmap which is attached to all VMAs which
287  * reference it; this region map represents those offsets which have consumed
288  * a reservation, i.e. where pages have been instantiated.
289  */
290 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
291 {
292 	return (unsigned long)vma->vm_private_data;
293 }
294 
295 static void set_vma_private_data(struct vm_area_struct *vma,
296 							unsigned long value)
297 {
298 	vma->vm_private_data = (void *)value;
299 }
300 
301 struct resv_map {
302 	struct kref refs;
303 	struct list_head regions;
304 };
305 
306 static struct resv_map *resv_map_alloc(void)
307 {
308 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
309 	if (!resv_map)
310 		return NULL;
311 
312 	kref_init(&resv_map->refs);
313 	INIT_LIST_HEAD(&resv_map->regions);
314 
315 	return resv_map;
316 }
317 
318 static void resv_map_release(struct kref *ref)
319 {
320 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
321 
322 	/* Clear out any active regions before we release the map. */
323 	region_truncate(&resv_map->regions, 0);
324 	kfree(resv_map);
325 }
326 
327 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
328 {
329 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
330 	if (!(vma->vm_flags & VM_MAYSHARE))
331 		return (struct resv_map *)(get_vma_private_data(vma) &
332 							~HPAGE_RESV_MASK);
333 	return NULL;
334 }
335 
336 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
337 {
338 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
339 	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
340 
341 	set_vma_private_data(vma, (get_vma_private_data(vma) &
342 				HPAGE_RESV_MASK) | (unsigned long)map);
343 }
344 
345 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
346 {
347 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
348 	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
349 
350 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
351 }
352 
353 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
354 {
355 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
356 
357 	return (get_vma_private_data(vma) & flag) != 0;
358 }
359 
360 /* Decrement the reserved pages in the hugepage pool by one */
361 static void decrement_hugepage_resv_vma(struct hstate *h,
362 			struct vm_area_struct *vma)
363 {
364 	if (vma->vm_flags & VM_NORESERVE)
365 		return;
366 
367 	if (vma->vm_flags & VM_MAYSHARE) {
368 		/* Shared mappings always use reserves */
369 		h->resv_huge_pages--;
370 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
371 		/*
372 		 * Only the process that called mmap() has reserves for
373 		 * private mappings.
374 		 */
375 		h->resv_huge_pages--;
376 	}
377 }
378 
379 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
380 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
381 {
382 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
383 	if (!(vma->vm_flags & VM_MAYSHARE))
384 		vma->vm_private_data = (void *)0;
385 }
386 
387 /* Returns true if the VMA has associated reserve pages */
388 static int vma_has_reserves(struct vm_area_struct *vma)
389 {
390 	if (vma->vm_flags & VM_MAYSHARE)
391 		return 1;
392 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
393 		return 1;
394 	return 0;
395 }
396 
397 static void copy_gigantic_page(struct page *dst, struct page *src)
398 {
399 	int i;
400 	struct hstate *h = page_hstate(src);
401 	struct page *dst_base = dst;
402 	struct page *src_base = src;
403 
404 	for (i = 0; i < pages_per_huge_page(h); ) {
405 		cond_resched();
406 		copy_highpage(dst, src);
407 
408 		i++;
409 		dst = mem_map_next(dst, dst_base, i);
410 		src = mem_map_next(src, src_base, i);
411 	}
412 }
413 
414 void copy_huge_page(struct page *dst, struct page *src)
415 {
416 	int i;
417 	struct hstate *h = page_hstate(src);
418 
419 	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
420 		copy_gigantic_page(dst, src);
421 		return;
422 	}
423 
424 	might_sleep();
425 	for (i = 0; i < pages_per_huge_page(h); i++) {
426 		cond_resched();
427 		copy_highpage(dst + i, src + i);
428 	}
429 }
430 
431 static void enqueue_huge_page(struct hstate *h, struct page *page)
432 {
433 	int nid = page_to_nid(page);
434 	list_add(&page->lru, &h->hugepage_freelists[nid]);
435 	h->free_huge_pages++;
436 	h->free_huge_pages_node[nid]++;
437 }
438 
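/*
 * Take a free huge page off the given node's free list, if one is
 * available, and return it with its reference count set for use.
 */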
439 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
440 {
441 	struct page *page;
442 
443 	if (list_empty(&h->hugepage_freelists[nid]))
444 		return NULL;
445 	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
446 	list_del(&page->lru);
447 	set_page_refcounted(page);
448 	h->free_huge_pages--;
449 	h->free_huge_pages_node[nid]--;
450 	return page;
451 }
452 
453 static struct page *dequeue_huge_page_vma(struct hstate *h,
454 				struct vm_area_struct *vma,
455 				unsigned long address, int avoid_reserve)
456 {
457 	struct page *page = NULL;
458 	struct mempolicy *mpol;
459 	nodemask_t *nodemask;
460 	struct zonelist *zonelist;
461 	struct zone *zone;
462 	struct zoneref *z;
463 
464 	get_mems_allowed();
465 	zonelist = huge_zonelist(vma, address,
466 					htlb_alloc_mask, &mpol, &nodemask);
467 	/*
468 	 * A child process with MAP_PRIVATE mappings created by its parent
469 	 * has no page reserves. This check ensures that reservations are
470 	 * not "stolen". The child may still get SIGKILLed.
471 	 */
472 	if (!vma_has_reserves(vma) &&
473 			h->free_huge_pages - h->resv_huge_pages == 0)
474 		goto err;
475 
476 	/* If reserves cannot be used, ensure enough pages are in the pool */
477 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
478 		goto err;
479 
480 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
481 						MAX_NR_ZONES - 1, nodemask) {
482 		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
483 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
484 			if (page) {
485 				if (!avoid_reserve)
486 					decrement_hugepage_resv_vma(h, vma);
487 				break;
488 			}
489 		}
490 	}
491 err:
492 	mpol_cond_put(mpol);
493 	put_mems_allowed();
494 	return page;
495 }
496 
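/*
 * Return a huge page to the buddy allocator: drop it from the hstate
 * accounting, clear the page flags left over from its life in the
 * pool, remove the compound destructor and free the component pages.
 * Only valid for huge pages below MAX_ORDER.
 */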
497 static void update_and_free_page(struct hstate *h, struct page *page)
498 {
499 	int i;
500 
501 	VM_BUG_ON(h->order >= MAX_ORDER);
502 
503 	h->nr_huge_pages--;
504 	h->nr_huge_pages_node[page_to_nid(page)]--;
505 	for (i = 0; i < pages_per_huge_page(h); i++) {
506 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
507 				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
508 				1 << PG_private | 1<< PG_writeback);
509 	}
510 	set_compound_page_dtor(page, NULL);
511 	set_page_refcounted(page);
512 	arch_release_hugepage(page);
513 	__free_pages(page, huge_page_order(h));
514 }
515 
516 struct hstate *size_to_hstate(unsigned long size)
517 {
518 	struct hstate *h;
519 
520 	for_each_hstate(h) {
521 		if (huge_page_size(h) == size)
522 			return h;
523 	}
524 	return NULL;
525 }
526 
527 static void free_huge_page(struct page *page)
528 {
529 	/*
530 	 * Can't pass hstate in here because it is called from the
531 	 * compound page destructor.
532 	 */
533 	struct hstate *h = page_hstate(page);
534 	int nid = page_to_nid(page);
535 	struct address_space *mapping;
536 
537 	mapping = (struct address_space *) page_private(page);
538 	set_page_private(page, 0);
539 	page->mapping = NULL;
540 	BUG_ON(page_count(page));
541 	BUG_ON(page_mapcount(page));
542 	INIT_LIST_HEAD(&page->lru);
543 
544 	spin_lock(&hugetlb_lock);
545 	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
546 		update_and_free_page(h, page);
547 		h->surplus_huge_pages--;
548 		h->surplus_huge_pages_node[nid]--;
549 	} else {
550 		enqueue_huge_page(h, page);
551 	}
552 	spin_unlock(&hugetlb_lock);
553 	if (mapping)
554 		hugetlb_put_quota(mapping, 1);
555 }
556 
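/*
 * Turn a freshly allocated compound page into a pool huge page:
 * install the hugetlb destructor, account for it in the hstate, and
 * drop our reference so the destructor places it on the free list.
 */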
557 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
558 {
559 	set_compound_page_dtor(page, free_huge_page);
560 	spin_lock(&hugetlb_lock);
561 	h->nr_huge_pages++;
562 	h->nr_huge_pages_node[nid]++;
563 	spin_unlock(&hugetlb_lock);
564 	put_page(page); /* free it into the hugepage allocator */
565 }
566 
567 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
568 {
569 	int i;
570 	int nr_pages = 1 << order;
571 	struct page *p = page + 1;
572 
573 	/* we rely on prep_new_huge_page to set the destructor */
574 	set_compound_order(page, order);
575 	__SetPageHead(page);
576 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
577 		__SetPageTail(p);
578 		p->first_page = page;
579 	}
580 }
581 
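/*
 * Test whether a page (or any tail of a compound page) belongs to
 * hugetlb, identified by its compound page destructor.
 */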
582 int PageHuge(struct page *page)
583 {
584 	compound_page_dtor *dtor;
585 
586 	if (!PageCompound(page))
587 		return 0;
588 
589 	page = compound_head(page);
590 	dtor = get_compound_page_dtor(page);
591 
592 	return dtor == free_huge_page;
593 }
594 
595 EXPORT_SYMBOL_GPL(PageHuge);
596 
597 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
598 {
599 	struct page *page;
600 
601 	if (h->order >= MAX_ORDER)
602 		return NULL;
603 
604 	page = alloc_pages_exact_node(nid,
605 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
606 						__GFP_REPEAT|__GFP_NOWARN,
607 		huge_page_order(h));
608 	if (page) {
609 		if (arch_prepare_hugepage(page)) {
610 			__free_pages(page, huge_page_order(h));
611 			return NULL;
612 		}
613 		prep_new_huge_page(h, page, nid);
614 	}
615 
616 	return page;
617 }
618 
619 /*
620  * common helper functions for hstate_next_node_to_{alloc|free}.
621  * We may have allocated or freed a huge page based on a different
622  * nodes_allowed previously, so h->next_nid_to_{alloc|free} might
623  * be outside of *nodes_allowed.  Ensure that we use an allowed
624  * node for alloc or free.
625  */
626 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
627 {
628 	nid = next_node(nid, *nodes_allowed);
629 	if (nid == MAX_NUMNODES)
630 		nid = first_node(*nodes_allowed);
631 	VM_BUG_ON(nid >= MAX_NUMNODES);
632 
633 	return nid;
634 }
635 
636 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
637 {
638 	if (!node_isset(nid, *nodes_allowed))
639 		nid = next_node_allowed(nid, nodes_allowed);
640 	return nid;
641 }
642 
643 /*
644  * returns the previously saved node ["this node"] from which to
645  * allocate a persistent huge page for the pool and advances the
646  * next node from which to allocate, handling wrap at end of node
647  * mask.
648  */
649 static int hstate_next_node_to_alloc(struct hstate *h,
650 					nodemask_t *nodes_allowed)
651 {
652 	int nid;
653 
654 	VM_BUG_ON(!nodes_allowed);
655 
656 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
657 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
658 
659 	return nid;
660 }
661 
662 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
663 {
664 	struct page *page;
665 	int start_nid;
666 	int next_nid;
667 	int ret = 0;
668 
669 	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
670 	next_nid = start_nid;
671 
672 	do {
673 		page = alloc_fresh_huge_page_node(h, next_nid);
674 		if (page) {
675 			ret = 1;
676 			break;
677 		}
678 		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
679 	} while (next_nid != start_nid);
680 
681 	if (ret)
682 		count_vm_event(HTLB_BUDDY_PGALLOC);
683 	else
684 		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
685 
686 	return ret;
687 }
688 
689 /*
690  * helper for free_pool_huge_page() - return the previously saved
691  * node ["this node"] from which to free a huge page.  Advance the
692  * next node id whether or not we find a free huge page to free so
693  * that the next attempt to free addresses the next node.
694  */
695 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
696 {
697 	int nid;
698 
699 	VM_BUG_ON(!nodes_allowed);
700 
701 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
702 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
703 
704 	return nid;
705 }
706 
707 /*
708  * Free a huge page from the pool, starting at the next node to free.
709  * Attempt to keep persistent huge pages more or less
710  * balanced over allowed nodes.
711  * Called with hugetlb_lock locked.
712  */
713 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
714 							 bool acct_surplus)
715 {
716 	int start_nid;
717 	int next_nid;
718 	int ret = 0;
719 
720 	start_nid = hstate_next_node_to_free(h, nodes_allowed);
721 	next_nid = start_nid;
722 
723 	do {
724 		/*
725 		 * If we're returning unused surplus pages, only examine
726 		 * nodes with surplus pages.
727 		 */
728 		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
729 		    !list_empty(&h->hugepage_freelists[next_nid])) {
730 			struct page *page =
731 				list_entry(h->hugepage_freelists[next_nid].next,
732 					  struct page, lru);
733 			list_del(&page->lru);
734 			h->free_huge_pages--;
735 			h->free_huge_pages_node[next_nid]--;
736 			if (acct_surplus) {
737 				h->surplus_huge_pages--;
738 				h->surplus_huge_pages_node[next_nid]--;
739 			}
740 			update_and_free_page(h, page);
741 			ret = 1;
742 			break;
743 		}
744 		next_nid = hstate_next_node_to_free(h, nodes_allowed);
745 	} while (next_nid != start_nid);
746 
747 	return ret;
748 }
749 
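/*
 * Allocate a surplus huge page straight from the buddy allocator,
 * either on the requested node or, with NUMA_NO_NODE, wherever memory
 * is available, provided the overcommit limit has not been reached.
 */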
750 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
751 {
752 	struct page *page;
753 	unsigned int r_nid;
754 
755 	if (h->order >= MAX_ORDER)
756 		return NULL;
757 
758 	/*
759 	 * Assume we will successfully allocate the surplus page to
760 	 * prevent racing processes from causing the surplus to exceed
761 	 * overcommit
762 	 *
763 	 * This however introduces a different race, where a process B
764 	 * tries to grow the static hugepage pool while alloc_pages() is
765 	 * called by process A. B will only examine the per-node
766 	 * counters in determining if surplus huge pages can be
767 	 * converted to normal huge pages in adjust_pool_surplus(). A
768 	 * won't be able to increment the per-node counter, until the
769 	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
770 	 * no more huge pages can be converted from surplus to normal
771 	 * state (and doesn't try to convert again). Thus, we have a
772 	 * case where a surplus huge page exists, the pool is grown, and
773 	 * the surplus huge page still exists after, even though it
774 	 * should just have been converted to a normal huge page. This
775 	 * does not leak memory, though, as the hugepage will be freed
776 	 * once it is out of use. It also does not allow the counters to
777 	 * go out of whack in adjust_pool_surplus() as we don't modify
778 	 * the node values until we've gotten the hugepage and only the
779 	 * per-node value is checked there.
780 	 */
781 	spin_lock(&hugetlb_lock);
782 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
783 		spin_unlock(&hugetlb_lock);
784 		return NULL;
785 	} else {
786 		h->nr_huge_pages++;
787 		h->surplus_huge_pages++;
788 	}
789 	spin_unlock(&hugetlb_lock);
790 
791 	if (nid == NUMA_NO_NODE)
792 		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
793 				   __GFP_REPEAT|__GFP_NOWARN,
794 				   huge_page_order(h));
795 	else
796 		page = alloc_pages_exact_node(nid,
797 			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
798 			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
799 
800 	if (page && arch_prepare_hugepage(page)) {
801 		__free_pages(page, huge_page_order(h));
802 		return NULL;
803 	}
804 
805 	spin_lock(&hugetlb_lock);
806 	if (page) {
807 		r_nid = page_to_nid(page);
808 		set_compound_page_dtor(page, free_huge_page);
809 		/*
810 		 * We incremented the global counters already
811 		 */
812 		h->nr_huge_pages_node[r_nid]++;
813 		h->surplus_huge_pages_node[r_nid]++;
814 		__count_vm_event(HTLB_BUDDY_PGALLOC);
815 	} else {
816 		h->nr_huge_pages--;
817 		h->surplus_huge_pages--;
818 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
819 	}
820 	spin_unlock(&hugetlb_lock);
821 
822 	return page;
823 }
824 
825 /*
826  * This allocation function is useful in the context where vma is irrelevant.
827  * E.g. soft-offlining uses this function because it only cares about the
828  * physical address of the error page.
829  */
830 struct page *alloc_huge_page_node(struct hstate *h, int nid)
831 {
832 	struct page *page;
833 
834 	spin_lock(&hugetlb_lock);
835 	page = dequeue_huge_page_node(h, nid);
836 	spin_unlock(&hugetlb_lock);
837 
838 	if (!page)
839 		page = alloc_buddy_huge_page(h, nid);
840 
841 	return page;
842 }
843 
844 /*
845  * Increase the hugetlb pool such that it can accommodate a reservation
846  * of size 'delta'.
847  */
848 static int gather_surplus_pages(struct hstate *h, int delta)
849 {
850 	struct list_head surplus_list;
851 	struct page *page, *tmp;
852 	int ret, i;
853 	int needed, allocated;
854 
855 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
856 	if (needed <= 0) {
857 		h->resv_huge_pages += delta;
858 		return 0;
859 	}
860 
861 	allocated = 0;
862 	INIT_LIST_HEAD(&surplus_list);
863 
864 	ret = -ENOMEM;
865 retry:
866 	spin_unlock(&hugetlb_lock);
867 	for (i = 0; i < needed; i++) {
868 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
869 		if (!page)
870 			/*
871 			 * We were not able to allocate enough pages to
872 			 * satisfy the entire reservation so we free what
873 			 * we've allocated so far.
874 			 */
875 			goto free;
876 
877 		list_add(&page->lru, &surplus_list);
878 	}
879 	allocated += needed;
880 
881 	/*
882 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
883 	 * because either resv_huge_pages or free_huge_pages may have changed.
884 	 */
885 	spin_lock(&hugetlb_lock);
886 	needed = (h->resv_huge_pages + delta) -
887 			(h->free_huge_pages + allocated);
888 	if (needed > 0)
889 		goto retry;
890 
891 	/*
892 	 * The surplus_list now contains _at_least_ the number of extra pages
893 	 * needed to accommodate the reservation.  Add the appropriate number
894 	 * of pages to the hugetlb pool and free the extras back to the buddy
895 	 * allocator.  Commit the entire reservation here to prevent another
896 	 * process from stealing the pages as they are added to the pool but
897 	 * before they are reserved.
898 	 */
899 	needed += allocated;
900 	h->resv_huge_pages += delta;
901 	ret = 0;
902 
903 	spin_unlock(&hugetlb_lock);
904 	/* Free the needed pages to the hugetlb pool */
905 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
906 		if ((--needed) < 0)
907 			break;
908 		list_del(&page->lru);
909 		/*
910 		 * This page is now managed by the hugetlb allocator and has
911 		 * no users -- drop the buddy allocator's reference.
912 		 */
913 		put_page_testzero(page);
914 		VM_BUG_ON(page_count(page));
915 		enqueue_huge_page(h, page);
916 	}
917 
918 	/* Free unnecessary surplus pages to the buddy allocator */
919 free:
920 	if (!list_empty(&surplus_list)) {
921 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
922 			list_del(&page->lru);
923 			put_page(page);
924 		}
925 	}
926 	spin_lock(&hugetlb_lock);
927 
928 	return ret;
929 }
930 
931 /*
932  * When releasing a hugetlb pool reservation, any surplus pages that were
933  * allocated to satisfy the reservation must be explicitly freed if they were
934  * never used.
935  * Called with hugetlb_lock held.
936  */
937 static void return_unused_surplus_pages(struct hstate *h,
938 					unsigned long unused_resv_pages)
939 {
940 	unsigned long nr_pages;
941 
942 	/* Uncommit the reservation */
943 	h->resv_huge_pages -= unused_resv_pages;
944 
945 	/* Cannot return gigantic pages currently */
946 	if (h->order >= MAX_ORDER)
947 		return;
948 
949 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
950 
951 	/*
952 	 * We want to release as many surplus pages as possible, spread
953 	 * evenly across all nodes with memory. Iterate across these nodes
954 	 * until we can no longer free unreserved surplus pages. This occurs
955 	 * when the nodes with surplus pages have no free pages.
956 	 * free_pool_huge_page() will balance the freed pages across the
957 	 * on-line nodes with memory and will handle the hstate accounting.
958 	 */
959 	while (nr_pages--) {
960 		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
961 			break;
962 	}
963 }
964 
965 /*
966  * Determine if the huge page at addr within the vma has an associated
967  * reservation.  Where it does not, we will need to logically increase the
968  * reservation and actually increase quota before an allocation can occur.
969  * Where any new reservation would be required, the reservation change is
970  * prepared, but not committed.  Once the page has been quota allocated and
971  * instantiated, the change should be committed via vma_commit_reservation.
972  * No action is required on failure.
973  */
974 static long vma_needs_reservation(struct hstate *h,
975 			struct vm_area_struct *vma, unsigned long addr)
976 {
977 	struct address_space *mapping = vma->vm_file->f_mapping;
978 	struct inode *inode = mapping->host;
979 
980 	if (vma->vm_flags & VM_MAYSHARE) {
981 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
982 		return region_chg(&inode->i_mapping->private_list,
983 							idx, idx + 1);
984 
985 	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
986 		return 1;
987 
988 	} else  {
989 		long err;
990 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
991 		struct resv_map *reservations = vma_resv_map(vma);
992 
993 		err = region_chg(&reservations->regions, idx, idx + 1);
994 		if (err < 0)
995 			return err;
996 		return 0;
997 	}
998 }
999 static void vma_commit_reservation(struct hstate *h,
1000 			struct vm_area_struct *vma, unsigned long addr)
1001 {
1002 	struct address_space *mapping = vma->vm_file->f_mapping;
1003 	struct inode *inode = mapping->host;
1004 
1005 	if (vma->vm_flags & VM_MAYSHARE) {
1006 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1007 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
1008 
1009 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1010 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1011 		struct resv_map *reservations = vma_resv_map(vma);
1012 
1013 		/* Mark this page used in the map. */
1014 		region_add(&reservations->regions, idx, idx + 1);
1015 	}
1016 }
1017 
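/*
 * Allocate a huge page for a fault at 'addr' in 'vma': charge quota if
 * no reservation covers the page, prefer a page from the pre-allocated
 * pool, and fall back to a surplus page from the buddy allocator.
 */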
1018 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1019 				    unsigned long addr, int avoid_reserve)
1020 {
1021 	struct hstate *h = hstate_vma(vma);
1022 	struct page *page;
1023 	struct address_space *mapping = vma->vm_file->f_mapping;
1024 	struct inode *inode = mapping->host;
1025 	long chg;
1026 
1027 	/*
1028 	 * Processes that did not create the mapping will have no reserves and
1029 	 * will not have accounted against quota. Check that the quota can be
1030 	 * made before satisfying the allocation.
1031 	 * MAP_NORESERVE mappings may also need pages and quota allocated
1032 	 * if no reserve mapping overlaps.
1033 	 */
1034 	chg = vma_needs_reservation(h, vma, addr);
1035 	if (chg < 0)
1036 		return ERR_PTR(chg);
1037 	if (chg)
1038 		if (hugetlb_get_quota(inode->i_mapping, chg))
1039 			return ERR_PTR(-ENOSPC);
1040 
1041 	spin_lock(&hugetlb_lock);
1042 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1043 	spin_unlock(&hugetlb_lock);
1044 
1045 	if (!page) {
1046 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1047 		if (!page) {
1048 			hugetlb_put_quota(inode->i_mapping, chg);
1049 			return ERR_PTR(-VM_FAULT_SIGBUS);
1050 		}
1051 	}
1052 
1053 	set_page_private(page, (unsigned long) mapping);
1054 
1055 	vma_commit_reservation(h, vma, addr);
1056 
1057 	return page;
1058 }
1059 
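/*
 * Gigantic pages cannot come from the buddy allocator, so they are
 * carved out of bootmem at early boot, one node at a time, with the
 * huge_bootmem_page bookkeeping stored in the page itself until
 * gather_bootmem_prealloc() runs.
 */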
1060 int __weak alloc_bootmem_huge_page(struct hstate *h)
1061 {
1062 	struct huge_bootmem_page *m;
1063 	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1064 
1065 	while (nr_nodes) {
1066 		void *addr;
1067 
1068 		addr = __alloc_bootmem_node_nopanic(
1069 				NODE_DATA(hstate_next_node_to_alloc(h,
1070 						&node_states[N_HIGH_MEMORY])),
1071 				huge_page_size(h), huge_page_size(h), 0);
1072 
1073 		if (addr) {
1074 			/*
1075 			 * Use the beginning of the huge page to store the
1076 			 * huge_bootmem_page struct (until gather_bootmem
1077 			 * puts them into the mem_map).
1078 			 */
1079 			m = addr;
1080 			goto found;
1081 		}
1082 		nr_nodes--;
1083 	}
1084 	return 0;
1085 
1086 found:
1087 	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1088 	/* Put them into a private list first because mem_map is not up yet */
1089 	list_add(&m->list, &huge_boot_pages);
1090 	m->hstate = h;
1091 	return 1;
1092 }
1093 
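/*
 * Gigantic pages span more than MAX_ORDER_NR_PAGES, so their struct
 * pages may not be contiguous in the mem_map and need the special
 * tail setup done by prep_compound_gigantic_page().
 */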
1094 static void prep_compound_huge_page(struct page *page, int order)
1095 {
1096 	if (unlikely(order > (MAX_ORDER - 1)))
1097 		prep_compound_gigantic_page(page, order);
1098 	else
1099 		prep_compound_page(page, order);
1100 }
1101 
1102 /* Put bootmem huge pages into the standard lists after mem_map is up */
1103 static void __init gather_bootmem_prealloc(void)
1104 {
1105 	struct huge_bootmem_page *m;
1106 
1107 	list_for_each_entry(m, &huge_boot_pages, list) {
1108 		struct page *page = virt_to_page(m);
1109 		struct hstate *h = m->hstate;
1110 		__ClearPageReserved(page);
1111 		WARN_ON(page_count(page) != 1);
1112 		prep_compound_huge_page(page, h->order);
1113 		prep_new_huge_page(h, page, page_to_nid(page));
1114 	}
1115 }
1116 
1117 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1118 {
1119 	unsigned long i;
1120 
1121 	for (i = 0; i < h->max_huge_pages; ++i) {
1122 		if (h->order >= MAX_ORDER) {
1123 			if (!alloc_bootmem_huge_page(h))
1124 				break;
1125 		} else if (!alloc_fresh_huge_page(h,
1126 					 &node_states[N_HIGH_MEMORY]))
1127 			break;
1128 	}
1129 	h->max_huge_pages = i;
1130 }
1131 
1132 static void __init hugetlb_init_hstates(void)
1133 {
1134 	struct hstate *h;
1135 
1136 	for_each_hstate(h) {
1137 		/* oversize hugepages were init'ed in early boot */
1138 		if (h->order < MAX_ORDER)
1139 			hugetlb_hstate_alloc_pages(h);
1140 	}
1141 }
1142 
1143 static char * __init memfmt(char *buf, unsigned long n)
1144 {
1145 	if (n >= (1UL << 30))
1146 		sprintf(buf, "%lu GB", n >> 30);
1147 	else if (n >= (1UL << 20))
1148 		sprintf(buf, "%lu MB", n >> 20);
1149 	else
1150 		sprintf(buf, "%lu KB", n >> 10);
1151 	return buf;
1152 }
1153 
1154 static void __init report_hugepages(void)
1155 {
1156 	struct hstate *h;
1157 
1158 	for_each_hstate(h) {
1159 		char buf[32];
1160 		printk(KERN_INFO "HugeTLB registered %s page size, "
1161 				 "pre-allocated %ld pages\n",
1162 			memfmt(buf, huge_page_size(h)),
1163 			h->free_huge_pages);
1164 	}
1165 }
1166 
1167 #ifdef CONFIG_HIGHMEM
1168 static void try_to_free_low(struct hstate *h, unsigned long count,
1169 						nodemask_t *nodes_allowed)
1170 {
1171 	int i;
1172 
1173 	if (h->order >= MAX_ORDER)
1174 		return;
1175 
1176 	for_each_node_mask(i, *nodes_allowed) {
1177 		struct page *page, *next;
1178 		struct list_head *freel = &h->hugepage_freelists[i];
1179 		list_for_each_entry_safe(page, next, freel, lru) {
1180 			if (count >= h->nr_huge_pages)
1181 				return;
1182 			if (PageHighMem(page))
1183 				continue;
1184 			list_del(&page->lru);
1185 			update_and_free_page(h, page);
1186 			h->free_huge_pages--;
1187 			h->free_huge_pages_node[page_to_nid(page)]--;
1188 		}
1189 	}
1190 }
1191 #else
1192 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1193 						nodemask_t *nodes_allowed)
1194 {
1195 }
1196 #endif
1197 
1198 /*
1199  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1200  * balanced by operating on them in a round-robin fashion.
1201  * Returns 1 if an adjustment was made.
1202  */
1203 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1204 				int delta)
1205 {
1206 	int start_nid, next_nid;
1207 	int ret = 0;
1208 
1209 	VM_BUG_ON(delta != -1 && delta != 1);
1210 
1211 	if (delta < 0)
1212 		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1213 	else
1214 		start_nid = hstate_next_node_to_free(h, nodes_allowed);
1215 	next_nid = start_nid;
1216 
1217 	do {
1218 		int nid = next_nid;
1219 		if (delta < 0)  {
1220 			/*
1221 			 * To shrink on this node, there must be a surplus page
1222 			 */
1223 			if (!h->surplus_huge_pages_node[nid]) {
1224 				next_nid = hstate_next_node_to_alloc(h,
1225 								nodes_allowed);
1226 				continue;
1227 			}
1228 		}
1229 		if (delta > 0) {
1230 			/*
1231 			 * Surplus cannot exceed the total number of pages
1232 			 */
1233 			if (h->surplus_huge_pages_node[nid] >=
1234 						h->nr_huge_pages_node[nid]) {
1235 				next_nid = hstate_next_node_to_free(h,
1236 								nodes_allowed);
1237 				continue;
1238 			}
1239 		}
1240 
1241 		h->surplus_huge_pages += delta;
1242 		h->surplus_huge_pages_node[nid] += delta;
1243 		ret = 1;
1244 		break;
1245 	} while (next_nid != start_nid);
1246 
1247 	return ret;
1248 }
1249 
1250 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1251 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1252 						nodemask_t *nodes_allowed)
1253 {
1254 	unsigned long min_count, ret;
1255 
1256 	if (h->order >= MAX_ORDER)
1257 		return h->max_huge_pages;
1258 
1259 	/*
1260 	 * Increase the pool size
1261 	 * First take pages out of surplus state.  Then make up the
1262 	 * remaining difference by allocating fresh huge pages.
1263 	 *
1264 	 * We might race with alloc_buddy_huge_page() here and be unable
1265 	 * to convert a surplus huge page to a normal huge page. That is
1266 	 * not critical, though, it just means the overall size of the
1267 	 * pool might be one hugepage larger than it needs to be, but
1268 	 * within all the constraints specified by the sysctls.
1269 	 */
1270 	spin_lock(&hugetlb_lock);
1271 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1272 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
1273 			break;
1274 	}
1275 
1276 	while (count > persistent_huge_pages(h)) {
1277 		/*
1278 		 * If this allocation races such that we no longer need the
1279 		 * page, free_huge_page will handle it by freeing the page
1280 		 * and reducing the surplus.
1281 		 */
1282 		spin_unlock(&hugetlb_lock);
1283 		ret = alloc_fresh_huge_page(h, nodes_allowed);
1284 		spin_lock(&hugetlb_lock);
1285 		if (!ret)
1286 			goto out;
1287 
1288 		/* Bail for signals. Probably ctrl-c from user */
1289 		if (signal_pending(current))
1290 			goto out;
1291 	}
1292 
1293 	/*
1294 	 * Decrease the pool size
1295 	 * First return free pages to the buddy allocator (being careful
1296 	 * to keep enough around to satisfy reservations).  Then place
1297 	 * pages into surplus state as needed so the pool will shrink
1298 	 * to the desired size as pages become free.
1299 	 *
1300 	 * By placing pages into the surplus state independent of the
1301 	 * overcommit value, we are allowing the surplus pool size to
1302 	 * exceed overcommit. There are few sane options here. Since
1303 	 * alloc_buddy_huge_page() is checking the global counter,
1304 	 * though, we'll note that we're not allowed to exceed surplus
1305 	 * and won't grow the pool anywhere else. Not until one of the
1306 	 * sysctls is changed, or the surplus pages go out of use.
1307 	 */
1308 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1309 	min_count = max(count, min_count);
1310 	try_to_free_low(h, min_count, nodes_allowed);
1311 	while (min_count < persistent_huge_pages(h)) {
1312 		if (!free_pool_huge_page(h, nodes_allowed, 0))
1313 			break;
1314 	}
1315 	while (count < persistent_huge_pages(h)) {
1316 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
1317 			break;
1318 	}
1319 out:
1320 	ret = persistent_huge_pages(h);
1321 	spin_unlock(&hugetlb_lock);
1322 	return ret;
1323 }
1324 
1325 #define HSTATE_ATTR_RO(_name) \
1326 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1327 
1328 #define HSTATE_ATTR(_name) \
1329 	static struct kobj_attribute _name##_attr = \
1330 		__ATTR(_name, 0644, _name##_show, _name##_store)
1331 
1332 static struct kobject *hugepages_kobj;
1333 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1334 
1335 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1336 
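/*
 * Map a sysfs kobject back to its hstate.  The global hstate kobjects
 * are checked first (reporting NUMA_NO_NODE via *nidp); otherwise fall
 * through to the per-node lookup.
 */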
1337 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1338 {
1339 	int i;
1340 
1341 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
1342 		if (hstate_kobjs[i] == kobj) {
1343 			if (nidp)
1344 				*nidp = NUMA_NO_NODE;
1345 			return &hstates[i];
1346 		}
1347 
1348 	return kobj_to_node_hstate(kobj, nidp);
1349 }
1350 
1351 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1352 					struct kobj_attribute *attr, char *buf)
1353 {
1354 	struct hstate *h;
1355 	unsigned long nr_huge_pages;
1356 	int nid;
1357 
1358 	h = kobj_to_hstate(kobj, &nid);
1359 	if (nid == NUMA_NO_NODE)
1360 		nr_huge_pages = h->nr_huge_pages;
1361 	else
1362 		nr_huge_pages = h->nr_huge_pages_node[nid];
1363 
1364 	return sprintf(buf, "%lu\n", nr_huge_pages);
1365 }
1366 
1367 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1368 			struct kobject *kobj, struct kobj_attribute *attr,
1369 			const char *buf, size_t len)
1370 {
1371 	int err;
1372 	int nid;
1373 	unsigned long count;
1374 	struct hstate *h;
1375 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1376 
1377 	err = strict_strtoul(buf, 10, &count);
1378 	if (err)
1379 		goto out;
1380 
1381 	h = kobj_to_hstate(kobj, &nid);
1382 	if (h->order >= MAX_ORDER) {
1383 		err = -EINVAL;
1384 		goto out;
1385 	}
1386 
1387 	if (nid == NUMA_NO_NODE) {
1388 		/*
1389 		 * global hstate attribute
1390 		 */
1391 		if (!(obey_mempolicy &&
1392 				init_nodemask_of_mempolicy(nodes_allowed))) {
1393 			NODEMASK_FREE(nodes_allowed);
1394 			nodes_allowed = &node_states[N_HIGH_MEMORY];
1395 		}
1396 	} else if (nodes_allowed) {
1397 		/*
1398 		 * per node hstate attribute: adjust count to global,
1399 		 * but restrict alloc/free to the specified node.
1400 		 */
1401 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1402 		init_nodemask_of_node(nodes_allowed, nid);
1403 	} else
1404 		nodes_allowed = &node_states[N_HIGH_MEMORY];
1405 
1406 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1407 
1408 	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1409 		NODEMASK_FREE(nodes_allowed);
1410 
1411 	return len;
1412 out:
1413 	NODEMASK_FREE(nodes_allowed);
1414 	return err;
1415 }
1416 
1417 static ssize_t nr_hugepages_show(struct kobject *kobj,
1418 				       struct kobj_attribute *attr, char *buf)
1419 {
1420 	return nr_hugepages_show_common(kobj, attr, buf);
1421 }
1422 
1423 static ssize_t nr_hugepages_store(struct kobject *kobj,
1424 	       struct kobj_attribute *attr, const char *buf, size_t len)
1425 {
1426 	return nr_hugepages_store_common(false, kobj, attr, buf, len);
1427 }
1428 HSTATE_ATTR(nr_hugepages);
1429 
1430 #ifdef CONFIG_NUMA
1431 
1432 /*
1433  * hstate attribute for optionally mempolicy-based constraint on persistent
1434  * huge page alloc/free.
1435  */
1436 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1437 				       struct kobj_attribute *attr, char *buf)
1438 {
1439 	return nr_hugepages_show_common(kobj, attr, buf);
1440 }
1441 
1442 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1443 	       struct kobj_attribute *attr, const char *buf, size_t len)
1444 {
1445 	return nr_hugepages_store_common(true, kobj, attr, buf, len);
1446 }
1447 HSTATE_ATTR(nr_hugepages_mempolicy);
1448 #endif
1449 
1450 
1451 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1452 					struct kobj_attribute *attr, char *buf)
1453 {
1454 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1455 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1456 }
1457 
1458 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1459 		struct kobj_attribute *attr, const char *buf, size_t count)
1460 {
1461 	int err;
1462 	unsigned long input;
1463 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1464 
1465 	if (h->order >= MAX_ORDER)
1466 		return -EINVAL;
1467 
1468 	err = strict_strtoul(buf, 10, &input);
1469 	if (err)
1470 		return err;
1471 
1472 	spin_lock(&hugetlb_lock);
1473 	h->nr_overcommit_huge_pages = input;
1474 	spin_unlock(&hugetlb_lock);
1475 
1476 	return count;
1477 }
1478 HSTATE_ATTR(nr_overcommit_hugepages);
1479 
1480 static ssize_t free_hugepages_show(struct kobject *kobj,
1481 					struct kobj_attribute *attr, char *buf)
1482 {
1483 	struct hstate *h;
1484 	unsigned long free_huge_pages;
1485 	int nid;
1486 
1487 	h = kobj_to_hstate(kobj, &nid);
1488 	if (nid == NUMA_NO_NODE)
1489 		free_huge_pages = h->free_huge_pages;
1490 	else
1491 		free_huge_pages = h->free_huge_pages_node[nid];
1492 
1493 	return sprintf(buf, "%lu\n", free_huge_pages);
1494 }
1495 HSTATE_ATTR_RO(free_hugepages);
1496 
1497 static ssize_t resv_hugepages_show(struct kobject *kobj,
1498 					struct kobj_attribute *attr, char *buf)
1499 {
1500 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1501 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
1502 }
1503 HSTATE_ATTR_RO(resv_hugepages);
1504 
1505 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1506 					struct kobj_attribute *attr, char *buf)
1507 {
1508 	struct hstate *h;
1509 	unsigned long surplus_huge_pages;
1510 	int nid;
1511 
1512 	h = kobj_to_hstate(kobj, &nid);
1513 	if (nid == NUMA_NO_NODE)
1514 		surplus_huge_pages = h->surplus_huge_pages;
1515 	else
1516 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
1517 
1518 	return sprintf(buf, "%lu\n", surplus_huge_pages);
1519 }
1520 HSTATE_ATTR_RO(surplus_hugepages);
1521 
1522 static struct attribute *hstate_attrs[] = {
1523 	&nr_hugepages_attr.attr,
1524 	&nr_overcommit_hugepages_attr.attr,
1525 	&free_hugepages_attr.attr,
1526 	&resv_hugepages_attr.attr,
1527 	&surplus_hugepages_attr.attr,
1528 #ifdef CONFIG_NUMA
1529 	&nr_hugepages_mempolicy_attr.attr,
1530 #endif
1531 	NULL,
1532 };
1533 
1534 static struct attribute_group hstate_attr_group = {
1535 	.attrs = hstate_attrs,
1536 };
1537 
1538 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1539 				    struct kobject **hstate_kobjs,
1540 				    struct attribute_group *hstate_attr_group)
1541 {
1542 	int retval;
1543 	int hi = h - hstates;
1544 
1545 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1546 	if (!hstate_kobjs[hi])
1547 		return -ENOMEM;
1548 
1549 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1550 	if (retval)
1551 		kobject_put(hstate_kobjs[hi]);
1552 
1553 	return retval;
1554 }
1555 
1556 static void __init hugetlb_sysfs_init(void)
1557 {
1558 	struct hstate *h;
1559 	int err;
1560 
1561 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1562 	if (!hugepages_kobj)
1563 		return;
1564 
1565 	for_each_hstate(h) {
1566 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1567 					 hstate_kobjs, &hstate_attr_group);
1568 		if (err)
1569 			printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
1570 								h->name);
1571 	}
1572 }
1573 
1574 #ifdef CONFIG_NUMA
1575 
1576 /*
1577  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1578  * with node sysdevs in node_devices[] using a parallel array.  The array
1579  * index of a node sysdev or _hstate == node id.
1580  * This is here to avoid any static dependency of the node sysdev driver, in
1581  * the base kernel, on the hugetlb module.
1582  */
1583 struct node_hstate {
1584 	struct kobject		*hugepages_kobj;
1585 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
1586 };
1587 struct node_hstate node_hstates[MAX_NUMNODES];
1588 
1589 /*
1590  * A subset of global hstate attributes for node sysdevs
1591  */
1592 static struct attribute *per_node_hstate_attrs[] = {
1593 	&nr_hugepages_attr.attr,
1594 	&free_hugepages_attr.attr,
1595 	&surplus_hugepages_attr.attr,
1596 	NULL,
1597 };
1598 
1599 static struct attribute_group per_node_hstate_attr_group = {
1600 	.attrs = per_node_hstate_attrs,
1601 };
1602 
1603 /*
1604  * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
1605  * Returns node id via non-NULL nidp.
1606  */
1607 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1608 {
1609 	int nid;
1610 
1611 	for (nid = 0; nid < nr_node_ids; nid++) {
1612 		struct node_hstate *nhs = &node_hstates[nid];
1613 		int i;
1614 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
1615 			if (nhs->hstate_kobjs[i] == kobj) {
1616 				if (nidp)
1617 					*nidp = nid;
1618 				return &hstates[i];
1619 			}
1620 	}
1621 
1622 	BUG();
1623 	return NULL;
1624 }
1625 
1626 /*
1627  * Unregister hstate attributes from a single node sysdev.
1628  * No-op if no hstate attributes attached.
1629  */
1630 void hugetlb_unregister_node(struct node *node)
1631 {
1632 	struct hstate *h;
1633 	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1634 
1635 	if (!nhs->hugepages_kobj)
1636 		return;		/* no hstate attributes */
1637 
1638 	for_each_hstate(h)
1639 		if (nhs->hstate_kobjs[h - hstates]) {
1640 			kobject_put(nhs->hstate_kobjs[h - hstates]);
1641 			nhs->hstate_kobjs[h - hstates] = NULL;
1642 		}
1643 
1644 	kobject_put(nhs->hugepages_kobj);
1645 	nhs->hugepages_kobj = NULL;
1646 }
1647 
1648 /*
1649  * hugetlb module exit:  unregister hstate attributes from node sysdevs
1650  * that have them.
1651  */
1652 static void hugetlb_unregister_all_nodes(void)
1653 {
1654 	int nid;
1655 
1656 	/*
1657 	 * disable node sysdev registrations.
1658 	 */
1659 	register_hugetlbfs_with_node(NULL, NULL);
1660 
1661 	/*
1662 	 * remove hstate attributes from any nodes that have them.
1663 	 */
1664 	for (nid = 0; nid < nr_node_ids; nid++)
1665 		hugetlb_unregister_node(&node_devices[nid]);
1666 }
1667 
1668 /*
1669  * Register hstate attributes for a single node sysdev.
1670  * No-op if attributes already registered.
1671  */
1672 void hugetlb_register_node(struct node *node)
1673 {
1674 	struct hstate *h;
1675 	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1676 	int err;
1677 
1678 	if (nhs->hugepages_kobj)
1679 		return;		/* already allocated */
1680 
1681 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1682 							&node->sysdev.kobj);
1683 	if (!nhs->hugepages_kobj)
1684 		return;
1685 
1686 	for_each_hstate(h) {
1687 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1688 						nhs->hstate_kobjs,
1689 						&per_node_hstate_attr_group);
1690 		if (err) {
1691 			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1692 					" for node %d\n",
1693 						h->name, node->sysdev.id);
1694 			hugetlb_unregister_node(node);
1695 			break;
1696 		}
1697 	}
1698 }
1699 
1700 /*
1701  * hugetlb init time:  register hstate attributes for all registered node
1702  * sysdevs of nodes that have memory.  All on-line nodes should have
1703  * registered their associated sysdev by this time.
1704  */
1705 static void hugetlb_register_all_nodes(void)
1706 {
1707 	int nid;
1708 
1709 	for_each_node_state(nid, N_HIGH_MEMORY) {
1710 		struct node *node = &node_devices[nid];
1711 		if (node->sysdev.id == nid)
1712 			hugetlb_register_node(node);
1713 	}
1714 
1715 	/*
1716 	 * Let the node sysdev driver know we're here so it can
1717 	 * [un]register hstate attributes on node hotplug.
1718 	 */
1719 	register_hugetlbfs_with_node(hugetlb_register_node,
1720 				     hugetlb_unregister_node);
1721 }
1722 #else	/* !CONFIG_NUMA */
1723 
1724 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1725 {
1726 	BUG();
1727 	if (nidp)
1728 		*nidp = -1;
1729 	return NULL;
1730 }
1731 
1732 static void hugetlb_unregister_all_nodes(void) { }
1733 
1734 static void hugetlb_register_all_nodes(void) { }
1735 
1736 #endif
1737 
1738 static void __exit hugetlb_exit(void)
1739 {
1740 	struct hstate *h;
1741 
1742 	hugetlb_unregister_all_nodes();
1743 
1744 	for_each_hstate(h) {
1745 		kobject_put(hstate_kobjs[h - hstates]);
1746 	}
1747 
1748 	kobject_put(hugepages_kobj);
1749 }
1750 module_exit(hugetlb_exit);
1751 
1752 static int __init hugetlb_init(void)
1753 {
1754 	/* Some platforms decide whether they support huge pages at boot
1755 	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1756 	 * there is no such support.
1757 	 */
1758 	if (HPAGE_SHIFT == 0)
1759 		return 0;
1760 
1761 	if (!size_to_hstate(default_hstate_size)) {
1762 		default_hstate_size = HPAGE_SIZE;
1763 		if (!size_to_hstate(default_hstate_size))
1764 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1765 	}
1766 	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1767 	if (default_hstate_max_huge_pages)
1768 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1769 
1770 	hugetlb_init_hstates();
1771 
1772 	gather_bootmem_prealloc();
1773 
1774 	report_hugepages();
1775 
1776 	hugetlb_sysfs_init();
1777 
1778 	hugetlb_register_all_nodes();
1779 
1780 	return 0;
1781 }
1782 module_init(hugetlb_init);
1783 
1784 /* Should be called on processing a hugepagesz=... option */
1785 void __init hugetlb_add_hstate(unsigned order)
1786 {
1787 	struct hstate *h;
1788 	unsigned long i;
1789 
1790 	if (size_to_hstate(PAGE_SIZE << order)) {
1791 		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1792 		return;
1793 	}
1794 	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1795 	BUG_ON(order == 0);
1796 	h = &hstates[max_hstate++];
1797 	h->order = order;
1798 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1799 	h->nr_huge_pages = 0;
1800 	h->free_huge_pages = 0;
1801 	for (i = 0; i < MAX_NUMNODES; ++i)
1802 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1803 	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1804 	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1805 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1806 					huge_page_size(h)/1024);
1807 
1808 	parsed_hstate = h;
1809 }
1810 
1811 static int __init hugetlb_nrpages_setup(char *s)
1812 {
1813 	unsigned long *mhp;
1814 	static unsigned long *last_mhp;
1815 
1816 	/*
1817 	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1818 	 * so this hugepages= parameter goes to the "default hstate".
1819 	 */
1820 	if (!max_hstate)
1821 		mhp = &default_hstate_max_huge_pages;
1822 	else
1823 		mhp = &parsed_hstate->max_huge_pages;
1824 
1825 	if (mhp == last_mhp) {
1826 		printk(KERN_WARNING "hugepages= specified twice without "
1827 			"interleaving hugepagesz=, ignoring\n");
1828 		return 1;
1829 	}
1830 
1831 	if (sscanf(s, "%lu", mhp) <= 0)
1832 		*mhp = 0;
1833 
1834 	/*
1835 	 * Global state is always initialized later in hugetlb_init.
1836 	 * But we need to allocate gigantic hstates (order >= MAX_ORDER)
1837 	 * here, early enough that they can still use the bootmem allocator.
1838 	 */
1839 	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1840 		hugetlb_hstate_alloc_pages(parsed_hstate);
1841 
1842 	last_mhp = mhp;
1843 
1844 	return 1;
1845 }
1846 __setup("hugepages=", hugetlb_nrpages_setup);
1847 
1848 static int __init hugetlb_default_setup(char *s)
1849 {
1850 	default_hstate_size = memparse(s, &s);
1851 	return 1;
1852 }
1853 __setup("default_hugepagesz=", hugetlb_default_setup);
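
/*
 * Example boot-time usage of the options parsed above (the sizes are
 * illustrative and depend on what the architecture supports):
 *
 *	hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512 default_hugepagesz=2M
 *
 * Each hugepages= count applies to the hstate created by the preceding
 * hugepagesz=; a hugepages= given before any hugepagesz= goes to the
 * default hstate, and default_hugepagesz= selects which hstate the
 * legacy interfaces (e.g. /proc/sys/vm/nr_hugepages) operate on.
 */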
1854 
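/*
 * Sum the per-node counters in @array over the nodes allowed by the
 * current task's cpuset.
 */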
1855 static unsigned int cpuset_mems_nr(unsigned int *array)
1856 {
1857 	int node;
1858 	unsigned int nr = 0;
1859 
1860 	for_each_node_mask(node, cpuset_current_mems_allowed)
1861 		nr += array[node];
1862 
1863 	return nr;
1864 }
1865 
1866 #ifdef CONFIG_SYSCTL
1867 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1868 			 struct ctl_table *table, int write,
1869 			 void __user *buffer, size_t *length, loff_t *ppos)
1870 {
1871 	struct hstate *h = &default_hstate;
1872 	unsigned long tmp;
1873 	int ret;
1874 
1875 	tmp = h->max_huge_pages;
1876 
1877 	if (write && h->order >= MAX_ORDER)
1878 		return -EINVAL;
1879 
1880 	table->data = &tmp;
1881 	table->maxlen = sizeof(unsigned long);
1882 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1883 	if (ret)
1884 		goto out;
1885 
1886 	if (write) {
1887 		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1888 						GFP_KERNEL | __GFP_NORETRY);
1889 		if (!(obey_mempolicy &&
1890 			       init_nodemask_of_mempolicy(nodes_allowed))) {
1891 			NODEMASK_FREE(nodes_allowed);
1892 			nodes_allowed = &node_states[N_HIGH_MEMORY];
1893 		}
1894 		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
1895 
1896 		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1897 			NODEMASK_FREE(nodes_allowed);
1898 	}
1899 out:
1900 	return ret;
1901 }
1902 
1903 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1904 			  void __user *buffer, size_t *length, loff_t *ppos)
1905 {
1906 
1907 	return hugetlb_sysctl_handler_common(false, table, write,
1908 							buffer, length, ppos);
1909 }
1910 
1911 #ifdef CONFIG_NUMA
1912 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
1913 			  void __user *buffer, size_t *length, loff_t *ppos)
1914 {
1915 	return hugetlb_sysctl_handler_common(true, table, write,
1916 							buffer, length, ppos);
1917 }
1918 #endif /* CONFIG_NUMA */
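
/*
 * The two handlers above back the hugetlb pool sysctls (typically
 * exposed as /proc/sys/vm/nr_hugepages and, with CONFIG_NUMA,
 * /proc/sys/vm/nr_hugepages_mempolicy).  For example:
 *
 *	echo 128 > /proc/sys/vm/nr_hugepages
 *
 * resizes the default hstate's pool across all allowed nodes, while the
 * _mempolicy variant restricts the change to the nodes in the writing
 * task's memory policy.
 */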
1919 
1920 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1921 			void __user *buffer,
1922 			size_t *length, loff_t *ppos)
1923 {
1924 	proc_dointvec(table, write, buffer, length, ppos);
1925 	if (hugepages_treat_as_movable)
1926 		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1927 	else
1928 		htlb_alloc_mask = GFP_HIGHUSER;
1929 	return 0;
1930 }
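
/*
 * Toggled via the hugepages_treat_as_movable sysctl (typically
 * /proc/sys/vm/hugepages_treat_as_movable): when set, subsequent huge
 * page allocations may be satisfied from ZONE_MOVABLE.
 */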
1931 
1932 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1933 			void __user *buffer,
1934 			size_t *length, loff_t *ppos)
1935 {
1936 	struct hstate *h = &default_hstate;
1937 	unsigned long tmp;
1938 	int ret;
1939 
1940 	tmp = h->nr_overcommit_huge_pages;
1941 
1942 	if (write && h->order >= MAX_ORDER)
1943 		return -EINVAL;
1944 
1945 	table->data = &tmp;
1946 	table->maxlen = sizeof(unsigned long);
1947 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1948 	if (ret)
1949 		goto out;
1950 
1951 	if (write) {
1952 		spin_lock(&hugetlb_lock);
1953 		h->nr_overcommit_huge_pages = tmp;
1954 		spin_unlock(&hugetlb_lock);
1955 	}
1956 out:
1957 	return ret;
1958 }
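
/*
 * Backs the nr_overcommit_hugepages sysctl (typically
 * /proc/sys/vm/nr_overcommit_hugepages), which bounds how many surplus
 * huge pages may be allocated from the buddy allocator on demand.
 */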
1959 
1960 #endif /* CONFIG_SYSCTL */
1961 
1962 void hugetlb_report_meminfo(struct seq_file *m)
1963 {
1964 	struct hstate *h = &default_hstate;
1965 	seq_printf(m,
1966 			"HugePages_Total:   %5lu\n"
1967 			"HugePages_Free:    %5lu\n"
1968 			"HugePages_Rsvd:    %5lu\n"
1969 			"HugePages_Surp:    %5lu\n"
1970 			"Hugepagesize:   %8lu kB\n",
1971 			h->nr_huge_pages,
1972 			h->free_huge_pages,
1973 			h->resv_huge_pages,
1974 			h->surplus_huge_pages,
1975 			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
1976 }
1977 
1978 int hugetlb_report_node_meminfo(int nid, char *buf)
1979 {
1980 	struct hstate *h = &default_hstate;
1981 	return sprintf(buf,
1982 		"Node %d HugePages_Total: %5u\n"
1983 		"Node %d HugePages_Free:  %5u\n"
1984 		"Node %d HugePages_Surp:  %5u\n",
1985 		nid, h->nr_huge_pages_node[nid],
1986 		nid, h->free_huge_pages_node[nid],
1987 		nid, h->surplus_huge_pages_node[nid]);
1988 }
1989 
1990 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
1991 unsigned long hugetlb_total_pages(void)
1992 {
1993 	struct hstate *h = &default_hstate;
1994 	return h->nr_huge_pages * pages_per_huge_page(h);
1995 }
1996 
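/*
 * Adjust the reservation accounting for hstate @h by @delta huge pages:
 * a positive delta tries to back the new reservation with surplus pages,
 * a negative delta returns unused surplus pages.  Returns 0 on success
 * or -ENOMEM if the reservation cannot be backed.
 */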
1997 static int hugetlb_acct_memory(struct hstate *h, long delta)
1998 {
1999 	int ret = -ENOMEM;
2000 
2001 	spin_lock(&hugetlb_lock);
2002 	/*
2003 	 * When cpuset is configured, it breaks the strict hugetlb page
2004 	 * reservation as the accounting is done on a global variable. Such
2005 	 * a reservation is largely meaningless in the presence of cpusets
2006 	 * because it is not checked against page availability for the
2007 	 * current cpuset. An application can still be OOM-killed by the
2008 	 * kernel if there are no free huge pages in the cpuset the task
2009 	 * runs in. Enforcing strict accounting with cpusets is nearly
2010 	 * impossible (or too ugly) because cpusets are so fluid that
2011 	 * tasks and memory nodes can be moved between them at any time.
2012 	 *
2013 	 * The change of semantics for shared hugetlb mappings with cpusets
2014 	 * is undesirable. However, to preserve some of the semantics, we
2015 	 * fall back to checking against the current free page availability
2016 	 * as a best attempt, hopefully minimizing the impact of the
2017 	 * changed semantics that cpusets introduce.
2018 	 */
2019 	if (delta > 0) {
2020 		if (gather_surplus_pages(h, delta) < 0)
2021 			goto out;
2022 
2023 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2024 			return_unused_surplus_pages(h, delta);
2025 			goto out;
2026 		}
2027 	}
2028 
2029 	ret = 0;
2030 	if (delta < 0)
2031 		return_unused_surplus_pages(h, (unsigned long) -delta);
2032 
2033 out:
2034 	spin_unlock(&hugetlb_lock);
2035 	return ret;
2036 }
2037 
2038 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2039 {
2040 	struct resv_map *reservations = vma_resv_map(vma);
2041 
2042 	/*
2043 	 * This new VMA should share its sibling's reservation map if present.
2044 	 * The VMA will only ever have a valid reservation map pointer where
2045 	 * it is being copied for another still existing VMA.  As that VMA
2046 	 * has a reference to the reservation map it cannot disappear until
2047 	 * after this open call completes.  It is therefore safe to take a
2048 	 * new reference here without additional locking.
2049 	 */
2050 	if (reservations)
2051 		kref_get(&reservations->refs);
2052 }
2053 
2054 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2055 {
2056 	struct hstate *h = hstate_vma(vma);
2057 	struct resv_map *reservations = vma_resv_map(vma);
2058 	unsigned long reserve;
2059 	unsigned long start;
2060 	unsigned long end;
2061 
2062 	if (reservations) {
2063 		start = vma_hugecache_offset(h, vma, vma->vm_start);
2064 		end = vma_hugecache_offset(h, vma, vma->vm_end);
2065 
2066 		reserve = (end - start) -
2067 			region_count(&reservations->regions, start, end);
2068 
2069 		kref_put(&reservations->refs, resv_map_release);
2070 
2071 		if (reserve) {
2072 			hugetlb_acct_memory(h, -reserve);
2073 			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
2074 		}
2075 	}
2076 }
2077 
2078 /*
2079  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2080  * handle_mm_fault() to try to instantiate regular-sized pages in the
2081  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2082  * this far.
2083  */
2084 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2085 {
2086 	BUG();
2087 	return 0;
2088 }
2089 
2090 const struct vm_operations_struct hugetlb_vm_ops = {
2091 	.fault = hugetlb_vm_op_fault,
2092 	.open = hugetlb_vm_op_open,
2093 	.close = hugetlb_vm_op_close,
2094 };
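
/*
 * These vm_ops are installed on hugetlb VMAs by the code that sets the
 * mapping up (e.g. hugetlbfs' mmap path); .fault exists only to catch
 * stray faults that should have been routed to hugetlb_fault().
 */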
2095 
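/*
 * Build a huge PTE for @page using @vma's page protections; writable
 * mappings get a dirty, writable PTE, others are write-protected.
 */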
2096 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2097 				int writable)
2098 {
2099 	pte_t entry;
2100 
2101 	if (writable) {
2102 		entry =
2103 		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2104 	} else {
2105 		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2106 	}
2107 	entry = pte_mkyoung(entry);
2108 	entry = pte_mkhuge(entry);
2109 
2110 	return entry;
2111 }
2112 
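/*
 * Upgrade an existing huge PTE to writable and dirty, flushing the MMU
 * cache if the architecture requires it.
 */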
2113 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2114 				   unsigned long address, pte_t *ptep)
2115 {
2116 	pte_t entry;
2117 
2118 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2119 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
2120 		update_mmu_cache(vma, address, ptep);
2121 	}
2122 }
2123 
2124 
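/*
 * fork() support: copy the huge PTEs of @vma from @src to @dst, taking a
 * reference on each mapped page.  For private (COW) mappings the source
 * PTE is write-protected so that both parent and child fault on write.
 */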
2125 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2126 			    struct vm_area_struct *vma)
2127 {
2128 	pte_t *src_pte, *dst_pte, entry;
2129 	struct page *ptepage;
2130 	unsigned long addr;
2131 	int cow;
2132 	struct hstate *h = hstate_vma(vma);
2133 	unsigned long sz = huge_page_size(h);
2134 
2135 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2136 
2137 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2138 		src_pte = huge_pte_offset(src, addr);
2139 		if (!src_pte)
2140 			continue;
2141 		dst_pte = huge_pte_alloc(dst, addr, sz);
2142 		if (!dst_pte)
2143 			goto nomem;
2144 
2145 		/* If the pagetables are shared don't copy or take references */
2146 		if (dst_pte == src_pte)
2147 			continue;
2148 
2149 		spin_lock(&dst->page_table_lock);
2150 		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2151 		if (!huge_pte_none(huge_ptep_get(src_pte))) {
2152 			if (cow)
2153 				huge_ptep_set_wrprotect(src, addr, src_pte);
2154 			entry = huge_ptep_get(src_pte);
2155 			ptepage = pte_page(entry);
2156 			get_page(ptepage);
2157 			page_dup_rmap(ptepage);
2158 			set_huge_pte_at(dst, addr, dst_pte, entry);
2159 		}
2160 		spin_unlock(&src->page_table_lock);
2161 		spin_unlock(&dst->page_table_lock);
2162 	}
2163 	return 0;
2164 
2165 nomem:
2166 	return -ENOMEM;
2167 }
2168 
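/* Return 1 if @pte is a non-present migration swap entry. */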
2169 static int is_hugetlb_entry_migration(pte_t pte)
2170 {
2171 	swp_entry_t swp;
2172 
2173 	if (huge_pte_none(pte) || pte_present(pte))
2174 		return 0;
2175 	swp = pte_to_swp_entry(pte);
2176 	if (non_swap_entry(swp) && is_migration_entry(swp)) {
2177 		return 1;
2178 	} else
2179 		return 0;
2180 }
2181 
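/* Return 1 if @pte is a non-present hwpoison swap entry. */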
2182 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2183 {
2184 	swp_entry_t swp;
2185 
2186 	if (huge_pte_none(pte) || pte_present(pte))
2187 		return 0;
2188 	swp = pte_to_swp_entry(pte);
2189 	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
2190 		return 1;
2191 	} else
2192 		return 0;
2193 }
2194 
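/*
 * Tear down the huge PTEs of @vma in [@start, @end) and free the pages
 * they mapped.  If @ref_page is non-NULL only that page is unmapped.
 * The caller must hold the mapping's i_mmap_lock; see
 * unmap_hugepage_range() for the locked wrapper.
 */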
2195 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2196 			    unsigned long end, struct page *ref_page)
2197 {
2198 	struct mm_struct *mm = vma->vm_mm;
2199 	unsigned long address;
2200 	pte_t *ptep;
2201 	pte_t pte;
2202 	struct page *page;
2203 	struct page *tmp;
2204 	struct hstate *h = hstate_vma(vma);
2205 	unsigned long sz = huge_page_size(h);
2206 
2207 	/*
2208 	 * A page gathering list, protected by the per-file i_mmap_lock. The
2209 	 * lock is used to avoid list corruption from concurrent unmapping
2210 	 * of the same page, since we are using page->lru.
2211 	 */
2212 	LIST_HEAD(page_list);
2213 
2214 	WARN_ON(!is_vm_hugetlb_page(vma));
2215 	BUG_ON(start & ~huge_page_mask(h));
2216 	BUG_ON(end & ~huge_page_mask(h));
2217 
2218 	mmu_notifier_invalidate_range_start(mm, start, end);
2219 	spin_lock(&mm->page_table_lock);
2220 	for (address = start; address < end; address += sz) {
2221 		ptep = huge_pte_offset(mm, address);
2222 		if (!ptep)
2223 			continue;
2224 
2225 		if (huge_pmd_unshare(mm, &address, ptep))
2226 			continue;
2227 
2228 		/*
2229 		 * If a reference page is supplied, it is because a specific
2230 		 * page is being unmapped, not a range. Ensure the page we
2231 		 * are about to unmap is the actual page of interest.
2232 		 */
2233 		if (ref_page) {
2234 			pte = huge_ptep_get(ptep);
2235 			if (huge_pte_none(pte))
2236 				continue;
2237 			page = pte_page(pte);
2238 			if (page != ref_page)
2239 				continue;
2240 
2241 			/*
2242 			 * Mark the VMA as having unmapped its page so that
2243 			 * future faults in this VMA will fail rather than
2244 			 * looking like data was lost
2245 			 */
2246 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2247 		}
2248 
2249 		pte = huge_ptep_get_and_clear(mm, address, ptep);
2250 		if (huge_pte_none(pte))
2251 			continue;
2252 
2253 		/*
2254 		 * A HWPoisoned hugepage has already been unmapped and its reference dropped.
2255 		 */
2256 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2257 			continue;
2258 
2259 		page = pte_page(pte);
2260 		if (pte_dirty(pte))
2261 			set_page_dirty(page);
2262 		list_add(&page->lru, &page_list);
2263 	}
2264 	spin_unlock(&mm->page_table_lock);
2265 	flush_tlb_range(vma, start, end);
2266 	mmu_notifier_invalidate_range_end(mm, start, end);
2267 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
2268 		page_remove_rmap(page);
2269 		list_del(&page->lru);
2270 		put_page(page);
2271 	}
2272 }
2273 
2274 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2275 			  unsigned long end, struct page *ref_page)
2276 {
2277 	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2278 	__unmap_hugepage_range(vma, start, end, ref_page);
2279 	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2280 }
2281 
2282 /*
2283  * This is called when the original mapper fails to COW a MAP_PRIVATE
2284  * mapping it owns the reserve page for. The intention is to unmap the page
2285  * from other VMAs and let the children be SIGKILLed if they are faulting the
2286  * same region.
2287  */
2288 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2289 				struct page *page, unsigned long address)
2290 {
2291 	struct hstate *h = hstate_vma(vma);
2292 	struct vm_area_struct *iter_vma;
2293 	struct address_space *mapping;
2294 	struct prio_tree_iter iter;
2295 	pgoff_t pgoff;
2296 
2297 	/*
2298 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2299 	 * from page cache lookup which is in HPAGE_SIZE units.
2300 	 */
2301 	address = address & huge_page_mask(h);
2302 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
2303 		+ (vma->vm_pgoff >> PAGE_SHIFT);
2304 	mapping = (struct address_space *)page_private(page);
2305 
2306 	/*
2307 	 * Take the mapping lock for the duration of the table walk. As
2308 	 * this mapping should be shared between all the VMAs,
2309 	 * __unmap_hugepage_range() is called with the lock already held.
2310 	 */
2311 	spin_lock(&mapping->i_mmap_lock);
2312 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2313 		/* Do not unmap the current VMA */
2314 		if (iter_vma == vma)
2315 			continue;
2316 
2317 		/*
2318 		 * Unmap the page from other VMAs without their own reserves.
2319 		 * They get marked to be SIGKILLed if they fault in these
2320 		 * areas. This is because a future no-page fault on this VMA
2321 		 * could insert a zeroed page instead of the data existing
2322 		 * from the time of fork. This would look like data corruption
2323 		 */
2324 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2325 			__unmap_hugepage_range(iter_vma,
2326 				address, address + huge_page_size(h),
2327 				page);
2328 	}
2329 	spin_unlock(&mapping->i_mmap_lock);
2330 
2331 	return 1;
2332 }
2333 
2334 /*
2335  * hugetlb_cow() should be called with the page lock of the original hugepage held.
2336  */
2337 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2338 			unsigned long address, pte_t *ptep, pte_t pte,
2339 			struct page *pagecache_page)
2340 {
2341 	struct hstate *h = hstate_vma(vma);
2342 	struct page *old_page, *new_page;
2343 	int avoidcopy;
2344 	int outside_reserve = 0;
2345 
2346 	old_page = pte_page(pte);
2347 
2348 retry_avoidcopy:
2349 	/* If no-one else is actually using this page, avoid the copy
2350 	 * and just make the page writable */
2351 	avoidcopy = (page_mapcount(old_page) == 1);
2352 	if (avoidcopy) {
2353 		if (PageAnon(old_page))
2354 			page_move_anon_rmap(old_page, vma, address);
2355 		set_huge_ptep_writable(vma, address, ptep);
2356 		return 0;
2357 	}
2358 
2359 	/*
2360 	 * If the process that created a MAP_PRIVATE mapping is about to
2361 	 * perform a COW due to a shared page count, attempt to satisfy
2362 	 * the allocation without using the existing reserves. The pagecache
2363 	 * page is used to determine if the reserve at this address was
2364 	 * consumed or not. If reserves were used, a partial faulted mapping
2365 	 * at the time of fork() could consume its reserves on COW instead
2366 	 * of the full address range.
2367 	 */
2368 	if (!(vma->vm_flags & VM_MAYSHARE) &&
2369 			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2370 			old_page != pagecache_page)
2371 		outside_reserve = 1;
2372 
2373 	page_cache_get(old_page);
2374 
2375 	/* Drop page_table_lock as buddy allocator may be called */
2376 	spin_unlock(&mm->page_table_lock);
2377 	new_page = alloc_huge_page(vma, address, outside_reserve);
2378 
2379 	if (IS_ERR(new_page)) {
2380 		page_cache_release(old_page);
2381 
2382 		/*
2383 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
2384 		 * it is due to references held by a child and an insufficient
2385 		 * huge page pool. To guarantee the original mappers
2386 		 * reliability, unmap the page from child processes. The child
2387 		 * may get SIGKILLed if it later faults.
2388 		 */
2389 		if (outside_reserve) {
2390 			BUG_ON(huge_pte_none(pte));
2391 			if (unmap_ref_private(mm, vma, old_page, address)) {
2392 				BUG_ON(page_count(old_page) != 1);
2393 				BUG_ON(huge_pte_none(pte));
2394 				spin_lock(&mm->page_table_lock);
2395 				goto retry_avoidcopy;
2396 			}
2397 			WARN_ON_ONCE(1);
2398 		}
2399 
2400 		/* Caller expects lock to be held */
2401 		spin_lock(&mm->page_table_lock);
2402 		return -PTR_ERR(new_page);
2403 	}
2404 
2405 	/*
2406 	 * When the original hugepage is a shared one, it does not have
2407 	 * an anon_vma prepared.
2408 	 */
2409 	if (unlikely(anon_vma_prepare(vma))) {
2410 		/* Caller expects lock to be held */
2411 		spin_lock(&mm->page_table_lock);
2412 		return VM_FAULT_OOM;
2413 	}
2414 
2415 	copy_user_huge_page(new_page, old_page, address, vma,
2416 			    pages_per_huge_page(h));
2417 	__SetPageUptodate(new_page);
2418 
2419 	/*
2420 	 * Retake the page_table_lock to check for racing updates
2421 	 * before the page tables are altered
2422 	 */
2423 	spin_lock(&mm->page_table_lock);
2424 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2425 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2426 		/* Break COW */
2427 		mmu_notifier_invalidate_range_start(mm,
2428 			address & huge_page_mask(h),
2429 			(address & huge_page_mask(h)) + huge_page_size(h));
2430 		huge_ptep_clear_flush(vma, address, ptep);
2431 		set_huge_pte_at(mm, address, ptep,
2432 				make_huge_pte(vma, new_page, 1));
2433 		page_remove_rmap(old_page);
2434 		hugepage_add_new_anon_rmap(new_page, vma, address);
2435 		/* Make the old page be freed below */
2436 		new_page = old_page;
2437 		mmu_notifier_invalidate_range_end(mm,
2438 			address & huge_page_mask(h),
2439 			(address & huge_page_mask(h)) + huge_page_size(h));
2440 	}
2441 	page_cache_release(new_page);
2442 	page_cache_release(old_page);
2443 	return 0;
2444 }
2445 
2446 /* Return the pagecache page at a given address within a VMA */
2447 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2448 			struct vm_area_struct *vma, unsigned long address)
2449 {
2450 	struct address_space *mapping;
2451 	pgoff_t idx;
2452 
2453 	mapping = vma->vm_file->f_mapping;
2454 	idx = vma_hugecache_offset(h, vma, address);
2455 
2456 	return find_lock_page(mapping, idx);
2457 }
2458 
2459 /*
2460  * Return whether there is a pagecache page to back given address within VMA.
2461  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2462  */
2463 static bool hugetlbfs_pagecache_present(struct hstate *h,
2464 			struct vm_area_struct *vma, unsigned long address)
2465 {
2466 	struct address_space *mapping;
2467 	pgoff_t idx;
2468 	struct page *page;
2469 
2470 	mapping = vma->vm_file->f_mapping;
2471 	idx = vma_hugecache_offset(h, vma, address);
2472 
2473 	page = find_get_page(mapping, idx);
2474 	if (page)
2475 		put_page(page);
2476 	return page != NULL;
2477 }
2478 
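/*
 * Handle a fault on a huge PTE that is currently empty: look up or
 * allocate the backing hugepage (page cache for shared mappings,
 * anonymous for private ones), then install the new PTE.  Private write
 * faults do the COW immediately to avoid a second fault.
 */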
2479 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2480 			unsigned long address, pte_t *ptep, unsigned int flags)
2481 {
2482 	struct hstate *h = hstate_vma(vma);
2483 	int ret = VM_FAULT_SIGBUS;
2484 	pgoff_t idx;
2485 	unsigned long size;
2486 	struct page *page;
2487 	struct address_space *mapping;
2488 	pte_t new_pte;
2489 
2490 	/*
2491 	 * Currently, we are forced to kill the process in the event the
2492 	 * original mapper has unmapped pages from the child due to a failed
2493 	 * COW. Warn that such a situation has occurred as it may not be obvious.
2494 	 */
2495 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2496 		printk(KERN_WARNING
2497 			"PID %d killed due to inadequate hugepage pool\n",
2498 			current->pid);
2499 		return ret;
2500 	}
2501 
2502 	mapping = vma->vm_file->f_mapping;
2503 	idx = vma_hugecache_offset(h, vma, address);
2504 
2505 	/*
2506 	 * Use page lock to guard against racing truncation
2507 	 * before we get page_table_lock.
2508 	 */
2509 retry:
2510 	page = find_lock_page(mapping, idx);
2511 	if (!page) {
2512 		size = i_size_read(mapping->host) >> huge_page_shift(h);
2513 		if (idx >= size)
2514 			goto out;
2515 		page = alloc_huge_page(vma, address, 0);
2516 		if (IS_ERR(page)) {
2517 			ret = -PTR_ERR(page);
2518 			goto out;
2519 		}
2520 		clear_huge_page(page, address, pages_per_huge_page(h));
2521 		__SetPageUptodate(page);
2522 
2523 		if (vma->vm_flags & VM_MAYSHARE) {
2524 			int err;
2525 			struct inode *inode = mapping->host;
2526 
2527 			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2528 			if (err) {
2529 				put_page(page);
2530 				if (err == -EEXIST)
2531 					goto retry;
2532 				goto out;
2533 			}
2534 
2535 			spin_lock(&inode->i_lock);
2536 			inode->i_blocks += blocks_per_huge_page(h);
2537 			spin_unlock(&inode->i_lock);
2538 			page_dup_rmap(page);
2539 		} else {
2540 			lock_page(page);
2541 			if (unlikely(anon_vma_prepare(vma))) {
2542 				ret = VM_FAULT_OOM;
2543 				goto backout_unlocked;
2544 			}
2545 			hugepage_add_new_anon_rmap(page, vma, address);
2546 		}
2547 	} else {
2548 		/*
2549 		 * If a memory error occurs between mmap() and fault, some
2550 		 * processes don't get a hwpoisoned swap entry for the errored
2551 		 * virtual address, so we must block the hugepage fault with a PG_hwpoison check.
2552 		 */
2553 		if (unlikely(PageHWPoison(page))) {
2554 			ret = VM_FAULT_HWPOISON |
2555 			      VM_FAULT_SET_HINDEX(h - hstates);
2556 			goto backout_unlocked;
2557 		}
2558 		page_dup_rmap(page);
2559 	}
2560 
2561 	/*
2562 	 * If we are going to COW a private mapping later, we examine the
2563 	 * pending reservations for this page now. This will ensure that
2564 	 * any allocations necessary to record that reservation occur outside
2565 	 * the spinlock.
2566 	 */
2567 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2568 		if (vma_needs_reservation(h, vma, address) < 0) {
2569 			ret = VM_FAULT_OOM;
2570 			goto backout_unlocked;
2571 		}
2572 
2573 	spin_lock(&mm->page_table_lock);
2574 	size = i_size_read(mapping->host) >> huge_page_shift(h);
2575 	if (idx >= size)
2576 		goto backout;
2577 
2578 	ret = 0;
2579 	if (!huge_pte_none(huge_ptep_get(ptep)))
2580 		goto backout;
2581 
2582 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2583 				&& (vma->vm_flags & VM_SHARED)));
2584 	set_huge_pte_at(mm, address, ptep, new_pte);
2585 
2586 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2587 		/* Optimization, do the COW without a second fault */
2588 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2589 	}
2590 
2591 	spin_unlock(&mm->page_table_lock);
2592 	unlock_page(page);
2593 out:
2594 	return ret;
2595 
2596 backout:
2597 	spin_unlock(&mm->page_table_lock);
2598 backout_unlocked:
2599 	unlock_page(page);
2600 	put_page(page);
2601 	goto out;
2602 }
2603 
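/*
 * Top-level hugetlb fault handler.  Faults are serialized with the
 * hugetlb_instantiation_mutex to avoid spurious allocation failures,
 * then dispatched to hugetlb_no_page() for missing PTEs or to
 * hugetlb_cow() for write faults on read-only PTEs.
 */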
2604 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2605 			unsigned long address, unsigned int flags)
2606 {
2607 	pte_t *ptep;
2608 	pte_t entry;
2609 	int ret;
2610 	struct page *page = NULL;
2611 	struct page *pagecache_page = NULL;
2612 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2613 	struct hstate *h = hstate_vma(vma);
2614 
2615 	ptep = huge_pte_offset(mm, address);
2616 	if (ptep) {
2617 		entry = huge_ptep_get(ptep);
2618 		if (unlikely(is_hugetlb_entry_migration(entry))) {
2619 			migration_entry_wait(mm, (pmd_t *)ptep, address);
2620 			return 0;
2621 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2622 			return VM_FAULT_HWPOISON_LARGE |
2623 			       VM_FAULT_SET_HINDEX(h - hstates);
2624 	}
2625 
2626 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2627 	if (!ptep)
2628 		return VM_FAULT_OOM;
2629 
2630 	/*
2631 	 * Serialize hugepage allocation and instantiation, so that we don't
2632 	 * get spurious allocation failures if two CPUs race to instantiate
2633 	 * the same page in the page cache.
2634 	 */
2635 	mutex_lock(&hugetlb_instantiation_mutex);
2636 	entry = huge_ptep_get(ptep);
2637 	if (huge_pte_none(entry)) {
2638 		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2639 		goto out_mutex;
2640 	}
2641 
2642 	ret = 0;
2643 
2644 	/*
2645 	 * If we are going to COW the mapping later, we examine the pending
2646 	 * reservations for this page now. This will ensure that any
2647 	 * allocations necessary to record that reservation occur outside the
2648 	 * spinlock. For private mappings, we also lookup the pagecache
2649 	 * page now as it is used to determine if a reservation has been
2650 	 * consumed.
2651 	 */
2652 	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2653 		if (vma_needs_reservation(h, vma, address) < 0) {
2654 			ret = VM_FAULT_OOM;
2655 			goto out_mutex;
2656 		}
2657 
2658 		if (!(vma->vm_flags & VM_MAYSHARE))
2659 			pagecache_page = hugetlbfs_pagecache_page(h,
2660 								vma, address);
2661 	}
2662 
2663 	/*
2664 	 * hugetlb_cow() requires page locks of pte_page(entry) and
2665 	 * pagecache_page, so here we need to take the former one
2666 	 * when page != pagecache_page or !pagecache_page.
2667 	 * Note that locking order is always pagecache_page -> page,
2668 	 * so no worry about deadlock.
2669 	 */
2670 	page = pte_page(entry);
2671 	if (page != pagecache_page)
2672 		lock_page(page);
2673 
2674 	spin_lock(&mm->page_table_lock);
2675 	/* Check for a racing update before calling hugetlb_cow */
2676 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2677 		goto out_page_table_lock;
2678 
2679 
2680 	if (flags & FAULT_FLAG_WRITE) {
2681 		if (!pte_write(entry)) {
2682 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
2683 							pagecache_page);
2684 			goto out_page_table_lock;
2685 		}
2686 		entry = pte_mkdirty(entry);
2687 	}
2688 	entry = pte_mkyoung(entry);
2689 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2690 						flags & FAULT_FLAG_WRITE))
2691 		update_mmu_cache(vma, address, ptep);
2692 
2693 out_page_table_lock:
2694 	spin_unlock(&mm->page_table_lock);
2695 
2696 	if (pagecache_page) {
2697 		unlock_page(pagecache_page);
2698 		put_page(pagecache_page);
2699 	}
2700 	if (page != pagecache_page)
2701 		unlock_page(page);
2702 
2703 out_mutex:
2704 	mutex_unlock(&hugetlb_instantiation_mutex);
2705 
2706 	return ret;
2707 }
2708 
2709 /* Can be overridden by architectures */
2710 __attribute__((weak)) struct page *
2711 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2712 	       pud_t *pud, int write)
2713 {
2714 	BUG();
2715 	return NULL;
2716 }
2717 
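/*
 * get_user_pages() back end for hugetlb VMAs: walk @vma from *position,
 * faulting pages in as needed, and fill @pages/@vmas starting at slot @i.
 * On return *position and *length reflect the remaining range; returns
 * the updated slot count, or -EFAULT if no pages were processed.
 */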
2718 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2719 			struct page **pages, struct vm_area_struct **vmas,
2720 			unsigned long *position, int *length, int i,
2721 			unsigned int flags)
2722 {
2723 	unsigned long pfn_offset;
2724 	unsigned long vaddr = *position;
2725 	int remainder = *length;
2726 	struct hstate *h = hstate_vma(vma);
2727 
2728 	spin_lock(&mm->page_table_lock);
2729 	while (vaddr < vma->vm_end && remainder) {
2730 		pte_t *pte;
2731 		int absent;
2732 		struct page *page;
2733 
2734 		/*
2735 		 * Some archs (sparc64, sh*) have multiple pte_ts for
2736 		 * each hugepage.  We have to make sure we get the
2737 		 * first, for the page indexing below to work.
2738 		 */
2739 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2740 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
2741 
2742 		/*
2743 		 * When coredumping, it suits get_dump_page if we just return
2744 		 * an error where there's an empty slot with no huge pagecache
2745 		 * to back it.  This way, we avoid allocating a hugepage, and
2746 		 * the sparse dumpfile avoids allocating disk blocks, but its
2747 		 * huge holes still show up with zeroes where they need to be.
2748 		 */
2749 		if (absent && (flags & FOLL_DUMP) &&
2750 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2751 			remainder = 0;
2752 			break;
2753 		}
2754 
2755 		if (absent ||
2756 		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2757 			int ret;
2758 
2759 			spin_unlock(&mm->page_table_lock);
2760 			ret = hugetlb_fault(mm, vma, vaddr,
2761 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2762 			spin_lock(&mm->page_table_lock);
2763 			if (!(ret & VM_FAULT_ERROR))
2764 				continue;
2765 
2766 			remainder = 0;
2767 			break;
2768 		}
2769 
2770 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2771 		page = pte_page(huge_ptep_get(pte));
2772 same_page:
2773 		if (pages) {
2774 			pages[i] = mem_map_offset(page, pfn_offset);
2775 			get_page(pages[i]);
2776 		}
2777 
2778 		if (vmas)
2779 			vmas[i] = vma;
2780 
2781 		vaddr += PAGE_SIZE;
2782 		++pfn_offset;
2783 		--remainder;
2784 		++i;
2785 		if (vaddr < vma->vm_end && remainder &&
2786 				pfn_offset < pages_per_huge_page(h)) {
2787 			/*
2788 			 * We use pfn_offset to avoid touching the pageframes
2789 			 * of this compound page.
2790 			 */
2791 			goto same_page;
2792 		}
2793 	}
2794 	spin_unlock(&mm->page_table_lock);
2795 	*length = remainder;
2796 	*position = vaddr;
2797 
2798 	return i ? i : -EFAULT;
2799 }
2800 
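/*
 * mprotect() support: rewrite the present huge PTEs in [@address, @end)
 * with @newprot, skipping shared page-table entries, then flush the TLB
 * for the range.
 */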
2801 void hugetlb_change_protection(struct vm_area_struct *vma,
2802 		unsigned long address, unsigned long end, pgprot_t newprot)
2803 {
2804 	struct mm_struct *mm = vma->vm_mm;
2805 	unsigned long start = address;
2806 	pte_t *ptep;
2807 	pte_t pte;
2808 	struct hstate *h = hstate_vma(vma);
2809 
2810 	BUG_ON(address >= end);
2811 	flush_cache_range(vma, address, end);
2812 
2813 	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2814 	spin_lock(&mm->page_table_lock);
2815 	for (; address < end; address += huge_page_size(h)) {
2816 		ptep = huge_pte_offset(mm, address);
2817 		if (!ptep)
2818 			continue;
2819 		if (huge_pmd_unshare(mm, &address, ptep))
2820 			continue;
2821 		if (!huge_pte_none(huge_ptep_get(ptep))) {
2822 			pte = huge_ptep_get_and_clear(mm, address, ptep);
2823 			pte = pte_mkhuge(pte_modify(pte, newprot));
2824 			set_huge_pte_at(mm, address, ptep, pte);
2825 		}
2826 	}
2827 	spin_unlock(&mm->page_table_lock);
2828 	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2829 
2830 	flush_tlb_range(vma, start, end);
2831 }
2832 
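/*
 * Reserve huge pages for the range [@from, @to) of @inode, typically at
 * mmap()/SysV shm setup time (the callers live in hugetlbfs).  Shared
 * mappings track reservations in the inode's region list; private
 * mappings get a per-VMA resv_map and own their reserve.
 */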
2833 int hugetlb_reserve_pages(struct inode *inode,
2834 					long from, long to,
2835 					struct vm_area_struct *vma,
2836 					int acctflag)
2837 {
2838 	long ret, chg;
2839 	struct hstate *h = hstate_inode(inode);
2840 
2841 	/*
2842 	 * Only apply hugepage reservation if asked. At fault time, an
2843 	 * attempt will be made for VM_NORESERVE to allocate a page
2844 	 * and filesystem quota without using reserves
2845 	 */
2846 	if (acctflag & VM_NORESERVE)
2847 		return 0;
2848 
2849 	/*
2850 	 * Shared mappings base their reservation on the number of pages that
2851 	 * are already allocated on behalf of the file. Private mappings need
2852 	 * to reserve the full area even if read-only as mprotect() may be
2853 	 * called to make the mapping read-write. Assume !vma is a shm mapping
2854 	 */
2855 	if (!vma || vma->vm_flags & VM_MAYSHARE)
2856 		chg = region_chg(&inode->i_mapping->private_list, from, to);
2857 	else {
2858 		struct resv_map *resv_map = resv_map_alloc();
2859 		if (!resv_map)
2860 			return -ENOMEM;
2861 
2862 		chg = to - from;
2863 
2864 		set_vma_resv_map(vma, resv_map);
2865 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2866 	}
2867 
2868 	if (chg < 0)
2869 		return chg;
2870 
2871 	/* There must be enough filesystem quota for the mapping */
2872 	if (hugetlb_get_quota(inode->i_mapping, chg))
2873 		return -ENOSPC;
2874 
2875 	/*
2876 	 * Check that enough hugepages are available for the reservation.
2877 	 * Hand back the quota if there are not.
2878 	 */
2879 	ret = hugetlb_acct_memory(h, chg);
2880 	if (ret < 0) {
2881 		hugetlb_put_quota(inode->i_mapping, chg);
2882 		return ret;
2883 	}
2884 
2885 	/*
2886 	 * Account for the reservations made. Shared mappings record regions
2887 	 * that have reservations as they are shared by multiple VMAs.
2888 	 * When the last VMA disappears, the region map says how much
2889 	 * the reservation was and the page cache tells how much of
2890 	 * the reservation was consumed. Private mappings are per-VMA and
2891 	 * only the consumed reservations are tracked. When the VMA
2892 	 * disappears, the original reservation is the VMA size and the
2893 	 * consumed reservations are stored in the map. Hence, nothing
2894 	 * else has to be done for private mappings here
2895 	 */
2896 	if (!vma || vma->vm_flags & VM_MAYSHARE)
2897 		region_add(&inode->i_mapping->private_list, from, to);
2898 	return 0;
2899 }
2900 
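/*
 * Release reservations beyond @offset when a hugetlbfs inode is truncated:
 * trim the region list, adjust the inode block count by the @freed pages,
 * and hand back the unused quota and reservation.
 */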
2901 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2902 {
2903 	struct hstate *h = hstate_inode(inode);
2904 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
2905 
2906 	spin_lock(&inode->i_lock);
2907 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
2908 	spin_unlock(&inode->i_lock);
2909 
2910 	hugetlb_put_quota(inode->i_mapping, (chg - freed));
2911 	hugetlb_acct_memory(h, -(chg - freed));
2912 }
2913 
2914 #ifdef CONFIG_MEMORY_FAILURE
2915 
2916 /* Should be called with hugetlb_lock held */
2917 static int is_hugepage_on_freelist(struct page *hpage)
2918 {
2919 	struct page *page;
2920 	struct page *tmp;
2921 	struct hstate *h = page_hstate(hpage);
2922 	int nid = page_to_nid(hpage);
2923 
2924 	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
2925 		if (page == hpage)
2926 			return 1;
2927 	return 0;
2928 }
2929 
2930 /*
2931  * This function is called from the memory-failure code.
2932  * Assume the caller holds the page lock of the head page.
2933  */
2934 int dequeue_hwpoisoned_huge_page(struct page *hpage)
2935 {
2936 	struct hstate *h = page_hstate(hpage);
2937 	int nid = page_to_nid(hpage);
2938 	int ret = -EBUSY;
2939 
2940 	spin_lock(&hugetlb_lock);
2941 	if (is_hugepage_on_freelist(hpage)) {
2942 		list_del(&hpage->lru);
2943 		set_page_refcounted(hpage);
2944 		h->free_huge_pages--;
2945 		h->free_huge_pages_node[nid]--;
2946 		ret = 0;
2947 	}
2948 	spin_unlock(&hugetlb_lock);
2949 	return ret;
2950 }
2951 #endif
2952