/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
	.bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})
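
/*
 * Example: bdebug() output is compiled in unconditionally but only
 * printed when "bootmem_debug" is passed on the kernel command line,
 * which makes the early_param() handler above set bootmem_debug to 1.
 */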

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
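
/*
 * Worked example (assuming 4 KiB pages and 64-bit longs): representing
 * 32768 pages takes (32768 + 7) / 8 = 4096 bitmap bytes, so
 * bootmem_bootmap_pages(32768) returns exactly one page; one page more
 * and the result rounds up to two bitmap pages.
 */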

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	struct list_head *iter;

	list_for_each(iter, &bdata_list) {
		bootmem_data_t *ent;

		ent = list_entry(iter, bootmem_data_t, list);
		if (bdata->node_min_pfn < ent->node_min_pfn)
			break;
	}
	list_add_tail(&bdata->list, iter);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
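
/*
 * Illustrative boot-time sequence (a sketch, not taken from any particular
 * architecture): setup_arch() typically registers the node, marks the
 * usable RAM free and then re-reserves the bitmap and kernel image,
 * roughly like
 *
 *	bitmap_bytes = init_bootmem(bitmap_pfn, max_low_pfn);
 *	free_bootmem(PFN_PHYS(first_free_pfn), usable_bytes);
 *	reserve_bootmem(PFN_PHYS(bitmap_pfn), bitmap_bytes, BOOTMEM_DEFAULT);
 *
 * where bitmap_pfn, first_free_pfn and usable_bytes are placeholders the
 * architecture code has to compute itself.
 */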

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
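
/*
 * Usage sketch (hypothetical caller): once free_all_bootmem() has handed
 * everything to the buddy allocator, a range that was reserved earlier can
 * still be released late in boot with
 *
 *	free_bootmem_late(phys_addr, len);
 *
 * Note that only the fully contained pages are freed: the PFN_UP()/
 * PFN_DOWN() rounding above deliberately skips partial pages at either end.
 */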

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machine's word size, we might
	 * be able to free pages in chunks of that size.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - bdata->node_min_pfn;
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	unsigned long total_pages = 0;
	bootmem_data_t *bdata;

	list_for_each_entry(bdata, &bdata_list, list)
		total_pages += free_all_bootmem_core(bdata);

	return total_pages;
}
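
/*
 * Usage sketch (the exact caller is architecture specific): the page count
 * returned here is commonly folded into the global memory accounting when
 * the buddy allocator takes over, e.g.
 *
 *	totalram_pages += free_all_bootmem();
 */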

static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(addr), size);

	start = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	mark_bootmem(start, end, 0, 0);
}
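
/*
 * Worked example of the "partial pages stay reserved" rule above (assuming
 * 4 KiB pages): free_bootmem(0x1800, 0x2000) covers physical addresses
 * 0x1800-0x37ff, so the PFN_UP()/PFN_DOWN() rounding shrinks it to the
 * single fully contained page at 0x2000-0x2fff; the two partially covered
 * pages at either end remain reserved.
 */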

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
}
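
/*
 * Usage sketch (the initrd range is just an example and the names are
 * placeholders): a region that must not be handed out twice can be claimed
 * exclusively, and the caller has to cope with -EBUSY:
 *
 *	if (reserve_bootmem(initrd_start_phys, initrd_size,
 *			    BOOTMEM_EXCLUSIVE) < 0)
 *		printk(KERN_ERR "initrd area is already in use\n");
 *
 * With BOOTMEM_DEFAULT a double reservation is only reported via bdebug()
 * and silently tolerated, as __reserve() above shows.
 */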

int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}

static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}
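
/*
 * Worked example: with node_min_pfn = 3 and step = 4, an index of 2
 * corresponds to pfn 5; ALIGN(5, 4) = 8, so align_idx() returns 8 - 3 = 5,
 * i.e. the next index whose absolute pfn is a multiple of the step.
 */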

static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}

static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
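
/*
 * Illustrative effect of the last_end_off/merge handling above (assuming
 * 4 KiB pages): two back-to-back alloc_bootmem_core() calls for 512 bytes
 * each end up sharing one page - the second call notices that last_end_off
 * stops mid-page just below its candidate index, starts at that byte
 * offset instead of a fresh page, and skips re-reserving the already
 * reserved page via the merge adjustment.
 */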

static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
	{
		bootmem_data_t *p_bdata;

		p_bdata = bootmem_arch_preferred_node(bdata, size, align,
							goal, limit);
		if (p_bdata)
			return alloc_bootmem_core(p_bdata, size, align,
							goal, limit);
	}
#endif
	return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

restart:
	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
	if (region)
		return region;

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_core(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem(size, align, goal, limit);
}
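
/*
 * Usage sketch: callers normally go through the convenience wrappers in
 * <linux/bootmem.h> (alloc_bootmem(), alloc_bootmem_pages(), ...), which
 * boil down to something like
 *
 *	table = __alloc_bootmem(nr_entries * sizeof(*table),
 *				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 *
 * with table and nr_entries standing in for the caller's own state: a
 * cache-line aligned region, preferably above the DMA zone, returned
 * zeroed; a failure panics instead of returning NULL.
 */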

static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	return ___alloc_bootmem(size, align, goal, limit);
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* try to allocate above MAX_DMA32_PFN if the node extends past it */
	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = alloc_bootmem_core(pgdat->bdata, size, align,
						 new_goal, 0);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	bootmem_data_t *bdata;
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

	return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
}
#endif

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
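
/*
 * Usage sketch: "low" allocations are capped at ARCH_LOW_ADDRESS_LIMIT
 * (the first 4 GiB unless the architecture overrides it), which is what
 * callers needing 32-bit addressable buffers rely on, e.g.
 *
 *	buf = __alloc_bootmem_low(buf_size, PAGE_SIZE, 0);
 *
 * where buf and buf_size are placeholders for the caller's own state.
 */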

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat->bdata, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
}