/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

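	/* Never allocate above the current memblock limit. */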
	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = find_memory_core_early(nid, size, align, goal, limit);

	if (addr == MEMBLOCK_ERROR)
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator; no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

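	/* Round inward so that only pages fully inside the range are freed. */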
	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int i;
	unsigned long start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

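	/*
	 * If the range is too small to contain a BITS_PER_LONG-aligned
	 * block, free it page by page.
	 */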
	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

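	/* Free the unaligned head of the range one page at a time. */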
	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

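	/*
	 * Free the aligned middle in BITS_PER_LONG-page chunks, i.e. as
	 * higher-order pages of order ilog2(BITS_PER_LONG).
	 */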
	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

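	/* Free the unaligned tail of the range one page at a time. */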
	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}

unsigned long __init free_all_memory_core_early(int nodeid)
{
	int i;
	u64 start, end;
	unsigned long count = 0;
	struct range *range = NULL;
	int nr_range;

	nr_range = get_free_all_memory_range(&range, nodeid);

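	/* Release every free range reported for this node and count the pages. */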
	for (i = 0; i < nr_range; i++) {
		start = range[i].start;
		end = range[i].end;
		count += end - start;
		__free_pages_memory(start, end);
	}

	return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);

	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	/*
	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
	 * because in some cases node 0 has no RAM installed and the low
	 * memory ends up on node 1 instead.  Using MAX_NUMNODES makes
	 * sure that all ranges in early_node_map[] are used, not just
	 * the ones related to node 0.
	 */
	return free_all_memory_core_early(MAX_NUMNODES);
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_x86_free_range(physaddr, physaddr + size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_x86_free_range(addr, addr + size);
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

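	/* No luck: drop the goal and retry once before giving up. */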
	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					 goal, -1ULL);
	if (ptr)
		return ptr;

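	/* The requested node had no room: fall back to any node. */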
	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
					 goal, -1ULL);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update goal according ...MAX_DMA32_PFN */
	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

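	/*
	 * If the node extends past the DMA32 boundary (with some headroom)
	 * and the caller's goal lies below it, try the allocation above
	 * MAX_DMA32_PFN first, which keeps DMA32-capable memory free for
	 * callers that actually need it.
	 */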
	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
						 new_goal, -1ULL);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	unsigned long pfn, goal, limit;

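	/* Confine the search to the physical span of this memory section. */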
	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
}
#endif

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					 goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
	if (ptr)
		return ptr;

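	/* Fall back to any node, still below ARCH_LOW_ADDRESS_LIMIT. */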
	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
}