/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

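/*
 * All of the allocation entry points below funnel into this helper: it
 * searches memblock for a free range, reserves it, zeroes it and returns
 * the virtual address, or NULL if nothing suitable was found.
 */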
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = memblock_find_in_range_node(goal, limit, size, align, nid);
	if (!addr)
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	memblock_reserve(addr, size);
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

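/*
 * Usage sketch (illustrative only, not part of this file): a subsystem
 * that reserved an early buffer it turns out not to need can hand the
 * pages straight to the page allocator once bootmem has been torn down.
 * The buffer and size below are hypothetical.
 *
 *	free_bootmem_late(__pa(early_buf), early_buf_size);
 */
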
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	unsigned long i, start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	/* Range too small to contain an aligned block: free page by page. */
	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	/* Unaligned head, one page at a time. */
	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	/* Aligned middle, in BITS_PER_LONG-page chunks. */
	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	/* Unaligned tail, one page at a time. */
	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn > end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

unsigned long __init free_low_memory_core_early(int nodeid)
{
	unsigned long count = 0;
	phys_addr_t start, end, size;
	u64 i;

	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
		count += __free_memory_core(start, end);

	/*
	 * Free the range backing the memblock reserved-regions array,
	 * if one was allocated.
	 */
	size = get_allocated_memblock_reserved_regions_info(&start);
	if (size)
		count += __free_memory_core(start, start + size);

	return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);

	/* free_low_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	/*
	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
	 * because in some configurations Node 0 has no RAM installed and
	 * the low memory ends up on Node 1 instead.  Using MAX_NUMNODES
	 * makes sure that all ranges in early_node_map[] are used, not
	 * just those belonging to Node 0.
	 */
	return free_low_memory_core_early(MAX_NUMNODES);
}

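/*
 * Usage sketch (illustrative only): an architecture's mem_init() typically
 * releases all free low memory to the buddy allocator in one call and
 * accounts for it, roughly:
 *
 *	totalram_pages += free_all_bootmem();
 */
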
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_free(physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_free(addr, size);
}

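/*
 * Usage sketch (illustrative only): platform code that over-reserved a
 * region early on can return the unused tail to the early allocator.
 * All symbols below are hypothetical.
 *
 *	free_bootmem(resv_base + resv_used, resv_size - resv_used);
 */
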
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

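/*
 * Usage sketch (illustrative only): a caller that can live without the
 * buffer checks for NULL instead of using the panicking variant; the
 * names below are hypothetical.
 *
 *	buf = __alloc_bootmem_nopanic(buf_size, PAGE_SIZE, 0);
 *	if (!buf)
 *		pr_warn("early buffer disabled: out of memory\n");
 */
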
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}

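/*
 * Usage sketch (illustrative only): the common alloc_bootmem() pattern
 * asks for cache-line alignment and prefers memory above the DMA zone,
 * along the lines of:
 *
 *	tbl = __alloc_bootmem(tbl_size, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 */
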
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

again:
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					 goal, -1ULL);
	if (ptr)
		return ptr;

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
					goal, -1ULL);
	if (!ptr && goal) {
		goal = 0;
		goto again;
	}
	return ptr;
}

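/*
 * Usage sketch (illustrative only): allocating a node-local table during
 * early setup; the allocation silently falls back to other nodes if
 * @pgdat has no suitable memory.  The size below is hypothetical.
 *
 *	map = __alloc_bootmem_node(pgdat, map_size, SMP_CACHE_BYTES,
 *				   __pa(MAX_DMA_ADDRESS));
 */
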
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Returns NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
}
#endif

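/*
 * Usage sketch (illustrative only): sparse-memory setup can try to place a
 * section's usemap inside the section it describes, roughly:
 *
 *	usemap = alloc_bootmem_section(usemap_size(), pnum);
 */
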
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
						 goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

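/*
 * Usage sketch (illustrative only): a buffer that must be reachable by a
 * 32-bit DMA engine is taken from low memory, capped by
 * ARCH_LOW_ADDRESS_LIMIT.  The size below is hypothetical.
 *
 *	dma_buf = __alloc_bootmem_low(dma_buf_size, PAGE_SIZE, 0);
 */
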
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
}