/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};
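
/*
 * Note: the static __initdata arrays above are only the starting point.
 * Once memblock_allow_resize() has been called, memblock_double_array()
 * below migrates either region array into dynamically allocated memory
 * (memblock- or slab-backed) whenever it fills up.
 */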

int memblock_debug __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
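
/*
 * Example (illustrative only): with a 64-bit phys_addr_t, a request of
 * base = ULLONG_MAX - SZ_4K and *size = SZ_8K is capped to *size = SZ_4K,
 * so that base + *size can never wrap around.
 */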

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * RETURNS:
 * Found address on success, %0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}
	return 0;
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, %0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(start, end, size, align,
					   MAX_NUMNODES);
}
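
/*
 * Illustrative use (sketch only, the constants are just examples): an early
 * caller can pair the lookup with an explicit reservation, e.g.
 *
 *	phys_addr_t addr = memblock_find_in_range(0, SZ_1G, SZ_64K, SZ_64K);
 *	if (addr)
 *		memblock_reserve(addr, SZ_64K);
 *
 * memblock_alloc() and friends below wrap this find+reserve pattern.
 */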

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that it can be freed completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab is available and we use it, or
	 * we allocate directly from MEMBLOCK. That means this is unsafe to use
	 * while bootmem is active (unless bootmem itself is implemented on top
	 * of MEMBLOCK, which isn't the case yet).
	 *
	 * This should not be an issue for now, though, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
					min(new_area_start, memblock.current_limit),
					new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : 0;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the
	 * static one
	 */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/* Reserve the new array if that comes from the memblock.
	 * Otherwise, we needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next)) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
		type->cnt--;
	}
}
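
/*
 * Example (illustrative): adjacent regions [0x1000-0x2000) and
 * [0x2000-0x3000) on the same node collapse into a single [0x1000-0x3000)
 * entry, while physically adjacent regions on different nodes stay separate.
 */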

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node ID of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size, int nid)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base, nid);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_region(&memblock.memory, base, size, nid);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
}
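
/*
 * Illustrative example (addresses made up): overlapping adds are legal and
 * coalesce into a minimal region list, e.g. calling
 *
 *	memblock_add(0x00000000, 0x10000000);
 *	memblock_add(0x08000000, 0x10000000);
 *
 * where the second range overlaps the first, leaves a single memory region
 * [0x00000000-0x18000000).
 */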

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn));
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn));
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
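
/*
 * Example (illustrative): isolating [0x2000-0x3000) within an existing
 * region [0x1000-0x4000) splits it into [0x1000-0x2000), [0x2000-0x3000)
 * and [0x3000-0x4000), with *start_rgn / *end_rgn delimiting the middle one.
 */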

static int __init_memblock __memblock_remove(struct memblock_type *type,
					     phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return __memblock_remove(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
}

/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into memory region and the upper 32bit indexes the
 * areas before each reserved region.  For example, if reserved regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	for ( ; mi < mem->cnt; mi++) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri < rsv->cnt + 1; ri++) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);
				/*
				 * The region which ends first is advanced
				 * for the next iteration.
				 */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
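
/*
 * Callers normally iterate via the for_each_free_mem_range() helper from
 * <linux/memblock.h> rather than calling this directly; a rough sketch:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
 *		pr_debug("free range: [%#llx-%#llx)\n", (u64)start, (u64)end);
 */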

/**
 * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_free_mem_range().
 */
void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	if (*idx == (u64)ULLONG_MAX) {
		mi = mem->cnt - 1;
		ri = rsv->cnt;
	}

	for ( ; mi >= 0; mi--) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri >= 0; ri--) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);

				if (m_start >= r_start)
					mi--;
				else
					ri--;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	struct memblock_type *type = &memblock.memory;
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		type->regions[i].nid = nid;

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	phys_addr_t found;

	/* align @size to avoid excessive fragmentation on reserved array */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
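
/*
 * Illustrative allocation example (values made up): a page-sized scratch
 * buffer taken from anywhere below the current limit, panicking on failure
 * inside memblock_alloc_base():
 *
 *	phys_addr_t buf = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 * memblock_alloc_try_nid() prefers @nid but silently falls back to any node.
 */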


/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	unsigned long i;
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	/* find out max address */
	for (i = 0; i < memblock.memory.cnt; i++) {
		struct memblock_region *r = &memblock.memory.regions[i];

		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
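
/*
 * Both region arrays are kept sorted by base address and non-overlapping,
 * so a plain binary search suffices above; -1 means @addr falls in a hole.
 */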

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	int i;
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_type *mem = &memblock.memory;

	for (i = 0; i < mem->cnt; i++) {
		orig_start = mem->regions[i].base;
		orig_end = mem->regions[i].base + mem->regions[i].size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			mem->regions[i].base = start;
			mem->regions[i].size = end - start;
		} else {
			memblock_remove_region(mem, i);
			i--;
		}
	}
}
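
/*
 * Example (illustrative): with align = 2M, a region covering
 * [0x1ff000-0x401000) is trimmed to the aligned span [0x200000-0x400000);
 * a region smaller than one aligned unit is removed entirely.
 */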

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
			name, i, base, base + size - 1, size, nid_buf);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));

	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */