// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_exact_nid_api.h"
#include "alloc_nid_api.h"

#define FUNC_NAME "memblock_alloc_exact_nid_raw"

/*
 * Contains the fraction of MEM_SIZE assigned to each node, in basis point
 * units (one hundredth of 1%, i.e. 1/10000).
 */
static const unsigned int node_fractions[] = {
	2500, /* 1/4  */
	 625, /* 1/16 */
	1250, /* 1/8  */
	1250, /* 1/8  */
	 625, /* 1/16 */
	 625, /* 1/16 */
	2500, /* 1/4  */
	 625, /* 1/16 */
};
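
/*
 * As a quick sanity check on the table above, the basis points are expected
 * to cover all of MEM_SIZE, i.e. to sum to 10000:
 *
 *	2500 + 625 + 1250 + 1250 + 625 + 625 + 2500 + 625 = 10000
 *
 * A minimal run-time guard (a sketch only, not part of the original suite)
 * could verify this before the table is handed to setup_numa_memblock():
 *
 *	unsigned int i, total_bp = 0;
 *
 *	for (i = 0; i < ARRAY_SIZE(node_fractions); i++)
 *		total_bp += node_fractions[i];
 *	ASSERT_EQ(total_bp, 10000);
 */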

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the end of the requested node.
 */
static int alloc_exact_nid_top_down_numa_simple_check(void)
{
	int nid_req = 3;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+          |
 *  |           |               requested               |          |
 *  +-----------+---------------------------------------+----------+
 *
 *  |           +------------------+              +-----+          |
 *  |           |     reserved     |              | new |          |
 *  +-----------+------------------+--------------+-----+----------+
 *
 * Expect to allocate an aligned region at the end of the requested node. The
 * region count and total size get updated.
 */
static int alloc_exact_nid_top_down_numa_part_reserved_check(void)
{
	int nid_req = 4;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_8, req_node->size);
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;
	size = r1.size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *                                  min_addr
 *                                  |           max_addr
 *                                  |           |
 *                                  v           v
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *                                  +           +
 *  |                       +-----------+                          |
 *  |                       |    rgn    |                          |
 *  +-----------------------+-----------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_exact_nid_top_down_numa_split_range_low_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t req_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	req_node_end = region_end(req_node);
	min_addr = req_node_end - SZ_256;
	max_addr = min_addr + size;

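	/*
	 * Only SZ_256 of the requested node lies inside [min_addr, max_addr),
	 * which is too small for the SZ_512 request, so the lower limit is
	 * expected to be dropped and the region to end up flush against the
	 * end of the requested node.
	 */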
	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node_end - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the requested
 * node ends before min_addr:
 *
 *                                          min_addr
 *                                          |       max_addr
 *                                          |       |
 *                                          v       v
 *  |    +---------------+        +-------------+---------+          |
 *  |    |   requested   |        |    node1    |  node2  |          |
 *  +----+---------------+--------+-------------+---------+----------+
 *                                          +       +
 *  |          +---------+                                           |
 *  |          |   rgn   |                                           |
 *  +----------+---------+-------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_exact_nid_top_down_numa_no_overlap_split_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_512;
	min_addr = node2->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested node
 * ends before min_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *                           min_addr
 *                           |                                  max_addr
 *                           |                                  |
 *                           v                                  v
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *                           +                                  +
 *  |     +-----+                                                    |
 *  |     | rgn |                                                    |
 *  +-----+-----+----------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_exact_nid_top_down_numa_no_overlap_low_check(void)
{
	int nid_req = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the beginning of the requested node.
 */
static int alloc_exact_nid_bottom_up_numa_simple_check(void)
{
	int nid_req = 3;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+         |
 *  |           |               requested               |         |
 *  +-----------+---------------------------------------+---------+
 *
 *  |           +------------------+-----+                        |
 *  |           |     reserved     | new |                        |
 *  +-----------+------------------+-----+------------------------+
 *
 * Expect to allocate an aligned region in the requested node that merges with
 * the existing reserved region. The total size gets updated.
 */
static int alloc_exact_nid_bottom_up_numa_part_reserved_check(void)
{
	int nid_req = 4;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t total_size;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_8, req_node->size);
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;
	size = r1.size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();
	total_size = size + r1.size;

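	/*
	 * r1 covers the first half of the requested node, so the bottom-up
	 * allocation is expected to land immediately after it and merge with
	 * it into a single reserved region of total_size.
	 */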
	memblock_reserve(r1.base, r1.size);
	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *                                  min_addr
 *                                  |           max_addr
 *                                  |           |
 *                                  v           v
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *                                  +           +
 *  |           +-----------+                                      |
 *  |           |    rgn    |                                      |
 *  +-----------+-----------+--------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the requested node.
 */
static int alloc_exact_nid_bottom_up_numa_split_range_low_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t req_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	req_node_end = region_end(req_node);
	min_addr = req_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), req_node_end);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the requested
 * node ends before min_addr:
 *
 *                                          min_addr
 *                                          |       max_addr
 *                                          |       |
 *                                          v       v
 *  |    +---------------+        +-------------+---------+         |
 *  |    |   requested   |        |    node1    |  node2  |         |
 *  +----+---------------+--------+-------------+---------+---------+
 *                                          +       +
 *  |    +---------+                                                |
 *  |    |   rgn   |                                                |
 *  +----+---------+------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that starts at
 * the beginning of the requested node.
 */
static int alloc_exact_nid_bottom_up_numa_no_overlap_split_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_512;
	min_addr = node2->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested node
 * ends before min_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *                           min_addr
 *                           |                                  max_addr
 *                           |                                  |
 *                           v                                  v
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *                           +                                  +
 *  |-----+                                                          |
 *  | rgn |                                                          |
 *  +-----+----------------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that starts at
 * the beginning of the requested node.
 */
static int alloc_exact_nid_bottom_up_numa_no_overlap_low_check(void)
{
	int nid_req = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 *  |   +-----+                            |
 *  |   | req |                            |
 *  +---+-----+----------------------------+
 *
 *  +---------+
 *  |   rgn   |
 *  +---------+
 *
 * Expect no allocation to happen.
 */
static int alloc_exact_nid_numa_small_node_generic_check(void)
{
	int nid_req = 1;
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_2 * req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 *  |              +---------+             |
 *  |              |requested|             |
 *  +--------------+---------+-------------+
 *
 *  |              +---------+             |
 *  |              | reserved|             |
 *  +--------------+---------+-------------+
 *
 * Expect no allocation to happen.
 */
static int alloc_exact_nid_numa_node_reserved_generic_check(void)
{
	int nid_req = 2;
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(req_node->base, req_node->size);
	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved and does not have enough contiguous memory for the
 * allocated region:
 *
 *  |           +-----------------------+    |
 *  |           |       requested       |    |
 *  +-----------+-----------------------+----+
 *
 *  |                 +----------+           |
 *  |                 | reserved |           |
 *  +-----------------+----------+-----------+
 *
 * Expect no allocation to happen.
 */
static int alloc_exact_nid_numa_part_reserved_fail_generic_check(void)
{
	int nid_req = 4;
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_2;
	r1.base = req_node->base + (size / SZ_2);
	r1.size = size;

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the second
 * node is the requested node:
 *
 *                                min_addr
 *                                |       max_addr
 *                                |       |
 *                                v       v
 *  |      +--------------------------+---------+                |
 *  |      |        first node        |requested|                |
 *  +------+--------------------------+---------+----------------+
 *
 * Expect no allocation to happen.
 */
static int alloc_exact_nid_numa_split_range_high_generic_check(void)
{
	int nid_req = 3;
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = req_node->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested node
 * starts after max_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *        min_addr
 *        |                                 max_addr
 *        |                                 |
 *        v                                 v
 *  |     +----------+----...----+----------+        +-----------+   |
 *  |     | min node |    ...    | max node |        | requested |   |
 *  +-----+----------+----...----+----------+--------+-----------+---+
 *
 * Expect no allocation to happen.
 */
static int alloc_exact_nid_numa_no_overlap_high_generic_check(void)
{
	int nid_req = 7;
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size.
 * Additionally, none of the nodes have enough memory to allocate the region:
 *
 *                +-----------------------------------+
 *                |                new                |
 *                +-----------------------------------+
 *  |-------+-------+-------+-------+-------+-------+-------+-------|
 *  | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
 *  +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Expect no allocation to happen.
 */
static int alloc_exact_nid_numa_large_region_generic_check(void)
{
	int nid_req = 3;
	void *allocated_ptr = NULL;
	phys_addr_t size = MEM_SIZE / SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);
	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range when
 * there are two reserved regions at the borders. The requested node starts at
 * min_addr and ends at max_addr and is the same size as the region to be
 * allocated:
 *
 *                       min_addr
 *                       |                       max_addr
 *                       |                       |
 *                       v                       v
 *  |      +-----------+-----------------------+-----------------------|
 *  |      |   node5   |       requested       |         node7         |
 *  +------+-----------+-----------------------+-----------------------+
 *                       +                       +
 *  |             +----+-----------------------+----+                  |
 *  |             | r2 |          new          | r1 |                  |
 *  +-------------+----+-----------------------+----+------------------+
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_exact_nid_numa_reserved_full_merge_generic_check(void)
{
	int nid_req = 6;
	int nid_next = nid_req + 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *next_node = &memblock.memory.regions[nid_next];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t size = req_node->size;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	r1.base = next_node->base;
	r1.size = SZ_128;

	r2.size = SZ_128;
	r2.base = r1.base - (size + r2.size);

	total_size = r1.size + r2.size + size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

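	/*
	 * The layout is now [r2][free window][r1]: the free window is
	 * [min_addr, max_addr) and, since size == req_node->size and r1
	 * starts at next_node->base, it covers exactly the requested node.
	 * The allocation is expected to fill it and merge with both
	 * reserved regions.
	 */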
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, r2.base);

	ASSERT_LE(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(req_node), region_end(new_rgn));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr range,
 * where the range as a whole is large enough for the region, but the free
 * space in it is split between two nodes and everything else is reserved.
 * Additionally, nid is set to NUMA_NO_NODE instead of requesting a specific
 * node:
 *
 *                         +-----------+
 *                         |    new    |
 *                         +-----------+
 *  |      +---------------------+-----------|
 *  |      |      prev node      | next node |
 *  +------+---------------------+-----------+
 *                         +           +
 *  |----------------------+           +-----|
 *  |          r1          |           | r2  |
 *  +----------------------+-----------+-----+
 *                         ^           ^
 *                         |           |
 *                         |           max_addr
 *                         |
 *                         min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_exact_nid_numa_split_all_reserved_generic_check(void)
{
	void *allocated_ptr = NULL;
	struct memblock_region *next_node = &memblock.memory.regions[7];
	struct region r1, r2;
	phys_addr_t size = SZ_256;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	r2.base = next_node->base + SZ_128;
	r2.size = memblock_end_of_DRAM() - r2.base;

	r1.size = MEM_SIZE - (r2.size + size);
	r1.base = memblock_start_of_DRAM();

	min_addr = r1.base + r1.size;
	max_addr = r2.base;

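	/*
	 * The only window left free is [min_addr, max_addr), which is exactly
	 * SZ_256 bytes but straddles the boundary between the last two nodes,
	 * SZ_128 on each side. No single node can satisfy the request, so the
	 * allocation is expected to fail even with NUMA_NO_NODE.
	 */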
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/* Test case wrappers for NUMA tests */
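/*
 * Note: the first five wrappers below flip the allocation direction
 * explicitly because the top-down and bottom-up variants assert different
 * placement within the requested node; the remaining checks are
 * direction-independent and are simply run in both modes via
 * run_top_down()/run_bottom_up().
 */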
static int alloc_exact_nid_numa_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_exact_nid_top_down_numa_simple_check();
	memblock_set_bottom_up(true);
	alloc_exact_nid_bottom_up_numa_simple_check();

	return 0;
}

static int alloc_exact_nid_numa_part_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_exact_nid_top_down_numa_part_reserved_check();
	memblock_set_bottom_up(true);
	alloc_exact_nid_bottom_up_numa_part_reserved_check();

	return 0;
}

static int alloc_exact_nid_numa_split_range_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_exact_nid_top_down_numa_split_range_low_check();
	memblock_set_bottom_up(true);
	alloc_exact_nid_bottom_up_numa_split_range_low_check();

	return 0;
}

static int alloc_exact_nid_numa_no_overlap_split_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_exact_nid_top_down_numa_no_overlap_split_check();
	memblock_set_bottom_up(true);
	alloc_exact_nid_bottom_up_numa_no_overlap_split_check();

	return 0;
}

static int alloc_exact_nid_numa_no_overlap_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_exact_nid_top_down_numa_no_overlap_low_check();
	memblock_set_bottom_up(true);
	alloc_exact_nid_bottom_up_numa_no_overlap_low_check();

	return 0;
}

static int alloc_exact_nid_numa_small_node_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_exact_nid_numa_small_node_generic_check);
	run_bottom_up(alloc_exact_nid_numa_small_node_generic_check);

	return 0;
}

static int alloc_exact_nid_numa_node_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_exact_nid_numa_node_reserved_generic_check);
	run_bottom_up(alloc_exact_nid_numa_node_reserved_generic_check);

	return 0;
}

static int alloc_exact_nid_numa_part_reserved_fail_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_exact_nid_numa_part_reserved_fail_generic_check);
	run_bottom_up(alloc_exact_nid_numa_part_reserved_fail_generic_check);

	return 0;
}

static int alloc_exact_nid_numa_split_range_high_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_exact_nid_numa_split_range_high_generic_check);
	run_bottom_up(alloc_exact_nid_numa_split_range_high_generic_check);

	return 0;
}

static int alloc_exact_nid_numa_no_overlap_high_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_exact_nid_numa_no_overlap_high_generic_check);
	run_bottom_up(alloc_exact_nid_numa_no_overlap_high_generic_check);

	return 0;
}

static int alloc_exact_nid_numa_large_region_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_exact_nid_numa_large_region_generic_check);
	run_bottom_up(alloc_exact_nid_numa_large_region_generic_check);

	return 0;
}

static int alloc_exact_nid_numa_reserved_full_merge_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_exact_nid_numa_reserved_full_merge_generic_check);
	run_bottom_up(alloc_exact_nid_numa_reserved_full_merge_generic_check);

	return 0;
}

static int alloc_exact_nid_numa_split_all_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_exact_nid_numa_split_all_reserved_generic_check);
	run_bottom_up(alloc_exact_nid_numa_split_all_reserved_generic_check);

	return 0;
}

int __memblock_alloc_exact_nid_numa_checks(void)
{
	test_print("Running %s NUMA tests...\n", FUNC_NAME);

	alloc_exact_nid_numa_simple_check();
	alloc_exact_nid_numa_part_reserved_check();
	alloc_exact_nid_numa_split_range_low_check();
	alloc_exact_nid_numa_no_overlap_split_check();
	alloc_exact_nid_numa_no_overlap_low_check();

	alloc_exact_nid_numa_small_node_check();
	alloc_exact_nid_numa_node_reserved_check();
	alloc_exact_nid_numa_part_reserved_fail_check();
	alloc_exact_nid_numa_split_range_high_check();
	alloc_exact_nid_numa_no_overlap_high_check();
	alloc_exact_nid_numa_large_region_check();
	alloc_exact_nid_numa_reserved_full_merge_check();
	alloc_exact_nid_numa_split_all_reserved_check();

	return 0;
}

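/*
 * Entry point for the memblock_alloc_exact_nid_raw() test group. A usage
 * sketch (assuming the harness invokes it the same way as the other
 * allocator check groups):
 *
 *	memblock_alloc_exact_nid_checks();
 */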
int memblock_alloc_exact_nid_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_NAME);

	reset_memblock_attributes();
	dummy_physical_memory_init();

	memblock_alloc_exact_nid_range_checks();
	memblock_alloc_exact_nid_numa_checks();

	dummy_physical_memory_cleanup();

	prefix_pop();

	return 0;
}