1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include "alloc_nid_api.h"
3 
4 /*
5  * A simple test that tries to allocate a memory region within min_addr and
6  * max_addr range:
7  *
8  *        +                   +
9  *   |    +       +-----------+      |
10  *   |    |       |    rgn    |      |
11  *   +----+-------+-----------+------+
12  *        ^                   ^
13  *        |                   |
14  *        min_addr           max_addr
15  *
16  * Expect to allocate a cleared region that ends at max_addr.
17  */
18 static int alloc_try_nid_top_down_simple_check(void)
19 {
20 	struct memblock_region *rgn = &memblock.reserved.regions[0];
21 	void *allocated_ptr = NULL;
22 
23 	PREFIX_PUSH();
24 
25 	phys_addr_t size = SZ_128;
26 	phys_addr_t min_addr;
27 	phys_addr_t max_addr;
28 	phys_addr_t rgn_end;
29 
30 	setup_memblock();
31 
32 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
33 	max_addr = min_addr + SZ_512;
34 
35 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
36 					       min_addr, max_addr, NUMA_NO_NODE);
37 	rgn_end = rgn->base + rgn->size;
38 
39 	ASSERT_NE(allocated_ptr, NULL);
40 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
41 
42 	ASSERT_EQ(rgn->size, size);
43 	ASSERT_EQ(rgn->base, max_addr - size);
44 	ASSERT_EQ(rgn_end, max_addr);
45 
46 	ASSERT_EQ(memblock.reserved.cnt, 1);
47 	ASSERT_EQ(memblock.reserved.total_size, size);
48 
49 	test_pass_pop();
50 
51 	return 0;
52 }
53 
54 /*
55  * A simple test that tries to allocate a memory region within min_addr and
56  * max_addr range, where the end address is misaligned:
57  *
58  *         +       +            +
59  *  |      +       +---------+  +    |
60  *  |      |       |   rgn   |  |    |
61  *  +------+-------+---------+--+----+
62  *         ^       ^            ^
63  *         |       |            |
 *       min_addr  |            max_addr
65  *                 |
66  *                 Aligned address
67  *                 boundary
68  *
69  * Expect to allocate a cleared, aligned region that ends before max_addr.
70  */
71 static int alloc_try_nid_top_down_end_misaligned_check(void)
72 {
73 	struct memblock_region *rgn = &memblock.reserved.regions[0];
74 	void *allocated_ptr = NULL;
75 
76 	PREFIX_PUSH();
77 
78 	phys_addr_t size = SZ_128;
79 	phys_addr_t misalign = SZ_2;
80 	phys_addr_t min_addr;
81 	phys_addr_t max_addr;
82 	phys_addr_t rgn_end;
83 
84 	setup_memblock();
85 
86 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
87 	max_addr = min_addr + SZ_512 + misalign;
88 
89 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
90 					       min_addr, max_addr, NUMA_NO_NODE);
91 	rgn_end = rgn->base + rgn->size;
92 
93 	ASSERT_NE(allocated_ptr, NULL);
94 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
95 
96 	ASSERT_EQ(rgn->size, size);
97 	ASSERT_EQ(rgn->base, max_addr - size - misalign);
98 	ASSERT_LT(rgn_end, max_addr);
99 
100 	ASSERT_EQ(memblock.reserved.cnt, 1);
101 	ASSERT_EQ(memblock.reserved.total_size, size);
102 
103 	test_pass_pop();
104 
105 	return 0;
106 }
107 
108 /*
109  * A simple test that tries to allocate a memory region, which spans over the
110  * min_addr and max_addr range:
111  *
112  *         +               +
113  *  |      +---------------+       |
114  *  |      |      rgn      |       |
115  *  +------+---------------+-------+
116  *         ^               ^
117  *         |               |
118  *         min_addr        max_addr
119  *
120  * Expect to allocate a cleared region that starts at min_addr and ends at
121  * max_addr, given that min_addr is aligned.
122  */
123 static int alloc_try_nid_exact_address_generic_check(void)
124 {
125 	struct memblock_region *rgn = &memblock.reserved.regions[0];
126 	void *allocated_ptr = NULL;
127 
128 	PREFIX_PUSH();
129 
130 	phys_addr_t size = SZ_1K;
131 	phys_addr_t min_addr;
132 	phys_addr_t max_addr;
133 	phys_addr_t rgn_end;
134 
135 	setup_memblock();
136 
137 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
138 	max_addr = min_addr + size;
139 
140 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
141 					       min_addr, max_addr, NUMA_NO_NODE);
142 	rgn_end = rgn->base + rgn->size;
143 
144 	ASSERT_NE(allocated_ptr, NULL);
145 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
146 
147 	ASSERT_EQ(rgn->size, size);
148 	ASSERT_EQ(rgn->base, min_addr);
149 	ASSERT_EQ(rgn_end, max_addr);
150 
151 	ASSERT_EQ(memblock.reserved.cnt, 1);
152 	ASSERT_EQ(memblock.reserved.total_size, size);
153 
154 	test_pass_pop();
155 
156 	return 0;
157 }
158 
159 /*
160  * A test that tries to allocate a memory region, which can't fit into
161  * min_addr and max_addr range:
162  *
163  *           +          +     +
164  *  |        +----------+-----+    |
165  *  |        |   rgn    +     |    |
166  *  +--------+----------+-----+----+
167  *           ^          ^     ^
168  *           |          |     |
169  *           Aligned    |    max_addr
170  *           address    |
 *           boundary   min_addr
172  *
173  * Expect to drop the lower limit and allocate a cleared memory region which
174  * ends at max_addr (if the address is aligned).
175  */
176 static int alloc_try_nid_top_down_narrow_range_check(void)
177 {
178 	struct memblock_region *rgn = &memblock.reserved.regions[0];
179 	void *allocated_ptr = NULL;
180 
181 	PREFIX_PUSH();
182 
183 	phys_addr_t size = SZ_256;
184 	phys_addr_t min_addr;
185 	phys_addr_t max_addr;
186 
187 	setup_memblock();
188 
189 	min_addr = memblock_start_of_DRAM() + SZ_512;
190 	max_addr = min_addr + SMP_CACHE_BYTES;
191 
192 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
193 					       min_addr, max_addr, NUMA_NO_NODE);
194 
195 	ASSERT_NE(allocated_ptr, NULL);
196 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
197 
198 	ASSERT_EQ(rgn->size, size);
199 	ASSERT_EQ(rgn->base, max_addr - size);
200 
201 	ASSERT_EQ(memblock.reserved.cnt, 1);
202 	ASSERT_EQ(memblock.reserved.total_size, size);
203 
204 	test_pass_pop();
205 
206 	return 0;
207 }
208 
209 /*
210  * A test that tries to allocate a memory region, which can't fit into
211  * min_addr and max_addr range, with the latter being too close to the beginning
212  * of the available memory:
213  *
214  *   +-------------+
215  *   |     new     |
216  *   +-------------+
217  *         +       +
218  *         |       +              |
219  *         |       |              |
220  *         +-------+--------------+
221  *         ^       ^
222  *         |       |
223  *         |       max_addr
224  *         |
225  *         min_addr
226  *
227  * Expect no allocation to happen.
228  */
229 static int alloc_try_nid_low_max_generic_check(void)
230 {
231 	void *allocated_ptr = NULL;
232 
233 	PREFIX_PUSH();
234 
235 	phys_addr_t size = SZ_1K;
236 	phys_addr_t min_addr;
237 	phys_addr_t max_addr;
238 
239 	setup_memblock();
240 
241 	min_addr = memblock_start_of_DRAM();
242 	max_addr = min_addr + SMP_CACHE_BYTES;
243 
244 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
245 					       min_addr, max_addr, NUMA_NO_NODE);
246 
247 	ASSERT_EQ(allocated_ptr, NULL);
248 
249 	test_pass_pop();
250 
251 	return 0;
252 }
253 
254 /*
 * A test that tries to allocate a memory region within min_addr and max_addr range,
256  * with min_addr being so close that it's next to an allocated region:
257  *
258  *          +                        +
259  *  |       +--------+---------------|
260  *  |       |   r1   |      rgn      |
261  *  +-------+--------+---------------+
262  *          ^                        ^
263  *          |                        |
264  *          min_addr                 max_addr
265  *
266  * Expect a merge of both regions. Only the region size gets updated.
267  */
268 static int alloc_try_nid_min_reserved_generic_check(void)
269 {
270 	struct memblock_region *rgn = &memblock.reserved.regions[0];
271 	void *allocated_ptr = NULL;
272 
273 	PREFIX_PUSH();
274 
275 	phys_addr_t r1_size = SZ_128;
276 	phys_addr_t r2_size = SZ_64;
277 	phys_addr_t total_size = r1_size + r2_size;
278 	phys_addr_t min_addr;
279 	phys_addr_t max_addr;
280 	phys_addr_t reserved_base;
281 
282 	setup_memblock();
283 
284 	max_addr = memblock_end_of_DRAM();
285 	min_addr = max_addr - r2_size;
286 	reserved_base = min_addr - r1_size;
287 
288 	memblock_reserve(reserved_base, r1_size);
289 
290 	allocated_ptr = memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
291 					       min_addr, max_addr, NUMA_NO_NODE);
292 
293 	ASSERT_NE(allocated_ptr, NULL);
294 	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);
295 
296 	ASSERT_EQ(rgn->size, total_size);
297 	ASSERT_EQ(rgn->base, reserved_base);
298 
299 	ASSERT_EQ(memblock.reserved.cnt, 1);
300 	ASSERT_EQ(memblock.reserved.total_size, total_size);
301 
302 	test_pass_pop();
303 
304 	return 0;
305 }
306 
307 /*
308  * A test that tries to allocate a memory region within min_addr and max_addr,
309  * with max_addr being so close that it's next to an allocated region:
310  *
311  *             +             +
312  *  |          +-------------+--------|
313  *  |          |     rgn     |   r1   |
314  *  +----------+-------------+--------+
315  *             ^             ^
316  *             |             |
317  *             min_addr      max_addr
318  *
319  * Expect a merge of regions. Only the region size gets updated.
320  */
321 static int alloc_try_nid_max_reserved_generic_check(void)
322 {
323 	struct memblock_region *rgn = &memblock.reserved.regions[0];
324 	void *allocated_ptr = NULL;
325 
326 	PREFIX_PUSH();
327 
328 	phys_addr_t r1_size = SZ_64;
329 	phys_addr_t r2_size = SZ_128;
330 	phys_addr_t total_size = r1_size + r2_size;
331 	phys_addr_t min_addr;
332 	phys_addr_t max_addr;
333 
334 	setup_memblock();
335 
336 	max_addr = memblock_end_of_DRAM() - r1_size;
337 	min_addr = max_addr - r2_size;
338 
339 	memblock_reserve(max_addr, r1_size);
340 
341 	allocated_ptr = memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
342 					       min_addr, max_addr, NUMA_NO_NODE);
343 
344 	ASSERT_NE(allocated_ptr, NULL);
345 	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);
346 
347 	ASSERT_EQ(rgn->size, total_size);
348 	ASSERT_EQ(rgn->base, min_addr);
349 
350 	ASSERT_EQ(memblock.reserved.cnt, 1);
351 	ASSERT_EQ(memblock.reserved.total_size, total_size);
352 
353 	test_pass_pop();
354 
355 	return 0;
356 }
357 
358 /*
 * A test that tries to allocate memory within min_addr and max_addr range, when
360  * there are two reserved regions at the borders, with a gap big enough to fit
361  * a new region:
362  *
363  *                +           +
364  *  |    +--------+   +-------+------+  |
365  *  |    |   r2   |   |  rgn  |  r1  |  |
366  *  +----+--------+---+-------+------+--+
367  *                ^           ^
368  *                |           |
369  *                min_addr    max_addr
370  *
371  * Expect to merge the new region with r1. The second region does not get
372  * updated. The total size field gets updated.
373  */
374 
375 static int alloc_try_nid_top_down_reserved_with_space_check(void)
376 {
377 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
378 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
379 	void *allocated_ptr = NULL;
380 	struct region r1, r2;
381 
382 	PREFIX_PUSH();
383 
384 	phys_addr_t r3_size = SZ_64;
385 	phys_addr_t gap_size = SMP_CACHE_BYTES;
386 	phys_addr_t total_size;
387 	phys_addr_t max_addr;
388 	phys_addr_t min_addr;
389 
390 	setup_memblock();
391 
392 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
393 	r1.size = SMP_CACHE_BYTES;
394 
395 	r2.size = SZ_128;
396 	r2.base = r1.base - (r3_size + gap_size + r2.size);
397 
398 	total_size = r1.size + r2.size + r3_size;
399 	min_addr = r2.base + r2.size;
400 	max_addr = r1.base;
401 
402 	memblock_reserve(r1.base, r1.size);
403 	memblock_reserve(r2.base, r2.size);
404 
405 	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
406 					       min_addr, max_addr, NUMA_NO_NODE);
407 
408 	ASSERT_NE(allocated_ptr, NULL);
409 	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);
410 
411 	ASSERT_EQ(rgn1->size, r1.size + r3_size);
412 	ASSERT_EQ(rgn1->base, max_addr - r3_size);
413 
414 	ASSERT_EQ(rgn2->size, r2.size);
415 	ASSERT_EQ(rgn2->base, r2.base);
416 
417 	ASSERT_EQ(memblock.reserved.cnt, 2);
418 	ASSERT_EQ(memblock.reserved.total_size, total_size);
419 
420 	test_pass_pop();
421 
422 	return 0;
423 }
424 
425 /*
 * A test that tries to allocate memory within min_addr and max_addr range, when
427  * there are two reserved regions at the borders, with a gap of a size equal to
428  * the size of the new region:
429  *
430  *                 +        +
431  *  |     +--------+--------+--------+     |
432  *  |     |   r2   |   r3   |   r1   |     |
433  *  +-----+--------+--------+--------+-----+
434  *                 ^        ^
435  *                 |        |
436  *                 min_addr max_addr
437  *
438  * Expect to merge all of the regions into one. The region counter and total
439  * size fields get updated.
440  */
441 static int alloc_try_nid_reserved_full_merge_generic_check(void)
442 {
443 	struct memblock_region *rgn = &memblock.reserved.regions[0];
444 	void *allocated_ptr = NULL;
445 	struct region r1, r2;
446 
447 	PREFIX_PUSH();
448 
449 	phys_addr_t r3_size = SZ_64;
450 	phys_addr_t total_size;
451 	phys_addr_t max_addr;
452 	phys_addr_t min_addr;
453 
454 	setup_memblock();
455 
456 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
457 	r1.size = SMP_CACHE_BYTES;
458 
459 	r2.size = SZ_128;
460 	r2.base = r1.base - (r3_size + r2.size);
461 
462 	total_size = r1.size + r2.size + r3_size;
463 	min_addr = r2.base + r2.size;
464 	max_addr = r1.base;
465 
466 	memblock_reserve(r1.base, r1.size);
467 	memblock_reserve(r2.base, r2.size);
468 
469 	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
470 					       min_addr, max_addr, NUMA_NO_NODE);
471 
472 	ASSERT_NE(allocated_ptr, NULL);
473 	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);
474 
475 	ASSERT_EQ(rgn->size, total_size);
476 	ASSERT_EQ(rgn->base, r2.base);
477 
478 	ASSERT_EQ(memblock.reserved.cnt, 1);
479 	ASSERT_EQ(memblock.reserved.total_size, total_size);
480 
481 	test_pass_pop();
482 
483 	return 0;
484 }
485 
486 /*
 * A test that tries to allocate memory within min_addr and max_addr range, when
488  * there are two reserved regions at the borders, with a gap that can't fit
489  * a new region:
490  *
491  *                       +    +
492  *  |  +----------+------+    +------+   |
493  *  |  |    r3    |  r2  |    |  r1  |   |
494  *  +--+----------+------+----+------+---+
495  *                       ^    ^
496  *                       |    |
497  *                       |    max_addr
498  *                       |
499  *                       min_addr
500  *
501  * Expect to merge the new region with r2. The second region does not get
502  * updated. The total size counter gets updated.
503  */
504 static int alloc_try_nid_top_down_reserved_no_space_check(void)
505 {
506 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
507 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
508 	void *allocated_ptr = NULL;
509 	struct region r1, r2;
510 
511 	PREFIX_PUSH();
512 
513 	phys_addr_t r3_size = SZ_256;
514 	phys_addr_t gap_size = SMP_CACHE_BYTES;
515 	phys_addr_t total_size;
516 	phys_addr_t max_addr;
517 	phys_addr_t min_addr;
518 
519 	setup_memblock();
520 
521 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
522 	r1.size = SMP_CACHE_BYTES;
523 
524 	r2.size = SZ_128;
525 	r2.base = r1.base - (r2.size + gap_size);
526 
527 	total_size = r1.size + r2.size + r3_size;
528 	min_addr = r2.base + r2.size;
529 	max_addr = r1.base;
530 
531 	memblock_reserve(r1.base, r1.size);
532 	memblock_reserve(r2.base, r2.size);
533 
534 	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
535 					       min_addr, max_addr, NUMA_NO_NODE);
536 
537 	ASSERT_NE(allocated_ptr, NULL);
538 	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);
539 
540 	ASSERT_EQ(rgn1->size, r1.size);
541 	ASSERT_EQ(rgn1->base, r1.base);
542 
543 	ASSERT_EQ(rgn2->size, r2.size + r3_size);
544 	ASSERT_EQ(rgn2->base, r2.base - r3_size);
545 
546 	ASSERT_EQ(memblock.reserved.cnt, 2);
547 	ASSERT_EQ(memblock.reserved.total_size, total_size);
548 
549 	test_pass_pop();
550 
551 	return 0;
552 }
553 
554 /*
 * A test that tries to allocate memory within min_addr and max_addr range, but
556  * it's too narrow and everything else is reserved:
557  *
558  *            +-----------+
559  *            |    new    |
560  *            +-----------+
561  *                 +      +
562  *  |--------------+      +----------|
563  *  |      r2      |      |    r1    |
564  *  +--------------+------+----------+
565  *                 ^      ^
566  *                 |      |
567  *                 |      max_addr
568  *                 |
569  *                 min_addr
570  *
571  * Expect no allocation to happen.
572  */
573 
574 static int alloc_try_nid_reserved_all_generic_check(void)
575 {
576 	void *allocated_ptr = NULL;
577 	struct region r1, r2;
578 
579 	PREFIX_PUSH();
580 
581 	phys_addr_t r3_size = SZ_256;
582 	phys_addr_t gap_size = SMP_CACHE_BYTES;
583 	phys_addr_t max_addr;
584 	phys_addr_t min_addr;
585 
586 	setup_memblock();
587 
588 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
589 	r1.size = SMP_CACHE_BYTES;
590 
591 	r2.size = MEM_SIZE - (r1.size + gap_size);
592 	r2.base = memblock_start_of_DRAM();
593 
594 	min_addr = r2.base + r2.size;
595 	max_addr = r1.base;
596 
597 	memblock_reserve(r1.base, r1.size);
598 	memblock_reserve(r2.base, r2.size);
599 
600 	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
601 					       min_addr, max_addr, NUMA_NO_NODE);
602 
603 	ASSERT_EQ(allocated_ptr, NULL);
604 
605 	test_pass_pop();
606 
607 	return 0;
608 }
609 
610 /*
611  * A test that tries to allocate a memory region, where max_addr is
612  * bigger than the end address of the available memory. Expect to allocate
613  * a cleared region that ends before the end of the memory.
614  */
615 static int alloc_try_nid_top_down_cap_max_check(void)
616 {
617 	struct memblock_region *rgn = &memblock.reserved.regions[0];
618 	void *allocated_ptr = NULL;
619 
620 	PREFIX_PUSH();
621 
622 	phys_addr_t size = SZ_256;
623 	phys_addr_t min_addr;
624 	phys_addr_t max_addr;
625 
626 	setup_memblock();
627 
628 	min_addr = memblock_end_of_DRAM() - SZ_1K;
629 	max_addr = memblock_end_of_DRAM() + SZ_256;
630 
631 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
632 					       min_addr, max_addr, NUMA_NO_NODE);
633 
634 	ASSERT_NE(allocated_ptr, NULL);
635 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
636 
637 	ASSERT_EQ(rgn->size, size);
638 	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
639 
640 	ASSERT_EQ(memblock.reserved.cnt, 1);
641 	ASSERT_EQ(memblock.reserved.total_size, size);
642 
643 	test_pass_pop();
644 
645 	return 0;
646 }
647 
648 /*
649  * A test that tries to allocate a memory region, where min_addr is
650  * smaller than the start address of the available memory. Expect to allocate
651  * a cleared region that ends before the end of the memory.
652  */
653 static int alloc_try_nid_top_down_cap_min_check(void)
654 {
655 	struct memblock_region *rgn = &memblock.reserved.regions[0];
656 	void *allocated_ptr = NULL;
657 
658 	PREFIX_PUSH();
659 
660 	phys_addr_t size = SZ_1K;
661 	phys_addr_t min_addr;
662 	phys_addr_t max_addr;
663 
664 	setup_memblock();
665 
666 	min_addr = memblock_start_of_DRAM() - SZ_256;
667 	max_addr = memblock_end_of_DRAM();
668 
669 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
670 					       min_addr, max_addr, NUMA_NO_NODE);
671 
672 	ASSERT_NE(allocated_ptr, NULL);
673 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
674 
675 	ASSERT_EQ(rgn->size, size);
676 	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
677 
678 	ASSERT_EQ(memblock.reserved.cnt, 1);
679 	ASSERT_EQ(memblock.reserved.total_size, size);
680 
681 	test_pass_pop();
682 
683 	return 0;
684 }
685 
686 /*
687  * A simple test that tries to allocate a memory region within min_addr and
688  * max_addr range:
689  *
690  *        +                       +
691  *   |    +-----------+           |      |
692  *   |    |    rgn    |           |      |
693  *   +----+-----------+-----------+------+
694  *        ^                       ^
695  *        |                       |
696  *        min_addr                max_addr
697  *
698  * Expect to allocate a cleared region that ends before max_addr.
699  */
700 static int alloc_try_nid_bottom_up_simple_check(void)
701 {
702 	struct memblock_region *rgn = &memblock.reserved.regions[0];
703 	void *allocated_ptr = NULL;
704 
705 	PREFIX_PUSH();
706 
707 	phys_addr_t size = SZ_128;
708 	phys_addr_t min_addr;
709 	phys_addr_t max_addr;
710 	phys_addr_t rgn_end;
711 
712 	setup_memblock();
713 
714 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
715 	max_addr = min_addr + SZ_512;
716 
717 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
718 					       min_addr, max_addr,
719 					       NUMA_NO_NODE);
720 	rgn_end = rgn->base + rgn->size;
721 
722 	ASSERT_NE(allocated_ptr, NULL);
723 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
724 
725 	ASSERT_EQ(rgn->size, size);
726 	ASSERT_EQ(rgn->base, min_addr);
727 	ASSERT_LT(rgn_end, max_addr);
728 
729 	ASSERT_EQ(memblock.reserved.cnt, 1);
730 	ASSERT_EQ(memblock.reserved.total_size, size);
731 
732 	test_pass_pop();
733 
734 	return 0;
735 }
736 
737 /*
738  * A simple test that tries to allocate a memory region within min_addr and
739  * max_addr range, where the start address is misaligned:
740  *
741  *        +                     +
742  *  |     +   +-----------+     +     |
743  *  |     |   |    rgn    |     |     |
744  *  +-----+---+-----------+-----+-----+
745  *        ^   ^----.            ^
746  *        |        |            |
 *     min_addr    |            max_addr
748  *                 |
749  *                 Aligned address
750  *                 boundary
751  *
752  * Expect to allocate a cleared, aligned region that ends before max_addr.
753  */
754 static int alloc_try_nid_bottom_up_start_misaligned_check(void)
755 {
756 	struct memblock_region *rgn = &memblock.reserved.regions[0];
757 	void *allocated_ptr = NULL;
758 
759 	PREFIX_PUSH();
760 
761 	phys_addr_t size = SZ_128;
762 	phys_addr_t misalign = SZ_2;
763 	phys_addr_t min_addr;
764 	phys_addr_t max_addr;
765 	phys_addr_t rgn_end;
766 
767 	setup_memblock();
768 
769 	min_addr = memblock_start_of_DRAM() + misalign;
770 	max_addr = min_addr + SZ_512;
771 
772 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
773 					       min_addr, max_addr,
774 					       NUMA_NO_NODE);
775 	rgn_end = rgn->base + rgn->size;
776 
777 	ASSERT_NE(allocated_ptr, NULL);
778 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
779 
780 	ASSERT_EQ(rgn->size, size);
781 	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
782 	ASSERT_LT(rgn_end, max_addr);
783 
784 	ASSERT_EQ(memblock.reserved.cnt, 1);
785 	ASSERT_EQ(memblock.reserved.total_size, size);
786 
787 	test_pass_pop();
788 
789 	return 0;
790 }
791 
792 /*
793  * A test that tries to allocate a memory region, which can't fit into min_addr
794  * and max_addr range:
795  *
796  *                      +    +
797  *  |---------+         +    +      |
798  *  |   rgn   |         |    |      |
799  *  +---------+---------+----+------+
800  *                      ^    ^
801  *                      |    |
802  *                      |    max_addr
803  *                      |
 *                      min_addr
805  *
806  * Expect to drop the lower limit and allocate a cleared memory region which
807  * starts at the beginning of the available memory.
808  */
809 static int alloc_try_nid_bottom_up_narrow_range_check(void)
810 {
811 	struct memblock_region *rgn = &memblock.reserved.regions[0];
812 	void *allocated_ptr = NULL;
813 
814 	PREFIX_PUSH();
815 
816 	phys_addr_t size = SZ_256;
817 	phys_addr_t min_addr;
818 	phys_addr_t max_addr;
819 
820 	setup_memblock();
821 
822 	min_addr = memblock_start_of_DRAM() + SZ_512;
823 	max_addr = min_addr + SMP_CACHE_BYTES;
824 
825 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
826 					       min_addr, max_addr,
827 					       NUMA_NO_NODE);
828 
829 	ASSERT_NE(allocated_ptr, NULL);
830 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
831 
832 	ASSERT_EQ(rgn->size, size);
833 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
834 
835 	ASSERT_EQ(memblock.reserved.cnt, 1);
836 	ASSERT_EQ(memblock.reserved.total_size, size);
837 
838 	test_pass_pop();
839 
840 	return 0;
841 }
842 
843 /*
 * A test that tries to allocate memory within min_addr and max_addr range, when
845  * there are two reserved regions at the borders, with a gap big enough to fit
846  * a new region:
847  *
848  *                +           +
849  *  |    +--------+-------+   +------+  |
850  *  |    |   r2   |  rgn  |   |  r1  |  |
851  *  +----+--------+-------+---+------+--+
852  *                ^           ^
853  *                |           |
854  *                min_addr    max_addr
855  *
856  * Expect to merge the new region with r2. The second region does not get
857  * updated. The total size field gets updated.
858  */
859 
860 static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
861 {
862 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
863 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
864 	void *allocated_ptr = NULL;
865 	struct region r1, r2;
866 
867 	PREFIX_PUSH();
868 
869 	phys_addr_t r3_size = SZ_64;
870 	phys_addr_t gap_size = SMP_CACHE_BYTES;
871 	phys_addr_t total_size;
872 	phys_addr_t max_addr;
873 	phys_addr_t min_addr;
874 
875 	setup_memblock();
876 
877 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
878 	r1.size = SMP_CACHE_BYTES;
879 
880 	r2.size = SZ_128;
881 	r2.base = r1.base - (r3_size + gap_size + r2.size);
882 
883 	total_size = r1.size + r2.size + r3_size;
884 	min_addr = r2.base + r2.size;
885 	max_addr = r1.base;
886 
887 	memblock_reserve(r1.base, r1.size);
888 	memblock_reserve(r2.base, r2.size);
889 
890 	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
891 					       min_addr, max_addr,
892 					       NUMA_NO_NODE);
893 
894 	ASSERT_NE(allocated_ptr, NULL);
895 	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);
896 
897 	ASSERT_EQ(rgn1->size, r1.size);
898 	ASSERT_EQ(rgn1->base, max_addr);
899 
900 	ASSERT_EQ(rgn2->size, r2.size + r3_size);
901 	ASSERT_EQ(rgn2->base, r2.base);
902 
903 	ASSERT_EQ(memblock.reserved.cnt, 2);
904 	ASSERT_EQ(memblock.reserved.total_size, total_size);
905 
906 	test_pass_pop();
907 
908 	return 0;
909 }
910 
911 /*
 * A test that tries to allocate memory within min_addr and max_addr range, when
913  * there are two reserved regions at the borders, with a gap of a size equal to
914  * the size of the new region:
915  *
916  *                         +   +
917  *  |----------+    +------+   +----+  |
918  *  |    r3    |    |  r2  |   | r1 |  |
919  *  +----------+----+------+---+----+--+
920  *                         ^   ^
921  *                         |   |
922  *                         |  max_addr
923  *                         |
924  *                         min_addr
925  *
926  * Expect to drop the lower limit and allocate memory at the beginning of the
927  * available memory. The region counter and total size fields get updated.
928  * Other regions are not modified.
929  */
930 
931 static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
932 {
933 	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
934 	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
935 	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
936 	void *allocated_ptr = NULL;
937 	struct region r1, r2;
938 
939 	PREFIX_PUSH();
940 
941 	phys_addr_t r3_size = SZ_256;
942 	phys_addr_t gap_size = SMP_CACHE_BYTES;
943 	phys_addr_t total_size;
944 	phys_addr_t max_addr;
945 	phys_addr_t min_addr;
946 
947 	setup_memblock();
948 
949 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
950 	r1.size = SMP_CACHE_BYTES;
951 
952 	r2.size = SZ_128;
953 	r2.base = r1.base - (r2.size + gap_size);
954 
955 	total_size = r1.size + r2.size + r3_size;
956 	min_addr = r2.base + r2.size;
957 	max_addr = r1.base;
958 
959 	memblock_reserve(r1.base, r1.size);
960 	memblock_reserve(r2.base, r2.size);
961 
962 	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
963 					       min_addr, max_addr,
964 					       NUMA_NO_NODE);
965 
966 	ASSERT_NE(allocated_ptr, NULL);
967 	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);
968 
969 	ASSERT_EQ(rgn3->size, r3_size);
970 	ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());
971 
972 	ASSERT_EQ(rgn2->size, r2.size);
973 	ASSERT_EQ(rgn2->base, r2.base);
974 
975 	ASSERT_EQ(rgn1->size, r1.size);
976 	ASSERT_EQ(rgn1->base, r1.base);
977 
978 	ASSERT_EQ(memblock.reserved.cnt, 3);
979 	ASSERT_EQ(memblock.reserved.total_size, total_size);
980 
981 	test_pass_pop();
982 
983 	return 0;
984 }
985 
986 /*
987  * A test that tries to allocate a memory region, where max_addr is
988  * bigger than the end address of the available memory. Expect to allocate
 * a cleared region that starts at min_addr.
990  */
991 static int alloc_try_nid_bottom_up_cap_max_check(void)
992 {
993 	struct memblock_region *rgn = &memblock.reserved.regions[0];
994 	void *allocated_ptr = NULL;
995 
996 	PREFIX_PUSH();
997 
998 	phys_addr_t size = SZ_256;
999 	phys_addr_t min_addr;
1000 	phys_addr_t max_addr;
1001 
1002 	setup_memblock();
1003 
1004 	min_addr = memblock_start_of_DRAM() + SZ_1K;
1005 	max_addr = memblock_end_of_DRAM() + SZ_256;
1006 
1007 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1008 					       min_addr, max_addr,
1009 					       NUMA_NO_NODE);
1010 
1011 	ASSERT_NE(allocated_ptr, NULL);
1012 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
1013 
1014 	ASSERT_EQ(rgn->size, size);
1015 	ASSERT_EQ(rgn->base, min_addr);
1016 
1017 	ASSERT_EQ(memblock.reserved.cnt, 1);
1018 	ASSERT_EQ(memblock.reserved.total_size, size);
1019 
1020 	test_pass_pop();
1021 
1022 	return 0;
1023 }
1024 
1025 /*
1026  * A test that tries to allocate a memory region, where min_addr is
1027  * smaller than the start address of the available memory. Expect to allocate
1028  * a cleared region at the beginning of the available memory.
1029  */
1030 static int alloc_try_nid_bottom_up_cap_min_check(void)
1031 {
1032 	struct memblock_region *rgn = &memblock.reserved.regions[0];
1033 	void *allocated_ptr = NULL;
1034 
1035 	PREFIX_PUSH();
1036 
1037 	phys_addr_t size = SZ_1K;
1038 	phys_addr_t min_addr;
1039 	phys_addr_t max_addr;
1040 
1041 	setup_memblock();
1042 
1043 	min_addr = memblock_start_of_DRAM();
1044 	max_addr = memblock_end_of_DRAM() - SZ_256;
1045 
1046 	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1047 					       min_addr, max_addr,
1048 					       NUMA_NO_NODE);
1049 
1050 	ASSERT_NE(allocated_ptr, NULL);
1051 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
1052 
1053 	ASSERT_EQ(rgn->size, size);
1054 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
1055 
1056 	ASSERT_EQ(memblock.reserved.cnt, 1);
1057 	ASSERT_EQ(memblock.reserved.total_size, size);
1058 
1059 	test_pass_pop();
1060 
1061 	return 0;
1062 }
1063 
1064 /* Test case wrappers */
/* Run the simple range check in both allocation directions. */
static int alloc_try_nid_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_simple_check();

	return 0;
}
1075 
/* Run the misaligned-limit checks in both allocation directions. */
static int alloc_try_nid_misaligned_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_end_misaligned_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_start_misaligned_check();

	return 0;
}
1086 
1087 static int alloc_try_nid_narrow_range_check(void)
1088 {
1089 	test_print("\tRunning %s...\n", __func__);
1090 	memblock_set_bottom_up(false);
1091 	alloc_try_nid_top_down_narrow_range_check();
1092 	memblock_set_bottom_up(true);
1093 	alloc_try_nid_bottom_up_narrow_range_check();
1094 
1095 	return 0;
1096 }
1097 
1098 static int alloc_try_nid_reserved_with_space_check(void)
1099 {
1100 	test_print("\tRunning %s...\n", __func__);
1101 	memblock_set_bottom_up(false);
1102 	alloc_try_nid_top_down_reserved_with_space_check();
1103 	memblock_set_bottom_up(true);
1104 	alloc_try_nid_bottom_up_reserved_with_space_check();
1105 
1106 	return 0;
1107 }
1108 
1109 static int alloc_try_nid_reserved_no_space_check(void)
1110 {
1111 	test_print("\tRunning %s...\n", __func__);
1112 	memblock_set_bottom_up(false);
1113 	alloc_try_nid_top_down_reserved_no_space_check();
1114 	memblock_set_bottom_up(true);
1115 	alloc_try_nid_bottom_up_reserved_no_space_check();
1116 
1117 	return 0;
1118 }
1119 
1120 static int alloc_try_nid_cap_max_check(void)
1121 {
1122 	test_print("\tRunning %s...\n", __func__);
1123 	memblock_set_bottom_up(false);
1124 	alloc_try_nid_top_down_cap_max_check();
1125 	memblock_set_bottom_up(true);
1126 	alloc_try_nid_bottom_up_cap_max_check();
1127 
1128 	return 0;
1129 }
1130 
1131 static int alloc_try_nid_cap_min_check(void)
1132 {
1133 	test_print("\tRunning %s...\n", __func__);
1134 	memblock_set_bottom_up(false);
1135 	alloc_try_nid_top_down_cap_min_check();
1136 	memblock_set_bottom_up(true);
1137 	alloc_try_nid_bottom_up_cap_min_check();
1138 
1139 	return 0;
1140 }
1141 
/*
 * Run the direction-independent "reservation near min_addr" test in both
 * top-down and bottom-up modes.
 */
static int alloc_try_nid_min_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_min_reserved_generic_check);
	run_bottom_up(alloc_try_nid_min_reserved_generic_check);

	return 0;
}
1150 
/*
 * Run the direction-independent "reservation near max_addr" test in both
 * top-down and bottom-up modes.
 */
static int alloc_try_nid_max_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_max_reserved_generic_check);
	run_bottom_up(alloc_try_nid_max_reserved_generic_check);

	return 0;
}
1159 
/*
 * Run the direction-independent "range exactly fits the allocation" test in
 * both top-down and bottom-up modes.
 */
static int alloc_try_nid_exact_address_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_exact_address_generic_check);
	run_bottom_up(alloc_try_nid_exact_address_generic_check);

	return 0;
}
1168 
/*
 * Run the direction-independent "allocation merges fully with neighboring
 * reserved regions" test in both top-down and bottom-up modes.
 */
static int alloc_try_nid_reserved_full_merge_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_reserved_full_merge_generic_check);
	run_bottom_up(alloc_try_nid_reserved_full_merge_generic_check);

	return 0;
}
1177 
/*
 * Run the direction-independent "requested range is entirely reserved" test
 * in both top-down and bottom-up modes.
 */
static int alloc_try_nid_reserved_all_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_reserved_all_generic_check);
	run_bottom_up(alloc_try_nid_reserved_all_generic_check);

	return 0;
}
1186 
/*
 * Run the direction-independent "max_addr below the available memory" test
 * in both top-down and bottom-up modes.
 */
static int alloc_try_nid_low_max_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_low_max_generic_check);
	run_bottom_up(alloc_try_nid_low_max_generic_check);

	return 0;
}
1195 
/*
 * Entry point for the memblock_alloc_try_nid() test suite: sets up the test
 * prefix and dummy physical memory, runs every test case, then cleans up.
 */
int memblock_alloc_nid_checks(void)
{
	const char *suite = "memblock_alloc_try_nid";

	prefix_reset();
	prefix_push(suite);
	test_print("Running %s tests...\n", suite);

	reset_memblock_attributes();
	dummy_physical_memory_init();

	/* Tests with distinct top-down and bottom-up variants. */
	alloc_try_nid_simple_check();
	alloc_try_nid_misaligned_check();
	alloc_try_nid_narrow_range_check();
	alloc_try_nid_reserved_with_space_check();
	alloc_try_nid_reserved_no_space_check();
	alloc_try_nid_cap_max_check();
	alloc_try_nid_cap_min_check();

	/* Direction-independent (generic) tests, run in both modes. */
	alloc_try_nid_min_reserved_check();
	alloc_try_nid_max_reserved_check();
	alloc_try_nid_exact_address_check();
	alloc_try_nid_reserved_full_merge_check();
	alloc_try_nid_reserved_all_check();
	alloc_try_nid_low_max_check();

	dummy_physical_memory_cleanup();

	prefix_pop();

	return 0;
}
1228