1 // SPDX-License-Identifier: GPL-2.0-or-later 2 #include <string.h> 3 #include <linux/memblock.h> 4 #include "basic_api.h" 5 6 #define EXPECTED_MEMBLOCK_REGIONS 128 7 #define FUNC_ADD "memblock_add" 8 #define FUNC_RESERVE "memblock_reserve" 9 #define FUNC_REMOVE "memblock_remove" 10 #define FUNC_FREE "memblock_free" 11 #define FUNC_TRIM "memblock_trim_memory" 12 13 static int memblock_initialization_check(void) 14 { 15 PREFIX_PUSH(); 16 17 ASSERT_NE(memblock.memory.regions, NULL); 18 ASSERT_EQ(memblock.memory.cnt, 1); 19 ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS); 20 ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0); 21 22 ASSERT_NE(memblock.reserved.regions, NULL); 23 ASSERT_EQ(memblock.reserved.cnt, 1); 24 ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS); 25 ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0); 26 27 ASSERT_EQ(memblock.bottom_up, false); 28 ASSERT_EQ(memblock.current_limit, MEMBLOCK_ALLOC_ANYWHERE); 29 30 test_pass_pop(); 31 32 return 0; 33 } 34 35 /* 36 * A simple test that adds a memory block of a specified base address 37 * and size to the collection of available memory regions (memblock.memory). 38 * Expect to create a new entry. The region counter and total memory get 39 * updated. 40 */ 41 static int memblock_add_simple_check(void) 42 { 43 struct memblock_region *rgn; 44 45 rgn = &memblock.memory.regions[0]; 46 47 struct region r = { 48 .base = SZ_1G, 49 .size = SZ_4M 50 }; 51 52 PREFIX_PUSH(); 53 54 reset_memblock_regions(); 55 memblock_add(r.base, r.size); 56 57 ASSERT_EQ(rgn->base, r.base); 58 ASSERT_EQ(rgn->size, r.size); 59 60 ASSERT_EQ(memblock.memory.cnt, 1); 61 ASSERT_EQ(memblock.memory.total_size, r.size); 62 63 test_pass_pop(); 64 65 return 0; 66 } 67 68 /* 69 * A simple test that adds a memory block of a specified base address, size, 70 * NUMA node and memory flags to the collection of available memory regions. 71 * Expect to create a new entry. The region counter and total memory get 72 * updated. 
 */
static int memblock_add_node_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = SZ_1M,
		.size = SZ_16M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);
#ifdef CONFIG_NUMA
	/* The per-region nid field only exists when NUMA support is built in. */
	ASSERT_EQ(rgn->nid, 1);
#endif
	ASSERT_EQ(rgn->flags, MEMBLOCK_HOTPLUG);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks that don't overlap with one
 * another:
 *
 *  |        +--------+        +--------+  |
 *  |        |   r1   |        |   r2   |  |
 *  +--------+--------+--------+--------+--+
 *
 * Expect to add two correctly initialized entries to the collection of
 * available memory regions (memblock.memory). The total size and
 * region counter fields get updated.
116 */ 117 static int memblock_add_disjoint_check(void) 118 { 119 struct memblock_region *rgn1, *rgn2; 120 121 rgn1 = &memblock.memory.regions[0]; 122 rgn2 = &memblock.memory.regions[1]; 123 124 struct region r1 = { 125 .base = SZ_1G, 126 .size = SZ_8K 127 }; 128 struct region r2 = { 129 .base = SZ_1G + SZ_16K, 130 .size = SZ_8K 131 }; 132 133 PREFIX_PUSH(); 134 135 reset_memblock_regions(); 136 memblock_add(r1.base, r1.size); 137 memblock_add(r2.base, r2.size); 138 139 ASSERT_EQ(rgn1->base, r1.base); 140 ASSERT_EQ(rgn1->size, r1.size); 141 142 ASSERT_EQ(rgn2->base, r2.base); 143 ASSERT_EQ(rgn2->size, r2.size); 144 145 ASSERT_EQ(memblock.memory.cnt, 2); 146 ASSERT_EQ(memblock.memory.total_size, r1.size + r2.size); 147 148 test_pass_pop(); 149 150 return 0; 151 } 152 153 /* 154 * A test that tries to add two memory blocks r1 and r2, where r2 overlaps 155 * with the beginning of r1 (that is r1.base < r2.base + r2.size): 156 * 157 * | +----+----+------------+ | 158 * | | |r2 | r1 | | 159 * +----+----+----+------------+----------+ 160 * ^ ^ 161 * | | 162 * | r1.base 163 * | 164 * r2.base 165 * 166 * Expect to merge the two entries into one region that starts at r2.base 167 * and has size of two regions minus their intersection. The total size of 168 * the available memory is updated, and the region counter stays the same. 
 */
static int memblock_add_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_512M,
		.size = SZ_1G
	};
	struct region r2 = {
		.base = SZ_256M,
		.size = SZ_512M
	};

	PREFIX_PUSH();

	/* The merged region spans from r2.base up to the end of r1. */
	total_size = (r1.base - r2.base) + r1.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
 * with the end of r1 (that is r2.base < r1.base + r1.size):
 *
 *  |  +--+------+----------+              |
 *  |  |  |  r1  |    r2    |              |
 *  +--+--+------+----------+--------------+
 *     ^  ^
 *     |  |
 *     |  r2.base
 *     |
 *     r1.base
 *
 * Expect to merge the two entries into one region that starts at r1.base
 * and has size of two regions minus their intersection. The total size of
 * the available memory is updated, and the region counter stays the same.
 */
static int memblock_add_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_128M,
		.size = SZ_512M
	};
	struct region r2 = {
		.base = SZ_256M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	/* The merged region spans from r1.base up to the end of r2. */
	total_size = (r2.base - r1.base) + r2.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 is
 * within the range of r1 (that is r1.base < r2.base &&
 * r2.base + r2.size < r1.base + r1.size):
 *
 *  |   +-------+--+-----------------------+
 *  |   |       |r2|          r1           |
 *  +---+-------+--+-----------------------+
 *      ^
 *      |
 *      r1.base
 *
 * Expect to merge two entries into one region that stays the same.
 * The counter and total size of available memory are not updated.
 */
static int memblock_add_within_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_8M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	/* r2 is fully contained in r1, so the entry must not change at all. */
	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to add the same memory block twice. Expect
 * the counter and total size of available memory to not be updated.
307 */ 308 static int memblock_add_twice_check(void) 309 { 310 struct region r = { 311 .base = SZ_16K, 312 .size = SZ_2M 313 }; 314 315 PREFIX_PUSH(); 316 317 reset_memblock_regions(); 318 319 memblock_add(r.base, r.size); 320 memblock_add(r.base, r.size); 321 322 ASSERT_EQ(memblock.memory.cnt, 1); 323 ASSERT_EQ(memblock.memory.total_size, r.size); 324 325 test_pass_pop(); 326 327 return 0; 328 } 329 330 /* 331 * A test that tries to add two memory blocks that don't overlap with one 332 * another and then add a third memory block in the space between the first two: 333 * 334 * | +--------+--------+--------+ | 335 * | | r1 | r3 | r2 | | 336 * +--------+--------+--------+--------+--+ 337 * 338 * Expect to merge the three entries into one region that starts at r1.base 339 * and has size of r1.size + r2.size + r3.size. The region counter and total 340 * size of the available memory are updated. 341 */ 342 static int memblock_add_between_check(void) 343 { 344 struct memblock_region *rgn; 345 phys_addr_t total_size; 346 347 rgn = &memblock.memory.regions[0]; 348 349 struct region r1 = { 350 .base = SZ_1G, 351 .size = SZ_8K 352 }; 353 struct region r2 = { 354 .base = SZ_1G + SZ_16K, 355 .size = SZ_8K 356 }; 357 struct region r3 = { 358 .base = SZ_1G + SZ_8K, 359 .size = SZ_8K 360 }; 361 362 PREFIX_PUSH(); 363 364 total_size = r1.size + r2.size + r3.size; 365 366 reset_memblock_regions(); 367 memblock_add(r1.base, r1.size); 368 memblock_add(r2.base, r2.size); 369 memblock_add(r3.base, r3.size); 370 371 ASSERT_EQ(rgn->base, r1.base); 372 ASSERT_EQ(rgn->size, total_size); 373 374 ASSERT_EQ(memblock.memory.cnt, 1); 375 ASSERT_EQ(memblock.memory.total_size, total_size); 376 377 test_pass_pop(); 378 379 return 0; 380 } 381 382 /* 383 * A simple test that tries to add a memory block r when r extends past 384 * PHYS_ADDR_MAX: 385 * 386 * +--------+ 387 * | r | 388 * +--------+ 389 * | +----+ 390 * | | rgn| 391 * +----------------------------+----+ 392 * 393 * Expect to add a 
memory block of size PHYS_ADDR_MAX - r.base. Expect the 394 * total size of available memory and the counter to be updated. 395 */ 396 static int memblock_add_near_max_check(void) 397 { 398 struct memblock_region *rgn; 399 phys_addr_t total_size; 400 401 rgn = &memblock.memory.regions[0]; 402 403 struct region r = { 404 .base = PHYS_ADDR_MAX - SZ_1M, 405 .size = SZ_2M 406 }; 407 408 PREFIX_PUSH(); 409 410 total_size = PHYS_ADDR_MAX - r.base; 411 412 reset_memblock_regions(); 413 memblock_add(r.base, r.size); 414 415 ASSERT_EQ(rgn->base, r.base); 416 ASSERT_EQ(rgn->size, total_size); 417 418 ASSERT_EQ(memblock.memory.cnt, 1); 419 ASSERT_EQ(memblock.memory.total_size, total_size); 420 421 test_pass_pop(); 422 423 return 0; 424 } 425 426 /* 427 * A test that trying to add the 129th memory block. 428 * Expect to trigger memblock_double_array() to double the 429 * memblock.memory.max, find a new valid memory as 430 * memory.regions. 431 */ 432 static int memblock_add_many_check(void) 433 { 434 int i; 435 void *orig_region; 436 struct region r = { 437 .base = SZ_16K, 438 .size = SZ_16K, 439 }; 440 phys_addr_t new_memory_regions_size; 441 phys_addr_t base, size = SZ_64; 442 phys_addr_t gap_size = SZ_64; 443 444 PREFIX_PUSH(); 445 446 reset_memblock_regions(); 447 memblock_allow_resize(); 448 449 dummy_physical_memory_init(); 450 /* 451 * We allocated enough memory by using dummy_physical_memory_init(), and 452 * split it into small block. First we split a large enough memory block 453 * as the memory region which will be choosed by memblock_double_array(). 454 */ 455 base = PAGE_ALIGN(dummy_physical_memory_base()); 456 new_memory_regions_size = PAGE_ALIGN(INIT_MEMBLOCK_REGIONS * 2 * 457 sizeof(struct memblock_region)); 458 memblock_add(base, new_memory_regions_size); 459 460 /* This is the base of small memory block. 
*/ 461 base += new_memory_regions_size + gap_size; 462 463 orig_region = memblock.memory.regions; 464 465 for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) { 466 /* 467 * Add these small block to fulfill the memblock. We keep a 468 * gap between the nearby memory to avoid being merged. 469 */ 470 memblock_add(base, size); 471 base += size + gap_size; 472 473 ASSERT_EQ(memblock.memory.cnt, i + 2); 474 ASSERT_EQ(memblock.memory.total_size, new_memory_regions_size + 475 (i + 1) * size); 476 } 477 478 /* 479 * At there, memblock_double_array() has been succeed, check if it 480 * update the memory.max. 481 */ 482 ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2); 483 484 /* memblock_double_array() will reserve the memory it used. Check it. */ 485 ASSERT_EQ(memblock.reserved.cnt, 1); 486 ASSERT_EQ(memblock.reserved.total_size, new_memory_regions_size); 487 488 /* 489 * Now memblock_double_array() works fine. Let's check after the 490 * double_array(), the memblock_add() still works as normal. 491 */ 492 memblock_add(r.base, r.size); 493 ASSERT_EQ(memblock.memory.regions[0].base, r.base); 494 ASSERT_EQ(memblock.memory.regions[0].size, r.size); 495 496 ASSERT_EQ(memblock.memory.cnt, INIT_MEMBLOCK_REGIONS + 2); 497 ASSERT_EQ(memblock.memory.total_size, INIT_MEMBLOCK_REGIONS * size + 498 new_memory_regions_size + 499 r.size); 500 ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2); 501 502 dummy_physical_memory_cleanup(); 503 504 /* 505 * The current memory.regions is occupying a range of memory that 506 * allocated from dummy_physical_memory_init(). After free the memory, 507 * we must not use it. So restore the origin memory region to make sure 508 * the tests can run as normal and not affected by the double array. 
509 */ 510 memblock.memory.regions = orig_region; 511 memblock.memory.cnt = INIT_MEMBLOCK_REGIONS; 512 513 test_pass_pop(); 514 515 return 0; 516 } 517 518 static int memblock_add_checks(void) 519 { 520 prefix_reset(); 521 prefix_push(FUNC_ADD); 522 test_print("Running %s tests...\n", FUNC_ADD); 523 524 memblock_add_simple_check(); 525 memblock_add_node_simple_check(); 526 memblock_add_disjoint_check(); 527 memblock_add_overlap_top_check(); 528 memblock_add_overlap_bottom_check(); 529 memblock_add_within_check(); 530 memblock_add_twice_check(); 531 memblock_add_between_check(); 532 memblock_add_near_max_check(); 533 memblock_add_many_check(); 534 535 prefix_pop(); 536 537 return 0; 538 } 539 540 /* 541 * A simple test that marks a memory block of a specified base address 542 * and size as reserved and to the collection of reserved memory regions 543 * (memblock.reserved). Expect to create a new entry. The region counter 544 * and total memory size are updated. 545 */ 546 static int memblock_reserve_simple_check(void) 547 { 548 struct memblock_region *rgn; 549 550 rgn = &memblock.reserved.regions[0]; 551 552 struct region r = { 553 .base = SZ_2G, 554 .size = SZ_128M 555 }; 556 557 PREFIX_PUSH(); 558 559 reset_memblock_regions(); 560 memblock_reserve(r.base, r.size); 561 562 ASSERT_EQ(rgn->base, r.base); 563 ASSERT_EQ(rgn->size, r.size); 564 565 test_pass_pop(); 566 567 return 0; 568 } 569 570 /* 571 * A test that tries to mark two memory blocks that don't overlap as reserved: 572 * 573 * | +--+ +----------------+ | 574 * | |r1| | r2 | | 575 * +--------+--+------+----------------+--+ 576 * 577 * Expect to add two entries to the collection of reserved memory regions 578 * (memblock.reserved). The total size and region counter for 579 * memblock.reserved are updated. 
 */
static int memblock_reserve_disjoint_check(void)
{
	struct memblock_region *rgn1, *rgn2;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	struct region r1 = {
		.base = SZ_256M,
		.size = SZ_16M
	};
	struct region r2 = {
		.base = SZ_512M,
		.size = SZ_512M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	/* Non-adjacent reservations stay separate, lowest base first. */
	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, r2.base);
	ASSERT_EQ(rgn2->size, r2.size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, r1.size + r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 overlaps with the beginning of r1 (that is
 * r1.base < r2.base + r2.size):
 *
 *  |  +--------------+--+--------------+  |
 *  |  |      r2      |  |      r1      |  |
 *  +--+--------------+--+--------------+--+
 *     ^              ^
 *     |              |
 *     |              r1.base
 *     |
 *     r2.base
 *
 * Expect to merge two entries into one region that starts at r2.base and
 * has size of two regions minus their intersection. The total size of the
 * reserved memory is updated, and the region counter is not updated.
 */
static int memblock_reserve_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_1G
	};
	struct region r2 = {
		.base = SZ_128M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	/* The merged reservation spans from r2.base up to the end of r1. */
	total_size = (r1.base - r2.base) + r1.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 overlaps with the end of r1 (that is
 * r2.base < r1.base + r1.size):
 *
 *  |  +--------------+--+--------------+  |
 *  |  |      r1      |  |      r2      |  |
 *  +--+--------------+--+--------------+--+
 *     ^              ^
 *     |              |
 *     |              r2.base
 *     |
 *     r1.base
 *
 * Expect to merge two entries into one region that starts at r1.base and
 * has size of two regions minus their intersection. The total size of the
 * reserved memory is updated, and the region counter is not updated.
687 */ 688 static int memblock_reserve_overlap_bottom_check(void) 689 { 690 struct memblock_region *rgn; 691 phys_addr_t total_size; 692 693 rgn = &memblock.reserved.regions[0]; 694 695 struct region r1 = { 696 .base = SZ_2K, 697 .size = SZ_128K 698 }; 699 struct region r2 = { 700 .base = SZ_128K, 701 .size = SZ_128K 702 }; 703 704 PREFIX_PUSH(); 705 706 total_size = (r2.base - r1.base) + r2.size; 707 708 reset_memblock_regions(); 709 memblock_reserve(r1.base, r1.size); 710 memblock_reserve(r2.base, r2.size); 711 712 ASSERT_EQ(rgn->base, r1.base); 713 ASSERT_EQ(rgn->size, total_size); 714 715 ASSERT_EQ(memblock.reserved.cnt, 1); 716 ASSERT_EQ(memblock.reserved.total_size, total_size); 717 718 test_pass_pop(); 719 720 return 0; 721 } 722 723 /* 724 * A test that tries to mark two memory blocks r1 and r2 as reserved, 725 * where r2 is within the range of r1 (that is 726 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)): 727 * 728 * | +-----+--+---------------------------| 729 * | | |r2| r1 | 730 * +-+-----+--+---------------------------+ 731 * ^ ^ 732 * | | 733 * | r2.base 734 * | 735 * r1.base 736 * 737 * Expect to merge two entries into one region that stays the same. The 738 * counter and total size of available memory are not updated. 739 */ 740 static int memblock_reserve_within_check(void) 741 { 742 struct memblock_region *rgn; 743 744 rgn = &memblock.reserved.regions[0]; 745 746 struct region r1 = { 747 .base = SZ_1M, 748 .size = SZ_8M 749 }; 750 struct region r2 = { 751 .base = SZ_2M, 752 .size = SZ_64K 753 }; 754 755 PREFIX_PUSH(); 756 757 reset_memblock_regions(); 758 memblock_reserve(r1.base, r1.size); 759 memblock_reserve(r2.base, r2.size); 760 761 ASSERT_EQ(rgn->base, r1.base); 762 ASSERT_EQ(rgn->size, r1.size); 763 764 ASSERT_EQ(memblock.reserved.cnt, 1); 765 ASSERT_EQ(memblock.reserved.total_size, r1.size); 766 767 test_pass_pop(); 768 769 return 0; 770 } 771 772 /* 773 * A simple test that tries to reserve the same memory block twice. 
 * Expect the region counter and total size of reserved memory to not
 * be updated.
 */
static int memblock_reserve_twice_check(void)
{
	struct region r = {
		.base = SZ_16K,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	reset_memblock_regions();

	/* The second identical reservation must be a no-op. */
	memblock_reserve(r.base, r.size);
	memblock_reserve(r.base, r.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks that don't overlap as reserved
 * and then reserve a third memory block in the space between the first two:
 *
 *  |        +--------+--------+--------+  |
 *  |        |   r1   |   r3   |   r2   |  |
 *  +--------+--------+--------+--------+--+
 *
 * Expect to merge the three entries into one reserved region that starts at
 * r1.base and has size of r1.size + r2.size + r3.size. The region counter and
 * total for memblock.reserved are updated.
 */
static int memblock_reserve_between_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};
	struct region r3 = {
		.base = SZ_1G + SZ_8K,
		.size = SZ_8K
	};

	PREFIX_PUSH();

	total_size = r1.size + r2.size + r3.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_reserve(r3.base, r3.size);

	/* r3 bridges the gap, collapsing all three into a single entry. */
	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to reserve a memory block r when r extends past
 * PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |    r   |
 *                               +--------+
 *  |                            +----+
 *  |                            | rgn|
 *  +----------------------------+----+
 *
 * Expect to reserve a memory block of size PHYS_ADDR_MAX - r.base. Expect the
 * total size of reserved memory and the counter to be updated.
 */
static int memblock_reserve_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	/* The reservation is clamped at PHYS_ADDR_MAX, so only 1M remains. */
	total_size = PHYS_ADDR_MAX - r.base;

	reset_memblock_regions();
	memblock_reserve(r.base, r.size);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/* Run all memblock_reserve() tests under the FUNC_RESERVE prefix. */
static int memblock_reserve_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_RESERVE);
	test_print("Running %s tests...\n", FUNC_RESERVE);

	memblock_reserve_simple_check();
	memblock_reserve_disjoint_check();
	memblock_reserve_overlap_top_check();
	memblock_reserve_overlap_bottom_check();
	memblock_reserve_within_check();
	memblock_reserve_twice_check();
	memblock_reserve_between_check();
	memblock_reserve_near_max_check();

	prefix_pop();

	return 0;
}

/*
 * A simple test that tries to remove a region r1 from the array of
 * available memory regions. By "removing" a region we mean overwriting it
 * with the next region r2 in memblock.memory:
 *
 *  |  ......          +----------------+  |
 *  |  : r1 :          |       r2       |  |
 *  +--+----+----------+----------------+--+
 *     ^
 *     |
 *     rgn.base
 *
 * Expect to add two memory blocks r1 and r2 and then remove r1 so that
 * r2 is the first available region. The region counter and total size
 * are updated.
930 */ 931 static int memblock_remove_simple_check(void) 932 { 933 struct memblock_region *rgn; 934 935 rgn = &memblock.memory.regions[0]; 936 937 struct region r1 = { 938 .base = SZ_2K, 939 .size = SZ_4K 940 }; 941 struct region r2 = { 942 .base = SZ_128K, 943 .size = SZ_4M 944 }; 945 946 PREFIX_PUSH(); 947 948 reset_memblock_regions(); 949 memblock_add(r1.base, r1.size); 950 memblock_add(r2.base, r2.size); 951 memblock_remove(r1.base, r1.size); 952 953 ASSERT_EQ(rgn->base, r2.base); 954 ASSERT_EQ(rgn->size, r2.size); 955 956 ASSERT_EQ(memblock.memory.cnt, 1); 957 ASSERT_EQ(memblock.memory.total_size, r2.size); 958 959 test_pass_pop(); 960 961 return 0; 962 } 963 964 /* 965 * A test that tries to remove a region r2 that was not registered as 966 * available memory (i.e. has no corresponding entry in memblock.memory): 967 * 968 * +----------------+ 969 * | r2 | 970 * +----------------+ 971 * | +----+ | 972 * | | r1 | | 973 * +--+----+------------------------------+ 974 * ^ 975 * | 976 * rgn.base 977 * 978 * Expect the array, regions counter and total size to not be modified. 
 */
static int memblock_remove_absent_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_512K,
		.size = SZ_4M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	/* r2 never intersected r1, so nothing may change. */
	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that overlaps with the
 * beginning of the already existing entry r1
 * (that is r1.base < r2.base + r2.size):
 *
 *  +-----------------+
 *  |       r2        |
 *  +-----------------+
 *  |  .........+--------+                 |
 *  |  :   r1   |  rgn   |                 |
 *  +-----------------+--------+--------+--+
 *     ^         ^
 *     |         |
 *     |         rgn.base
 *     r1.base
 *
 * Expect that only the intersection of both regions is removed from the
 * available memory pool. The regions counter and total size are updated.
1030 */ 1031 static int memblock_remove_overlap_top_check(void) 1032 { 1033 struct memblock_region *rgn; 1034 phys_addr_t r1_end, r2_end, total_size; 1035 1036 rgn = &memblock.memory.regions[0]; 1037 1038 struct region r1 = { 1039 .base = SZ_32M, 1040 .size = SZ_32M 1041 }; 1042 struct region r2 = { 1043 .base = SZ_16M, 1044 .size = SZ_32M 1045 }; 1046 1047 PREFIX_PUSH(); 1048 1049 r1_end = r1.base + r1.size; 1050 r2_end = r2.base + r2.size; 1051 total_size = r1_end - r2_end; 1052 1053 reset_memblock_regions(); 1054 memblock_add(r1.base, r1.size); 1055 memblock_remove(r2.base, r2.size); 1056 1057 ASSERT_EQ(rgn->base, r1.base + r2.base); 1058 ASSERT_EQ(rgn->size, total_size); 1059 1060 ASSERT_EQ(memblock.memory.cnt, 1); 1061 ASSERT_EQ(memblock.memory.total_size, total_size); 1062 1063 test_pass_pop(); 1064 1065 return 0; 1066 } 1067 1068 /* 1069 * A test that tries to remove a region r2 that overlaps with the end of 1070 * the already existing region r1 (that is r2.base < r1.base + r1.size): 1071 * 1072 * +--------------------------------+ 1073 * | r2 | 1074 * +--------------------------------+ 1075 * | +---+..... | 1076 * | |rgn| r1 : | 1077 * +-+---+----+---------------------------+ 1078 * ^ 1079 * | 1080 * r1.base 1081 * 1082 * Expect that only the intersection of both regions is removed from the 1083 * available memory pool. The regions counter and total size are updated. 
 */
static int memblock_remove_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_2M,
		.size = SZ_64M
	};
	struct region r2 = {
		.base = SZ_32M,
		.size = SZ_256M
	};

	PREFIX_PUSH();

	/* Only the head of r1, up to where the removed range starts, survives. */
	total_size = r2.base - r1.base;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that is within the range of
 * the already existing entry r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *                  +----+
 *                  | r2 |
 *                  +----+
 *  | +-------------+....+---------------+ |
 *  | |     rgn1    | r1 |     rgn2      | |
 *  +-+-------------+----+---------------+-+
 *    ^
 *    |
 *    r1.base
 *
 * Expect that the region is split into two - one that ends at r2.base and
 * another that starts at r2.base + r2.size, with appropriate sizes. The
 * region counter and total size are updated.
 */
static int memblock_remove_within_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t r1_size, r2_size, total_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = SZ_1M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	/* r1 splits into a piece before r2 and a piece after r2. */
	r1_size = r2.base - r1.base;
	r2_size = (r1.base + r1.size) - (r2.base + r2.size);
	total_size = r1_size + r2_size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1_size);

	ASSERT_EQ(rgn2->base, r2.base + r2.size);
	ASSERT_EQ(rgn2->size, r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to remove a region r1 from the array of
 * available memory regions when r1 is the only available region.
 * Expect to add a memory block r1 and then remove r1 so that a dummy
 * region is added. The region counter stays the same, and the total size
 * is updated.
 */
static int memblock_remove_only_region_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_4K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r1.base, r1.size);

	/* Removing the last region leaves a zeroed dummy entry behind. */
	ASSERT_EQ(rgn->base, 0);
	ASSERT_EQ(rgn->size, 0);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, 0);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to remove a region r2 from the array of available
 * memory regions when r2 extends past PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |   r2   |
 *                               +--------+
 *  |                        +---+....+
 *  |                        |rgn|    |
 *  +------------------------+---+----+
 *
 * Expect that only the portion between PHYS_ADDR_MAX and r2.base is removed.
 * Expect the total size of available memory to be updated and the counter to
 * not be updated.
 */
static int memblock_remove_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = PHYS_ADDR_MAX - SZ_2M,
		.size = SZ_2M
	};

	struct region r2 = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	/* r2 is clamped at PHYS_ADDR_MAX, so only its first 1M is removed. */
	total_size = r1.size - (PHYS_ADDR_MAX - r2.base);

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r3 that overlaps with two existing
 * regions r1 and r2:
 *
 *            +----------------+
 *            |       r3       |
 *            +----------------+
 *  |    +----+.....   ........+--------+
 *  |    |    |r1  :   :       |r2      |  |
 *  +----+----+----+---+-------+--------+--+
 *
 * Expect that only the intersections of r1 with r3 and r2 with r3 are removed
 * from the available memory pool. Expect the total size of available memory to
 * be updated and the counter to not be updated.
 */
static int memblock_remove_overlap_two_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = SZ_16M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_64M
	};
	struct region r3 = {
		.base = SZ_32M,
		.size = SZ_64M
	};

	PREFIX_PUSH();

	r2_end = r2.base + r2.size;
	r3_end = r3.base + r3.size;
	/* r1 keeps its head below r3.base; r2 keeps its tail above r3's end. */
	new_r1_size = r3.base - r1.base;
	new_r2_size = r2_end - r3_end;
	total_size = new_r1_size + new_r2_size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_remove(r3.base, r3.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, new_r1_size);

	ASSERT_EQ(rgn2->base, r3_end);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/* Run all memblock_remove() tests under a common test prefix. */
static int memblock_remove_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_REMOVE);
	test_print("Running %s tests...\n", FUNC_REMOVE);

	memblock_remove_simple_check();
	memblock_remove_absent_check();
	memblock_remove_overlap_top_check();
	memblock_remove_overlap_bottom_check();
	memblock_remove_within_check();
	memblock_remove_only_region_check();
	memblock_remove_near_max_check();
	memblock_remove_overlap_two_check();

	prefix_pop();

	return 0;
}

/*
 * A simple test that tries to free a memory block r1 that was marked
 * earlier as reserved. By "freeing" a region we mean overwriting it with
 * the next entry r2 in memblock.reserved:
 *
 *  |                ......        +----+  |
 *  |                : r1 :        | r2 |  |
 *  +--------------+----+-----------+----+-+
 *                 ^
 *                 |
 *                 rgn.base
 *
 * Expect to reserve two memory regions and then erase r1 region with the
 * value of r2. The region counter and total size are updated.
 */
static int memblock_free_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_4M,
		.size = SZ_1M
	};
	struct region r2 = {
		.base = SZ_8M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_free((void *)r1.base, r1.size);

	/* r1's slot now holds what used to be r2. */
	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, r2.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a region r2 that was not marked as reserved
 * (i.e. has no corresponding entry in memblock.reserved):
 *
 *                     +----------------+
 *                     |       r2       |
 *                     +----------------+
 *  |  +----+                              |
 *  |  | r1 |                              |
 *  +--+----+------------------------------+
 *     ^
 *     |
 *     rgn.base
 *
 * The array, regions counter and total size are not modified.
1412 */ 1413 static int memblock_free_absent_check(void) 1414 { 1415 struct memblock_region *rgn; 1416 1417 rgn = &memblock.reserved.regions[0]; 1418 1419 struct region r1 = { 1420 .base = SZ_2M, 1421 .size = SZ_8K 1422 }; 1423 struct region r2 = { 1424 .base = SZ_16M, 1425 .size = SZ_128M 1426 }; 1427 1428 PREFIX_PUSH(); 1429 1430 reset_memblock_regions(); 1431 memblock_reserve(r1.base, r1.size); 1432 memblock_free((void *)r2.base, r2.size); 1433 1434 ASSERT_EQ(rgn->base, r1.base); 1435 ASSERT_EQ(rgn->size, r1.size); 1436 1437 ASSERT_EQ(memblock.reserved.cnt, 1); 1438 ASSERT_EQ(memblock.reserved.total_size, r1.size); 1439 1440 test_pass_pop(); 1441 1442 return 0; 1443 } 1444 1445 /* 1446 * A test that tries to free a region r2 that overlaps with the beginning 1447 * of the already existing entry r1 (that is r1.base < r2.base + r2.size): 1448 * 1449 * +----+ 1450 * | r2 | 1451 * +----+ 1452 * | ...+--------------+ | 1453 * | : | r1 | | 1454 * +----+--+--------------+---------------+ 1455 * ^ ^ 1456 * | | 1457 * | rgn.base 1458 * | 1459 * r1.base 1460 * 1461 * Expect that only the intersection of both regions is freed. The 1462 * regions counter and total size are updated. 
1463 */ 1464 static int memblock_free_overlap_top_check(void) 1465 { 1466 struct memblock_region *rgn; 1467 phys_addr_t total_size; 1468 1469 rgn = &memblock.reserved.regions[0]; 1470 1471 struct region r1 = { 1472 .base = SZ_8M, 1473 .size = SZ_32M 1474 }; 1475 struct region r2 = { 1476 .base = SZ_1M, 1477 .size = SZ_8M 1478 }; 1479 1480 PREFIX_PUSH(); 1481 1482 total_size = (r1.size + r1.base) - (r2.base + r2.size); 1483 1484 reset_memblock_regions(); 1485 memblock_reserve(r1.base, r1.size); 1486 memblock_free((void *)r2.base, r2.size); 1487 1488 ASSERT_EQ(rgn->base, r2.base + r2.size); 1489 ASSERT_EQ(rgn->size, total_size); 1490 1491 ASSERT_EQ(memblock.reserved.cnt, 1); 1492 ASSERT_EQ(memblock.reserved.total_size, total_size); 1493 1494 test_pass_pop(); 1495 1496 return 0; 1497 } 1498 1499 /* 1500 * A test that tries to free a region r2 that overlaps with the end of 1501 * the already existing entry r1 (that is r2.base < r1.base + r1.size): 1502 * 1503 * +----------------+ 1504 * | r2 | 1505 * +----------------+ 1506 * | +-----------+..... | 1507 * | | r1 | : | 1508 * +----+-----------+----+----------------+ 1509 * 1510 * Expect that only the intersection of both regions is freed. The 1511 * regions counter and total size are updated. 
1512 */ 1513 static int memblock_free_overlap_bottom_check(void) 1514 { 1515 struct memblock_region *rgn; 1516 phys_addr_t total_size; 1517 1518 rgn = &memblock.reserved.regions[0]; 1519 1520 struct region r1 = { 1521 .base = SZ_8M, 1522 .size = SZ_32M 1523 }; 1524 struct region r2 = { 1525 .base = SZ_32M, 1526 .size = SZ_32M 1527 }; 1528 1529 PREFIX_PUSH(); 1530 1531 total_size = r2.base - r1.base; 1532 1533 reset_memblock_regions(); 1534 memblock_reserve(r1.base, r1.size); 1535 memblock_free((void *)r2.base, r2.size); 1536 1537 ASSERT_EQ(rgn->base, r1.base); 1538 ASSERT_EQ(rgn->size, total_size); 1539 1540 ASSERT_EQ(memblock.reserved.cnt, 1); 1541 ASSERT_EQ(memblock.reserved.total_size, total_size); 1542 1543 test_pass_pop(); 1544 1545 return 0; 1546 } 1547 1548 /* 1549 * A test that tries to free a region r2 that is within the range of the 1550 * already existing entry r1 (that is 1551 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)): 1552 * 1553 * +----+ 1554 * | r2 | 1555 * +----+ 1556 * | +------------+....+---------------+ 1557 * | | rgn1 | r1 | rgn2 | 1558 * +----+------------+----+---------------+ 1559 * ^ 1560 * | 1561 * r1.base 1562 * 1563 * Expect that the region is split into two - one that ends at r2.base and 1564 * another that starts at r2.base + r2.size, with appropriate sizes. The 1565 * region counter and total size fields are updated. 
1566 */ 1567 static int memblock_free_within_check(void) 1568 { 1569 struct memblock_region *rgn1, *rgn2; 1570 phys_addr_t r1_size, r2_size, total_size; 1571 1572 rgn1 = &memblock.reserved.regions[0]; 1573 rgn2 = &memblock.reserved.regions[1]; 1574 1575 struct region r1 = { 1576 .base = SZ_1M, 1577 .size = SZ_8M 1578 }; 1579 struct region r2 = { 1580 .base = SZ_4M, 1581 .size = SZ_1M 1582 }; 1583 1584 PREFIX_PUSH(); 1585 1586 r1_size = r2.base - r1.base; 1587 r2_size = (r1.base + r1.size) - (r2.base + r2.size); 1588 total_size = r1_size + r2_size; 1589 1590 reset_memblock_regions(); 1591 memblock_reserve(r1.base, r1.size); 1592 memblock_free((void *)r2.base, r2.size); 1593 1594 ASSERT_EQ(rgn1->base, r1.base); 1595 ASSERT_EQ(rgn1->size, r1_size); 1596 1597 ASSERT_EQ(rgn2->base, r2.base + r2.size); 1598 ASSERT_EQ(rgn2->size, r2_size); 1599 1600 ASSERT_EQ(memblock.reserved.cnt, 2); 1601 ASSERT_EQ(memblock.reserved.total_size, total_size); 1602 1603 test_pass_pop(); 1604 1605 return 0; 1606 } 1607 1608 /* 1609 * A simple test that tries to free a memory block r1 that was marked 1610 * earlier as reserved when r1 is the only available region. 1611 * Expect to reserve a memory block r1 and then free r1 so that r1 is 1612 * overwritten with a dummy region. The region counter stays the same, 1613 * and the total size is updated. 
1614 */ 1615 static int memblock_free_only_region_check(void) 1616 { 1617 struct memblock_region *rgn; 1618 1619 rgn = &memblock.reserved.regions[0]; 1620 1621 struct region r1 = { 1622 .base = SZ_2K, 1623 .size = SZ_4K 1624 }; 1625 1626 PREFIX_PUSH(); 1627 1628 reset_memblock_regions(); 1629 memblock_reserve(r1.base, r1.size); 1630 memblock_free((void *)r1.base, r1.size); 1631 1632 ASSERT_EQ(rgn->base, 0); 1633 ASSERT_EQ(rgn->size, 0); 1634 1635 ASSERT_EQ(memblock.reserved.cnt, 1); 1636 ASSERT_EQ(memblock.reserved.total_size, 0); 1637 1638 test_pass_pop(); 1639 1640 return 0; 1641 } 1642 1643 /* 1644 * A simple test that tries free a region r2 when r2 extends past PHYS_ADDR_MAX: 1645 * 1646 * +--------+ 1647 * | r2 | 1648 * +--------+ 1649 * | +---+....+ 1650 * | |rgn| | 1651 * +------------------------+---+----+ 1652 * 1653 * Expect that only the portion between PHYS_ADDR_MAX and r2.base is freed. 1654 * Expect the total size of reserved memory to be updated and the counter to 1655 * not be updated. 
1656 */ 1657 static int memblock_free_near_max_check(void) 1658 { 1659 struct memblock_region *rgn; 1660 phys_addr_t total_size; 1661 1662 rgn = &memblock.reserved.regions[0]; 1663 1664 struct region r1 = { 1665 .base = PHYS_ADDR_MAX - SZ_2M, 1666 .size = SZ_2M 1667 }; 1668 1669 struct region r2 = { 1670 .base = PHYS_ADDR_MAX - SZ_1M, 1671 .size = SZ_2M 1672 }; 1673 1674 PREFIX_PUSH(); 1675 1676 total_size = r1.size - (PHYS_ADDR_MAX - r2.base); 1677 1678 reset_memblock_regions(); 1679 memblock_reserve(r1.base, r1.size); 1680 memblock_free((void *)r2.base, r2.size); 1681 1682 ASSERT_EQ(rgn->base, r1.base); 1683 ASSERT_EQ(rgn->size, total_size); 1684 1685 ASSERT_EQ(memblock.reserved.cnt, 1); 1686 ASSERT_EQ(memblock.reserved.total_size, total_size); 1687 1688 test_pass_pop(); 1689 1690 return 0; 1691 } 1692 1693 /* 1694 * A test that tries to free a reserved region r3 that overlaps with two 1695 * existing reserved regions r1 and r2: 1696 * 1697 * +----------------+ 1698 * | r3 | 1699 * +----------------+ 1700 * | +----+..... ........+--------+ 1701 * | | |r1 : : |r2 | | 1702 * +----+----+----+---+-------+--------+-----+ 1703 * 1704 * Expect that only the intersections of r1 with r3 and r2 with r3 are freed 1705 * from the collection of reserved memory. Expect the total size of reserved 1706 * memory to be updated and the counter to not be updated. 
 */
static int memblock_free_overlap_two_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	struct region r1 = {
		.base = SZ_16M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_64M
	};
	struct region r3 = {
		.base = SZ_32M,
		.size = SZ_64M
	};

	PREFIX_PUSH();

	r2_end = r2.base + r2.size;
	r3_end = r3.base + r3.size;
	/* r1 keeps its head below r3.base; r2 keeps its tail above r3's end. */
	new_r1_size = r3.base - r1.base;
	new_r2_size = r2_end - r3_end;
	total_size = new_r1_size + new_r2_size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_free((void *)r3.base, r3.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, new_r1_size);

	ASSERT_EQ(rgn2->base, r3_end);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/* Run all memblock_free() tests under a common test prefix. */
static int memblock_free_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_FREE);
	test_print("Running %s tests...\n", FUNC_FREE);

	memblock_free_simple_check();
	memblock_free_absent_check();
	memblock_free_overlap_top_check();
	memblock_free_overlap_bottom_check();
	memblock_free_within_check();
	memblock_free_only_region_check();
	memblock_free_near_max_check();
	memblock_free_overlap_two_check();

	prefix_pop();

	return 0;
}

/* Check that memblock_set_bottom_up() updates memblock.bottom_up. */
static int memblock_set_bottom_up_check(void)
{
	prefix_push("memblock_set_bottom_up");

	memblock_set_bottom_up(false);
	ASSERT_EQ(memblock.bottom_up, false);
	memblock_set_bottom_up(true);
	ASSERT_EQ(memblock.bottom_up, true);

	reset_memblock_attributes();
	test_pass_pop();

	return 0;
}

/* Check that memblock_bottom_up() reports memblock.bottom_up faithfully. */
static int memblock_bottom_up_check(void)
{
	prefix_push("memblock_bottom_up");

	memblock_set_bottom_up(false);
	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
	ASSERT_EQ(memblock_bottom_up(), false);
	memblock_set_bottom_up(true);
	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
	ASSERT_EQ(memblock_bottom_up(), true);

	reset_memblock_attributes();
	test_pass_pop();

	return 0;
}

/* Run both bottom-up direction tests. */
static int memblock_bottom_up_checks(void)
{
	test_print("Running memblock_*bottom_up tests...\n");

	prefix_reset();
	memblock_set_bottom_up_check();
	prefix_reset();
	memblock_bottom_up_check();

	return 0;
}

/*
 * A test that tries to trim memory when both ends of the memory region are
 * aligned. Expect that the memory will not be trimmed. Expect the counter to
 * not be updated.
 */
static int memblock_trim_memory_aligned_check(void)
{
	struct memblock_region *rgn;
	const phys_addr_t alignment = SMP_CACHE_BYTES;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = alignment,
		.size = alignment * 4
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r.base, r.size);
	memblock_trim_memory(alignment);

	/* An already-aligned region must come through unchanged. */
	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);

	ASSERT_EQ(memblock.memory.cnt, 1);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to trim memory when there are two available regions, r1 and
 * r2. Region r1 is aligned on both ends and region r2 is unaligned on one end
 * and smaller than the alignment:
 *
 *                                     alignment
 *                                     |--------|
 *  |        +-----------------+        +------+   |
 *  |        |        r1       |        |  r2  |   |
 *  +--------+-----------------+--------+------+---+
 *           ^        ^        ^        ^     ^
 *           |________|________|________|     |
 *                    |                       Unaligned address
 *                    Aligned addresses
 *
 * Expect that r1 will not be trimmed and r2 will be removed. Expect the
 * counter to be updated.
 */
static int memblock_trim_memory_too_small_check(void)
{
	struct memblock_region *rgn;
	const phys_addr_t alignment = SMP_CACHE_BYTES;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = alignment,
		.size = alignment * 2
	};
	/* r2 is smaller than one alignment unit, so trimming drops it. */
	struct region r2 = {
		.base = alignment * 4,
		.size = alignment - SZ_2
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_trim_memory(alignment);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to trim memory when there are two available regions, r1 and
 * r2. Region r1 is aligned on both ends and region r2 is unaligned at the base
 * and aligned at the end:
 *
 *                                     Unaligned address
 *                                     |
 *                                     v
 *  |        +-----------------+          +---------------+   |
 *  |        |        r1       |          |      r2       |   |
 *  +--------+-----------------+----------+---------------+---+
 *           ^        ^        ^        ^        ^        ^
 *           |________|________|________|________|________|
 *                             |
 *                             Aligned addresses
 *
 * Expect that r1 will not be trimmed and r2 will be trimmed at the base.
 * Expect the counter to not be updated.
1922 */ 1923 static int memblock_trim_memory_unaligned_base_check(void) 1924 { 1925 struct memblock_region *rgn1, *rgn2; 1926 const phys_addr_t alignment = SMP_CACHE_BYTES; 1927 phys_addr_t offset = SZ_2; 1928 phys_addr_t new_r2_base, new_r2_size; 1929 1930 rgn1 = &memblock.memory.regions[0]; 1931 rgn2 = &memblock.memory.regions[1]; 1932 1933 struct region r1 = { 1934 .base = alignment, 1935 .size = alignment * 2 1936 }; 1937 struct region r2 = { 1938 .base = alignment * 4 + offset, 1939 .size = alignment * 2 - offset 1940 }; 1941 1942 PREFIX_PUSH(); 1943 1944 new_r2_base = r2.base + (alignment - offset); 1945 new_r2_size = r2.size - (alignment - offset); 1946 1947 reset_memblock_regions(); 1948 memblock_add(r1.base, r1.size); 1949 memblock_add(r2.base, r2.size); 1950 memblock_trim_memory(alignment); 1951 1952 ASSERT_EQ(rgn1->base, r1.base); 1953 ASSERT_EQ(rgn1->size, r1.size); 1954 1955 ASSERT_EQ(rgn2->base, new_r2_base); 1956 ASSERT_EQ(rgn2->size, new_r2_size); 1957 1958 ASSERT_EQ(memblock.memory.cnt, 2); 1959 1960 test_pass_pop(); 1961 1962 return 0; 1963 } 1964 1965 /* 1966 * A test that tries to trim memory when there are two available regions, r1 and 1967 * r2. Region r1 is aligned on both ends and region r2 is aligned at the base 1968 * and unaligned at the end: 1969 * 1970 * Unaligned address 1971 * | 1972 * v 1973 * | +-----------------+ +---------------+ | 1974 * | | r1 | | r2 | | 1975 * +--------+-----------------+--------+---------------+---+ 1976 * ^ ^ ^ ^ ^ ^ 1977 * |________|________|________|________|________| 1978 * | 1979 * Aligned addresses 1980 * 1981 * Expect that r1 will not be trimmed and r2 will be trimmed at the end. 1982 * Expect the counter to not be updated. 
 */
static int memblock_trim_memory_unaligned_end_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	const phys_addr_t alignment = SMP_CACHE_BYTES;
	phys_addr_t offset = SZ_2;
	phys_addr_t new_r2_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = alignment,
		.size = alignment * 2
	};
	struct region r2 = {
		.base = alignment * 4,
		.size = alignment * 2 - offset
	};

	PREFIX_PUSH();

	/* Rounding r2's end down costs it (alignment - offset) bytes. */
	new_r2_size = r2.size - (alignment - offset);

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_trim_memory(alignment);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, r2.base);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);

	test_pass_pop();

	return 0;
}

/* Run all memblock_trim_memory() tests under a common test prefix. */
static int memblock_trim_memory_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_TRIM);
	test_print("Running %s tests...\n", FUNC_TRIM);

	memblock_trim_memory_aligned_check();
	memblock_trim_memory_too_small_check();
	memblock_trim_memory_unaligned_base_check();
	memblock_trim_memory_unaligned_end_check();

	prefix_pop();

	return 0;
}

/* Entry point: run every basic memblock API test group in order. */
int memblock_basic_checks(void)
{
	memblock_initialization_check();
	memblock_add_checks();
	memblock_reserve_checks();
	memblock_remove_checks();
	memblock_free_checks();
	memblock_bottom_up_checks();
	memblock_trim_memory_checks();

	return 0;
}