// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_api.h"

/*
 * A simple test that tries to allocate a small memory region.
 * Expect to allocate an aligned region near the end of the available memory.
 */
static int alloc_top_down_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_2;
	phys_addr_t expected_start;

	PREFIX_PUSH();
	setup_memblock();

	expected_start = memblock_end_of_DRAM() - SMP_CACHE_BYTES;

	allocated_ptr = memblock_alloc(size, SMP_CACHE_BYTES);

	ASSERT_NE(allocated_ptr, NULL);
	/* memblock_alloc() zero-initializes the allocated region */
	ASSERT_MEM_EQ(allocated_ptr, 0, size);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, expected_start);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory next to a reserved region that starts
 * at a misaligned address. Expect to create two separate entries, with the
 * new entry aligned to the provided alignment:
 *
 *                              +
 *                 |            +--------+         +--------|
 *                 |            |  rgn2  |         |  rgn1  |
 *                 +------------+--------+---------+--------+
 *                              ^
 *                              |
 *                              Aligned address boundary
 *
 * The allocation direction is top-down and region arrays are sorted from
 * lower to higher addresses, so the new region will be the first entry in
 * the memblock.reserved array. The previously reserved region does not get
 * modified. The region counter and total size get updated.
 */
static int alloc_top_down_disjoint_check(void)
{
	/* After allocation, this will point to the "old" region */
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	struct region r1;
	void *allocated_ptr = NULL;
	phys_addr_t r2_size = SZ_16;
	/* Use custom alignment */
	phys_addr_t alignment = SMP_CACHE_BYTES * 2;
	phys_addr_t total_size;
	phys_addr_t expected_start;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SZ_2;
	r1.size = SZ_2;

	total_size = r1.size + r2_size;
	expected_start = memblock_end_of_DRAM() - alignment;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, alignment);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(rgn2->size, r2_size);
	ASSERT_EQ(rgn2->base, expected_start);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
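
/*
 * A worked example of the expected placement above, with purely
 * illustrative numbers: assume DRAM ends at 0x20000 and SMP_CACHE_BYTES
 * is 64. Then:
 *
 *	r1             = [0x1fffe, 0x20000)	(misaligned)
 *	alignment      = 64 * 2         = 128
 *	expected_start = 0x20000 - 128  = 0x1ff80
 *
 * 0x1ff80 is the highest 128-byte aligned base from which the 16-byte
 * request still fits below r1, so the new region cannot merge with r1 and
 * a second, disjoint entry is created.
 */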

/*
 * A test that tries to allocate memory when there is enough space at the end
 * of the previously reserved block (i.e. first fit):
 *
 *                 |                       +--------+--------------|
 *                 |                       |   r1   |      r2      |
 *                 +-----------------------+--------+--------------+
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_top_down_before_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	/*
	 * The first region ends at an aligned address in order to test
	 * region merging
	 */
	phys_addr_t r1_size = SMP_CACHE_BYTES;
	phys_addr_t r2_size = SZ_512;
	phys_addr_t total_size = r1_size + r2_size;

	PREFIX_PUSH();
	setup_memblock();

	memblock_reserve(memblock_end_of_DRAM() - total_size, r1_size);

	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory when there is not enough space at the
 * end of the previously reserved block (i.e. second fit):
 *
 *                 |            +-----------+------+     |
 *                 |            |    r2     |  r1  |     |
 *                 +------------+-----------+------+-----+
 *
 * Expect a merge of both regions. Both the base address and the size of the
 * region get updated.
 */
static int alloc_top_down_after_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1;
	void *allocated_ptr = NULL;
	phys_addr_t r2_size = SZ_512;
	phys_addr_t total_size;

	PREFIX_PUSH();
	setup_memblock();

	/*
	 * The first region starts at an aligned address in order to test
	 * region merging
	 */
	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
	r1.size = SZ_8;

	total_size = r1.size + r2_size;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, r1.base - r2_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
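
/*
 * Again with hypothetical numbers for illustration: if DRAM ends at 0x20000
 * and SMP_CACHE_BYTES is 64, alloc_top_down_after_check() reserves
 * r1 = [0x1ffc0, 0x1ffc8). The 56 bytes above r1 cannot hold the 512-byte
 * request, so the allocator falls back to the space below r1:
 *
 *	rgn->base = r1.base - r2_size = 0x1ffc0 - 0x200 = 0x1fdc0
 *
 * and the two adjacent regions are merged into a single 520-byte entry.
 */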

/*
 * A test that tries to allocate memory when there are two reserved regions
 * with a gap too small to fit the new region:
 *
 *                 |        +--------+----------+    +------|
 *                 |        |   r3   |    r2    |    |  r1  |
 *                 +--------+--------+----------+----+------+
 *
 * Expect to allocate a region before the one that starts at the lower
 * address, and merge them into one. The region counter and total size fields
 * get updated.
 */
static int alloc_top_down_second_fit_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1, r2;
	void *allocated_ptr = NULL;
	phys_addr_t r3_size = SZ_1K;
	phys_addr_t total_size;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SZ_512;
	r1.size = SZ_512;

	r2.base = r1.base - SZ_512;
	r2.size = SZ_256;

	total_size = r1.size + r2.size + r3_size;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);

	ASSERT_EQ(rgn->size, r2.size + r3_size);
	ASSERT_EQ(rgn->base, r2.base - r3_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory when there are two reserved regions
 * with a gap big enough to accommodate the new region:
 *
 *                 |    +--------+--------+--------+     |
 *                 |    |   r2   |   r3   |   r1   |     |
 *                 +----+--------+--------+--------+-----+
 *
 * Expect to merge all of them, creating one big entry in the
 * memblock.reserved array. The region counter and total size fields get
 * updated.
 */
static int alloc_in_between_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1, r2;
	void *allocated_ptr = NULL;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t r3_size = SZ_64;
	/*
	 * Calculate the region sizes so that there is just enough space for
	 * the new entry
	 */
	phys_addr_t rgn_size = (MEM_SIZE - (2 * gap_size + r3_size)) / 2;
	phys_addr_t total_size;

	PREFIX_PUSH();
	setup_memblock();

	r1.size = rgn_size;
	r1.base = memblock_end_of_DRAM() - (gap_size + rgn_size);

	r2.size = rgn_size;
	r2.base = memblock_start_of_DRAM() + gap_size;

	total_size = r1.size + r2.size + r3_size;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, r1.base - r2.size - r3_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
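
/*
 * The final base asserted above can be derived by hand. With
 * start = memblock_start_of_DRAM() and the definitions used in
 * alloc_in_between_generic_check():
 *
 *	r1.base  = start + MEM_SIZE - (gap_size + rgn_size)
 *	r2.base  = start + gap_size
 *	rgn_size = (MEM_SIZE - (2 * gap_size + r3_size)) / 2
 *
 *	r1.base - r2.size - r3_size
 *		= start + MEM_SIZE - gap_size - 2 * rgn_size - r3_size
 *		= start + gap_size
 *		= r2.base
 *
 * i.e. the new region exactly fills the gap, and the merged entry starts at
 * r2.base, which is what the ASSERT_EQ(rgn->base, ...) check verifies.
 */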

/*
 * A test that tries to allocate memory when the memory is filled with
 * reserved regions, with the gaps between them too small to fit the new
 * region:
 *
 *                              +-------+
 *                              |  new  |
 *                              +--+----+
 *                 |    +-----+    +-----+    +-----+    |
 *                 |    | res |    | res |    | res |    |
 *                 +----+-----+----+-----+----+-----+----+
 *
 * Expect no allocation to happen.
 */
static int alloc_small_gaps_generic_check(void)
{
	void *allocated_ptr = NULL;
	phys_addr_t region_size = SZ_1K;
	phys_addr_t gap_size = SZ_256;
	phys_addr_t region_end;

	PREFIX_PUSH();
	setup_memblock();

	/* Tile all of DRAM with 1K reserved regions and 256-byte gaps */
	region_end = memblock_start_of_DRAM();

	while (region_end < memblock_end_of_DRAM()) {
		memblock_reserve(region_end + gap_size, region_size);
		region_end += gap_size + region_size;
	}

	allocated_ptr = memblock_alloc(region_size, SMP_CACHE_BYTES);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory when all memory is reserved.
 * Expect no allocation to happen.
 */
static int alloc_all_reserved_generic_check(void)
{
	void *allocated_ptr = NULL;

	PREFIX_PUSH();
	setup_memblock();

	/* Simulate full memory */
	memblock_reserve(memblock_start_of_DRAM(), MEM_SIZE);

	allocated_ptr = memblock_alloc(SZ_256, SMP_CACHE_BYTES);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory when the memory is almost full,
 * with not enough space left for the new region:
 *
 *                                            +-------+
 *                                            |  new  |
 *                                            +-------+
 *                 |-----------------------------+     |
 *                 |            reserved         |     |
 *                 +-----------------------------+-----+
 *
 * Expect no allocation to happen.
 */
static int alloc_no_space_generic_check(void)
{
	void *allocated_ptr = NULL;
	phys_addr_t available_size = SZ_256;
	phys_addr_t reserved_size = MEM_SIZE - available_size;

	PREFIX_PUSH();
	setup_memblock();

	/* Simulate almost-full memory */
	memblock_reserve(memblock_start_of_DRAM(), reserved_size);

	allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory when the memory is almost full,
 * but there is just enough space left:
 *
 *                 |---------------------------+---------|
 *                 |          reserved         |   new   |
 *                 +---------------------------+---------+
 *
 * Expect the allocation to succeed and the new region to merge with the
 * reserved one. The region size and total size fields get updated.
 */
static int alloc_limited_space_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t available_size = SZ_256;
	phys_addr_t reserved_size = MEM_SIZE - available_size;

	PREFIX_PUSH();
	setup_memblock();

	/* Simulate almost-full memory */
	memblock_reserve(memblock_start_of_DRAM(), reserved_size);

	allocated_ptr = memblock_alloc(available_size, SMP_CACHE_BYTES);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, available_size);

	ASSERT_EQ(rgn->size, MEM_SIZE);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, MEM_SIZE);

	test_pass_pop();

	return 0;
}
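
/*
 * A short aside on why alloc_limited_space_generic_check() holds in both
 * allocation directions: the only candidate range is the tail of DRAM,
 *
 *	free = [start + (MEM_SIZE - SZ_256), start + MEM_SIZE)
 *
 * and, assuming SMP_CACHE_BYTES divides both SZ_256 and MEM_SIZE (true for
 * the usual value of 64), the base of that range is already cache-aligned.
 * The 256-byte request therefore fills the range exactly whether the
 * allocator scans top-down or bottom-up, and in both cases the result merges
 * with the reserved region into a single MEM_SIZE-sized entry.
 */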

/*
 * A test that tries to allocate memory when there is no available memory
 * registered (i.e. memblock.memory has only a dummy entry).
 * Expect no allocation to happen.
 */
static int alloc_no_memory_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;

	PREFIX_PUSH();
	reset_memblock_regions();

	allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);

	ASSERT_EQ(allocated_ptr, NULL);
	ASSERT_EQ(rgn->size, 0);
	ASSERT_EQ(rgn->base, 0);
	ASSERT_EQ(memblock.reserved.total_size, 0);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a small memory region.
 * Expect to allocate an aligned region at the beginning of the available
 * memory.
 */
static int alloc_bottom_up_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;

	PREFIX_PUSH();
	setup_memblock();

	allocated_ptr = memblock_alloc(SZ_2, SMP_CACHE_BYTES);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, SZ_2);

	ASSERT_EQ(rgn->size, SZ_2);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, SZ_2);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory next to a reserved region that starts
 * at a misaligned address. Expect to create two separate entries, with the
 * new entry aligned to the provided alignment:
 *
 *                                     +
 *                 |    +----------+   +----------+     |
 *                 |    |   rgn1   |   |   rgn2   |     |
 *                 +----+----------+---+----------+-----+
 *                                     ^
 *                                     |
 *                                     Aligned address boundary
 *
 * The allocation direction is bottom-up, so the new region will be the
 * second entry in the memblock.reserved array. The previously reserved
 * region does not get modified. The region counter and total size get
 * updated.
 */
static int alloc_bottom_up_disjoint_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[0];
	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
	struct region r1;
	void *allocated_ptr = NULL;
	phys_addr_t r2_size = SZ_16;
	/* Use custom alignment */
	phys_addr_t alignment = SMP_CACHE_BYTES * 2;
	phys_addr_t total_size;
	phys_addr_t expected_start;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_start_of_DRAM() + SZ_2;
	r1.size = SZ_2;

	total_size = r1.size + r2_size;
	expected_start = memblock_start_of_DRAM() + alignment;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, alignment);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(rgn2->size, r2_size);
	ASSERT_EQ(rgn2->base, expected_start);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
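
/*
 * The bottom-up placement mirrors the top-down case; again with purely
 * illustrative numbers, if DRAM starts at 0x0 and SMP_CACHE_BYTES is 64:
 *
 *	r1             = [0x2, 0x4)	(misaligned)
 *	alignment      = 128
 *	expected_start = 0x0 + 128 = 0x80
 *
 * 0x80 is the lowest 128-byte aligned address above r1, so the new region
 * cannot merge with r1 and ends up as a second, disjoint entry.
 */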

/*
 * A test that tries to allocate memory when there is enough space at
 * the beginning of the previously reserved block (i.e. first fit):
 *
 *                 |------------------+--------+          |
 *                 |        r1        |   r2   |          |
 *                 +------------------+--------+----------+
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_bottom_up_before_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t r1_size = SZ_512;
	phys_addr_t r2_size = SZ_128;
	phys_addr_t total_size = r1_size + r2_size;

	PREFIX_PUSH();
	setup_memblock();

	memblock_reserve(memblock_start_of_DRAM() + r1_size, r2_size);

	allocated_ptr = memblock_alloc(r1_size, SMP_CACHE_BYTES);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, r1_size);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory when there is not enough space at
 * the beginning of the previously reserved block (i.e. second fit):
 *
 *                 |    +--------+--------------+         |
 *                 |    |   r1   |      r2      |         |
 *                 +----+--------+--------------+---------+
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_bottom_up_after_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1;
	void *allocated_ptr = NULL;
	phys_addr_t r2_size = SZ_512;
	phys_addr_t total_size;

	PREFIX_PUSH();
	setup_memblock();

	/*
	 * The first region starts at an aligned address in order to test
	 * region merging
	 */
	r1.base = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	r1.size = SZ_64;

	total_size = r1.size + r2_size;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, r1.base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
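
/*
 * In alloc_bottom_up_after_check() the gap below r1 is SMP_CACHE_BYTES wide
 * (typically 64 bytes), which cannot hold the 512-byte request, so a
 * bottom-up scan skips it and places r2 directly after r1:
 *
 *	rgn->base = r1.base
 *	rgn->size = r1.size + r2_size = 64 + 512 = 576
 *
 * Since the merged region only grows upwards, the base stays unchanged,
 * matching the "only the region size gets updated" expectation above.
 */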

/*
 * A test that tries to allocate memory when there are two reserved regions,
 * the first one starting at the beginning of the available memory, with a
 * gap too small to fit the new region:
 *
 *                 |------------+     +--------+--------+  |
 *                 |     r1     |     |   r2   |   r3   |  |
 *                 +------------+-----+--------+--------+--+
 *
 * Expect to allocate after the second region, which starts at the higher
 * address, and merge them into one. The region counter and total size fields
 * get updated.
 */
static int alloc_bottom_up_second_fit_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[1];
	struct region r1, r2;
	void *allocated_ptr = NULL;
	phys_addr_t r3_size = SZ_1K;
	phys_addr_t total_size;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_start_of_DRAM();
	r1.size = SZ_512;

	r2.base = r1.base + r1.size + SZ_512;
	r2.size = SZ_256;

	total_size = r1.size + r2.size + r3_size;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);

	ASSERT_EQ(rgn->size, r2.size + r3_size);
	ASSERT_EQ(rgn->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/* Test case wrappers */
static int alloc_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_simple_check();

	return 0;
}

static int alloc_disjoint_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_top_down_disjoint_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_disjoint_check();

	return 0;
}

static int alloc_before_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_top_down_before_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_before_check();

	return 0;
}

static int alloc_after_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_top_down_after_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_after_check();

	return 0;
}

static int alloc_in_between_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_in_between_generic_check);
	run_bottom_up(alloc_in_between_generic_check);

	return 0;
}

static int alloc_second_fit_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_top_down_second_fit_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_second_fit_check();

	return 0;
}

static int alloc_small_gaps_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_small_gaps_generic_check);
	run_bottom_up(alloc_small_gaps_generic_check);

	return 0;
}

static int alloc_all_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_all_reserved_generic_check);
	run_bottom_up(alloc_all_reserved_generic_check);

	return 0;
}

static int alloc_no_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_no_space_generic_check);
	run_bottom_up(alloc_no_space_generic_check);

	return 0;
}

static int alloc_limited_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_limited_space_generic_check);
	run_bottom_up(alloc_limited_space_generic_check);

	return 0;
}
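
/*
 * The *_generic_check() tests above are direction-agnostic, so their
 * wrappers drive them through run_top_down()/run_bottom_up() rather than
 * toggling the direction by hand. Those helpers are assumed to behave
 * roughly like the open-coded wrappers above, i.e. equivalent to:
 *
 *	memblock_set_bottom_up(false);
 *	alloc_no_memory_generic_check();	// top-down pass
 *	memblock_set_bottom_up(true);
 *	alloc_no_memory_generic_check();	// bottom-up pass
 *
 * plus any per-direction test-prefix bookkeeping the suite performs.
 */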

static int alloc_no_memory_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_no_memory_generic_check);
	run_bottom_up(alloc_no_memory_generic_check);

	return 0;
}

int memblock_alloc_checks(void)
{
	const char *func_testing = "memblock_alloc";

	prefix_reset();
	prefix_push(func_testing);
	test_print("Running %s tests...\n", func_testing);

	reset_memblock_attributes();
	dummy_physical_memory_init();

	alloc_simple_check();
	alloc_disjoint_check();
	alloc_before_check();
	alloc_after_check();
	alloc_second_fit_check();
	alloc_small_gaps_check();
	alloc_in_between_check();
	alloc_all_reserved_check();
	alloc_no_space_check();
	alloc_limited_space_check();
	alloc_no_memory_check();

	dummy_physical_memory_cleanup();

	prefix_pop();

	return 0;
}