/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_malloc.h>

#include "malloc_elem.h"

#include "test.h"

/*
 * Memzone
 * =======
 *
 * - Search for three reserved zones or reserve them if they do not exist:
 *
 *   - One is on any socket id.
 *   - The second is on socket 0.
 *   - The last one is on socket 1 (if socket 1 exists).
 *
 * - Check that the zones exist.
 *
 * - Check that the zones are cache-aligned.
 *
 * - Check that zones do not overlap.
 *
 * - Check that the zones are on the correct socket id.
 *
 * - Check that a lookup of the first zone returns the same pointer.
 *
 * - Check that it is not possible to create another zone with the
 *   same name as an existing zone.
 *
 * - Check flags for specific huge page size reservation.
 */

#define TEST_MEMZONE_NAME(suffix) "MZ_TEST_" suffix

/* Test if memory overlaps: return 1 if true, or 0 if false. */
static int
is_memory_overlap(rte_iova_t ptr1, size_t len1, rte_iova_t ptr2, size_t len2)
{
	if (ptr2 >= ptr1 && (ptr2 - ptr1) < len1)
		return 1;
	else if (ptr2 < ptr1 && (ptr1 - ptr2) < len2)
		return 1;
	return 0;
}

static int
test_memzone_invalid_alignment(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(TEST_MEMZONE_NAME("invalid_alignment"));
	if (mz != NULL) {
		printf("Zone with invalid alignment has been reserved\n");
		return -1;
	}

	mz = rte_memzone_reserve_aligned(TEST_MEMZONE_NAME("invalid_alignment"),
			100, SOCKET_ID_ANY, 0, 100);
	if (mz != NULL) {
		printf("Zone with invalid alignment has been reserved\n");
		return -1;
	}
	return 0;
}

static int
test_memzone_invalid_flags(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(TEST_MEMZONE_NAME("invalid_flags"));
	if (mz != NULL) {
		printf("Zone with invalid flags has been reserved\n");
		return -1;
	}

	mz = rte_memzone_reserve(TEST_MEMZONE_NAME("invalid_flags"),
			100, SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG << 1);
	if (mz != NULL) {
		printf("Zone with invalid flags has been reserved\n");
		return -1;
	}
	return 0;
}

static int
test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(
			TEST_MEMZONE_NAME("zone_size_bigger_than_the_maximum"));
	if (mz != NULL) {
		printf("zone_size_bigger_than_the_maximum has been reserved\n");
		return -1;
	}

	mz = rte_memzone_reserve(
			TEST_MEMZONE_NAME("zone_size_bigger_than_the_maximum"),
			(size_t)-1, SOCKET_ID_ANY, 0);
	if (mz != NULL) {
		printf("It should not be possible to reserve such a big memzone\n");
		return -1;
	}

	return 0;
}

struct walk_arg {
	int hugepage_2MB_avail;
	int hugepage_1GB_avail;
	int hugepage_16MB_avail;
	int hugepage_16GB_avail;
};

static int
find_available_pagesz(const struct rte_memseg_list *msl, void *arg)
{
	struct walk_arg *wa = arg;

	if (msl->external)
		return 0;

	if (msl->page_sz == RTE_PGSIZE_2M)
		wa->hugepage_2MB_avail = 1;
	if (msl->page_sz == RTE_PGSIZE_1G)
		wa->hugepage_1GB_avail = 1;
	if (msl->page_sz == RTE_PGSIZE_16M)
		wa->hugepage_16MB_avail = 1;
	if (msl->page_sz == RTE_PGSIZE_16G)
		wa->hugepage_16GB_avail = 1;

	return 0;
}

static int
test_memzone_reserve_flags(void)
{
	const struct rte_memzone *mz;
	struct walk_arg wa;
	int hugepage_2MB_avail, hugepage_1GB_avail;
	int hugepage_16MB_avail, hugepage_16GB_avail;
	const size_t size = 100;

	memset(&wa, 0, sizeof(wa));

	rte_memseg_list_walk(find_available_pagesz, &wa);

	hugepage_2MB_avail = wa.hugepage_2MB_avail;
	hugepage_1GB_avail = wa.hugepage_1GB_avail;
	hugepage_16MB_avail = wa.hugepage_16MB_avail;
	hugepage_16GB_avail = wa.hugepage_16GB_avail;

	/* Display the availability of 2MB, 1GB, 16MB and 16GB pages */
	if (hugepage_2MB_avail)
		printf("2MB Huge pages available\n");
	if (hugepage_1GB_avail)
		printf("1GB Huge pages available\n");
	if (hugepage_16MB_avail)
		printf("16MB Huge pages available\n");
	if (hugepage_16GB_avail)
		printf("16GB Huge pages available\n");
	/*
	 * If 2MB pages are available, check that a small memzone is correctly
	 * reserved from 2MB huge pages when requested by the RTE_MEMZONE_2MB
	 * flag. Also check that the RTE_MEMZONE_SIZE_HINT_ONLY flag only
	 * defaults to an available page size (i.e. 1GB) when 2MB pages are
	 * unavailable.
	 */
	if (hugepage_2MB_avail) {
		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_2M"),
				size, SOCKET_ID_ANY, RTE_MEMZONE_2MB);
		if (mz == NULL) {
			printf("MEMZONE FLAG 2MB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_2M) {
			printf("hugepage_sz not equal 2M\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
				size, SOCKET_ID_ANY,
				RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			printf("MEMZONE FLAG 2MB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_2M) {
			printf("hugepage_sz not equal 2M\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		/* Check that if 1GB huge pages are unavailable, the
		 * reservation fails unless the HINT flag is given.
		 */
		if (!hugepage_1GB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_1G_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL) {
				printf("MEMZONE FLAG 1GB & HINT\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_2M) {
				printf("hugepage_sz not equal 2M\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}

			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_1G"), size,
					SOCKET_ID_ANY, RTE_MEMZONE_1GB);
			if (mz != NULL) {
				printf("MEMZONE FLAG 1GB\n");
				return -1;
			}
		}
	}

	/* As with the 2MB tests above, but for 1GB huge page requests */
	if (hugepage_1GB_avail) {
		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_1G"),
				size, SOCKET_ID_ANY, RTE_MEMZONE_1GB);
		if (mz == NULL) {
			printf("MEMZONE FLAG 1GB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_1G) {
			printf("hugepage_sz not equal 1G\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_1G_HINT"),
				size, SOCKET_ID_ANY,
				RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			printf("MEMZONE FLAG 1GB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_1G) {
			printf("hugepage_sz not equal 1G\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		/* Check that if 2MB huge pages are unavailable, the
		 * reservation fails unless the HINT flag is given.
		 */
		if (!hugepage_2MB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL) {
				printf("MEMZONE FLAG 2MB & HINT\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_1G) {
				printf("hugepage_sz not equal 1G\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_2M"), size,
					SOCKET_ID_ANY, RTE_MEMZONE_2MB);
			if (mz != NULL) {
				printf("MEMZONE FLAG 2MB\n");
				return -1;
			}
		}

		if (hugepage_2MB_avail && hugepage_1GB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
			if (mz == NULL) {
				printf("BOTH SIZES SET\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_1G &&
					mz->hugepage_sz != RTE_PGSIZE_2M) {
				printf("Wrong size when both sizes set\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}
		}
	}
	/*
	 * This option is for IBM Power. If 16MB pages are available, check
	 * that a small memzone is correctly reserved from 16MB huge pages
	 * when requested by the RTE_MEMZONE_16MB flag. Also check that the
	 * RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an available
	 * page size (i.e. 16GB) when 16MB pages are unavailable.
	 */
	if (hugepage_16MB_avail) {
		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_16M"),
				size, SOCKET_ID_ANY, RTE_MEMZONE_16MB);
		if (mz == NULL) {
			printf("MEMZONE FLAG 16MB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_16M) {
			printf("hugepage_sz not equal 16M\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		mz = rte_memzone_reserve(
				TEST_MEMZONE_NAME("flag_zone_16M_HINT"), size,
				SOCKET_ID_ANY,
				RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			printf("MEMZONE FLAG 16MB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_16M) {
			printf("hugepage_sz not equal 16M\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		/* Check that if 16GB huge pages are unavailable, the
		 * reservation fails unless the HINT flag is given.
		 */
		if (!hugepage_16GB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_16G_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_16GB |
					RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL) {
				printf("MEMZONE FLAG 16GB & HINT\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_16M) {
				printf("hugepage_sz not equal 16M\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}

			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_16G"),
					size,
					SOCKET_ID_ANY, RTE_MEMZONE_16GB);
			if (mz != NULL) {
				printf("MEMZONE FLAG 16GB\n");
				return -1;
			}
		}
	}
	/* As with the 16MB tests above, but for 16GB huge page requests */
	if (hugepage_16GB_avail) {
		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_16G"),
				size, SOCKET_ID_ANY, RTE_MEMZONE_16GB);
		if (mz == NULL) {
			printf("MEMZONE FLAG 16GB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_16G) {
			printf("hugepage_sz not equal 16G\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		mz = rte_memzone_reserve(
				TEST_MEMZONE_NAME("flag_zone_16G_HINT"), size,
				SOCKET_ID_ANY,
				RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			printf("MEMZONE FLAG 16GB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_16G) {
			printf("hugepage_sz not equal 16G\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		/* Check that if 16MB huge pages are unavailable, the
		 * reservation fails unless the HINT flag is given.
		 */
		if (!hugepage_16MB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_16M_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_16MB |
					RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL) {
				printf("MEMZONE FLAG 16MB & HINT\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_16G) {
				printf("hugepage_sz not equal 16G\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_16M"),
					size, SOCKET_ID_ANY, RTE_MEMZONE_16MB);
			if (mz != NULL) {
				printf("MEMZONE FLAG 16MB\n");
				return -1;
			}
		}

		if (hugepage_16MB_avail && hugepage_16GB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_16M_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_16MB|RTE_MEMZONE_16GB);
			if (mz == NULL) {
				printf("BOTH SIZES SET\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_16G &&
					mz->hugepage_sz != RTE_PGSIZE_16M) {
				printf("Wrong size when both sizes set\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}
		}
	}
	return 0;
}


/* Return the largest free block size on the given socket, minus the
 * allocation overhead and requested alignment.
 */
static size_t
find_max_block_free_size(unsigned int align, unsigned int socket_id)
{
	struct rte_malloc_socket_stats stats;
	size_t len, overhead;

	if (rte_malloc_get_socket_stats(socket_id, &stats) < 0)
		return 0;

	len = stats.greatest_free_size;
	overhead = MALLOC_ELEM_OVERHEAD;

	if (len == 0)
		return 0;

	align = RTE_CACHE_LINE_ROUNDUP(align);
	overhead += align;

	if (len < overhead)
		return 0;

	return len - overhead;
}

static int
test_memzone_reserve_max(void)
{
	unsigned int i;

	for (i = 0; i < rte_socket_count(); i++) {
		const struct rte_memzone *mz;
		size_t maxlen;
		int socket;

		socket = rte_socket_id_by_idx(i);
		maxlen = find_max_block_free_size(0, socket);

		if (maxlen == 0) {
			printf("There is no space left!\n");
			return 0;
		}

		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("max_zone"), 0,
				socket, 0);
		if (mz == NULL) {
			printf("Failed to reserve a big chunk of memory - %s\n",
					rte_strerror(rte_errno));
			rte_dump_physmem_layout(stdout);
			rte_memzone_dump(stdout);
			return -1;
		}

		if (mz->len != maxlen) {
			printf("Memzone reserve with 0 size did not return the biggest block\n");
			printf("Expected size = %zu, actual size = %zu\n",
					maxlen, mz->len);
			rte_dump_physmem_layout(stdout);
			rte_memzone_dump(stdout);
			return -1;
		}

		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}
	}

	return 0;
}

static int
test_memzone_reserve_max_aligned(void)
{
	unsigned int i;

	for (i = 0; i < rte_socket_count(); i++) {
		const struct rte_memzone *mz;
		size_t maxlen, minlen = 0;
		int socket;

		socket = rte_socket_id_by_idx(i);

		/* random alignment */
		rte_srand((unsigned int)rte_rdtsc());
		const unsigned int align = 1 << ((rte_rand() % 8) + 5); /* from 32 up to 4K alignment */

		/* memzone size may be between size and size - align */
		minlen = find_max_block_free_size(align, socket);
		maxlen = find_max_block_free_size(0, socket);

		if (minlen == 0 || maxlen == 0) {
			printf("There is no space left for biggest %u-aligned memzone!\n",
					align);
			return 0;
		}

		mz = rte_memzone_reserve_aligned(
				TEST_MEMZONE_NAME("max_zone_aligned"),
				0, socket, 0, align);
		if (mz == NULL) {
			printf("Failed to reserve a big chunk of memory - %s\n",
					rte_strerror(rte_errno));
			rte_dump_physmem_layout(stdout);
			rte_memzone_dump(stdout);
			return -1;
		}
		if (mz->addr != RTE_PTR_ALIGN(mz->addr, align)) {
			printf("Memzone reserve with 0 size and alignment %u did not return aligned block\n",
					align);
			rte_dump_physmem_layout(stdout);
			rte_memzone_dump(stdout);
			return -1;
		}

		if (mz->len < minlen || mz->len > maxlen) {
			printf("Memzone reserve with 0 size and alignment %u did not return"
					" the biggest block\n", align);
			printf("Expected size = %zu-%zu, actual size = %zu\n",
					minlen, maxlen, mz->len);
			rte_dump_physmem_layout(stdout);
			rte_memzone_dump(stdout);
			return -1;
		}

		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}
	}
	return 0;
}

static int
test_memzone_aligned(void)
{
	const struct rte_memzone *memzone_aligned_32;
	const struct rte_memzone *memzone_aligned_128;
	const struct rte_memzone *memzone_aligned_256;
	const struct rte_memzone *memzone_aligned_512;
	const struct rte_memzone *memzone_aligned_1024;

	/* memzone that should automatically be adjusted to align on 64 bytes */
	memzone_aligned_32 = rte_memzone_reserve_aligned(
			TEST_MEMZONE_NAME("aligned_32"), 100, SOCKET_ID_ANY, 0,
			32);

	/* memzone that is supposed to be aligned on a 128 byte boundary */
	memzone_aligned_128 = rte_memzone_reserve_aligned(
			TEST_MEMZONE_NAME("aligned_128"), 100, SOCKET_ID_ANY, 0,
			128);

	/* memzone that is supposed to be aligned on a 256 byte boundary */
	memzone_aligned_256 = rte_memzone_reserve_aligned(
			TEST_MEMZONE_NAME("aligned_256"), 100, SOCKET_ID_ANY, 0,
			256);

	/* memzone that is supposed to be aligned on a 512 byte boundary */
	memzone_aligned_512 = rte_memzone_reserve_aligned(
			TEST_MEMZONE_NAME("aligned_512"), 100, SOCKET_ID_ANY, 0,
			512);

	/* memzone that is supposed to be aligned on a 1024 byte boundary */
	memzone_aligned_1024 = rte_memzone_reserve_aligned(
			TEST_MEMZONE_NAME("aligned_1024"), 100, SOCKET_ID_ANY,
			0, 1024);

	printf("check alignments and lengths\n");
	if (memzone_aligned_32 == NULL) {
		printf("Unable to reserve 64-byte aligned memzone!\n");
		return -1;
	}
	if ((memzone_aligned_32->iova & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if (((uintptr_t) memzone_aligned_32->addr & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if ((memzone_aligned_32->len & RTE_CACHE_LINE_MASK) != 0)
		return -1;

	if (memzone_aligned_128 == NULL) {
		printf("Unable to reserve 128-byte aligned memzone!\n");
		return -1;
	}
	if ((memzone_aligned_128->iova & 127) != 0)
		return -1;
	if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
		return -1;
	if ((memzone_aligned_128->len & RTE_CACHE_LINE_MASK) != 0)
		return -1;

	if (memzone_aligned_256 == NULL) {
		printf("Unable to reserve 256-byte aligned memzone!\n");
		return -1;
	}
	if ((memzone_aligned_256->iova & 255) != 0)
		return -1;
	if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
		return -1;
	if ((memzone_aligned_256->len & RTE_CACHE_LINE_MASK) != 0)
		return -1;

	if (memzone_aligned_512 == NULL) {
		printf("Unable to reserve 512-byte aligned memzone!\n");
		return -1;
	}
	if ((memzone_aligned_512->iova & 511) != 0)
		return -1;
	if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
		return -1;
	if ((memzone_aligned_512->len & RTE_CACHE_LINE_MASK) != 0)
		return -1;

	if (memzone_aligned_1024 == NULL) {
		printf("Unable to reserve 1024-byte aligned memzone!\n");
		return -1;
	}
	if ((memzone_aligned_1024->iova & 1023) != 0)
		return -1;
	if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
		return -1;
	if ((memzone_aligned_1024->len & RTE_CACHE_LINE_MASK) != 0)
		return -1;

	/* check that zones don't overlap */
	printf("check overlapping\n");
	if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
			memzone_aligned_128->iova, memzone_aligned_128->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
			memzone_aligned_256->iova, memzone_aligned_256->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
			memzone_aligned_512->iova, memzone_aligned_512->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
			memzone_aligned_1024->iova, memzone_aligned_1024->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
			memzone_aligned_256->iova, memzone_aligned_256->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
			memzone_aligned_512->iova, memzone_aligned_512->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
			memzone_aligned_1024->iova, memzone_aligned_1024->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_256->iova, memzone_aligned_256->len,
			memzone_aligned_512->iova, memzone_aligned_512->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_256->iova, memzone_aligned_256->len,
			memzone_aligned_1024->iova, memzone_aligned_1024->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_512->iova, memzone_aligned_512->len,
			memzone_aligned_1024->iova, memzone_aligned_1024->len))
		return -1;

	/* free all used zones */
	if (rte_memzone_free(memzone_aligned_32)) {
		printf("Fail memzone free\n");
		return -1;
	}
	if (rte_memzone_free(memzone_aligned_128)) {
		printf("Fail memzone free\n");
		return -1;
	}
	if (rte_memzone_free(memzone_aligned_256)) {
		printf("Fail memzone free\n");
		return -1;
	}
	if (rte_memzone_free(memzone_aligned_512)) {
		printf("Fail memzone free\n");
		return -1;
	}
	if (rte_memzone_free(memzone_aligned_1024)) {
		printf("Fail memzone free\n");
		return -1;
	}
	return 0;
}

static int
check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
		uint32_t bound)
{
	const struct rte_memzone *mz;
	rte_iova_t bmask;

	bmask = ~((rte_iova_t)bound - 1);

	if ((mz = rte_memzone_reserve_bounded(name, len, SOCKET_ID_ANY, 0,
			align, bound)) == NULL) {
		printf("%s(%s): memzone creation failed\n",
				__func__, name);
		return -1;
	}

	if ((mz->iova & ((rte_iova_t)align - 1)) != 0) {
		printf("%s(%s): invalid phys addr alignment\n",
				__func__, mz->name);
		return -1;
	}

	if (((uintptr_t) mz->addr & ((uintptr_t)align - 1)) != 0) {
		printf("%s(%s): invalid virtual addr alignment\n",
				__func__, mz->name);
		return -1;
	}

	if ((mz->len & RTE_CACHE_LINE_MASK) != 0 || mz->len < len ||
			mz->len < RTE_CACHE_LINE_SIZE) {
		printf("%s(%s): invalid length\n",
				__func__, mz->name);
		return -1;
	}

	if ((mz->iova & bmask) !=
			((mz->iova + mz->len - 1) & bmask)) {
		printf("%s(%s): invalid memzone boundary %u crossed\n",
				__func__, mz->name, bound);
		return -1;
	}

	if (rte_memzone_free(mz)) {
		printf("Fail memzone free\n");
		return -1;
	}

	return 0;
}

static int
test_memzone_bounded(void)
{
	const struct rte_memzone *memzone_err;
	int rc;

	/* should fail as the boundary is not a power of two */
	memzone_err = rte_memzone_reserve_bounded(
			TEST_MEMZONE_NAME("bounded_error_31"), 100,
			SOCKET_ID_ANY, 0, 32, UINT32_MAX);
	if (memzone_err != NULL) {
		printf("%s(%s): created a memzone with invalid boundary "
				"conditions\n", __func__, memzone_err->name);
		return -1;
	}

	/* should fail as len is greater than the boundary */
	memzone_err = rte_memzone_reserve_bounded(
			TEST_MEMZONE_NAME("bounded_error_32"), 100,
			SOCKET_ID_ANY, 0, 32, 32);
	if (memzone_err != NULL) {
		printf("%s(%s): created a memzone with invalid boundary "
				"conditions\n", __func__, memzone_err->name);
		return -1;
	}

	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_128"), 100, 128,
			128);
	if (rc != 0)
		return rc;

	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_256"), 100, 256,
			128);
	if (rc != 0)
		return rc;

	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_1K"), 100, 64,
			1024);
	if (rc != 0)
		return rc;

	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_1K_MAX"), 0, 64,
			1024);
	if (rc != 0)
		return rc;

	return 0;
}

static int
test_memzone_free(void)
{
	const struct rte_memzone *mz[RTE_MAX_MEMZONE + 1];
	int i;
	char name[20];

	mz[0] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone0"), 2000,
			SOCKET_ID_ANY, 0);
	mz[1] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone1"), 4000,
			SOCKET_ID_ANY, 0);

	if (mz[0] > mz[1])
		return -1;
	if (!rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone0")))
		return -1;
	if (!rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone1")))
		return -1;

	if (rte_memzone_free(mz[0])) {
		printf("Fail memzone free - tempzone0\n");
		return -1;
	}
	if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone0"))) {
		printf("Found previously freed memzone - tempzone0\n");
		return -1;
	}
	mz[2] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone2"), 2000,
			SOCKET_ID_ANY, 0);

	if (mz[2] > mz[1]) {
		printf("tempzone2 should have gotten the free entry from tempzone0\n");
		return -1;
	}
	if (rte_memzone_free(mz[2])) {
		printf("Fail memzone free - tempzone2\n");
		return -1;
	}
	if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone2"))) {
		printf("Found previously freed memzone - tempzone2\n");
		return -1;
	}
	if (rte_memzone_free(mz[1])) {
		printf("Fail memzone free - tempzone1\n");
		return -1;
	}
	if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone1"))) {
		printf("Found previously freed memzone - tempzone1\n");
		return -1;
	}

	/* reserve zones until all memzone descriptors are in use */
	i = 0;
	do {
		snprintf(name, sizeof(name), TEST_MEMZONE_NAME("tempzone%u"),
				i);
		mz[i] = rte_memzone_reserve(name, 1, SOCKET_ID_ANY, 0);
	} while (mz[i++] != NULL);

	if (rte_memzone_free(mz[0])) {
		printf("Fail memzone free - tempzone0\n");
		return -1;
	}

	/* one descriptor was freed above, so this reservation must succeed */
	mz[0] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone0new"), 0,
			SOCKET_ID_ANY, 0);

	if (mz[0] == NULL) {
		printf("Failed to create memzone - tempzone0new - when MAX memzones were "
				"created and one was freed\n");
		return -1;
	}

	/* free the remaining zones; mz[i - 1] is the failed (NULL) reservation */
	for (i = i - 2; i >= 0; i--) {
		if (rte_memzone_free(mz[i])) {
			printf("Fail memzone free - tempzone%d\n", i);
			return -1;
		}
	}

	return 0;
}

static int test_memzones_left;
static int memzone_walk_cnt;

static void memzone_walk_clb(const struct rte_memzone *mz,
		void *arg __rte_unused)
{
	memzone_walk_cnt++;
	if (!strncmp(TEST_MEMZONE_NAME(""), mz->name, RTE_MEMZONE_NAMESIZE))
		test_memzones_left++;
}

static int
test_memzone_basic(void)
{
	const struct rte_memzone *memzone1;
	const struct rte_memzone *memzone2;
	const struct rte_memzone *memzone3;
	const struct rte_memzone *memzone4;
	const struct rte_memzone *mz;
	int memzone_cnt_after, memzone_cnt_expected;
	int memzone_cnt_before;

	memzone_walk_cnt = 0;
	test_memzones_left = 0;
	rte_memzone_walk(memzone_walk_clb, NULL);
	memzone_cnt_before = memzone_walk_cnt;

	memzone1 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
			SOCKET_ID_ANY, 0);

	memzone2 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone2"), 1000,
			0, 0);

	memzone3 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone3"), 1000,
			1, 0);

	memzone4 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone4"), 1024,
			SOCKET_ID_ANY, 0);

	/* memzone3 may be NULL if we don't have NUMA */
	if (memzone1 == NULL || memzone2 == NULL || memzone4 == NULL)
		return -1;

	/* check how many memzones we are expecting */
	memzone_cnt_expected = memzone_cnt_before +
			(memzone1 != NULL) + (memzone2 != NULL) +
			(memzone3 != NULL) + (memzone4 != NULL);

	memzone_walk_cnt = 0;
	test_memzones_left = 0;
	rte_memzone_walk(memzone_walk_clb, NULL);
	memzone_cnt_after = memzone_walk_cnt;

	if (memzone_cnt_after != memzone_cnt_expected)
		return -1;

	rte_memzone_dump(stdout);

	/* check cache-line alignments */
	printf("check alignments and lengths\n");

	if ((memzone1->iova & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if ((memzone2->iova & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if (memzone3 != NULL && (memzone3->iova & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if ((memzone1->len & RTE_CACHE_LINE_MASK) != 0 || memzone1->len == 0)
		return -1;
	if ((memzone2->len & RTE_CACHE_LINE_MASK) != 0 || memzone2->len == 0)
		return -1;
	if (memzone3 != NULL && ((memzone3->len & RTE_CACHE_LINE_MASK) != 0 ||
			memzone3->len == 0))
		return -1;
	if (memzone4->len != 1024)
		return -1;

	/* check that zones don't overlap */
	printf("check overlapping\n");

	if (is_memory_overlap(memzone1->iova, memzone1->len,
			memzone2->iova, memzone2->len))
		return -1;
	if (memzone3 != NULL &&
			is_memory_overlap(memzone1->iova, memzone1->len,
					memzone3->iova, memzone3->len))
		return -1;
	if (memzone3 != NULL &&
			is_memory_overlap(memzone2->iova, memzone2->len,
					memzone3->iova, memzone3->len))
		return -1;

	printf("check socket ID\n");

	/* memzone2 must be on socket id 0 and memzone3 on socket 1 */
	if (memzone2->socket_id != 0)
		return -1;
	if (memzone3 != NULL && memzone3->socket_id != 1)
		return -1;

	printf("test zone lookup\n");
	mz = rte_memzone_lookup(TEST_MEMZONE_NAME("testzone1"));
	if (mz != memzone1)
		return -1;

	printf("test duplicate zone name\n");
	mz = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
			SOCKET_ID_ANY, 0);
	if (mz != NULL)
		return -1;

	if (rte_memzone_free(memzone1)) {
		printf("Fail memzone free - memzone1\n");
		return -1;
	}
	if (rte_memzone_free(memzone2)) {
		printf("Fail memzone free - memzone2\n");
		return -1;
	}
	if (memzone3 && rte_memzone_free(memzone3)) {
		printf("Fail memzone free - memzone3\n");
		return -1;
	}
	if (rte_memzone_free(memzone4)) {
		printf("Fail memzone free - memzone4\n");
		return -1;
	}

	/* check that all zones reserved by this test have been freed */
	memzone_walk_cnt = 0;
	test_memzones_left = 0;
	rte_memzone_walk(memzone_walk_clb, NULL);
	memzone_cnt_after = memzone_walk_cnt;
	if (memzone_cnt_after != memzone_cnt_before)
		return -1;

	return 0;
}

static int
test_memzone(void)
{
	/* take note of how many memzones were allocated before running */
	int memzone_cnt;

	memzone_walk_cnt = 0;
	test_memzones_left = 0;
	rte_memzone_walk(memzone_walk_clb, NULL);
	memzone_cnt = memzone_walk_cnt;

	printf("test basic memzone API\n");
	if (test_memzone_basic() < 0)
		return -1;

	printf("test free memzone\n");
	if (test_memzone_free() < 0)
		return -1;

	printf("test reserving memzone with bigger size than the maximum\n");
	if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
		return -1;

	printf("test memzone_reserve flags\n");
	if (test_memzone_reserve_flags() < 0)
		return -1;

	printf("test alignment for memzone_reserve\n");
	if (test_memzone_aligned() < 0)
		return -1;

	printf("test boundary alignment for memzone_reserve\n");
	if (test_memzone_bounded() < 0)
		return -1;

	printf("test invalid alignment for memzone_reserve\n");
	if (test_memzone_invalid_alignment() < 0)
		return -1;

	printf("test invalid flags for memzone_reserve\n");
	if (test_memzone_invalid_flags() < 0)
		return -1;

	printf("test reserving the largest size memzone possible\n");
	if (test_memzone_reserve_max() < 0)
		return -1;

	printf("test reserving the largest size aligned memzone possible\n");
	if (test_memzone_reserve_max_aligned() < 0)
		return -1;

	printf("check memzone cleanup\n");
	memzone_walk_cnt = 0;
	test_memzones_left = 0;
	rte_memzone_walk(memzone_walk_clb, NULL);
	if (memzone_walk_cnt != memzone_cnt || test_memzones_left > 0) {
		printf("there are some memzones left after test\n");
		rte_memzone_dump(stdout);
		return -1;
	}

	return 0;
}

REGISTER_TEST_COMMAND(memzone_autotest, test_memzone);