/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include "test.h"

#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf_dyn.h>

#define MEMPOOL_CACHE_SIZE 32
#define MBUF_DATA_SIZE 2048
#define NB_MBUF 128
#define MBUF_TEST_DATA_LEN 1464
#define MBUF_TEST_DATA_LEN2 50
#define MBUF_TEST_DATA_LEN3 256
#define MBUF_TEST_HDR1_LEN 20
#define MBUF_TEST_HDR2_LEN 30
#define MBUF_TEST_ALL_HDRS_LEN (MBUF_TEST_HDR1_LEN+MBUF_TEST_HDR2_LEN)
#define MBUF_TEST_SEG_SIZE 64
#define MBUF_TEST_BURST 8
#define EXT_BUF_TEST_DATA_LEN 1024
#define MBUF_MAX_SEG 16
#define MBUF_NO_HEADER 0
#define MBUF_HEADER 1
#define MBUF_NEG_TEST_READ 2
#define VAL_NAME(flag) { flag, #flag }

/* chain length in bulk test */
#define CHAIN_LEN 16

/* size of private data for mbuf in pktmbuf_pool2 */
#define MBUF2_PRIV_SIZE 128

#define REFCNT_MAX_ITER 64
#define REFCNT_MAX_TIMEOUT 10
#define REFCNT_MAX_REF (RTE_MAX_LCORE)
#define REFCNT_MBUF_NUM 64
#define REFCNT_RING_SIZE (REFCNT_MBUF_NUM * REFCNT_MAX_REF)

#define MAGIC_DATA 0x42424242

#define MAKE_STRING(x) # x

#ifdef RTE_MBUF_REFCNT_ATOMIC

static volatile uint32_t refcnt_stop_workers;
static unsigned refcnt_lcore[RTE_MAX_LCORE];

#endif

/*
 * MBUF
 * ====
 *
 * #. Allocate a mbuf pool.
 *
 *    - The pool contains NB_MBUF elements, where each mbuf has a data room
 *      of MBUF_DATA_SIZE bytes.
 *
 * #. Test multiple allocations of mbufs from this pool.
 *
 *    - Allocate NB_MBUF and store pointers in a table.
 *    - If an allocation fails, return an error.
 *    - Free all these mbufs.
 *    - Repeat the same test to check that mbufs were freed correctly.
 *
 * #. Test data manipulation in pktmbuf.
 *
 *    - Alloc an mbuf.
 *    - Append data using rte_pktmbuf_append().
 *    - Test for error in rte_pktmbuf_append() when len is too large.
 *    - Trim data at the end of mbuf using rte_pktmbuf_trim().
 *    - Test for error in rte_pktmbuf_trim() when len is too large.
 *    - Prepend a header using rte_pktmbuf_prepend().
 *    - Test for error in rte_pktmbuf_prepend() when len is too large.
 *    - Remove data at the beginning of mbuf using rte_pktmbuf_adj().
 *    - Test for error in rte_pktmbuf_adj() when len is too large.
 *    - Check that appended data is not corrupt.
 *    - Free the mbuf.
 *    - Between all these tests, check data_len and pkt_len, and
 *      that the mbuf is contiguous.
 *    - Repeat the test to check that allocation operations
 *      reinitialize the mbuf correctly.
 *
 * #. Test packet cloning
 *    - Clone a mbuf and verify the data
 *    - Clone the cloned mbuf and verify the data
 *    - Attach a mbuf to another that does not have the same priv_size.
 */

#define GOTO_FAIL(str, ...) do { \
		printf("mbuf test FAILED (l.%d): <" str ">\n", \
			__LINE__, ##__VA_ARGS__); \
		goto fail; \
	} while (0)
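
/*
 * Note: GOTO_FAIL() relies on a local "fail:" label; every test function
 * using it defines one, and that label's cleanup path frees whatever mbufs
 * were allocated before the failure.
 */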

/*
 * test data manipulation in mbuf with non-ascii data
 */
static int
test_pktmbuf_with_non_ascii_data(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *m = NULL;
	char *data;

	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
	if (data == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	memset(data, 0xff, rte_pktmbuf_pkt_len(m));
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be contiguous");
	rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);

	rte_pktmbuf_free(m);

	return 0;

fail:
	if (m) {
		rte_pktmbuf_free(m);
	}
	return -1;
}

/*
 * test data manipulation in mbuf
 */
static int
test_one_pktmbuf(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *m = NULL;
	char *data, *data2, *hdr;
	unsigned i;

	printf("Test pktmbuf API\n");

	/* alloc a mbuf */

	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	rte_pktmbuf_dump(stdout, m, 0);

	/* append data */

	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
	if (data == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	memset(data, 0x66, rte_pktmbuf_pkt_len(m));
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be contiguous");
	rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
	rte_pktmbuf_dump(stdout, m, 2*MBUF_TEST_DATA_LEN);

	/* this append should fail */

	data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
	if (data2 != NULL)
		GOTO_FAIL("Append should not succeed");

	/* append some more data */

	data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
	if (data2 == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be contiguous");

	/* trim data at the end of mbuf */

	if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
		GOTO_FAIL("Cannot trim data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be contiguous");

	/* this trim should fail */

	if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
		GOTO_FAIL("trim should not succeed");

	/* prepend one header */

	hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
	if (hdr == NULL)
		GOTO_FAIL("Cannot prepend");
	if (data - hdr != MBUF_TEST_HDR1_LEN)
GOTO_FAIL("Prepend failed"); 241 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN) 242 GOTO_FAIL("Bad pkt length"); 243 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN) 244 GOTO_FAIL("Bad data length"); 245 if (!rte_pktmbuf_is_contiguous(m)) 246 GOTO_FAIL("Buffer should be continuous"); 247 memset(hdr, 0x55, MBUF_TEST_HDR1_LEN); 248 249 /* prepend another header */ 250 251 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN); 252 if (hdr == NULL) 253 GOTO_FAIL("Cannot prepend"); 254 if (data - hdr != MBUF_TEST_ALL_HDRS_LEN) 255 GOTO_FAIL("Prepend failed"); 256 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN) 257 GOTO_FAIL("Bad pkt length"); 258 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN) 259 GOTO_FAIL("Bad data length"); 260 if (!rte_pktmbuf_is_contiguous(m)) 261 GOTO_FAIL("Buffer should be continuous"); 262 memset(hdr, 0x55, MBUF_TEST_HDR2_LEN); 263 264 rte_mbuf_sanity_check(m, 1); 265 rte_mbuf_sanity_check(m, 0); 266 rte_pktmbuf_dump(stdout, m, 0); 267 268 /* this prepend should fail */ 269 270 hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1)); 271 if (hdr != NULL) 272 GOTO_FAIL("prepend should not succeed"); 273 274 /* remove data at beginning of mbuf (adj) */ 275 276 if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN)) 277 GOTO_FAIL("rte_pktmbuf_adj failed"); 278 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN) 279 GOTO_FAIL("Bad pkt length"); 280 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN) 281 GOTO_FAIL("Bad data length"); 282 if (!rte_pktmbuf_is_contiguous(m)) 283 GOTO_FAIL("Buffer should be continuous"); 284 285 /* this adj should fail */ 286 287 if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL) 288 GOTO_FAIL("rte_pktmbuf_adj should not succeed"); 289 290 /* check data */ 291 292 if (!rte_pktmbuf_is_contiguous(m)) 293 GOTO_FAIL("Buffer should be continuous"); 294 295 for (i=0; i<MBUF_TEST_DATA_LEN; i++) { 296 if (data[i] != 0x66) 297 GOTO_FAIL("Data corrupted at offset %u", i); 298 } 299 300 /* free mbuf */ 301 302 rte_pktmbuf_free(m); 303 m = NULL; 304 return 0; 305 306 fail: 307 rte_pktmbuf_free(m); 308 return -1; 309 } 310 311 static uint16_t 312 testclone_refcnt_read(struct rte_mbuf *m) 313 { 314 return RTE_MBUF_HAS_PINNED_EXTBUF(m) ? 
	       rte_mbuf_ext_refcnt_read(m->shinfo) :
	       rte_mbuf_refcnt_read(m);
}

static int
testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool,
				struct rte_mempool *clone_pool)
{
	struct rte_mbuf *m = NULL;
	struct rte_mbuf *clone = NULL;
	struct rte_mbuf *clone2 = NULL;
	unaligned_uint32_t *data;

	/* alloc a mbuf */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("ooops not allocating mbuf");

	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	rte_pktmbuf_append(m, sizeof(uint32_t));
	data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
	*data = MAGIC_DATA;

	/* clone the allocated mbuf */
	clone = rte_pktmbuf_clone(m, clone_pool);
	if (clone == NULL)
		GOTO_FAIL("cannot clone data\n");

	data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
	if (*data != MAGIC_DATA)
		GOTO_FAIL("invalid data in clone\n");

	if (testclone_refcnt_read(m) != 2)
		GOTO_FAIL("invalid refcnt in m\n");

	/* free the clone */
	rte_pktmbuf_free(clone);
	clone = NULL;

	/* same test with a chained mbuf */
	m->next = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m->next == NULL)
		GOTO_FAIL("Next Pkt Null\n");
	m->nb_segs = 2;

	rte_pktmbuf_append(m->next, sizeof(uint32_t));
	m->pkt_len = 2 * sizeof(uint32_t);

	data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
	*data = MAGIC_DATA;

	clone = rte_pktmbuf_clone(m, clone_pool);
	if (clone == NULL)
		GOTO_FAIL("cannot clone data\n");

	data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
	if (*data != MAGIC_DATA)
		GOTO_FAIL("invalid data in clone\n");

	data = rte_pktmbuf_mtod(clone->next, unaligned_uint32_t *);
	if (*data != MAGIC_DATA)
		GOTO_FAIL("invalid data in clone->next\n");

	if (testclone_refcnt_read(m) != 2)
		GOTO_FAIL("invalid refcnt in m\n");

	if (testclone_refcnt_read(m->next) != 2)
		GOTO_FAIL("invalid refcnt in m->next\n");

	/* try to clone the clone */

	clone2 = rte_pktmbuf_clone(clone, clone_pool);
	if (clone2 == NULL)
		GOTO_FAIL("cannot clone the clone\n");

	data = rte_pktmbuf_mtod(clone2, unaligned_uint32_t *);
	if (*data != MAGIC_DATA)
		GOTO_FAIL("invalid data in clone2\n");

	data = rte_pktmbuf_mtod(clone2->next, unaligned_uint32_t *);
	if (*data != MAGIC_DATA)
		GOTO_FAIL("invalid data in clone2->next\n");

	if (testclone_refcnt_read(m) != 3)
		GOTO_FAIL("invalid refcnt in m\n");

	if (testclone_refcnt_read(m->next) != 3)
		GOTO_FAIL("invalid refcnt in m->next\n");

	/* free mbuf */
	rte_pktmbuf_free(m);
	rte_pktmbuf_free(clone);
	rte_pktmbuf_free(clone2);

	m = NULL;
	clone = NULL;
	clone2 = NULL;
	printf("%s ok\n", __func__);
	return 0;

fail:
	rte_pktmbuf_free(m);
	rte_pktmbuf_free(clone);
	rte_pktmbuf_free(clone2);
	return -1;
}

static int
test_pktmbuf_copy(struct rte_mempool *pktmbuf_pool,
		  struct rte_mempool *clone_pool)
{
	struct rte_mbuf *m = NULL;
	struct rte_mbuf *copy = NULL;
	struct rte_mbuf *copy2 = NULL;
	struct rte_mbuf *clone = NULL;
	unaligned_uint32_t *data;

	/* alloc a mbuf */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("ooops not allocating mbuf");

	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	rte_pktmbuf_append(m, sizeof(uint32_t));
	data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
	*data = MAGIC_DATA;

	/* copy the allocated mbuf */
	copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
	if (copy == NULL)
		GOTO_FAIL("cannot copy data\n");

	if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
		GOTO_FAIL("copy length incorrect\n");

	if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
		GOTO_FAIL("copy data length incorrect\n");

	data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
	if (*data != MAGIC_DATA)
		GOTO_FAIL("invalid data in copy\n");

	/* free the copy */
	rte_pktmbuf_free(copy);
	copy = NULL;

	/* same test with a cloned mbuf */
	clone = rte_pktmbuf_clone(m, clone_pool);
	if (clone == NULL)
		GOTO_FAIL("cannot clone data\n");

	if ((!RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
	     !RTE_MBUF_CLONED(clone)) ||
	    (RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
	     !RTE_MBUF_HAS_EXTBUF(clone)))
		GOTO_FAIL("clone did not give a cloned mbuf\n");

	copy = rte_pktmbuf_copy(clone, pktmbuf_pool, 0, UINT32_MAX);
	if (copy == NULL)
		GOTO_FAIL("cannot copy cloned mbuf\n");

	if (RTE_MBUF_CLONED(copy))
		GOTO_FAIL("copy of clone is cloned?\n");

	if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
		GOTO_FAIL("copy clone length incorrect\n");

	if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
		GOTO_FAIL("copy clone data length incorrect\n");

	data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
	if (*data != MAGIC_DATA)
		GOTO_FAIL("invalid data in clone copy\n");
	rte_pktmbuf_free(clone);
	rte_pktmbuf_free(copy);
	copy = NULL;
	clone = NULL;

	/* same test with a chained mbuf */
	m->next = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m->next == NULL)
		GOTO_FAIL("Next Pkt Null\n");
	m->nb_segs = 2;

	rte_pktmbuf_append(m->next, sizeof(uint32_t));
	m->pkt_len = 2 * sizeof(uint32_t);
	data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
	*data = MAGIC_DATA + 1;

	copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
	if (copy == NULL)
		GOTO_FAIL("cannot copy data\n");

	if (rte_pktmbuf_pkt_len(copy) != 2 * sizeof(uint32_t))
		GOTO_FAIL("chain copy length incorrect\n");

	if (rte_pktmbuf_data_len(copy) != 2 * sizeof(uint32_t))
		GOTO_FAIL("chain copy data length incorrect\n");

	data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
	if (data[0] != MAGIC_DATA || data[1] != MAGIC_DATA + 1)
		GOTO_FAIL("invalid data in copy\n");

	rte_pktmbuf_free(copy2);

	/* test offset copy */
	copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
				 sizeof(uint32_t), UINT32_MAX);
	if (copy2 == NULL)
		GOTO_FAIL("cannot copy the copy\n");

	if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
		GOTO_FAIL("copy with offset, length incorrect\n");

	if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
		GOTO_FAIL("copy with offset, data length incorrect\n");

	data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
	if (data[0] != MAGIC_DATA + 1)
		GOTO_FAIL("copy with offset, invalid data\n");

	rte_pktmbuf_free(copy2);

	/* test truncation copy */
	copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
				 0, sizeof(uint32_t));
	if (copy2 == NULL)
		GOTO_FAIL("cannot copy the copy\n");

	if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
		GOTO_FAIL("copy with truncate, length incorrect\n");

	if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
		GOTO_FAIL("copy with truncate, data length incorrect\n");

	data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
	if (data[0] != MAGIC_DATA)
		GOTO_FAIL("copy with truncate, invalid data\n");

	/* free mbuf */
	rte_pktmbuf_free(m);
	rte_pktmbuf_free(copy);
	rte_pktmbuf_free(copy2);

	m = NULL;
	copy = NULL;
	copy2 = NULL;
	printf("%s ok\n", __func__);
	return 0;

fail:
	rte_pktmbuf_free(m);
	rte_pktmbuf_free(copy);
	rte_pktmbuf_free(copy2);
	return -1;
}

static int
test_attach_from_different_pool(struct rte_mempool *pktmbuf_pool,
				struct rte_mempool *pktmbuf_pool2)
{
	struct rte_mbuf *m = NULL;
	struct rte_mbuf *clone = NULL;
	struct rte_mbuf *clone2 = NULL;
	char *data, *c_data, *c_data2;

	/* alloc a mbuf */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("cannot allocate mbuf");

	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	data = rte_pktmbuf_mtod(m, char *);

	/* allocate a new mbuf from the second pool, and attach it to the first
	 * mbuf */
	clone = rte_pktmbuf_alloc(pktmbuf_pool2);
	if (clone == NULL)
		GOTO_FAIL("cannot allocate mbuf from second pool\n");

	/* check data room size and priv size, and erase priv */
	if (rte_pktmbuf_data_room_size(clone->pool) != 0)
		GOTO_FAIL("data room size should be 0\n");
	if (rte_pktmbuf_priv_size(clone->pool) != MBUF2_PRIV_SIZE)
		GOTO_FAIL("priv size should be %d\n", MBUF2_PRIV_SIZE);
	memset(clone + 1, 0, MBUF2_PRIV_SIZE);

	/* save data pointer to compare it after detach() */
	c_data = rte_pktmbuf_mtod(clone, char *);
	if (c_data != (char *)clone + sizeof(*clone) + MBUF2_PRIV_SIZE)
		GOTO_FAIL("bad data pointer in clone");
	if (rte_pktmbuf_headroom(clone) != 0)
		GOTO_FAIL("bad headroom in clone");

	rte_pktmbuf_attach(clone, m);

	if (rte_pktmbuf_mtod(clone, char *) != data)
		GOTO_FAIL("clone was not attached properly\n");
	if (rte_pktmbuf_headroom(clone) != RTE_PKTMBUF_HEADROOM)
		GOTO_FAIL("bad headroom in clone after attach");
	if (rte_mbuf_refcnt_read(m) != 2)
		GOTO_FAIL("invalid refcnt in m\n");

	/* allocate a new mbuf from the second pool, and attach it to the first
	 * cloned mbuf */
	clone2 = rte_pktmbuf_alloc(pktmbuf_pool2);
	if (clone2 == NULL)
		GOTO_FAIL("cannot allocate clone2 from second pool\n");

	/* check data room size and priv size, and erase priv */
	if (rte_pktmbuf_data_room_size(clone2->pool) != 0)
		GOTO_FAIL("data room size should be 0\n");
	if (rte_pktmbuf_priv_size(clone2->pool) != MBUF2_PRIV_SIZE)
		GOTO_FAIL("priv size should be %d\n", MBUF2_PRIV_SIZE);
	memset(clone2 + 1, 0, MBUF2_PRIV_SIZE);

	/* save data pointer to compare it after detach() */
	c_data2 = rte_pktmbuf_mtod(clone2, char *);
	if (c_data2 != (char *)clone2 + sizeof(*clone2) + MBUF2_PRIV_SIZE)
		GOTO_FAIL("bad data pointer in clone2");
	if (rte_pktmbuf_headroom(clone2) != 0)
		GOTO_FAIL("bad headroom in clone2");

	rte_pktmbuf_attach(clone2, clone);

	if (rte_pktmbuf_mtod(clone2, char *) != data)
		GOTO_FAIL("clone2 was not attached properly\n");
	if (rte_pktmbuf_headroom(clone2) != RTE_PKTMBUF_HEADROOM)
		GOTO_FAIL("bad headroom in clone2 after attach");
	if (rte_mbuf_refcnt_read(m) != 3)
		GOTO_FAIL("invalid refcnt in m\n");

	/* detach the clones */
	rte_pktmbuf_detach(clone);
	if (c_data != rte_pktmbuf_mtod(clone, char *))
		GOTO_FAIL("clone was not detached properly\n");
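	/*
	 * rte_pktmbuf_detach() gives the clone back its own buffer and drops
	 * the reference it held on m, so m's refcnt is expected to fall back
	 * by one after each detach.
	 */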
	if (rte_mbuf_refcnt_read(m) != 2)
		GOTO_FAIL("invalid refcnt in m\n");

	rte_pktmbuf_detach(clone2);
	if (c_data2 != rte_pktmbuf_mtod(clone2, char *))
		GOTO_FAIL("clone2 was not detached properly\n");
	if (rte_mbuf_refcnt_read(m) != 1)
		GOTO_FAIL("invalid refcnt in m\n");

	/* free the clones and the initial mbuf */
	rte_pktmbuf_free(clone2);
	rte_pktmbuf_free(clone);
	rte_pktmbuf_free(m);
	printf("%s ok\n", __func__);
	return 0;

fail:
	rte_pktmbuf_free(m);
	rte_pktmbuf_free(clone);
	rte_pktmbuf_free(clone2);
	return -1;
}

/*
 * test allocation and free of mbufs
 */
static int
test_pktmbuf_pool(struct rte_mempool *pktmbuf_pool)
{
	unsigned i;
	struct rte_mbuf *m[NB_MBUF];
	int ret = 0;

	for (i = 0; i < NB_MBUF; i++)
		m[i] = NULL;

	/* alloc NB_MBUF mbufs */
	for (i = 0; i < NB_MBUF; i++) {
		m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
		if (m[i] == NULL) {
			printf("rte_pktmbuf_alloc() failed (%u)\n", i);
			ret = -1;
		}
	}
	struct rte_mbuf *extra = NULL;
	extra = rte_pktmbuf_alloc(pktmbuf_pool);
	if (extra != NULL) {
		printf("Error pool not empty\n");
		ret = -1;
	}
	extra = rte_pktmbuf_clone(m[0], pktmbuf_pool);
	if (extra != NULL) {
		printf("Error pool not empty\n");
		ret = -1;
	}
	/* free them */
	for (i = 0; i < NB_MBUF; i++) {
		rte_pktmbuf_free(m[i]);
	}

	return ret;
}

/*
 * test bulk allocation and bulk free of mbufs
 */
static int
test_pktmbuf_pool_bulk(void)
{
	struct rte_mempool *pool = NULL;
	struct rte_mempool *pool2 = NULL;
	unsigned int i;
	struct rte_mbuf *m;
	struct rte_mbuf *mbufs[NB_MBUF];
	int ret = 0;

	/* We cannot use the preallocated mbuf pools because their caches
	 * prevent us from bulk allocating all objects in them.
	 * So we create our own mbuf pools without caches.
	 */
	printf("Create mbuf pools for bulk allocation.\n");
	pool = rte_pktmbuf_pool_create("test_pktmbuf_bulk",
			NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
	if (pool == NULL) {
		printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
		       rte_errno);
		goto err;
	}
	pool2 = rte_pktmbuf_pool_create("test_pktmbuf_bulk2",
			NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
	if (pool2 == NULL) {
		printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
		       rte_errno);
		goto err;
	}

	/* Preconditions: Mempools must be full. */
	if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
		printf("Test precondition failed: mempools not full\n");
		goto err;
	}
	if (!(rte_mempool_avail_count(pool) == NB_MBUF &&
	      rte_mempool_avail_count(pool2) == NB_MBUF)) {
		printf("Test precondition failed: mempools: %u+%u != %u+%u",
		       rte_mempool_avail_count(pool),
		       rte_mempool_avail_count(pool2),
		       NB_MBUF, NB_MBUF);
		goto err;
	}

	printf("Test single bulk alloc, followed by multiple bulk free.\n");

	/* Bulk allocate all mbufs in the pool, in one go. */
	ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
	if (ret != 0) {
		printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
		goto err;
	}
	/* Test that they have been removed from the pool. */
	if (!rte_mempool_empty(pool)) {
		printf("mempool not empty\n");
		goto err;
	}
	/* Bulk free all mbufs, in four steps. */
	RTE_BUILD_BUG_ON(NB_MBUF % 4 != 0);
	for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
		rte_pktmbuf_free_bulk(&mbufs[i], NB_MBUF / 4);
		/* Test that they have been returned to the pool. */
		if (rte_mempool_avail_count(pool) != i + NB_MBUF / 4) {
			printf("mempool avail count incorrect\n");
			goto err;
		}
	}

	printf("Test multiple bulk alloc, followed by single bulk free.\n");

	/* Bulk allocate all mbufs in the pool, in four steps. */
	for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
		ret = rte_pktmbuf_alloc_bulk(pool, &mbufs[i], NB_MBUF / 4);
		if (ret != 0) {
			printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
			goto err;
		}
	}
	/* Test that they have been removed from the pool. */
	if (!rte_mempool_empty(pool)) {
		printf("mempool not empty\n");
		goto err;
	}
	/* Bulk free all mbufs, in one go. */
	rte_pktmbuf_free_bulk(mbufs, NB_MBUF);
	/* Test that they have been returned to the pool. */
	if (!rte_mempool_full(pool)) {
		printf("mempool not full\n");
		goto err;
	}

	printf("Test bulk free of single long chain.\n");

	/* Bulk allocate all mbufs in the pool, in one go. */
	ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
	if (ret != 0) {
		printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
		goto err;
	}
	/* Create a long mbuf chain. */
	for (i = 1; i < NB_MBUF; i++) {
		ret = rte_pktmbuf_chain(mbufs[0], mbufs[i]);
		if (ret != 0) {
			printf("rte_pktmbuf_chain() failed: %d\n", ret);
			goto err;
		}
		mbufs[i] = NULL;
	}
	/* Free the mbuf chain containing all the mbufs. */
	rte_pktmbuf_free_bulk(mbufs, 1);
	/* Test that they have been returned to the pool. */
	if (!rte_mempool_full(pool)) {
		printf("mempool not full\n");
		goto err;
	}

	printf("Test bulk free of multiple chains using multiple pools.\n");

	/* Create mbuf chains containing mbufs from different pools. */
	RTE_BUILD_BUG_ON(CHAIN_LEN % 2 != 0);
	RTE_BUILD_BUG_ON(NB_MBUF % (CHAIN_LEN / 2) != 0);
	for (i = 0; i < NB_MBUF * 2; i++) {
		m = rte_pktmbuf_alloc((i & 4) ? pool2 : pool);
		if (m == NULL) {
			printf("rte_pktmbuf_alloc() failed (%u)\n", i);
			goto err;
		}
		if ((i % CHAIN_LEN) == 0)
			mbufs[i / CHAIN_LEN] = m;
		else
			rte_pktmbuf_chain(mbufs[i / CHAIN_LEN], m);
	}
	/* Test that both pools have been emptied. */
	if (!(rte_mempool_empty(pool) && rte_mempool_empty(pool2))) {
		printf("mempools not empty\n");
		goto err;
	}
	/* Free one mbuf chain. */
	rte_pktmbuf_free_bulk(mbufs, 1);
	/* Test that the segments have been returned to the pools. */
	if (!(rte_mempool_avail_count(pool) == CHAIN_LEN / 2 &&
	      rte_mempool_avail_count(pool2) == CHAIN_LEN / 2)) {
		printf("all segments of first mbuf have not been returned\n");
		goto err;
	}
	/* Free the remaining mbuf chains. */
	rte_pktmbuf_free_bulk(&mbufs[1], NB_MBUF * 2 / CHAIN_LEN - 1);
	/* Test that they have been returned to the pools. */
	if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
		printf("mempools not full\n");
		goto err;
	}

	ret = 0;
	goto done;

err:
	ret = -1;

done:
	printf("Free mbuf pools for bulk allocation.\n");
	rte_mempool_free(pool);
	rte_mempool_free(pool2);
	return ret;
}

/*
 * test that the pointer to the data on a packet mbuf is set properly
 */
static int
test_pktmbuf_pool_ptr(struct rte_mempool *pktmbuf_pool)
{
	unsigned i;
	struct rte_mbuf *m[NB_MBUF];
	int ret = 0;

	for (i = 0; i < NB_MBUF; i++)
		m[i] = NULL;

	/* alloc NB_MBUF mbufs */
	for (i = 0; i < NB_MBUF; i++) {
		m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
		if (m[i] == NULL) {
			printf("rte_pktmbuf_alloc() failed (%u)\n", i);
			ret = -1;
			break;
		}
		m[i]->data_off += 64;
	}

	/* free them */
	for (i = 0; i < NB_MBUF; i++) {
		rte_pktmbuf_free(m[i]);
	}

	for (i = 0; i < NB_MBUF; i++)
		m[i] = NULL;

	/* alloc NB_MBUF mbufs */
	for (i = 0; i < NB_MBUF; i++) {
		m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
		if (m[i] == NULL) {
			printf("rte_pktmbuf_alloc() failed (%u)\n", i);
			ret = -1;
			break;
		}
		if (m[i]->data_off != RTE_PKTMBUF_HEADROOM) {
			printf("invalid data_off\n");
			ret = -1;
		}
	}

	/* free them */
	for (i = 0; i < NB_MBUF; i++) {
		rte_pktmbuf_free(m[i]);
	}

	return ret;
}

static int
test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool)
{
	unsigned i;
	struct rte_mbuf *m[NB_MBUF];
	int ret = 0;

	for (i = 0; i < NB_MBUF; i++)
		m[i] = NULL;

	/* alloc NB_MBUF mbufs */
	for (i = 0; i < NB_MBUF; i++) {
		m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
		if (m[i] == NULL) {
			printf("rte_pktmbuf_alloc() failed (%u)\n", i);
			ret = -1;
		}
	}

	/* free them */
	for (i = 0; i < NB_MBUF; i++) {
		if (m[i] != NULL) {
			struct rte_mbuf *mb, *mt;

			mb = m[i];
			while (mb != NULL) {
				mt = mb;
				mb = mb->next;
				rte_pktmbuf_free_seg(mt);
			}
		}
	}

	return ret;
}

/*
 * Stress test for rte_mbuf atomic refcnt.
 * Implies that RTE_MBUF_REFCNT_ATOMIC is defined.
 * For more efficiency, recommended to run with RTE_LIBRTE_MBUF_DEBUG defined.
 */

#ifdef RTE_MBUF_REFCNT_ATOMIC

static int
test_refcnt_worker(void *arg)
{
	unsigned lcore, free;
	void *mp = 0;
	struct rte_ring *refcnt_mbuf_ring = arg;

	lcore = rte_lcore_id();
	printf("%s started at lcore %u\n", __func__, lcore);

	free = 0;
	while (refcnt_stop_workers == 0) {
		if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
			free++;
			rte_pktmbuf_free(mp);
		}
	}

	refcnt_lcore[lcore] += free;
	printf("%s finished at lcore %u, "
	       "number of freed mbufs: %u\n",
	       __func__, lcore, free);
	return 0;
}

static void
test_refcnt_iter(unsigned int lcore, unsigned int iter,
		 struct rte_mempool *refcnt_pool,
		 struct rte_ring *refcnt_mbuf_ring)
{
	uint16_t ref;
	unsigned i, n, tref, wn;
	struct rte_mbuf *m;

	tref = 0;

	/* For each mbuf in the pool:
	 * - allocate mbuf,
	 * - increment its reference up to N+1,
	 * - enqueue it N times into the ring for worker cores to free.
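	 *
	 * The loop body exercises two equivalent ways of taking the extra
	 * references: either bump the refcnt by ref in a single update and
	 * then enqueue ref times, or interleave single-step updates with the
	 * enqueues; either way the workers end up holding ref references.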
	 */
	for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
	     i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
	     i++) {
		ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL);
		tref += ref;
		if ((ref & 1) != 0) {
			rte_pktmbuf_refcnt_update(m, ref);
			while (ref-- != 0)
				rte_ring_enqueue(refcnt_mbuf_ring, m);
		} else {
			while (ref-- != 0) {
				rte_pktmbuf_refcnt_update(m, 1);
				rte_ring_enqueue(refcnt_mbuf_ring, m);
			}
		}
		rte_pktmbuf_free(m);
	}

	if (i != n)
		rte_panic("(lcore=%u, iter=%u): was able to allocate only "
			  "%u from %u mbufs\n", lcore, iter, i, n);

	/* wait until worker lcores have consumed all mbufs */
	while (!rte_ring_empty(refcnt_mbuf_ring))
		;

	/* check that all mbufs are back into mempool by now */
	for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) {
		if ((i = rte_mempool_avail_count(refcnt_pool)) == n) {
			refcnt_lcore[lcore] += tref;
			printf("%s(lcore=%u, iter=%u) completed, "
			       "%u references processed\n",
			       __func__, lcore, iter, tref);
			return;
		}
		rte_delay_ms(100);
	}

	rte_panic("(lcore=%u, iter=%u): after %us only "
		  "%u of %u mbufs left free\n", lcore, iter, wn, i, n);
}

static int
test_refcnt_main(struct rte_mempool *refcnt_pool,
		 struct rte_ring *refcnt_mbuf_ring)
{
	unsigned i, lcore;

	lcore = rte_lcore_id();
	printf("%s started at lcore %u\n", __func__, lcore);

	for (i = 0; i != REFCNT_MAX_ITER; i++)
		test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);

	refcnt_stop_workers = 1;
	rte_wmb();

	printf("%s finished at lcore %u\n", __func__, lcore);
	return 0;
}

#endif

static int
test_refcnt_mbuf(void)
{
#ifdef RTE_MBUF_REFCNT_ATOMIC
	unsigned int main_lcore, worker, tref;
	int ret = -1;
	struct rte_mempool *refcnt_pool = NULL;
	struct rte_ring *refcnt_mbuf_ring = NULL;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for test_refcnt_mbuf, expecting at least 2\n");
		return TEST_SKIPPED;
	}

	printf("starting %s, at %u lcores\n", __func__, rte_lcore_count());

	/* create refcnt pool & ring if they don't exist */

	refcnt_pool = rte_pktmbuf_pool_create(MAKE_STRING(refcnt_pool),
					      REFCNT_MBUF_NUM, 0, 0, 0,
					      SOCKET_ID_ANY);
	if (refcnt_pool == NULL) {
		printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
		       __func__);
		return -1;
	}

	refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
			rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
			RING_F_SP_ENQ);
	if (refcnt_mbuf_ring == NULL) {
		printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
		       "\n", __func__);
		goto err;
	}

	refcnt_stop_workers = 0;
	memset(refcnt_lcore, 0, sizeof(refcnt_lcore));

	rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN);

	test_refcnt_main(refcnt_pool, refcnt_mbuf_ring);

	rte_eal_mp_wait_lcore();

	/* check that we processed all references */
	tref = 0;
	main_lcore = rte_get_main_lcore();

	RTE_LCORE_FOREACH_WORKER(worker)
		tref += refcnt_lcore[worker];

	if (tref != refcnt_lcore[main_lcore])
		rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
			  tref, refcnt_lcore[main_lcore]);

	rte_mempool_dump(stdout, refcnt_pool);
	rte_ring_dump(stdout, refcnt_mbuf_ring);

	ret = 0;

err:
	rte_mempool_free(refcnt_pool);
	rte_ring_free(refcnt_mbuf_ring);
	return ret;
#else
	return 0;
#endif
}

#ifdef RTE_EXEC_ENV_WINDOWS
static int
test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
{
	RTE_SET_USED(pktmbuf_pool);
	return TEST_SKIPPED;
}
#else

#include <unistd.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/wait.h>

/* use fork() to test mbuf errors panic */
static int
verify_mbuf_check_panics(struct rte_mbuf *buf)
{
	int pid;
	int status;

	pid = fork();

	if (pid == 0) {
		struct rlimit rl;

		/* No need to generate a coredump when panicking. */
		rl.rlim_cur = rl.rlim_max = 0;
		setrlimit(RLIMIT_CORE, &rl);
		rte_mbuf_sanity_check(buf, 1); /* should panic */
		exit(0);  /* return normally if it doesn't panic */
	} else if (pid < 0) {
		printf("Fork Failed\n");
		return -1;
	}
	wait(&status);
	if (status == 0)
		return -1;

	return 0;
}

static int
test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *buf;
	struct rte_mbuf badbuf;

	printf("Checking rte_mbuf_sanity_check for failure conditions\n");

	/* get a good mbuf to use to make copies */
	buf = rte_pktmbuf_alloc(pktmbuf_pool);
	if (buf == NULL)
		return -1;

	printf("Checking good mbuf initially\n");
	if (verify_mbuf_check_panics(buf) != -1)
		return -1;

	printf("Now checking for error conditions\n");

	if (verify_mbuf_check_panics(NULL)) {
		printf("Error with NULL mbuf test\n");
		return -1;
	}

	badbuf = *buf;
	badbuf.pool = NULL;
	if (verify_mbuf_check_panics(&badbuf)) {
		printf("Error with bad-pool mbuf test\n");
		return -1;
	}

	badbuf = *buf;
	badbuf.buf_iova = 0;
	if (verify_mbuf_check_panics(&badbuf)) {
		printf("Error with bad-physaddr mbuf test\n");
		return -1;
	}

	badbuf = *buf;
	badbuf.buf_addr = NULL;
	if (verify_mbuf_check_panics(&badbuf)) {
		printf("Error with bad-addr mbuf test\n");
		return -1;
	}

	badbuf = *buf;
	badbuf.refcnt = 0;
	if (verify_mbuf_check_panics(&badbuf)) {
		printf("Error with bad-refcnt(0) mbuf test\n");
		return -1;
	}

	badbuf = *buf;
	badbuf.refcnt = UINT16_MAX;
	if (verify_mbuf_check_panics(&badbuf)) {
		printf("Error with bad-refcnt(MAX) mbuf test\n");
		return -1;
	}

	return 0;
}

#endif /* !RTE_EXEC_ENV_WINDOWS */

static int
test_mbuf_linearize(struct rte_mempool *pktmbuf_pool, int pkt_len,
		    int nb_segs)
{
	struct rte_mbuf *m = NULL, *mbuf = NULL;
	uint8_t *data;
	int data_len = 0;
	int remain;
	int seg, seg_len;
	int i;

	if (pkt_len < 1) {
		printf("Packet size must be 1 or more (is %d)\n", pkt_len);
		return -1;
	}

	if (nb_segs < 1) {
		printf("Number of segments must be 1 or more (is %d)\n",
		       nb_segs);
		return -1;
	}

	seg_len = pkt_len / nb_segs;
	if (seg_len == 0)
		seg_len = 1;

	remain = pkt_len;

	/* Create chained source mbuf and fill it with generated data */
	for (seg = 0; remain > 0; seg++) {

		m = rte_pktmbuf_alloc(pktmbuf_pool);
		if (m == NULL) {
			printf("Cannot create segment for source mbuf\n");
			goto fail;
		}

		/* Make sure the tailroom is zeroed */
		memset(rte_pktmbuf_mtod(m, uint8_t *), 0,
		       rte_pktmbuf_tailroom(m));

		data_len = remain;
		if (data_len > seg_len)
			data_len = seg_len;

		data = (uint8_t *)rte_pktmbuf_append(m, data_len);
		if (data == NULL) {
			printf("Cannot append %d bytes to the mbuf\n",
			       data_len);
			goto fail;
		}

		for (i = 0; i < data_len; i++)
			data[i] = (seg * seg_len + i) % 0x0ff;

		if (seg == 0)
			mbuf = m;
		else
			rte_pktmbuf_chain(mbuf, m);

		remain -= data_len;
	}

	/* Coalesce the chained data into the first segment */
	if (rte_pktmbuf_linearize(mbuf)) {
		printf("Mbuf linearization failed\n");
		goto fail;
	}

	if (!rte_pktmbuf_is_contiguous(mbuf)) {
		printf("Source buffer should be contiguous after "
		       "linearization\n");
		goto fail;
	}

	data = rte_pktmbuf_mtod(mbuf, uint8_t *);

	for (i = 0; i < pkt_len; i++)
		if (data[i] != (i % 0x0ff)) {
			printf("Incorrect data in linearized mbuf\n");
			goto fail;
		}

	rte_pktmbuf_free(mbuf);
	return 0;

fail:
	rte_pktmbuf_free(mbuf);
	return -1;
}

static int
test_mbuf_linearize_check(struct rte_mempool *pktmbuf_pool)
{
	struct test_mbuf_array {
		int size;
		int nb_segs;
	} mbuf_array[] = {
		{ 128, 1 },
		{ 64, 64 },
		{ 512, 10 },
		{ 250, 11 },
		{ 123, 8 },
	};
	unsigned int i;

	printf("Test mbuf linearize API\n");

	for (i = 0; i < RTE_DIM(mbuf_array); i++)
		if (test_mbuf_linearize(pktmbuf_pool, mbuf_array[i].size,
					mbuf_array[i].nb_segs)) {
			printf("Test failed for %d, %d\n", mbuf_array[i].size,
			       mbuf_array[i].nb_segs);
			return -1;
		}

	return 0;
}

/*
 * Helper function for test_tx_offload
 */
static inline void
set_tx_offload(struct rte_mbuf *mb, uint64_t il2, uint64_t il3, uint64_t il4,
	       uint64_t tso, uint64_t ol3, uint64_t ol2)
{
	mb->l2_len = il2;
	mb->l3_len = il3;
	mb->l4_len = il4;
	mb->tso_segsz = tso;
	mb->outer_l3_len = ol3;
	mb->outer_l2_len = ol2;
}

static int
test_tx_offload(void)
{
	struct rte_mbuf *mb;
	uint64_t tm, v1, v2;
	size_t sz;
	uint32_t i;

	static volatile struct {
		uint16_t l2;
		uint16_t l3;
		uint16_t l4;
		uint16_t tso;
	} txof;

	const uint32_t num = 0x10000;

	txof.l2 = rte_rand() % (1 << RTE_MBUF_L2_LEN_BITS);
	txof.l3 = rte_rand() % (1 << RTE_MBUF_L3_LEN_BITS);
	txof.l4 = rte_rand() % (1 << RTE_MBUF_L4_LEN_BITS);
	txof.tso = rte_rand() % (1 << RTE_MBUF_TSO_SEGSZ_BITS);

	printf("%s started, tx_offload = {\n"
	       "\tl2_len=%#hx,\n"
	       "\tl3_len=%#hx,\n"
	       "\tl4_len=%#hx,\n"
	       "\ttso_segsz=%#hx,\n"
	       "\touter_l3_len=%#x,\n"
	       "\touter_l2_len=%#x,\n"
	       "};\n",
	       __func__,
	       txof.l2, txof.l3, txof.l4, txof.tso, txof.l3, txof.l2);

	sz = sizeof(*mb) * num;
	mb = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (mb == NULL) {
		printf("%s failed, out of memory\n", __func__);
		return -ENOMEM;
	}

	memset(mb, 0, sz);
	tm = rte_rdtsc_precise();

	for (i = 0; i != num; i++)
		set_tx_offload(mb + i, txof.l2, txof.l3, txof.l4,
			       txof.tso, txof.l3, txof.l2);

	tm = rte_rdtsc_precise() - tm;
	printf("%s set tx_offload by bit-fields: %u iterations, %"
	       PRIu64 " cycles, %#Lf cycles/iter\n",
	       __func__, num, tm, (long double)tm / num);

	v1 = mb[rte_rand() % num].tx_offload;

	memset(mb, 0, sz);
	tm = rte_rdtsc_precise();

	for (i = 0; i != num; i++)
		mb[i].tx_offload = rte_mbuf_tx_offload(txof.l2, txof.l3,
				txof.l4, txof.tso, txof.l3, txof.l2, 0);

	tm = rte_rdtsc_precise() - tm;
	printf("%s set raw tx_offload: %u iterations, %"
	       PRIu64 " cycles, %#Lf cycles/iter\n",
	       __func__, num, tm, (long double)tm / num);

	v2 = mb[rte_rand() % num].tx_offload;

	rte_free(mb);

	printf("%s finished\n"
	       "expected tx_offload value: 0x%" PRIx64 ";\n"
	       "rte_mbuf_tx_offload value: 0x%" PRIx64 ";\n",
	       __func__, v1, v2);

	return (v1 == v2) ? 0 : -EINVAL;
}

static int
test_get_rx_ol_flag_list(void)
{
	int len = 6, ret = 0;
	char buf[256] = "";
	int buflen = 0;

	/* Test case to check with null buffer */
	ret = rte_get_rx_ol_flag_list(0, NULL, 0);
	if (ret != -1)
		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);

	/* Test case to check with zero buffer len */
	ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_L4_CKSUM_MASK, buf, 0);
	if (ret != -1)
		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);

	buflen = strlen(buf);
	if (buflen != 0)
		GOTO_FAIL("%s buffer should be empty, received = %d\n",
			  __func__, buflen);

	/* Test case to check with reduced buffer len */
	ret = rte_get_rx_ol_flag_list(0, buf, len);
	if (ret != -1)
		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);

	buflen = strlen(buf);
	if (buflen != (len - 1))
		GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
			  "received = %d\n", __func__,
			  (len - 1), buflen);

	/* Test case to check with zero mask value */
	ret = rte_get_rx_ol_flag_list(0, buf, sizeof(buf));
	if (ret != 0)
		GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);

	buflen = strlen(buf);
	if (buflen == 0)
		GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
			  "non-zero, buffer should not be empty");

	/* Test case to check with valid mask value */
	ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_SEC_OFFLOAD, buf,
				      sizeof(buf));
	if (ret != 0)
		GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);

	buflen = strlen(buf);
	if (buflen == 0)
		GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
			  "non-zero, buffer should not be empty");

	return 0;
fail:
	return -1;
}

static int
test_get_tx_ol_flag_list(void)
{
	int len = 6, ret = 0;
	char buf[256] = "";
	int buflen = 0;

	/* Test case to check with null buffer */
	ret = rte_get_tx_ol_flag_list(0, NULL, 0);
	if (ret != -1)
		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);

	/* Test case to check with zero buffer len */
	ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_IP_CKSUM, buf, 0);
	if (ret != -1)
		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);

	buflen = strlen(buf);
	if (buflen != 0) {
		GOTO_FAIL("%s buffer should be empty, received = %d\n",
			  __func__, buflen);
	}

	/* Test case to check with reduced buffer len */
	ret = rte_get_tx_ol_flag_list(0, buf, len);
	if (ret != -1)
		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);

	buflen = strlen(buf);
	if (buflen != (len - 1))
		GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
			  "received = %d\n", __func__,
			  (len - 1), buflen);

	/* Test case to check with zero mask value */
	ret = rte_get_tx_ol_flag_list(0, buf, sizeof(buf));
	if (ret != 0)
		GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);

	buflen = strlen(buf);
	if (buflen == 0)
		GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
			  "non-zero, buffer should not be empty");

	/* Test case to check with valid mask value */
	ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_UDP_CKSUM, buf,
				      sizeof(buf));
	if (ret != 0)
		GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);

	buflen = strlen(buf);
	if (buflen == 0)
		GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
			  "non-zero, buffer should not be empty");

	return 0;
fail:
	return -1;

}

struct flag_name {
	uint64_t flag;
	const char *name;
};

static int
test_get_rx_ol_flag_name(void)
{
	uint16_t i;
	const char *flag_str = NULL;
	const struct flag_name rx_flags[] = {
		VAL_NAME(RTE_MBUF_F_RX_VLAN),
		VAL_NAME(RTE_MBUF_F_RX_RSS_HASH),
		VAL_NAME(RTE_MBUF_F_RX_FDIR),
		VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_BAD),
		VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_GOOD),
		VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_NONE),
		VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_BAD),
		VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_GOOD),
		VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_NONE),
		VAL_NAME(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD),
		VAL_NAME(RTE_MBUF_F_RX_VLAN_STRIPPED),
		VAL_NAME(RTE_MBUF_F_RX_IEEE1588_PTP),
		VAL_NAME(RTE_MBUF_F_RX_IEEE1588_TMST),
		VAL_NAME(RTE_MBUF_F_RX_FDIR_ID),
		VAL_NAME(RTE_MBUF_F_RX_FDIR_FLX),
		VAL_NAME(RTE_MBUF_F_RX_QINQ_STRIPPED),
		VAL_NAME(RTE_MBUF_F_RX_LRO),
		VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD),
		VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED),
		VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD),
		VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD),
		VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID),
	};

	/* Test case to check with valid flag */
	for (i = 0; i < RTE_DIM(rx_flags); i++) {
		flag_str = rte_get_rx_ol_flag_name(rx_flags[i].flag);
		if (flag_str == NULL)
			GOTO_FAIL("%s: Expected flagname = %s; received null\n",
				  __func__, rx_flags[i].name);
		if (strcmp(flag_str, rx_flags[i].name) != 0)
			GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
				  __func__, rx_flags[i].name, flag_str);
	}
	/* Test case to check with invalid flag */
	flag_str = rte_get_rx_ol_flag_name(0);
	if (flag_str != NULL) {
		GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
			  __func__, flag_str);
	}

	return 0;
fail:
	return -1;
}

static int
test_get_tx_ol_flag_name(void)
{
	uint16_t i;
	const char *flag_str = NULL;
	const struct flag_name tx_flags[] = {
		VAL_NAME(RTE_MBUF_F_TX_VLAN),
		VAL_NAME(RTE_MBUF_F_TX_IP_CKSUM),
		VAL_NAME(RTE_MBUF_F_TX_TCP_CKSUM),
		VAL_NAME(RTE_MBUF_F_TX_SCTP_CKSUM),
		VAL_NAME(RTE_MBUF_F_TX_UDP_CKSUM),
		VAL_NAME(RTE_MBUF_F_TX_IEEE1588_TMST),
		VAL_NAME(RTE_MBUF_F_TX_TCP_SEG),
		VAL_NAME(RTE_MBUF_F_TX_IPV4),
		VAL_NAME(RTE_MBUF_F_TX_IPV6),
		VAL_NAME(RTE_MBUF_F_TX_OUTER_IP_CKSUM),
		VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV4),
		VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV6),
		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN),
		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GRE),
		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IPIP),
		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GENEVE),
		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_MPLSINUDP),
		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE),
		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IP),
		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_UDP),
		VAL_NAME(RTE_MBUF_F_TX_QINQ),
		VAL_NAME(RTE_MBUF_F_TX_MACSEC),
		VAL_NAME(RTE_MBUF_F_TX_SEC_OFFLOAD),
		VAL_NAME(RTE_MBUF_F_TX_UDP_SEG),
		VAL_NAME(RTE_MBUF_F_TX_OUTER_UDP_CKSUM),
	};

	/* Test case to check with valid flag */
	for (i = 0; i < RTE_DIM(tx_flags); i++) {
		flag_str = rte_get_tx_ol_flag_name(tx_flags[i].flag);
		if (flag_str == NULL)
			GOTO_FAIL("%s: Expected flagname = %s; received null\n",
				  __func__, tx_flags[i].name);
		if (strcmp(flag_str, tx_flags[i].name) != 0)
			GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
				  __func__, tx_flags[i].name, flag_str);
	}
	/* Test case to check with invalid flag */
	flag_str = rte_get_tx_ol_flag_name(0);
	if (flag_str != NULL) {
		GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
			  __func__, flag_str);
	}

	return 0;
fail:
	return -1;

}

static int
test_mbuf_validate_tx_offload(const char *test_name,
			      struct rte_mempool *pktmbuf_pool,
			      uint64_t ol_flags,
			      uint16_t segsize,
			      int expected_retval)
{
	struct rte_mbuf *m = NULL;
	int ret = 0;

	/* alloc a mbuf and do sanity check */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("%s: Bad packet length\n", __func__);
	rte_mbuf_sanity_check(m, 0);
	m->ol_flags = ol_flags;
	m->tso_segsz = segsize;
	ret = rte_validate_tx_offload(m);
	if (ret != expected_retval)
		GOTO_FAIL("%s(%s): expected ret val: %d; received: %d\n",
			  __func__, test_name, expected_retval, ret);
	rte_pktmbuf_free(m);
	m = NULL;
	return 0;
fail:
	if (m) {
		rte_pktmbuf_free(m);
		m = NULL;
	}
	return -1;
}

static int
test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
{
	/* test to validate tx offload flags */
	uint64_t ol_flags = 0;

	/* test to validate if IP checksum is counted only for IPV4 packet */
	/* set both IP checksum and IPV6 flags */
	ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
	ol_flags |= RTE_MBUF_F_TX_IPV6;
	if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET",
					  pktmbuf_pool,
					  ol_flags, 0, -EINVAL) < 0)
		GOTO_FAIL("%s failed: IP cksum is set incorrect.\n", __func__);
	/* resetting ol_flags for next testcase */
	ol_flags = 0;

	/* test to validate if IP type is set when required */
	ol_flags |= RTE_MBUF_F_TX_L4_MASK;
	if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
					  pktmbuf_pool,
					  ol_flags, 0, -EINVAL) < 0)
		GOTO_FAIL("%s failed: IP type is not set.\n", __func__);

	/* test if IP type is set when TCP SEG is on */
	ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
	if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
					  pktmbuf_pool,
					  ol_flags, 0, -EINVAL) < 0)
		GOTO_FAIL("%s failed: IP type is not set.\n", __func__);

	ol_flags = 0;
	/* test to confirm IP type (IPV4/IPV6) is set */
	ol_flags = RTE_MBUF_F_TX_L4_MASK;
	ol_flags |= RTE_MBUF_F_TX_IPV6;
	if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET",
					  pktmbuf_pool,
					  ol_flags, 0, 0) < 0)
		GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);

	ol_flags = 0;
	/* test to check TSO segment size is non-zero */
	ol_flags |= RTE_MBUF_F_TX_IPV4;
	ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
	/* set 0 tso segment size */
	if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ",
					  pktmbuf_pool,
					  ol_flags, 0, -EINVAL) < 0)
		GOTO_FAIL("%s failed: tso segment size is null.\n", __func__);

	/* retain IPV4 and RTE_MBUF_F_TX_TCP_SEG mask */
	/* set valid tso segment size but IP CKSUM not set */
	if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET",
					  pktmbuf_pool,
					  ol_flags, 512, -EINVAL) < 0)
		GOTO_FAIL("%s failed: IP CKSUM is not set.\n", __func__);

	/* test to validate if IP checksum is set for TSO capability */
	/* retain IPV4, TCP_SEG, tso_seg size */
	ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
	if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET",
					  pktmbuf_pool,
					  ol_flags, 512, 0) < 0)
		GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);

	/* test to confirm TSO for IPV6 type */
	ol_flags = 0;
	ol_flags |= RTE_MBUF_F_TX_IPV6;
	ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
	if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET",
					  pktmbuf_pool,
					  ol_flags, 512, 0) < 0)
		GOTO_FAIL("%s failed: TSO req not met.\n", __func__);

	ol_flags = 0;
	/* test if outer IP checksum set for non outer IPv4 packet */
	ol_flags |= RTE_MBUF_F_TX_IPV6;
	ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
	if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET",
					  pktmbuf_pool,
					  ol_flags, 512, -EINVAL) < 0)
		GOTO_FAIL("%s failed: Outer IP cksum set.\n", __func__);

	ol_flags = 0;
	/* test to confirm outer IP checksum is set for outer IPV4 packet */
	ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
	ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
	if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET",
					  pktmbuf_pool,
					  ol_flags, 512, 0) < 0)
		GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);

	ol_flags = 0;
	/* test to confirm if packets with no TX_OFFLOAD_MASK are skipped */
	if (test_mbuf_validate_tx_offload("MBUF_TEST_OL_MASK_NOT_SET",
					  pktmbuf_pool,
					  ol_flags, 512, 0) < 0)
		GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
	return 0;
fail:
	return -1;
}

/*
 * Test for allocating a bulk of mbufs
 * define an array with positive sizes for mbufs allocations.
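 *
 * Note: rte_pktmbuf_alloc_bulk() is all-or-nothing; when it fails, no mbufs
 * have been taken from the pool, so only the successful calls below need
 * to free what they received.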
 */
static int
test_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
{
	int ret = 0;
	unsigned int idx, loop;
	unsigned int alloc_counts[] = {
		0,
		MEMPOOL_CACHE_SIZE - 1,
		MEMPOOL_CACHE_SIZE + 1,
		MEMPOOL_CACHE_SIZE * 1.5,
		MEMPOOL_CACHE_SIZE * 2,
		MEMPOOL_CACHE_SIZE * 2 - 1,
		MEMPOOL_CACHE_SIZE * 2 + 1,
		MEMPOOL_CACHE_SIZE,
	};

	/* allocate a large array of mbuf pointers */
	struct rte_mbuf *mbufs[NB_MBUF] = { 0 };
	for (idx = 0; idx < RTE_DIM(alloc_counts); idx++) {
		ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
					     alloc_counts[idx]);
		if (ret == 0) {
			for (loop = 0; loop < alloc_counts[idx] &&
			     mbufs[loop] != NULL; loop++)
				rte_pktmbuf_free(mbufs[loop]);
		} else if (ret != 0) {
			printf("%s: Bulk alloc failed count(%u); ret val(%d)\n",
			       __func__, alloc_counts[idx], ret);
			return -1;
		}
	}
	return 0;
}

/*
 * Negative testing for allocating a bulk of mbufs
 */
static int
test_neg_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
{
	int ret = 0;
	unsigned int idx, loop;
	unsigned int neg_alloc_counts[] = {
		MEMPOOL_CACHE_SIZE - NB_MBUF,
		NB_MBUF + 1,
		NB_MBUF * 8,
		UINT_MAX
	};
	struct rte_mbuf *mbufs[NB_MBUF * 8] = { 0 };

	for (idx = 0; idx < RTE_DIM(neg_alloc_counts); idx++) {
		ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
					     neg_alloc_counts[idx]);
		if (ret == 0) {
			printf("%s: Bulk alloc must fail! count(%u); ret(%d)\n",
			       __func__, neg_alloc_counts[idx], ret);
			for (loop = 0; loop < neg_alloc_counts[idx] &&
			     mbufs[loop] != NULL; loop++)
				rte_pktmbuf_free(mbufs[loop]);
			return -1;
		}
	}
	return 0;
}

/*
 * Test to read mbuf packet using rte_pktmbuf_read
 */
static int
test_pktmbuf_read(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *m = NULL;
	char *data = NULL;
	const char *data_copy = NULL;
	int off;

	/* alloc a mbuf */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("%s: Bad packet length\n", __func__);
	rte_mbuf_sanity_check(m, 0);

	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
	if (data == NULL)
		GOTO_FAIL("%s: Cannot append data\n", __func__);
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("%s: Bad packet length\n", __func__);
	memset(data, 0xfe, MBUF_TEST_DATA_LEN2);

	/* read the data from mbuf */
	data_copy = rte_pktmbuf_read(m, 0, MBUF_TEST_DATA_LEN2, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading data!\n", __func__);
	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
		if (data_copy[off] != (char)0xfe)
			GOTO_FAIL("Data corrupted at offset %u", off);
	}
	rte_pktmbuf_free(m);
	m = NULL;

	return 0;
fail:
	if (m) {
		rte_pktmbuf_free(m);
		m = NULL;
	}
	return -1;
}

/*
 * Test to read mbuf packet data from offset
 */
static int
test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *m = NULL;
	struct rte_ether_hdr *hdr = NULL;
	char *data = NULL;
	const char *data_copy = NULL;
	unsigned int off;
	unsigned int hdr_len = sizeof(struct rte_ether_hdr);

	/* alloc a mbuf */
	m = rte_pktmbuf_alloc(pktmbuf_pool);

/*
 * Test to read mbuf packet data from offset
 */
static int
test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *m = NULL;
	struct rte_ether_hdr *hdr = NULL;
	char *data = NULL;
	const char *data_copy = NULL;
	unsigned int off;
	unsigned int hdr_len = sizeof(struct rte_ether_hdr);

	/* alloc a mbuf */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);

	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("%s: Bad packet length\n", __func__);
	rte_mbuf_sanity_check(m, 0);

	/* prepend an ethernet header */
	hdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(m, hdr_len);
	if (hdr == NULL)
		GOTO_FAIL("%s: Cannot prepend header\n", __func__);
	if (rte_pktmbuf_pkt_len(m) != hdr_len)
		GOTO_FAIL("%s: Bad pkt length", __func__);
	if (rte_pktmbuf_data_len(m) != hdr_len)
		GOTO_FAIL("%s: Bad data length", __func__);
	memset(hdr, 0xde, hdr_len);

	/* read mbuf header info from 0 offset */
	data_copy = rte_pktmbuf_read(m, 0, hdr_len, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading header!\n", __func__);
	for (off = 0; off < hdr_len; off++) {
		if (data_copy[off] != (char)0xde)
			GOTO_FAIL("Header info corrupted at offset %u", off);
	}

	/* append sample data after ethernet header */
	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
	if (data == NULL)
		GOTO_FAIL("%s: Cannot append data\n", __func__);
	if (rte_pktmbuf_pkt_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("%s: Bad packet length\n", __func__);
	if (rte_pktmbuf_data_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("%s: Bad data length\n", __func__);
	memset(data, 0xcc, MBUF_TEST_DATA_LEN2);

	/* read mbuf data after header info */
	data_copy = rte_pktmbuf_read(m, hdr_len, MBUF_TEST_DATA_LEN2, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading header data!\n", __func__);
	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
		if (data_copy[off] != (char)0xcc)
			GOTO_FAIL("Data corrupted at offset %u", off);
	}

	/* partial reading of mbuf data */
	data_copy = rte_pktmbuf_read(m, hdr_len + 5, MBUF_TEST_DATA_LEN2 - 5,
			NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
	for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
		if (data_copy[off] != (char)0xcc)
			GOTO_FAIL("Data corrupted at offset %u", off);
	}

	/* read length greater than mbuf data_len */
	if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_data_len(m) + 1,
				NULL) != NULL)
		GOTO_FAIL("%s: Requested len is larger than mbuf data len!\n",
				__func__);

	/* read length greater than mbuf pkt_len */
	if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_pkt_len(m) + 1,
				NULL) != NULL)
		GOTO_FAIL("%s: Requested len is larger than mbuf pkt len!\n",
				__func__);

	/* read data of zero len from valid offset */
	data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
		if (data_copy[off] != (char)0xcc)
			GOTO_FAIL("Data corrupted at offset %u", off);
	}

	/* read data of zero length from zero offset */
	data_copy = rte_pktmbuf_read(m, 0, 0, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
	/* check if the received address is the beginning of header info */
	if (hdr != (const struct rte_ether_hdr *)data_copy)
		GOTO_FAIL("%s: Corrupted data address!\n", __func__);

	/* read data of max length from valid offset */
	data_copy = rte_pktmbuf_read(m, hdr_len, UINT_MAX, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
	/* check if the received address is the beginning of data segment */
	if (data_copy != data)
		GOTO_FAIL("%s: Corrupted data address!\n", __func__);

	/* try to read from mbuf with max size offset */
	data_copy = rte_pktmbuf_read(m, UINT_MAX, 0, NULL);
	if (data_copy != NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);

	/* try to read from mbuf with max size offset and len */
	data_copy = rte_pktmbuf_read(m, UINT_MAX, UINT_MAX, NULL);
	if (data_copy != NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);

	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));

	rte_pktmbuf_free(m);
	m = NULL;

	return 0;
fail:
	if (m) {
		rte_pktmbuf_free(m);
		m = NULL;
	}
	return -1;
}

struct test_case {
	unsigned int seg_count;
	unsigned int flags;
	uint32_t read_off;
	uint32_t read_len;
	unsigned int seg_lengths[MBUF_MAX_SEG];
};

/* create a mbuf with different sized segments
 * and fill with data [0x00 0x01 0x02 ...]
 */
static struct rte_mbuf *
create_packet(struct rte_mempool *pktmbuf_pool,
		struct test_case *test_data)
{
	uint16_t i, ret, seg, seg_len = 0;
	uint32_t last_index = 0;
	unsigned int seg_lengths[MBUF_MAX_SEG];
	unsigned int hdr_len;
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *pkt_seg = NULL;
	char *hdr = NULL;
	char *data = NULL;

	memcpy(seg_lengths, test_data->seg_lengths,
			sizeof(unsigned int) * test_data->seg_count);
	for (seg = 0; seg < test_data->seg_count; seg++) {
		hdr_len = 0;
		seg_len = seg_lengths[seg];
		pkt_seg = rte_pktmbuf_alloc(pktmbuf_pool);
		if (pkt_seg == NULL)
			GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
		if (rte_pktmbuf_pkt_len(pkt_seg) != 0)
			GOTO_FAIL("%s: Bad packet length\n", __func__);
		rte_mbuf_sanity_check(pkt_seg, 0);
		/* Add header only for the first segment */
		if (test_data->flags == MBUF_HEADER && seg == 0) {
			hdr_len = sizeof(struct rte_ether_hdr);
			/* prepend a header and fill with dummy data */
			hdr = (char *)rte_pktmbuf_prepend(pkt_seg, hdr_len);
			if (hdr == NULL)
				GOTO_FAIL("%s: Cannot prepend header\n",
						__func__);
			if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len)
				GOTO_FAIL("%s: Bad pkt length", __func__);
			if (rte_pktmbuf_data_len(pkt_seg) != hdr_len)
				GOTO_FAIL("%s: Bad data length", __func__);
			for (i = 0; i < hdr_len; i++)
				hdr[i] = (last_index + i) % 0xffff;
			last_index += hdr_len;
		}
		/* skip appending segment with 0 length */
		if (seg_len == 0)
			continue;
		data = rte_pktmbuf_append(pkt_seg, seg_len);
		if (data == NULL)
			GOTO_FAIL("%s: Cannot append data segment\n",
					__func__);
		if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len + seg_len)
			GOTO_FAIL("%s: Bad packet segment length: %d\n",
					__func__, rte_pktmbuf_pkt_len(pkt_seg));
		if (rte_pktmbuf_data_len(pkt_seg) != hdr_len + seg_len)
			GOTO_FAIL("%s: Bad data length\n", __func__);
		for (i = 0; i < seg_len; i++)
			data[i] = (last_index + i) % 0xffff;
		/* to fill continuous data from one seg to another */
		last_index += i;
		/* create chained mbufs */
		if (seg == 0)
			pkt = pkt_seg;
		else {
			ret = rte_pktmbuf_chain(pkt, pkt_seg);
			if (ret != 0)
				GOTO_FAIL("%s:FAIL: Chained mbuf creation %d\n",
						__func__, ret);
		}

		pkt_seg = pkt_seg->next;
	}
	return pkt;
fail:
	if (pkt != NULL) {
		rte_pktmbuf_free(pkt);
		pkt = NULL;
	}
	if (pkt_seg != NULL) {
		rte_pktmbuf_free(pkt_seg);
		pkt_seg = NULL;
	}
	return NULL;
}
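
/*
 * create_packet() fills the chain with one continuous byte pattern
 * (0x00, 0x01, 0x02, ...) that runs across segment boundaries, so the
 * chained-read test below can validate any (offset, length) window simply
 * by recomputing the expected byte from the absolute offset.
 */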

static int
test_pktmbuf_read_from_chain(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *m;
	struct test_case test_cases[] = {
		{
			.seg_lengths = { 100, 100, 100 },
			.seg_count = 3,
			.flags = MBUF_NO_HEADER,
			.read_off = 0,
			.read_len = 300
		},
		{
			.seg_lengths = { 100, 125, 150 },
			.seg_count = 3,
			.flags = MBUF_NO_HEADER,
			.read_off = 99,
			.read_len = 201
		},
		{
			.seg_lengths = { 100, 100 },
			.seg_count = 2,
			.flags = MBUF_NO_HEADER,
			.read_off = 0,
			.read_len = 100
		},
		{
			.seg_lengths = { 100, 200 },
			.seg_count = 2,
			.flags = MBUF_HEADER,
			.read_off = sizeof(struct rte_ether_hdr),
			.read_len = 150
		},
		{
			.seg_lengths = { 1000, 100 },
			.seg_count = 2,
			.flags = MBUF_NO_HEADER,
			.read_off = 0,
			.read_len = 1000
		},
		{
			.seg_lengths = { 1024, 0, 100 },
			.seg_count = 3,
			.flags = MBUF_NO_HEADER,
			.read_off = 100,
			.read_len = 1001
		},
		{
			.seg_lengths = { 1000, 1, 1000 },
			.seg_count = 3,
			.flags = MBUF_NO_HEADER,
			.read_off = 1000,
			.read_len = 2
		},
		{
			.seg_lengths = { MBUF_TEST_DATA_LEN,
					MBUF_TEST_DATA_LEN2,
					MBUF_TEST_DATA_LEN3, 800, 10 },
			.seg_count = 5,
			.flags = MBUF_NEG_TEST_READ,
			.read_off = 1000,
			.read_len = MBUF_DATA_SIZE
		},
	};

	uint32_t i, pos;
	const char *data_copy = NULL;
	char data_buf[MBUF_DATA_SIZE];

	memset(data_buf, 0, MBUF_DATA_SIZE);

	for (i = 0; i < RTE_DIM(test_cases); i++) {
		m = create_packet(pktmbuf_pool, &test_cases[i]);
		if (m == NULL)
			GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);

		data_copy = rte_pktmbuf_read(m, test_cases[i].read_off,
				test_cases[i].read_len, data_buf);
		if (test_cases[i].flags == MBUF_NEG_TEST_READ) {
			if (data_copy != NULL)
				GOTO_FAIL("%s: mbuf data read should fail!\n",
						__func__);
			else {
				rte_pktmbuf_free(m);
				m = NULL;
				continue;
			}
		}
		if (data_copy == NULL)
			GOTO_FAIL("%s: Error in reading packet data!\n",
					__func__);
		for (pos = 0; pos < test_cases[i].read_len; pos++) {
			if (data_copy[pos] !=
					(char)((test_cases[i].read_off + pos)
						% 0xffff))
				GOTO_FAIL("Data corrupted at offset %u is %2X",
						pos, data_copy[pos]);
		}
		rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
		rte_pktmbuf_free(m);
		m = NULL;
	}
	return 0;

fail:
	if (m != NULL) {
		rte_pktmbuf_free(m);
		m = NULL;
	}
	return -1;
}

/* Define a free callback function to be used for external buffer */
static void
ext_buf_free_callback_fn(void *addr, void *opaque)
{
	bool *freed = opaque;

	if (addr == NULL) {
		printf("External buffer address is invalid\n");
		return;
	}
	rte_free(addr);
	*freed = true;
	printf("External buffer freed via callback\n");
}
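
/*
 * Note: rte_pktmbuf_ext_shinfo_init_helper() carves the
 * rte_mbuf_ext_shared_info structure out of the tail of the supplied buffer
 * and updates *buf_len to the remaining usable length, which is why the
 * allocation below reserves EXT_BUF_TEST_DATA_LEN plus
 * sizeof(struct rte_mbuf_ext_shared_info). The registered free callback is
 * expected to run only once the external buffer refcount drops to zero.
 */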

/*
 * Test to initialize shared data in external buffer before attaching to mbuf
 * - Allocate mbuf with no data.
 * - Allocate an external buffer large enough to also accommodate
 *   rte_mbuf_ext_shared_info.
 * - Invoke pktmbuf_ext_shinfo_init_helper to initialize shared data.
 * - Invoke rte_pktmbuf_attach_extbuf to attach external buffer to the mbuf.
 * - Clone another mbuf and attach the same external buffer to it.
 * - Invoke rte_pktmbuf_detach_extbuf to detach the external buffer from mbuf.
 */
static int
test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *m = NULL;
	struct rte_mbuf *clone = NULL;
	struct rte_mbuf_ext_shared_info *ret_shinfo = NULL;
	rte_iova_t buf_iova;
	void *ext_buf_addr = NULL;
	uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
		sizeof(struct rte_mbuf_ext_shared_info);
	bool freed = false;

	/* alloc a mbuf */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("%s: Bad packet length\n", __func__);
	rte_mbuf_sanity_check(m, 0);

	ext_buf_addr = rte_malloc("External buffer", buf_len,
			RTE_CACHE_LINE_SIZE);
	if (ext_buf_addr == NULL)
		GOTO_FAIL("%s: External buffer allocation failed\n", __func__);

	ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len,
			ext_buf_free_callback_fn, &freed);
	if (ret_shinfo == NULL)
		GOTO_FAIL("%s: Shared info initialization failed!\n", __func__);

	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
		GOTO_FAIL("%s: External refcount is not 1\n", __func__);

	if (rte_mbuf_refcnt_read(m) != 1)
		GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);

	buf_iova = rte_mem_virt2iova(ext_buf_addr);
	rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
			ret_shinfo);
	if (m->ol_flags != RTE_MBUF_F_EXTERNAL)
		GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
				__func__);

	/* allocate one more mbuf */
	clone = rte_pktmbuf_clone(m, pktmbuf_pool);
	if (clone == NULL)
		GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
	if (rte_pktmbuf_pkt_len(clone) != 0)
		GOTO_FAIL("%s: Bad packet length\n", __func__);

	/* attach the same external buffer to the cloned mbuf */
	rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
			ret_shinfo);
	if (clone->ol_flags != RTE_MBUF_F_EXTERNAL)
		GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
				__func__);

	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
		GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
	if (freed)
		GOTO_FAIL("%s: extbuf should not be freed\n", __func__);

	/* test to manually update ext_buf_ref_cnt from 2 to 3 */
	rte_mbuf_ext_refcnt_update(ret_shinfo, 1);
	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 3)
		GOTO_FAIL("%s: Update ext_buf ref_cnt failed\n", __func__);
	if (freed)
		GOTO_FAIL("%s: extbuf should not be freed\n", __func__);

	/* reset the ext_refcnt before freeing the external buffer */
	rte_mbuf_ext_refcnt_set(ret_shinfo, 2);
	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
		GOTO_FAIL("%s: set ext_buf ref_cnt failed\n", __func__);
	if (freed)
		GOTO_FAIL("%s: extbuf should not be freed\n", __func__);

	/* detach the external buffer from mbufs */
	rte_pktmbuf_detach_extbuf(m);
	/* check if ref cnt is decremented */
	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
		GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
	if (freed)
		GOTO_FAIL("%s: extbuf should not be freed\n", __func__);

	rte_pktmbuf_detach_extbuf(clone);
	if (!freed)
		GOTO_FAIL("%s: extbuf should be freed\n", __func__);
	freed = false;

	rte_pktmbuf_free(m);
	m = NULL;
	rte_pktmbuf_free(clone);
	clone = NULL;

	return 0;

fail:
	if (m) {
		rte_pktmbuf_free(m);
		m = NULL;
	}
	if (clone) {
		rte_pktmbuf_free(clone);
		clone = NULL;
	}
	if (ext_buf_addr != NULL) {
		rte_free(ext_buf_addr);
		ext_buf_addr = NULL;
	}
	return -1;
}

/*
 * Test the mbuf pool with pinned external data buffers
 * - Allocate memory zone for external buffer
 * - Create the mbuf pool with pinned external buffer
 * - Check the created pool with relevant mbuf pool unit tests
 */
static int
test_pktmbuf_ext_pinned_buffer(struct rte_mempool *std_pool)
{
	struct rte_pktmbuf_extmem ext_mem;
	struct rte_mempool *pinned_pool = NULL;
	const struct rte_memzone *mz = NULL;

	printf("Test mbuf pool with external pinned data buffers\n");

	/* Allocate memzone for the external data buffer */
	mz = rte_memzone_reserve("pinned_pool",
			NB_MBUF * MBUF_DATA_SIZE,
			SOCKET_ID_ANY,
			RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL)
		GOTO_FAIL("%s: Memzone allocation failed\n", __func__);

	/* Create the mbuf pool with pinned external data buffer */
	ext_mem.buf_ptr = mz->addr;
	ext_mem.buf_iova = mz->iova;
	ext_mem.buf_len = mz->len;
	ext_mem.elt_size = MBUF_DATA_SIZE;

	pinned_pool = rte_pktmbuf_pool_create_extbuf("test_pinned_pool",
			NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
			MBUF_DATA_SIZE, SOCKET_ID_ANY,
			&ext_mem, 1);
	if (pinned_pool == NULL)
		GOTO_FAIL("%s: Mbuf pool with pinned external"
				" buffer creation failed\n", __func__);

	/* test multiple mbuf alloc */
	if (test_pktmbuf_pool(pinned_pool) < 0)
		GOTO_FAIL("%s: test_mbuf_pool(pinned) failed\n",
				__func__);

	/* do it another time to check that all mbufs were freed */
	if (test_pktmbuf_pool(pinned_pool) < 0)
		GOTO_FAIL("%s: test_mbuf_pool(pinned) failed (2)\n",
				__func__);

	/* test that the data pointer on a packet mbuf is set properly */
	if (test_pktmbuf_pool_ptr(pinned_pool) < 0)
		GOTO_FAIL("%s: test_pktmbuf_pool_ptr(pinned) failed\n",
				__func__);

	/* test data manipulation in mbuf with non-ascii data */
	if (test_pktmbuf_with_non_ascii_data(pinned_pool) < 0)
		GOTO_FAIL("%s: test_pktmbuf_with_non_ascii_data(pinned)"
				" failed\n", __func__);

	/* test free pktmbuf segment one by one */
	if (test_pktmbuf_free_segment(pinned_pool) < 0)
		GOTO_FAIL("%s: test_pktmbuf_free_segment(pinned) failed\n",
				__func__);

	if (testclone_testupdate_testdetach(pinned_pool, std_pool) < 0)
		GOTO_FAIL("%s: testclone_and_testupdate(pinned) failed\n",
				__func__);

	if (test_pktmbuf_copy(pinned_pool, std_pool) < 0)
		GOTO_FAIL("%s: test_pktmbuf_copy(pinned) failed\n",
				__func__);

	if (test_failing_mbuf_sanity_check(pinned_pool) < 0)
		GOTO_FAIL("%s: test_failing_mbuf_sanity_check(pinned)"
				" failed\n", __func__);

	if (test_mbuf_linearize_check(pinned_pool) < 0)
		GOTO_FAIL("%s: test_mbuf_linearize_check(pinned) failed\n",
				__func__);

	/* test for allocating a bulk of mbufs with various sizes */
	if (test_pktmbuf_alloc_bulk(pinned_pool) < 0)
		GOTO_FAIL("%s: test_rte_pktmbuf_alloc_bulk(pinned) failed\n",
				__func__);

	/* negative test for allocating a bulk of mbufs with various sizes */
	if (test_neg_pktmbuf_alloc_bulk(pinned_pool) < 0)
		GOTO_FAIL("%s: test_neg_rte_pktmbuf_alloc_bulk(pinned)"
				" failed\n", __func__);

	/* test to read mbuf packet */
	if (test_pktmbuf_read(pinned_pool) < 0)
		GOTO_FAIL("%s: test_rte_pktmbuf_read(pinned) failed\n",
				__func__);

	/* test to read mbuf packet from offset */
	if (test_pktmbuf_read_from_offset(pinned_pool) < 0)
		GOTO_FAIL("%s: test_rte_pktmbuf_read_from_offset(pinned)"
				" failed\n", __func__);

	/* test to read data from chain of mbufs with data segments */
	if (test_pktmbuf_read_from_chain(pinned_pool) < 0)
		GOTO_FAIL("%s: test_rte_pktmbuf_read_from_chain(pinned)"
				" failed\n", __func__);

	RTE_SET_USED(std_pool);
	rte_mempool_free(pinned_pool);
	rte_memzone_free(mz);
	return 0;

fail:
	rte_mempool_free(pinned_pool);
	rte_memzone_free(mz);
	return -1;
}
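
/*
 * Dynamic fields and flags are registered by name, and registering the same
 * descriptor twice is expected to return the already-reserved offset or bit,
 * which the test below uses to exercise the lookup path. A registered field
 * is then accessed through the RTE_MBUF_DYNFIELD() macro, e.g. (sketch using
 * the "test-dynfield" offset obtained below):
 *
 *	*RTE_MBUF_DYNFIELD(m, offset, uint8_t *) = 42;
 */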

static int
test_mbuf_dyn(struct rte_mempool *pktmbuf_pool)
{
	const struct rte_mbuf_dynfield dynfield = {
		.name = "test-dynfield",
		.size = sizeof(uint8_t),
		.align = __alignof__(uint8_t),
		.flags = 0,
	};
	const struct rte_mbuf_dynfield dynfield2 = {
		.name = "test-dynfield2",
		.size = sizeof(uint16_t),
		.align = __alignof__(uint16_t),
		.flags = 0,
	};
	const struct rte_mbuf_dynfield dynfield3 = {
		.name = "test-dynfield3",
		.size = sizeof(uint8_t),
		.align = __alignof__(uint8_t),
		.flags = 0,
	};
	const struct rte_mbuf_dynfield dynfield_fail_big = {
		.name = "test-dynfield-fail-big",
		.size = 256,
		.align = 1,
		.flags = 0,
	};
	const struct rte_mbuf_dynfield dynfield_fail_align = {
		.name = "test-dynfield-fail-align",
		.size = 1,
		.align = 3,
		.flags = 0,
	};
	const struct rte_mbuf_dynfield dynfield_fail_flag = {
		.name = "test-dynfield",
		.size = sizeof(uint8_t),
		.align = __alignof__(uint8_t),
		.flags = 1,
	};
	const struct rte_mbuf_dynflag dynflag_fail_flag = {
		.name = "test-dynflag",
		.flags = 1,
	};
	const struct rte_mbuf_dynflag dynflag = {
		.name = "test-dynflag",
		.flags = 0,
	};
	const struct rte_mbuf_dynflag dynflag2 = {
		.name = "test-dynflag2",
		.flags = 0,
	};
	const struct rte_mbuf_dynflag dynflag3 = {
		.name = "test-dynflag3",
		.flags = 0,
	};
	struct rte_mbuf *m = NULL;
	int offset, offset2, offset3;
	int flag, flag2, flag3;
	int ret;

	printf("Test mbuf dynamic fields and flags\n");
	rte_mbuf_dyn_dump(stdout);

	offset = rte_mbuf_dynfield_register(&dynfield);
	if (offset == -1)
		GOTO_FAIL("failed to register dynamic field, offset=%d: %s",
				offset, strerror(errno));

	ret = rte_mbuf_dynfield_register(&dynfield);
	if (ret != offset)
		GOTO_FAIL("failed to lookup dynamic field, ret=%d: %s",
				ret, strerror(errno));

	offset2 = rte_mbuf_dynfield_register(&dynfield2);
	if (offset2 == -1 || offset2 == offset || (offset2 & 1))
		GOTO_FAIL("failed to register dynamic field 2, offset2=%d: %s",
				offset2, strerror(errno));

	offset3 = rte_mbuf_dynfield_register_offset(&dynfield3,
			offsetof(struct rte_mbuf, dynfield1[1]));
	if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) {
		if (rte_errno == EBUSY)
			printf("mbuf test error skipped: dynfield is busy\n");
		else
			GOTO_FAIL("failed to register dynamic field 3, offset="
					"%d: %s", offset3, strerror(errno));
	}

	printf("dynfield: offset=%d, offset2=%d, offset3=%d\n",
			offset, offset2, offset3);

	ret = rte_mbuf_dynfield_register(&dynfield_fail_big);
	if (ret != -1)
		GOTO_FAIL("dynamic field creation should fail (too big)");

	ret = rte_mbuf_dynfield_register(&dynfield_fail_align);
	if (ret != -1)
		GOTO_FAIL("dynamic field creation should fail (bad alignment)");

	ret = rte_mbuf_dynfield_register_offset(&dynfield_fail_align,
			offsetof(struct rte_mbuf, ol_flags));
	if (ret != -1)
		GOTO_FAIL("dynamic field creation should fail (not avail)");

	ret = rte_mbuf_dynfield_register(&dynfield_fail_flag);
	if (ret != -1)
		GOTO_FAIL("dynamic field creation should fail (invalid flag)");

	ret = rte_mbuf_dynflag_register(&dynflag_fail_flag);
	if (ret != -1)
		GOTO_FAIL("dynamic flag creation should fail (invalid flag)");

	flag = rte_mbuf_dynflag_register(&dynflag);
	if (flag == -1)
		GOTO_FAIL("failed to register dynamic flag, flag=%d: %s",
				flag, strerror(errno));

	ret = rte_mbuf_dynflag_register(&dynflag);
	if (ret != flag)
		GOTO_FAIL("failed to lookup dynamic flag, ret=%d: %s",
				ret, strerror(errno));

	flag2 = rte_mbuf_dynflag_register(&dynflag2);
	if (flag2 == -1 || flag2 == flag)
		GOTO_FAIL("failed to register dynamic flag 2, flag2=%d: %s",
				flag2, strerror(errno));

	flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3,
			rte_bsf64(RTE_MBUF_F_LAST_FREE));
	if (flag3 != rte_bsf64(RTE_MBUF_F_LAST_FREE))
		GOTO_FAIL("failed to register dynamic flag 3, flag3=%d: %s",
				flag3, strerror(errno));

	printf("dynflag: flag=%d, flag2=%d, flag3=%d\n", flag, flag2, flag3);

	/* set, get dynamic field */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");

	*RTE_MBUF_DYNFIELD(m, offset, uint8_t *) = 1;
	if (*RTE_MBUF_DYNFIELD(m, offset, uint8_t *) != 1)
		GOTO_FAIL("failed to read dynamic field");
	*RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) = 1000;
	if (*RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) != 1000)
		GOTO_FAIL("failed to read dynamic field");

	/* set a dynamic flag */
	m->ol_flags |= (1ULL << flag);

	rte_mbuf_dyn_dump(stdout);
	rte_pktmbuf_free(m);
	return 0;
fail:
	rte_pktmbuf_free(m);
	return -1;
}
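
/*
 * The test below deliberately re-allocates with rte_mbuf_raw_alloc(), which
 * does not reinitialize the mbuf contents: the nb_segs and next values it
 * observes are whatever rte_pktmbuf_free() left behind, which is exactly the
 * reset behaviour being verified.
 */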

/* check that m->nb_segs and m->next are reset on mbuf free */
static int
test_nb_segs_and_next_reset(void)
{
	struct rte_mbuf *m0 = NULL, *m1 = NULL, *m2 = NULL;
	struct rte_mempool *pool = NULL;

	pool = rte_pktmbuf_pool_create("test_mbuf_reset",
			3, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
	if (pool == NULL)
		GOTO_FAIL("Failed to create mbuf pool");

	/* alloc mbufs */
	m0 = rte_pktmbuf_alloc(pool);
	m1 = rte_pktmbuf_alloc(pool);
	m2 = rte_pktmbuf_alloc(pool);
	if (m0 == NULL || m1 == NULL || m2 == NULL)
		GOTO_FAIL("Failed to allocate mbuf");

	/* append data in all of them */
	if (rte_pktmbuf_append(m0, 500) == NULL ||
			rte_pktmbuf_append(m1, 500) == NULL ||
			rte_pktmbuf_append(m2, 500) == NULL)
		GOTO_FAIL("Failed to append data in mbuf");

	/* chain them in one mbuf m0 */
	rte_pktmbuf_chain(m1, m2);
	rte_pktmbuf_chain(m0, m1);
	if (m0->nb_segs != 3 || m0->next != m1 || m1->next != m2 ||
			m2->next != NULL) {
		m1 = m2 = NULL;
		GOTO_FAIL("Failed to chain mbufs");
	}

	/* split m0 chain in two, between m1 and m2 */
	m0->nb_segs = 2;
	m1->next = NULL;
	m2->nb_segs = 1;

	/* free the 2 mbuf chains m0 and m2 */
	rte_pktmbuf_free(m0);
	rte_pktmbuf_free(m2);

	/* realloc the 3 mbufs */
	m0 = rte_mbuf_raw_alloc(pool);
	m1 = rte_mbuf_raw_alloc(pool);
	m2 = rte_mbuf_raw_alloc(pool);
	if (m0 == NULL || m1 == NULL || m2 == NULL)
		GOTO_FAIL("Failed to reallocate mbuf");

	/* ensure that m->next and m->nb_segs are reset on newly allocated mbufs */
	if (m0->nb_segs != 1 || m0->next != NULL ||
			m1->nb_segs != 1 || m1->next != NULL ||
			m2->nb_segs != 1 || m2->next != NULL)
		GOTO_FAIL("nb_segs or next was not reset properly");

	return 0;

fail:
	rte_mempool_free(pool);
	return -1;
}

static int
test_mbuf(void)
{
	int ret = -1;
	struct rte_mempool *pktmbuf_pool = NULL;
	struct rte_mempool *pktmbuf_pool2 = NULL;

	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != RTE_CACHE_LINE_MIN_SIZE * 2);

	/* create pktmbuf pool if it does not exist */
	pktmbuf_pool = rte_pktmbuf_pool_create("test_pktmbuf_pool",
			NB_MBUF, MEMPOOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
			SOCKET_ID_ANY);
	if (pktmbuf_pool == NULL) {
		printf("cannot allocate mbuf pool\n");
		goto err;
	}

	/* test registration of dynamic fields and flags */
	if (test_mbuf_dyn(pktmbuf_pool) < 0) {
		printf("mbuf dynflag test failed\n");
		goto err;
	}

	/* create a specific pktmbuf pool with a priv_size != 0 and no data
	 * room size
	 */
	pktmbuf_pool2 = rte_pktmbuf_pool_create("test_pktmbuf_pool2",
			NB_MBUF, MEMPOOL_CACHE_SIZE, MBUF2_PRIV_SIZE, 0,
			SOCKET_ID_ANY);
	if (pktmbuf_pool2 == NULL) {
		printf("cannot allocate mbuf pool\n");
		goto err;
	}

	/* test multiple mbuf alloc */
	if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
		printf("test_mbuf_pool() failed\n");
		goto err;
	}

	/* do it another time to check that all mbufs were freed */
	if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
		printf("test_mbuf_pool() failed (2)\n");
		goto err;
	}

	/* test bulk mbuf alloc and free */
	if (test_pktmbuf_pool_bulk() < 0) {
		printf("test_pktmbuf_pool_bulk() failed\n");
		goto err;
	}

	/* test that the pointer to the data on a packet mbuf is set properly */
	if (test_pktmbuf_pool_ptr(pktmbuf_pool) < 0) {
		printf("test_pktmbuf_pool_ptr() failed\n");
		goto err;
	}

	/* test data manipulation in mbuf */
	if (test_one_pktmbuf(pktmbuf_pool) < 0) {
		printf("test_one_mbuf() failed\n");
		goto err;
	}

	/*
	 * do it another time, to check that allocation reinitializes
	 * the mbuf correctly
	 */
	if (test_one_pktmbuf(pktmbuf_pool) < 0) {
		printf("test_one_mbuf() failed (2)\n");
		goto err;
	}

	if (test_pktmbuf_with_non_ascii_data(pktmbuf_pool) < 0) {
		printf("test_pktmbuf_with_non_ascii_data() failed\n");
		goto err;
	}

	/* test free pktmbuf segment one by one */
	if (test_pktmbuf_free_segment(pktmbuf_pool) < 0) {
		printf("test_pktmbuf_free_segment() failed.\n");
		goto err;
	}

	if (testclone_testupdate_testdetach(pktmbuf_pool, pktmbuf_pool) < 0) {
		printf("testclone_and_testupdate() failed\n");
		goto err;
	}

	if (test_pktmbuf_copy(pktmbuf_pool, pktmbuf_pool) < 0) {
		printf("test_pktmbuf_copy() failed\n");
		goto err;
	}

	if (test_attach_from_different_pool(pktmbuf_pool, pktmbuf_pool2) < 0) {
		printf("test_attach_from_different_pool() failed\n");
		goto err;
	}

	if (test_refcnt_mbuf() < 0) {
		printf("test_refcnt_mbuf() failed\n");
		goto err;
	}

	if (test_failing_mbuf_sanity_check(pktmbuf_pool) < 0) {
		printf("test_failing_mbuf_sanity_check() failed\n");
		goto err;
	}

	if (test_mbuf_linearize_check(pktmbuf_pool) < 0) {
		printf("test_mbuf_linearize_check() failed\n");
		goto err;
	}

	if (test_tx_offload() < 0) {
		printf("test_tx_offload() failed\n");
		goto err;
	}

	if (test_get_rx_ol_flag_list() < 0) {
		printf("test_rte_get_rx_ol_flag_list() failed\n");
		goto err;
	}

	if (test_get_tx_ol_flag_list() < 0) {
		printf("test_rte_get_tx_ol_flag_list() failed\n");
		goto err;
	}

	if (test_get_rx_ol_flag_name() < 0) {
		printf("test_rte_get_rx_ol_flag_name() failed\n");
		goto err;
	}

	if (test_get_tx_ol_flag_name() < 0) {
		printf("test_rte_get_tx_ol_flag_name() failed\n");
		goto err;
	}

	if (test_mbuf_validate_tx_offload_one(pktmbuf_pool) < 0) {
		printf("test_mbuf_validate_tx_offload_one() failed\n");
		goto err;
	}

	/* test for allocating a bulk of mbufs with various sizes */
	if (test_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
		printf("test_rte_pktmbuf_alloc_bulk() failed\n");
		goto err;
	}

	/* negative test for allocating a bulk of mbufs with various sizes */
	if (test_neg_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
		printf("test_neg_rte_pktmbuf_alloc_bulk() failed\n");
		goto err;
	}

	/* test to read mbuf packet */
	if (test_pktmbuf_read(pktmbuf_pool) < 0) {
		printf("test_rte_pktmbuf_read() failed\n");
		goto err;
	}

	/* test to read mbuf packet from offset */
	if (test_pktmbuf_read_from_offset(pktmbuf_pool) < 0) {
		printf("test_rte_pktmbuf_read_from_offset() failed\n");
		goto err;
	}

	/* test to read data from chain of mbufs with data segments */
	if (test_pktmbuf_read_from_chain(pktmbuf_pool) < 0) {
		printf("test_rte_pktmbuf_read_from_chain() failed\n");
		goto err;
	}

	/* test to initialize shared info. at the end of external buffer */
	if (test_pktmbuf_ext_shinfo_init_helper(pktmbuf_pool) < 0) {
		printf("test_pktmbuf_ext_shinfo_init_helper() failed\n");
		goto err;
	}

	/* test the mbuf pool with pinned external data buffers */
	if (test_pktmbuf_ext_pinned_buffer(pktmbuf_pool) < 0) {
		printf("test_pktmbuf_ext_pinned_buffer() failed\n");
		goto err;
	}

	/* test reset of m->nb_segs and m->next on mbuf free */
	if (test_nb_segs_and_next_reset() < 0) {
		printf("test_nb_segs_and_next_reset() failed\n");
		goto err;
	}

	ret = 0;
err:
	rte_mempool_free(pktmbuf_pool);
	rte_mempool_free(pktmbuf_pool2);
	return ret;
}
#undef GOTO_FAIL

REGISTER_TEST_COMMAND(mbuf_autotest, test_mbuf);