1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 */
4
5 #include <string.h>
6 #include <stdarg.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <stdint.h>
10 #include <inttypes.h>
11 #include <errno.h>
12 #include <sys/queue.h>
13
14 #include <rte_common.h>
15 #include <rte_errno.h>
16 #include <rte_debug.h>
17 #include <rte_log.h>
18 #include <rte_memory.h>
19 #include <rte_memcpy.h>
20 #include <rte_launch.h>
21 #include <rte_eal.h>
22 #include <rte_per_lcore.h>
23 #include <rte_lcore.h>
24 #include <rte_atomic.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_ring.h>
27 #include <rte_mempool.h>
28 #include <rte_mbuf.h>
29 #include <rte_random.h>
30 #include <rte_cycles.h>
31 #include <rte_malloc.h>
32 #include <rte_ether.h>
33 #include <rte_ip.h>
34 #include <rte_tcp.h>
35 #include <rte_mbuf_dyn.h>
36
37 #include "test.h"
38
39 #define MEMPOOL_CACHE_SIZE 32
40 #define MBUF_DATA_SIZE 2048
41 #define NB_MBUF 128
42 #define MBUF_TEST_DATA_LEN 1464
43 #define MBUF_TEST_DATA_LEN2 50
44 #define MBUF_TEST_DATA_LEN3 256
45 #define MBUF_TEST_HDR1_LEN 20
46 #define MBUF_TEST_HDR2_LEN 30
47 #define MBUF_TEST_ALL_HDRS_LEN (MBUF_TEST_HDR1_LEN+MBUF_TEST_HDR2_LEN)
48 #define MBUF_TEST_SEG_SIZE 64
49 #define MBUF_TEST_BURST 8
50 #define EXT_BUF_TEST_DATA_LEN 1024
51 #define MBUF_MAX_SEG 16
52 #define MBUF_NO_HEADER 0
53 #define MBUF_HEADER 1
54 #define MBUF_NEG_TEST_READ 2
55 #define VAL_NAME(flag) { flag, #flag }
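/* The VAL_NAME() macro above expands a flag into a { value, "name" } pair
 * used by the flag/name tables in the rte_get_*_ol_flag_name() tests below.
 */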
56
57 /* chain length in bulk test */
58 #define CHAIN_LEN 16
59
60 /* size of private data for mbuf in pktmbuf_pool2 */
61 #define MBUF2_PRIV_SIZE 128
62
63 #define REFCNT_MAX_ITER 64
64 #define REFCNT_MAX_TIMEOUT 10
65 #define REFCNT_MAX_REF (RTE_MAX_LCORE)
66 #define REFCNT_MBUF_NUM 64
67 #define REFCNT_RING_SIZE (REFCNT_MBUF_NUM * REFCNT_MAX_REF)
68
69 #define MAGIC_DATA 0x42424242
70
71 #define MAKE_STRING(x) # x
72
73 #ifdef RTE_MBUF_REFCNT_ATOMIC
74
75 static volatile uint32_t refcnt_stop_workers;
76 static unsigned refcnt_lcore[RTE_MAX_LCORE];
77
78 #endif
79
80 /*
81 * MBUF
82 * ====
83 *
84 * #. Allocate a mbuf pool.
85 *
86 * - The pool contains NB_MBUF elements, where each mbuf has a data
87 * room of MBUF_DATA_SIZE bytes.
88 *
89 * #. Test multiple allocations of mbufs from this pool.
90 *
91 * - Allocate NB_MBUF and store pointers in a table.
92 * - If an allocation fails, return an error.
93 * - Free all these mbufs.
94 * - Repeat the same test to check that mbufs were freed correctly.
95 *
96 * #. Test data manipulation in pktmbuf.
97 *
98 * - Alloc an mbuf.
99 * - Append data using rte_pktmbuf_append().
100 * - Test for error in rte_pktmbuf_append() when len is too large.
101 * - Trim data at the end of mbuf using rte_pktmbuf_trim().
102 * - Test for error in rte_pktmbuf_trim() when len is too large.
103 * - Prepend a header using rte_pktmbuf_prepend().
104 * - Test for error in rte_pktmbuf_prepend() when len is too large.
105 * - Remove data at the beginning of mbuf using rte_pktmbuf_adj().
106 * - Test for error in rte_pktmbuf_adj() when len is too large.
107 * - Check that appended data is not corrupt.
108 * - Free the mbuf.
109 * - Between all these tests, check data_len and pkt_len, and
110 * that the mbuf is contiguous.
111 * - Repeat the test to check that allocation operations
112 * reinitialize the mbuf correctly.
113 *
114 * #. Test packet cloning
115 * - Clone a mbuf and verify the data
116 * - Clone the cloned mbuf and verify the data
117 * - Attach a mbuf to another that does not have the same priv_size.
118 */
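
/*
 * A minimal, self-contained sketch of the append/trim/prepend/adj sequence
 * described above (illustration only: this helper is not called by the test
 * suite and its error handling is reduced to early returns).
 */
static __rte_unused int
example_pktmbuf_manipulation(struct rte_mempool *pool)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
	char *data, *hdr;

	if (m == NULL)
		return -1;

	/* reserve MBUF_TEST_DATA_LEN bytes at the tail and fill them */
	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
	if (data == NULL)
		goto err;
	memset(data, 0x66, MBUF_TEST_DATA_LEN);

	/* give back MBUF_TEST_DATA_LEN2 bytes from the tail */
	if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
		goto err;

	/* make room for a header in front of the data ... */
	hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
	if (hdr == NULL)
		goto err;
	memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);

	/* ... and strip it again */
	if (rte_pktmbuf_adj(m, MBUF_TEST_HDR1_LEN) == NULL)
		goto err;

	rte_pktmbuf_free(m);
	return 0;
err:
	rte_pktmbuf_free(m);
	return -1;
}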
119
120 #define GOTO_FAIL(str, ...) do { \
121 printf("mbuf test FAILED (l.%d): <" str ">\n", \
122 __LINE__, ##__VA_ARGS__); \
123 goto fail; \
124 } while(0)
125
126 /*
127 * test data manipulation in mbuf with non-ASCII data
128 */
129 static int
130 test_pktmbuf_with_non_ascii_data(struct rte_mempool *pktmbuf_pool)
131 {
132 struct rte_mbuf *m = NULL;
133 char *data;
134
135 m = rte_pktmbuf_alloc(pktmbuf_pool);
136 if (m == NULL)
137 GOTO_FAIL("Cannot allocate mbuf");
138 if (rte_pktmbuf_pkt_len(m) != 0)
139 GOTO_FAIL("Bad length");
140
141 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
142 if (data == NULL)
143 GOTO_FAIL("Cannot append data");
144 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
145 GOTO_FAIL("Bad pkt length");
146 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
147 GOTO_FAIL("Bad data length");
148 memset(data, 0xff, rte_pktmbuf_pkt_len(m));
149 if (!rte_pktmbuf_is_contiguous(m))
150 GOTO_FAIL("Buffer should be continuous");
151 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
152
153 rte_pktmbuf_free(m);
154
155 return 0;
156
157 fail:
158 if(m) {
159 rte_pktmbuf_free(m);
160 }
161 return -1;
162 }
163
164 /*
165 * test data manipulation in mbuf
166 */
167 static int
168 test_one_pktmbuf(struct rte_mempool *pktmbuf_pool)
169 {
170 struct rte_mbuf *m = NULL;
171 char *data, *data2, *hdr;
172 unsigned i;
173
174 printf("Test pktmbuf API\n");
175
176 /* alloc a mbuf */
177
178 m = rte_pktmbuf_alloc(pktmbuf_pool);
179 if (m == NULL)
180 GOTO_FAIL("Cannot allocate mbuf");
181 if (rte_pktmbuf_pkt_len(m) != 0)
182 GOTO_FAIL("Bad length");
183
184 rte_pktmbuf_dump(stdout, m, 0);
185
186 /* append data */
187
188 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
189 if (data == NULL)
190 GOTO_FAIL("Cannot append data");
191 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
192 GOTO_FAIL("Bad pkt length");
193 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
194 GOTO_FAIL("Bad data length");
195 memset(data, 0x66, rte_pktmbuf_pkt_len(m));
196 if (!rte_pktmbuf_is_contiguous(m))
197 GOTO_FAIL("Buffer should be continuous");
198 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
199 rte_pktmbuf_dump(stdout, m, 2*MBUF_TEST_DATA_LEN);
200
201 /* this append should fail */
202
203 data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
204 if (data2 != NULL)
205 GOTO_FAIL("Append should not succeed");
206
207 /* append some more data */
208
209 data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
210 if (data2 == NULL)
211 GOTO_FAIL("Cannot append data");
212 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
213 GOTO_FAIL("Bad pkt length");
214 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
215 GOTO_FAIL("Bad data length");
216 if (!rte_pktmbuf_is_contiguous(m))
217 GOTO_FAIL("Buffer should be continuous");
218
219 /* trim data at the end of mbuf */
220
221 if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
222 GOTO_FAIL("Cannot trim data");
223 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
224 GOTO_FAIL("Bad pkt length");
225 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
226 GOTO_FAIL("Bad data length");
227 if (!rte_pktmbuf_is_contiguous(m))
228 GOTO_FAIL("Buffer should be continuous");
229
230 /* this trim should fail */
231
232 if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
233 GOTO_FAIL("trim should not succeed");
234
235 /* prepend one header */
236
237 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
238 if (hdr == NULL)
239 GOTO_FAIL("Cannot prepend");
240 if (data - hdr != MBUF_TEST_HDR1_LEN)
241 GOTO_FAIL("Prepend failed");
242 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
243 GOTO_FAIL("Bad pkt length");
244 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
245 GOTO_FAIL("Bad data length");
246 if (!rte_pktmbuf_is_contiguous(m))
247 GOTO_FAIL("Buffer should be continuous");
248 memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);
249
250 /* prepend another header */
251
252 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN);
253 if (hdr == NULL)
254 GOTO_FAIL("Cannot prepend");
255 if (data - hdr != MBUF_TEST_ALL_HDRS_LEN)
256 GOTO_FAIL("Prepend failed");
257 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
258 GOTO_FAIL("Bad pkt length");
259 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
260 GOTO_FAIL("Bad data length");
261 if (!rte_pktmbuf_is_contiguous(m))
262 GOTO_FAIL("Buffer should be continuous");
263 memset(hdr, 0x55, MBUF_TEST_HDR2_LEN);
264
265 rte_mbuf_sanity_check(m, 1);
266 rte_mbuf_sanity_check(m, 0);
267 rte_pktmbuf_dump(stdout, m, 0);
268
269 /* this prepend should fail */
270
271 hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1));
272 if (hdr != NULL)
273 GOTO_FAIL("prepend should not succeed");
274
275 /* remove data at beginning of mbuf (adj) */
276
277 if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN))
278 GOTO_FAIL("rte_pktmbuf_adj failed");
279 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
280 GOTO_FAIL("Bad pkt length");
281 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
282 GOTO_FAIL("Bad data length");
283 if (!rte_pktmbuf_is_contiguous(m))
284 GOTO_FAIL("Buffer should be continuous");
285
286 /* this adj should fail */
287
288 if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL)
289 GOTO_FAIL("rte_pktmbuf_adj should not succeed");
290
291 /* check data */
292
293 if (!rte_pktmbuf_is_contiguous(m))
294 GOTO_FAIL("Buffer should be continuous");
295
296 for (i=0; i<MBUF_TEST_DATA_LEN; i++) {
297 if (data[i] != 0x66)
298 GOTO_FAIL("Data corrupted at offset %u", i);
299 }
300
301 /* free mbuf */
302
303 rte_pktmbuf_free(m);
304 m = NULL;
305 return 0;
306
307 fail:
308 if (m)
309 rte_pktmbuf_free(m);
310 return -1;
311 }
312
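/*
 * For mbufs with a pinned external buffer the data reference count lives in
 * the shared info (shinfo), not in the mbuf itself, so the helper below
 * reads whichever counter applies to the mbuf being checked.
 */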
313 static uint16_t
314 testclone_refcnt_read(struct rte_mbuf *m)
315 {
316 return RTE_MBUF_HAS_PINNED_EXTBUF(m) ?
317 rte_mbuf_ext_refcnt_read(m->shinfo) :
318 rte_mbuf_refcnt_read(m);
319 }
320
321 static int
322 testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool,
323 struct rte_mempool *clone_pool)
324 {
325 struct rte_mbuf *m = NULL;
326 struct rte_mbuf *clone = NULL;
327 struct rte_mbuf *clone2 = NULL;
328 unaligned_uint32_t *data;
329
330 /* alloc a mbuf */
331 m = rte_pktmbuf_alloc(pktmbuf_pool);
332 if (m == NULL)
333 GOTO_FAIL("ooops not allocating mbuf");
334
335 if (rte_pktmbuf_pkt_len(m) != 0)
336 GOTO_FAIL("Bad length");
337
338 rte_pktmbuf_append(m, sizeof(uint32_t));
339 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
340 *data = MAGIC_DATA;
341
342 /* clone the allocated mbuf */
343 clone = rte_pktmbuf_clone(m, clone_pool);
344 if (clone == NULL)
345 GOTO_FAIL("cannot clone data\n");
346
347 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
348 if (*data != MAGIC_DATA)
349 GOTO_FAIL("invalid data in clone\n");
350
351 if (testclone_refcnt_read(m) != 2)
352 GOTO_FAIL("invalid refcnt in m\n");
353
354 /* free the clone */
355 rte_pktmbuf_free(clone);
356 clone = NULL;
357
358 /* same test with a chained mbuf */
359 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
360 if (m->next == NULL)
361 GOTO_FAIL("Next Pkt Null\n");
362 m->nb_segs = 2;
363
364 rte_pktmbuf_append(m->next, sizeof(uint32_t));
365 m->pkt_len = 2 * sizeof(uint32_t);
366
367 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
368 *data = MAGIC_DATA;
369
370 clone = rte_pktmbuf_clone(m, clone_pool);
371 if (clone == NULL)
372 GOTO_FAIL("cannot clone data\n");
373
374 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
375 if (*data != MAGIC_DATA)
376 GOTO_FAIL("invalid data in clone\n");
377
378 data = rte_pktmbuf_mtod(clone->next, unaligned_uint32_t *);
379 if (*data != MAGIC_DATA)
380 GOTO_FAIL("invalid data in clone->next\n");
381
382 if (testclone_refcnt_read(m) != 2)
383 GOTO_FAIL("invalid refcnt in m\n");
384
385 if (testclone_refcnt_read(m->next) != 2)
386 GOTO_FAIL("invalid refcnt in m->next\n");
387
388 /* try to clone the clone */
389
390 clone2 = rte_pktmbuf_clone(clone, clone_pool);
391 if (clone2 == NULL)
392 GOTO_FAIL("cannot clone the clone\n");
393
394 data = rte_pktmbuf_mtod(clone2, unaligned_uint32_t *);
395 if (*data != MAGIC_DATA)
396 GOTO_FAIL("invalid data in clone2\n");
397
398 data = rte_pktmbuf_mtod(clone2->next, unaligned_uint32_t *);
399 if (*data != MAGIC_DATA)
400 GOTO_FAIL("invalid data in clone2->next\n");
401
402 if (testclone_refcnt_read(m) != 3)
403 GOTO_FAIL("invalid refcnt in m\n");
404
405 if (testclone_refcnt_read(m->next) != 3)
406 GOTO_FAIL("invalid refcnt in m->next\n");
407
408 /* free mbuf */
409 rte_pktmbuf_free(m);
410 rte_pktmbuf_free(clone);
411 rte_pktmbuf_free(clone2);
412
413 m = NULL;
414 clone = NULL;
415 clone2 = NULL;
416 printf("%s ok\n", __func__);
417 return 0;
418
419 fail:
420 if (m)
421 rte_pktmbuf_free(m);
422 if (clone)
423 rte_pktmbuf_free(clone);
424 if (clone2)
425 rte_pktmbuf_free(clone2);
426 return -1;
427 }
428
429 static int
430 test_pktmbuf_copy(struct rte_mempool *pktmbuf_pool,
431 struct rte_mempool *clone_pool)
432 {
433 struct rte_mbuf *m = NULL;
434 struct rte_mbuf *copy = NULL;
435 struct rte_mbuf *copy2 = NULL;
436 struct rte_mbuf *clone = NULL;
437 unaligned_uint32_t *data;
438
439 /* alloc a mbuf */
440 m = rte_pktmbuf_alloc(pktmbuf_pool);
441 if (m == NULL)
442 GOTO_FAIL("ooops not allocating mbuf");
443
444 if (rte_pktmbuf_pkt_len(m) != 0)
445 GOTO_FAIL("Bad length");
446
447 rte_pktmbuf_append(m, sizeof(uint32_t));
448 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
449 *data = MAGIC_DATA;
450
451 /* copy the allocated mbuf */
452 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
453 if (copy == NULL)
454 GOTO_FAIL("cannot copy data\n");
455
456 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
457 GOTO_FAIL("copy length incorrect\n");
458
459 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
460 GOTO_FAIL("copy data length incorrect\n");
461
462 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
463 if (*data != MAGIC_DATA)
464 GOTO_FAIL("invalid data in copy\n");
465
466 /* free the copy */
467 rte_pktmbuf_free(copy);
468 copy = NULL;
469
470 /* same test with a cloned mbuf */
471 clone = rte_pktmbuf_clone(m, clone_pool);
472 if (clone == NULL)
473 GOTO_FAIL("cannot clone data\n");
474
475 if ((!RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
476 !RTE_MBUF_CLONED(clone)) ||
477 (RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
478 !RTE_MBUF_HAS_EXTBUF(clone)))
479 GOTO_FAIL("clone did not give a cloned mbuf\n");
480
481 copy = rte_pktmbuf_copy(clone, pktmbuf_pool, 0, UINT32_MAX);
482 if (copy == NULL)
483 GOTO_FAIL("cannot copy cloned mbuf\n");
484
485 if (RTE_MBUF_CLONED(copy))
486 GOTO_FAIL("copy of clone is cloned?\n");
487
488 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
489 GOTO_FAIL("copy clone length incorrect\n");
490
491 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
492 GOTO_FAIL("copy clone data length incorrect\n");
493
494 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
495 if (*data != MAGIC_DATA)
496 GOTO_FAIL("invalid data in clone copy\n");
497 rte_pktmbuf_free(clone);
498 rte_pktmbuf_free(copy);
499 copy = NULL;
500 clone = NULL;
501
502
503 /* same test with a chained mbuf */
504 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
505 if (m->next == NULL)
506 GOTO_FAIL("Next Pkt Null\n");
507 m->nb_segs = 2;
508
509 rte_pktmbuf_append(m->next, sizeof(uint32_t));
510 m->pkt_len = 2 * sizeof(uint32_t);
511 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
512 *data = MAGIC_DATA + 1;
513
514 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
515 if (copy == NULL)
516 GOTO_FAIL("cannot copy data\n");
517
518 if (rte_pktmbuf_pkt_len(copy) != 2 * sizeof(uint32_t))
519 GOTO_FAIL("chain copy length incorrect\n");
520
521 if (rte_pktmbuf_data_len(copy) != 2 * sizeof(uint32_t))
522 GOTO_FAIL("chain copy data length incorrect\n");
523
524 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
525 if (data[0] != MAGIC_DATA || data[1] != MAGIC_DATA + 1)
526 GOTO_FAIL("invalid data in copy\n");
527
528 rte_pktmbuf_free(copy2);
529
530 /* test offset copy */
531 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
532 sizeof(uint32_t), UINT32_MAX);
533 if (copy2 == NULL)
534 GOTO_FAIL("cannot copy the copy\n");
535
536 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
537 GOTO_FAIL("copy with offset, length incorrect\n");
538
539 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
540 GOTO_FAIL("copy with offset, data length incorrect\n");
541
542 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
543 if (data[0] != MAGIC_DATA + 1)
544 GOTO_FAIL("copy with offset, invalid data\n");
545
546 rte_pktmbuf_free(copy2);
547
548 /* test truncation copy */
549 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
550 0, sizeof(uint32_t));
551 if (copy2 == NULL)
552 GOTO_FAIL("cannot copy the copy\n");
553
554 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
555 GOTO_FAIL("copy with truncate, length incorrect\n");
556
557 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
558 GOTO_FAIL("copy with truncate, data length incorrect\n");
559
560 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
561 if (data[0] != MAGIC_DATA)
562 GOTO_FAIL("copy with truncate, invalid data\n");
563
564 /* free mbuf */
565 rte_pktmbuf_free(m);
566 rte_pktmbuf_free(copy);
567 rte_pktmbuf_free(copy2);
568
569 m = NULL;
570 copy = NULL;
571 copy2 = NULL;
572 printf("%s ok\n", __func__);
573 return 0;
574
575 fail:
576 if (m)
577 rte_pktmbuf_free(m);
578 if (copy)
579 rte_pktmbuf_free(copy);
580 if (copy2)
581 rte_pktmbuf_free(copy2);
582 return -1;
583 }
584
585 static int
586 test_attach_from_different_pool(struct rte_mempool *pktmbuf_pool,
587 struct rte_mempool *pktmbuf_pool2)
588 {
589 struct rte_mbuf *m = NULL;
590 struct rte_mbuf *clone = NULL;
591 struct rte_mbuf *clone2 = NULL;
592 char *data, *c_data, *c_data2;
593
594 /* alloc a mbuf */
595 m = rte_pktmbuf_alloc(pktmbuf_pool);
596 if (m == NULL)
597 GOTO_FAIL("cannot allocate mbuf");
598
599 if (rte_pktmbuf_pkt_len(m) != 0)
600 GOTO_FAIL("Bad length");
601
602 data = rte_pktmbuf_mtod(m, char *);
603
604 /* allocate a new mbuf from the second pool, and attach it to the first
605 * mbuf */
606 clone = rte_pktmbuf_alloc(pktmbuf_pool2);
607 if (clone == NULL)
608 GOTO_FAIL("cannot allocate mbuf from second pool\n");
609
610 /* check data room size and priv size, and erase priv */
611 if (rte_pktmbuf_data_room_size(clone->pool) != 0)
612 GOTO_FAIL("data room size should be 0\n");
613 if (rte_pktmbuf_priv_size(clone->pool) != MBUF2_PRIV_SIZE)
614 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE);
615 memset(clone + 1, 0, MBUF2_PRIV_SIZE);
616
617 /* save data pointer to compare it after detach() */
618 c_data = rte_pktmbuf_mtod(clone, char *);
619 if (c_data != (char *)clone + sizeof(*clone) + MBUF2_PRIV_SIZE)
620 GOTO_FAIL("bad data pointer in clone");
621 if (rte_pktmbuf_headroom(clone) != 0)
622 GOTO_FAIL("bad headroom in clone");
623
624 rte_pktmbuf_attach(clone, m);
625
626 if (rte_pktmbuf_mtod(clone, char *) != data)
627 GOTO_FAIL("clone was not attached properly\n");
628 if (rte_pktmbuf_headroom(clone) != RTE_PKTMBUF_HEADROOM)
629 GOTO_FAIL("bad headroom in clone after attach");
630 if (rte_mbuf_refcnt_read(m) != 2)
631 GOTO_FAIL("invalid refcnt in m\n");
632
633 /* allocate a new mbuf from the second pool, and attach it to the first
634 * cloned mbuf */
635 clone2 = rte_pktmbuf_alloc(pktmbuf_pool2);
636 if (clone2 == NULL)
637 GOTO_FAIL("cannot allocate clone2 from second pool\n");
638
639 /* check data room size and priv size, and erase priv */
640 if (rte_pktmbuf_data_room_size(clone2->pool) != 0)
641 GOTO_FAIL("data room size should be 0\n");
642 if (rte_pktmbuf_priv_size(clone2->pool) != MBUF2_PRIV_SIZE)
643 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE);
644 memset(clone2 + 1, 0, MBUF2_PRIV_SIZE);
645
646 /* save data pointer to compare it after detach() */
647 c_data2 = rte_pktmbuf_mtod(clone2, char *);
648 if (c_data2 != (char *)clone2 + sizeof(*clone2) + MBUF2_PRIV_SIZE)
649 GOTO_FAIL("bad data pointer in clone2");
650 if (rte_pktmbuf_headroom(clone2) != 0)
651 GOTO_FAIL("bad headroom in clone2");
652
653 rte_pktmbuf_attach(clone2, clone);
654
655 if (rte_pktmbuf_mtod(clone2, char *) != data)
656 GOTO_FAIL("clone2 was not attached properly\n");
657 if (rte_pktmbuf_headroom(clone2) != RTE_PKTMBUF_HEADROOM)
658 GOTO_FAIL("bad headroom in clone2 after attach");
659 if (rte_mbuf_refcnt_read(m) != 3)
660 GOTO_FAIL("invalid refcnt in m\n");
661
662 /* detach the clones */
663 rte_pktmbuf_detach(clone);
664 if (c_data != rte_pktmbuf_mtod(clone, char *))
665 GOTO_FAIL("clone was not detached properly\n");
666 if (rte_mbuf_refcnt_read(m) != 2)
667 GOTO_FAIL("invalid refcnt in m\n");
668
669 rte_pktmbuf_detach(clone2);
670 if (c_data2 != rte_pktmbuf_mtod(clone2, char *))
671 GOTO_FAIL("clone2 was not detached properly\n");
672 if (rte_mbuf_refcnt_read(m) != 1)
673 GOTO_FAIL("invalid refcnt in m\n");
674
675 /* free the clones and the initial mbuf */
676 rte_pktmbuf_free(clone2);
677 rte_pktmbuf_free(clone);
678 rte_pktmbuf_free(m);
679 printf("%s ok\n", __func__);
680 return 0;
681
682 fail:
683 if (m)
684 rte_pktmbuf_free(m);
685 if (clone)
686 rte_pktmbuf_free(clone);
687 if (clone2)
688 rte_pktmbuf_free(clone2);
689 return -1;
690 }
691
692 /*
693 * test allocation and free of mbufs
694 */
695 static int
696 test_pktmbuf_pool(struct rte_mempool *pktmbuf_pool)
697 {
698 unsigned i;
699 struct rte_mbuf *m[NB_MBUF];
700 int ret = 0;
701
702 for (i=0; i<NB_MBUF; i++)
703 m[i] = NULL;
704
705 /* alloc NB_MBUF mbufs */
706 for (i=0; i<NB_MBUF; i++) {
707 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
708 if (m[i] == NULL) {
709 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
710 ret = -1;
711 }
712 }
713 struct rte_mbuf *extra = NULL;
714 extra = rte_pktmbuf_alloc(pktmbuf_pool);
715 if (extra != NULL) {
716 printf("Error: pool not empty\n");
717 ret = -1;
718 }
719 extra = rte_pktmbuf_clone(m[0], pktmbuf_pool);
720 if (extra != NULL) {
721 printf("Error: pool not empty\n");
722 ret = -1;
723 }
724 /* free them */
725 for (i=0; i<NB_MBUF; i++) {
726 if (m[i] != NULL)
727 rte_pktmbuf_free(m[i]);
728 }
729
730 return ret;
731 }
732
733 /*
734 * test bulk allocation and bulk free of mbufs
735 */
736 static int
737 test_pktmbuf_pool_bulk(void)
738 {
739 struct rte_mempool *pool = NULL;
740 struct rte_mempool *pool2 = NULL;
741 unsigned int i;
742 struct rte_mbuf *m;
743 struct rte_mbuf *mbufs[NB_MBUF];
744 int ret = 0;
745
746 /* We cannot use the preallocated mbuf pools because their caches
747 * prevent us from bulk allocating all objects in them.
748 * So we create our own mbuf pools without caches.
749 */
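	/*
	 * One way this can bite (illustrative, an assumption about the
	 * failure mode): with a per-lcore cache, up to MEMPOOL_CACHE_SIZE
	 * mbufs may sit in another lcore's cache, so a bulk allocation of
	 * all NB_MBUF objects could fail even though no mbuf is leaked.
	 * Passing a cache size of 0 to rte_pktmbuf_pool_create() keeps
	 * every object visible to this lcore.
	 */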
750 printf("Create mbuf pools for bulk allocation.\n");
751 pool = rte_pktmbuf_pool_create("test_pktmbuf_bulk",
752 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
753 if (pool == NULL) {
754 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
755 rte_errno);
756 goto err;
757 }
758 pool2 = rte_pktmbuf_pool_create("test_pktmbuf_bulk2",
759 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
760 if (pool2 == NULL) {
761 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
762 rte_errno);
763 goto err;
764 }
765
766 /* Preconditions: Mempools must be full. */
767 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
768 printf("Test precondition failed: mempools not full\n");
769 goto err;
770 }
771 if (!(rte_mempool_avail_count(pool) == NB_MBUF &&
772 rte_mempool_avail_count(pool2) == NB_MBUF)) {
773 printf("Test precondition failed: mempools: %u+%u != %u+%u",
774 rte_mempool_avail_count(pool),
775 rte_mempool_avail_count(pool2),
776 NB_MBUF, NB_MBUF);
777 goto err;
778 }
779
780 printf("Test single bulk alloc, followed by multiple bulk free.\n");
781
782 /* Bulk allocate all mbufs in the pool, in one go. */
783 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
784 if (ret != 0) {
785 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
786 goto err;
787 }
788 /* Test that they have been removed from the pool. */
789 if (!rte_mempool_empty(pool)) {
790 printf("mempool not empty\n");
791 goto err;
792 }
793 /* Bulk free all mbufs, in four steps. */
794 RTE_BUILD_BUG_ON(NB_MBUF % 4 != 0);
795 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
796 rte_pktmbuf_free_bulk(&mbufs[i], NB_MBUF / 4);
797 /* Test that they have been returned to the pool. */
798 if (rte_mempool_avail_count(pool) != i + NB_MBUF / 4) {
799 printf("mempool avail count incorrect\n");
800 goto err;
801 }
802 }
803
804 printf("Test multiple bulk alloc, followed by single bulk free.\n");
805
806 /* Bulk allocate all mbufs in the pool, in four steps. */
807 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
808 ret = rte_pktmbuf_alloc_bulk(pool, &mbufs[i], NB_MBUF / 4);
809 if (ret != 0) {
810 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
811 goto err;
812 }
813 }
814 /* Test that they have been removed from the pool. */
815 if (!rte_mempool_empty(pool)) {
816 printf("mempool not empty\n");
817 goto err;
818 }
819 /* Bulk free all mbufs, in one go. */
820 rte_pktmbuf_free_bulk(mbufs, NB_MBUF);
821 /* Test that they have been returned to the pool. */
822 if (!rte_mempool_full(pool)) {
823 printf("mempool not full\n");
824 goto err;
825 }
826
827 printf("Test bulk free of single long chain.\n");
828
829 /* Bulk allocate all mbufs in the pool, in one go. */
830 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
831 if (ret != 0) {
832 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
833 goto err;
834 }
835 /* Create a long mbuf chain. */
836 for (i = 1; i < NB_MBUF; i++) {
837 ret = rte_pktmbuf_chain(mbufs[0], mbufs[i]);
838 if (ret != 0) {
839 printf("rte_pktmbuf_chain() failed: %d\n", ret);
840 goto err;
841 }
842 mbufs[i] = NULL;
843 }
844 /* Free the mbuf chain containing all the mbufs. */
845 rte_pktmbuf_free_bulk(mbufs, 1);
846 /* Test that they have been returned to the pool. */
847 if (!rte_mempool_full(pool)) {
848 printf("mempool not full\n");
849 goto err;
850 }
851
852 printf("Test bulk free of multiple chains using multiple pools.\n");
853
854 /* Create mbuf chains containing mbufs from different pools. */
855 RTE_BUILD_BUG_ON(CHAIN_LEN % 2 != 0);
856 RTE_BUILD_BUG_ON(NB_MBUF % (CHAIN_LEN / 2) != 0);
857 for (i = 0; i < NB_MBUF * 2; i++) {
858 m = rte_pktmbuf_alloc((i & 4) ? pool2 : pool);
859 if (m == NULL) {
860 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
861 goto err;
862 }
863 if ((i % CHAIN_LEN) == 0)
864 mbufs[i / CHAIN_LEN] = m;
865 else
866 rte_pktmbuf_chain(mbufs[i / CHAIN_LEN], m);
867 }
868 /* Test that both pools have been emptied. */
869 if (!(rte_mempool_empty(pool) && rte_mempool_empty(pool2))) {
870 printf("mempools not empty\n");
871 goto err;
872 }
873 /* Free one mbuf chain. */
874 rte_pktmbuf_free_bulk(mbufs, 1);
875 /* Test that the segments have been returned to the pools. */
876 if (!(rte_mempool_avail_count(pool) == CHAIN_LEN / 2 &&
877 rte_mempool_avail_count(pool2) == CHAIN_LEN / 2)) {
878 printf("all segments of first mbuf have not been returned\n");
879 goto err;
880 }
881 /* Free the remaining mbuf chains. */
882 rte_pktmbuf_free_bulk(&mbufs[1], NB_MBUF * 2 / CHAIN_LEN - 1);
883 /* Test that they have been returned to the pools. */
884 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
885 printf("mempools not full\n");
886 goto err;
887 }
888
889 ret = 0;
890 goto done;
891
892 err:
893 ret = -1;
894
895 done:
896 printf("Free mbuf pools for bulk allocation.\n");
897 rte_mempool_free(pool);
898 rte_mempool_free(pool2);
899 return ret;
900 }
901
902 /*
903 * test that the pointer to the data on a packet mbuf is set properly
904 */
905 static int
906 test_pktmbuf_pool_ptr(struct rte_mempool *pktmbuf_pool)
907 {
908 unsigned i;
909 struct rte_mbuf *m[NB_MBUF];
910 int ret = 0;
911
912 for (i=0; i<NB_MBUF; i++)
913 m[i] = NULL;
914
915 /* alloc NB_MBUF mbufs */
916 for (i=0; i<NB_MBUF; i++) {
917 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
918 if (m[i] == NULL) {
919 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
920 ret = -1;
921 break;
922 }
923 m[i]->data_off += 64;
924 }
925
926 /* free them */
927 for (i=0; i<NB_MBUF; i++) {
928 if (m[i] != NULL)
929 rte_pktmbuf_free(m[i]);
930 }
931
932 for (i=0; i<NB_MBUF; i++)
933 m[i] = NULL;
934
935 /* alloc NB_MBUF mbufs */
936 for (i=0; i<NB_MBUF; i++) {
937 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
938 if (m[i] == NULL) {
939 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
940 ret = -1;
941 break;
942 }
943 if (m[i]->data_off != RTE_PKTMBUF_HEADROOM) {
944 printf("invalid data_off\n");
945 ret = -1;
946 }
947 }
948
949 /* free them */
950 for (i=0; i<NB_MBUF; i++) {
951 if (m[i] != NULL)
952 rte_pktmbuf_free(m[i]);
953 }
954
955 return ret;
956 }
957
958 static int
959 test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool)
960 {
961 unsigned i;
962 struct rte_mbuf *m[NB_MBUF];
963 int ret = 0;
964
965 for (i=0; i<NB_MBUF; i++)
966 m[i] = NULL;
967
968 /* alloc NB_MBUF mbufs */
969 for (i=0; i<NB_MBUF; i++) {
970 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
971 if (m[i] == NULL) {
972 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
973 ret = -1;
974 }
975 }
976
977 /* free them */
978 for (i=0; i<NB_MBUF; i++) {
979 if (m[i] != NULL) {
980 struct rte_mbuf *mb, *mt;
981
982 mb = m[i];
983 while(mb != NULL) {
984 mt = mb;
985 mb = mb->next;
986 rte_pktmbuf_free_seg(mt);
987 }
988 }
989 }
990
991 return ret;
992 }
993
994 /*
995 * Stress test for rte_mbuf atomic refcnt.
996 * Implies that RTE_MBUF_REFCNT_ATOMIC is defined.
997 * For more thorough checking, it is recommended to run with RTE_LIBRTE_MBUF_DEBUG defined.
998 */
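
/*
 * Informal sketch of the refcnt semantics exercised by the stress test
 * below (illustration only, not part of the test itself): raising the
 * reference count by N means the mbuf must be freed N+1 times before it
 * returns to its mempool.
 */
static __rte_unused void
example_refcnt_usage(struct rte_mempool *pool)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(pool);

	if (m == NULL)
		return;

	rte_pktmbuf_refcnt_update(m, 1);	/* refcnt 1 -> 2 */
	rte_pktmbuf_free(m);			/* refcnt 2 -> 1, mbuf kept */
	rte_pktmbuf_free(m);			/* refcnt 1 -> 0, back to pool */
}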
999
1000 #ifdef RTE_MBUF_REFCNT_ATOMIC
1001
1002 static int
1003 test_refcnt_worker(void *arg)
1004 {
1005 unsigned lcore, free;
1006 void *mp = 0;
1007 struct rte_ring *refcnt_mbuf_ring = arg;
1008
1009 lcore = rte_lcore_id();
1010 printf("%s started at lcore %u\n", __func__, lcore);
1011
1012 free = 0;
1013 while (refcnt_stop_workers == 0) {
1014 if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
1015 free++;
1016 rte_pktmbuf_free(mp);
1017 }
1018 }
1019
1020 refcnt_lcore[lcore] += free;
1021 printf("%s finished at lcore %u, "
1022 "number of freed mbufs: %u\n",
1023 __func__, lcore, free);
1024 return 0;
1025 }
1026
1027 static void
1028 test_refcnt_iter(unsigned int lcore, unsigned int iter,
1029 struct rte_mempool *refcnt_pool,
1030 struct rte_ring *refcnt_mbuf_ring)
1031 {
1032 uint16_t ref;
1033 unsigned i, n, tref, wn;
1034 struct rte_mbuf *m;
1035
1036 tref = 0;
1037
1038 /* For each mbuf in the pool:
1039 * - allocate mbuf,
1040 * - increment its reference count up to N+1,
1041 * - enqueue it N times into the ring for worker cores to free.
1042 */
1043 for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
1044 i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
1045 i++) {
1046 ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL);
1047 tref += ref;
1048 if ((ref & 1) != 0) {
1049 rte_pktmbuf_refcnt_update(m, ref);
1050 while (ref-- != 0)
1051 rte_ring_enqueue(refcnt_mbuf_ring, m);
1052 } else {
1053 while (ref-- != 0) {
1054 rte_pktmbuf_refcnt_update(m, 1);
1055 rte_ring_enqueue(refcnt_mbuf_ring, m);
1056 }
1057 }
1058 rte_pktmbuf_free(m);
1059 }
1060
1061 if (i != n)
1062 rte_panic("(lcore=%u, iter=%u): was able to allocate only "
1063 "%u from %u mbufs\n", lcore, iter, i, n);
1064
1065 /* wait until the worker lcores have consumed all mbufs */
1066 while (!rte_ring_empty(refcnt_mbuf_ring))
1067 ;
1068
1069 /* check that all mbufs are back in the mempool by now */
1070 for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) {
1071 if ((i = rte_mempool_avail_count(refcnt_pool)) == n) {
1072 refcnt_lcore[lcore] += tref;
1073 printf("%s(lcore=%u, iter=%u) completed, "
1074 "%u references processed\n",
1075 __func__, lcore, iter, tref);
1076 return;
1077 }
1078 rte_delay_ms(100);
1079 }
1080
1081 rte_panic("(lcore=%u, iter=%u): after %us only "
1082 "%u of %u mbufs left free\n", lcore, iter, wn, i, n);
1083 }
1084
1085 static int
1086 test_refcnt_main(struct rte_mempool *refcnt_pool,
1087 struct rte_ring *refcnt_mbuf_ring)
1088 {
1089 unsigned i, lcore;
1090
1091 lcore = rte_lcore_id();
1092 printf("%s started at lcore %u\n", __func__, lcore);
1093
1094 for (i = 0; i != REFCNT_MAX_ITER; i++)
1095 test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
1096
1097 refcnt_stop_workers = 1;
1098 rte_wmb();
1099
1100 printf("%s finished at lcore %u\n", __func__, lcore);
1101 return 0;
1102 }
1103
1104 #endif
1105
1106 static int
1107 test_refcnt_mbuf(void)
1108 {
1109 #ifdef RTE_MBUF_REFCNT_ATOMIC
1110 unsigned int main_lcore, worker, tref;
1111 int ret = -1;
1112 struct rte_mempool *refcnt_pool = NULL;
1113 struct rte_ring *refcnt_mbuf_ring = NULL;
1114
1115 if (rte_lcore_count() < 2) {
1116 printf("Not enough cores for test_refcnt_mbuf, expecting at least 2\n");
1117 return TEST_SKIPPED;
1118 }
1119
1120 printf("starting %s, at %u lcores\n", __func__, rte_lcore_count());
1121
1122 /* create the refcnt pool and ring */
1123
1124 refcnt_pool = rte_pktmbuf_pool_create(MAKE_STRING(refcnt_pool),
1125 REFCNT_MBUF_NUM, 0, 0, 0,
1126 SOCKET_ID_ANY);
1127 if (refcnt_pool == NULL) {
1128 printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
1129 __func__);
1130 return -1;
1131 }
1132
1133 refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
1134 rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
1135 RING_F_SP_ENQ);
1136 if (refcnt_mbuf_ring == NULL) {
1137 printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
1138 "\n", __func__);
1139 goto err;
1140 }
1141
1142 refcnt_stop_workers = 0;
1143 memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
1144
1145 rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN);
1146
1147 test_refcnt_main(refcnt_pool, refcnt_mbuf_ring);
1148
1149 rte_eal_mp_wait_lcore();
1150
1151 /* check that we processed all references */
1152 tref = 0;
1153 main_lcore = rte_get_main_lcore();
1154
1155 RTE_LCORE_FOREACH_WORKER(worker)
1156 tref += refcnt_lcore[worker];
1157
1158 if (tref != refcnt_lcore[main_lcore])
1159 rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
1160 tref, refcnt_lcore[main_lcore]);
1161
1162 rte_mempool_dump(stdout, refcnt_pool);
1163 rte_ring_dump(stdout, refcnt_mbuf_ring);
1164
1165 ret = 0;
1166
1167 err:
1168 rte_mempool_free(refcnt_pool);
1169 rte_ring_free(refcnt_mbuf_ring);
1170 return ret;
1171 #else
1172 return 0;
1173 #endif
1174 }
1175
1176 #include <unistd.h>
1177 #include <sys/wait.h>
1178
1179 /* use fork() to check that mbuf errors cause a panic */
1180 static int
1181 verify_mbuf_check_panics(struct rte_mbuf *buf)
1182 {
1183 int pid;
1184 int status;
1185
1186 pid = fork();
1187
1188 if (pid == 0) {
1189 rte_mbuf_sanity_check(buf, 1); /* should panic */
1190 exit(0); /* return normally if it doesn't panic */
1191 } else if (pid < 0){
1192 printf("Fork Failed\n");
1193 return -1;
1194 }
1195 wait(&status);
1196 if(status == 0)
1197 return -1;
1198
1199 return 0;
1200 }
1201
1202 static int
1203 test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
1204 {
1205 struct rte_mbuf *buf;
1206 struct rte_mbuf badbuf;
1207
1208 printf("Checking rte_mbuf_sanity_check for failure conditions\n");
1209
1210 /* get a good mbuf to use to make copies */
1211 buf = rte_pktmbuf_alloc(pktmbuf_pool);
1212 if (buf == NULL)
1213 return -1;
1214
1215 printf("Checking good mbuf initially\n");
1216 if (verify_mbuf_check_panics(buf) != -1)
1217 return -1;
1218
1219 printf("Now checking for error conditions\n");
1220
1221 if (verify_mbuf_check_panics(NULL)) {
1222 printf("Error with NULL mbuf test\n");
1223 return -1;
1224 }
1225
1226 badbuf = *buf;
1227 badbuf.pool = NULL;
1228 if (verify_mbuf_check_panics(&badbuf)) {
1229 printf("Error with bad-pool mbuf test\n");
1230 return -1;
1231 }
1232
1233 badbuf = *buf;
1234 badbuf.buf_iova = 0;
1235 if (verify_mbuf_check_panics(&badbuf)) {
1236 printf("Error with bad-physaddr mbuf test\n");
1237 return -1;
1238 }
1239
1240 badbuf = *buf;
1241 badbuf.buf_addr = NULL;
1242 if (verify_mbuf_check_panics(&badbuf)) {
1243 printf("Error with bad-addr mbuf test\n");
1244 return -1;
1245 }
1246
1247 badbuf = *buf;
1248 badbuf.refcnt = 0;
1249 if (verify_mbuf_check_panics(&badbuf)) {
1250 printf("Error with bad-refcnt(0) mbuf test\n");
1251 return -1;
1252 }
1253
1254 badbuf = *buf;
1255 badbuf.refcnt = UINT16_MAX;
1256 if (verify_mbuf_check_panics(&badbuf)) {
1257 printf("Error with bad-refcnt(MAX) mbuf test\n");
1258 return -1;
1259 }
1260
1261 return 0;
1262 }
1263
1264 static int
1265 test_mbuf_linearize(struct rte_mempool *pktmbuf_pool, int pkt_len,
1266 int nb_segs)
1267 {
1268
1269 struct rte_mbuf *m = NULL, *mbuf = NULL;
1270 uint8_t *data;
1271 int data_len = 0;
1272 int remain;
1273 int seg, seg_len;
1274 int i;
1275
1276 if (pkt_len < 1) {
1277 printf("Packet size must be 1 or more (is %d)\n", pkt_len);
1278 return -1;
1279 }
1280
1281 if (nb_segs < 1) {
1282 printf("Number of segments must be 1 or more (is %d)\n",
1283 nb_segs);
1284 return -1;
1285 }
1286
1287 seg_len = pkt_len / nb_segs;
1288 if (seg_len == 0)
1289 seg_len = 1;
1290
1291 remain = pkt_len;
1292
1293 /* Create a chained mbuf and fill it with generated data */
1294 for (seg = 0; remain > 0; seg++) {
1295
1296 m = rte_pktmbuf_alloc(pktmbuf_pool);
1297 if (m == NULL) {
1298 printf("Cannot create segment for source mbuf");
1299 goto fail;
1300 }
1301
1302 /* Make sure the tailroom is zeroed */
1303 memset(rte_pktmbuf_mtod(m, uint8_t *), 0,
1304 rte_pktmbuf_tailroom(m));
1305
1306 data_len = remain;
1307 if (data_len > seg_len)
1308 data_len = seg_len;
1309
1310 data = (uint8_t *)rte_pktmbuf_append(m, data_len);
1311 if (data == NULL) {
1312 printf("Cannot append %d bytes to the mbuf\n",
1313 data_len);
1314 goto fail;
1315 }
1316
1317 for (i = 0; i < data_len; i++)
1318 data[i] = (seg * seg_len + i) % 0x0ff;
1319
1320 if (seg == 0)
1321 mbuf = m;
1322 else
1323 rte_pktmbuf_chain(mbuf, m);
1324
1325 remain -= data_len;
1326 }
1327
1328 /* Coalesce the chained data into the first segment */
1329 if (rte_pktmbuf_linearize(mbuf)) {
1330 printf("Mbuf linearization failed\n");
1331 goto fail;
1332 }
1333
1334 if (!rte_pktmbuf_is_contiguous(mbuf)) {
1335 printf("Source buffer should be contiguous after "
1336 "linearization\n");
1337 goto fail;
1338 }
1339
1340 data = rte_pktmbuf_mtod(mbuf, uint8_t *);
1341
1342 for (i = 0; i < pkt_len; i++)
1343 if (data[i] != (i % 0x0ff)) {
1344 printf("Incorrect data in linearized mbuf\n");
1345 goto fail;
1346 }
1347
1348 rte_pktmbuf_free(mbuf);
1349 return 0;
1350
1351 fail:
1352 if (mbuf)
1353 rte_pktmbuf_free(mbuf);
1354 return -1;
1355 }
1356
1357 static int
1358 test_mbuf_linearize_check(struct rte_mempool *pktmbuf_pool)
1359 {
1360 struct test_mbuf_array {
1361 int size;
1362 int nb_segs;
1363 } mbuf_array[] = {
1364 { 128, 1 },
1365 { 64, 64 },
1366 { 512, 10 },
1367 { 250, 11 },
1368 { 123, 8 },
1369 };
1370 unsigned int i;
1371
1372 printf("Test mbuf linearize API\n");
1373
1374 for (i = 0; i < RTE_DIM(mbuf_array); i++)
1375 if (test_mbuf_linearize(pktmbuf_pool, mbuf_array[i].size,
1376 mbuf_array[i].nb_segs)) {
1377 printf("Test failed for %d, %d\n", mbuf_array[i].size,
1378 mbuf_array[i].nb_segs);
1379 return -1;
1380 }
1381
1382 return 0;
1383 }
1384
1385 /*
1386 * Helper function for test_tx_offload
1387 */
1388 static inline void
1389 set_tx_offload(struct rte_mbuf *mb, uint64_t il2, uint64_t il3, uint64_t il4,
1390 uint64_t tso, uint64_t ol3, uint64_t ol2)
1391 {
1392 mb->l2_len = il2;
1393 mb->l3_len = il3;
1394 mb->l4_len = il4;
1395 mb->tso_segsz = tso;
1396 mb->outer_l3_len = ol3;
1397 mb->outer_l2_len = ol2;
1398 }
1399
1400 static int
1401 test_tx_offload(void)
1402 {
1403 struct rte_mbuf *mb;
1404 uint64_t tm, v1, v2;
1405 size_t sz;
1406 uint32_t i;
1407
1408 static volatile struct {
1409 uint16_t l2;
1410 uint16_t l3;
1411 uint16_t l4;
1412 uint16_t tso;
1413 } txof;
1414
1415 const uint32_t num = 0x10000;
1416
1417 txof.l2 = rte_rand() % (1 << RTE_MBUF_L2_LEN_BITS);
1418 txof.l3 = rte_rand() % (1 << RTE_MBUF_L3_LEN_BITS);
1419 txof.l4 = rte_rand() % (1 << RTE_MBUF_L4_LEN_BITS);
1420 txof.tso = rte_rand() % (1 << RTE_MBUF_TSO_SEGSZ_BITS);
1421
1422 printf("%s started, tx_offload = {\n"
1423 "\tl2_len=%#hx,\n"
1424 "\tl3_len=%#hx,\n"
1425 "\tl4_len=%#hx,\n"
1426 "\ttso_segsz=%#hx,\n"
1427 "\touter_l3_len=%#x,\n"
1428 "\touter_l2_len=%#x,\n"
1429 "};\n",
1430 __func__,
1431 txof.l2, txof.l3, txof.l4, txof.tso, txof.l3, txof.l2);
1432
1433 sz = sizeof(*mb) * num;
1434 mb = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
1435 if (mb == NULL) {
1436 printf("%s failed, out of memory\n", __func__);
1437 return -ENOMEM;
1438 }
1439
1440 memset(mb, 0, sz);
1441 tm = rte_rdtsc_precise();
1442
1443 for (i = 0; i != num; i++)
1444 set_tx_offload(mb + i, txof.l2, txof.l3, txof.l4,
1445 txof.tso, txof.l3, txof.l2);
1446
1447 tm = rte_rdtsc_precise() - tm;
1448 printf("%s set tx_offload by bit-fields: %u iterations, %"
1449 PRIu64 " cycles, %#Lf cycles/iter\n",
1450 __func__, num, tm, (long double)tm / num);
1451
1452 v1 = mb[rte_rand() % num].tx_offload;
1453
1454 memset(mb, 0, sz);
1455 tm = rte_rdtsc_precise();
1456
1457 for (i = 0; i != num; i++)
1458 mb[i].tx_offload = rte_mbuf_tx_offload(txof.l2, txof.l3,
1459 txof.l4, txof.tso, txof.l3, txof.l2, 0);
1460
1461 tm = rte_rdtsc_precise() - tm;
1462 printf("%s set raw tx_offload: %u iterations, %"
1463 PRIu64 " cycles, %#Lf cycles/iter\n",
1464 __func__, num, tm, (long double)tm / num);
1465
1466 v2 = mb[rte_rand() % num].tx_offload;
1467
1468 rte_free(mb);
1469
1470 printf("%s finished\n"
1471 "expected tx_offload value: 0x%" PRIx64 ";\n"
1472 "rte_mbuf_tx_offload value: 0x%" PRIx64 ";\n",
1473 __func__, v1, v2);
1474
1475 return (v1 == v2) ? 0 : -EINVAL;
1476 }
1477
1478 static int
1479 test_get_rx_ol_flag_list(void)
1480 {
1481 int len = 6, ret = 0;
1482 char buf[256] = "";
1483 int buflen = 0;
1484
1485 /* Test case to check with null buffer */
1486 ret = rte_get_rx_ol_flag_list(0, NULL, 0);
1487 if (ret != -1)
1488 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1489
1490 /* Test case to check with zero buffer len */
1491 ret = rte_get_rx_ol_flag_list(PKT_RX_L4_CKSUM_MASK, buf, 0);
1492 if (ret != -1)
1493 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1494
1495 buflen = strlen(buf);
1496 if (buflen != 0)
1497 GOTO_FAIL("%s buffer should be empty, received = %d\n",
1498 __func__, buflen);
1499
1500 /* Test case to check with reduced buffer len */
1501 ret = rte_get_rx_ol_flag_list(0, buf, len);
1502 if (ret != -1)
1503 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1504
1505 buflen = strlen(buf);
1506 if (buflen != (len - 1))
1507 GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
1508 "received = %d\n", __func__,
1509 (len - 1), buflen);
1510
1511 /* Test case to check with zero mask value */
1512 ret = rte_get_rx_ol_flag_list(0, buf, sizeof(buf));
1513 if (ret != 0)
1514 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1515
1516 buflen = strlen(buf);
1517 if (buflen == 0)
1518 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1519 "non-zero, buffer should not be empty");
1520
1521 /* Test case to check with valid mask value */
1522 ret = rte_get_rx_ol_flag_list(PKT_RX_SEC_OFFLOAD, buf, sizeof(buf));
1523 if (ret != 0)
1524 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1525
1526 buflen = strlen(buf);
1527 if (buflen == 0)
1528 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1529 "non-zero, buffer should not be empty");
1530
1531 return 0;
1532 fail:
1533 return -1;
1534 }
1535
1536 static int
1537 test_get_tx_ol_flag_list(void)
1538 {
1539 int len = 6, ret = 0;
1540 char buf[256] = "";
1541 int buflen = 0;
1542
1543 /* Test case to check with null buffer */
1544 ret = rte_get_tx_ol_flag_list(0, NULL, 0);
1545 if (ret != -1)
1546 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1547
1548 /* Test case to check with zero buffer len */
1549 ret = rte_get_tx_ol_flag_list(PKT_TX_IP_CKSUM, buf, 0);
1550 if (ret != -1)
1551 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1552
1553 buflen = strlen(buf);
1554 if (buflen != 0) {
1555 GOTO_FAIL("%s buffer should be empty, received = %d\n",
1556 __func__, buflen);
1557 }
1558
1559 /* Test case to check with reduced buffer len */
1560 ret = rte_get_tx_ol_flag_list(0, buf, len);
1561 if (ret != -1)
1562 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1563
1564 buflen = strlen(buf);
1565 if (buflen != (len - 1))
1566 GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
1567 "received = %d\n", __func__,
1568 (len - 1), buflen);
1569
1570 /* Test case to check with zero mask value */
1571 ret = rte_get_tx_ol_flag_list(0, buf, sizeof(buf));
1572 if (ret != 0)
1573 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1574
1575 buflen = strlen(buf);
1576 if (buflen == 0)
1577 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1578 "non-zero, buffer should not be empty");
1579
1580 /* Test case to check with valid mask value */
1581 ret = rte_get_tx_ol_flag_list(PKT_TX_UDP_CKSUM, buf, sizeof(buf));
1582 if (ret != 0)
1583 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1584
1585 buflen = strlen(buf);
1586 if (buflen == 0)
1587 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1588 "non-zero, buffer should not be empty");
1589
1590 return 0;
1591 fail:
1592 return -1;
1593
1594 }
1595
1596 struct flag_name {
1597 uint64_t flag;
1598 const char *name;
1599 };
1600
1601 static int
1602 test_get_rx_ol_flag_name(void)
1603 {
1604 uint16_t i;
1605 const char *flag_str = NULL;
1606 const struct flag_name rx_flags[] = {
1607 VAL_NAME(PKT_RX_VLAN),
1608 VAL_NAME(PKT_RX_RSS_HASH),
1609 VAL_NAME(PKT_RX_FDIR),
1610 VAL_NAME(PKT_RX_L4_CKSUM_BAD),
1611 VAL_NAME(PKT_RX_L4_CKSUM_GOOD),
1612 VAL_NAME(PKT_RX_L4_CKSUM_NONE),
1613 VAL_NAME(PKT_RX_IP_CKSUM_BAD),
1614 VAL_NAME(PKT_RX_IP_CKSUM_GOOD),
1615 VAL_NAME(PKT_RX_IP_CKSUM_NONE),
1616 VAL_NAME(PKT_RX_EIP_CKSUM_BAD),
1617 VAL_NAME(PKT_RX_VLAN_STRIPPED),
1618 VAL_NAME(PKT_RX_IEEE1588_PTP),
1619 VAL_NAME(PKT_RX_IEEE1588_TMST),
1620 VAL_NAME(PKT_RX_FDIR_ID),
1621 VAL_NAME(PKT_RX_FDIR_FLX),
1622 VAL_NAME(PKT_RX_QINQ_STRIPPED),
1623 VAL_NAME(PKT_RX_LRO),
1624 VAL_NAME(PKT_RX_SEC_OFFLOAD),
1625 VAL_NAME(PKT_RX_SEC_OFFLOAD_FAILED),
1626 VAL_NAME(PKT_RX_OUTER_L4_CKSUM_BAD),
1627 VAL_NAME(PKT_RX_OUTER_L4_CKSUM_GOOD),
1628 VAL_NAME(PKT_RX_OUTER_L4_CKSUM_INVALID),
1629 };
1630
1631 /* Test case to check with valid flag */
1632 for (i = 0; i < RTE_DIM(rx_flags); i++) {
1633 flag_str = rte_get_rx_ol_flag_name(rx_flags[i].flag);
1634 if (flag_str == NULL)
1635 GOTO_FAIL("%s: Expected flagname = %s; received null\n",
1636 __func__, rx_flags[i].name);
1637 if (strcmp(flag_str, rx_flags[i].name) != 0)
1638 GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
1639 __func__, rx_flags[i].name, flag_str);
1640 }
1641 /* Test case to check with invalid flag */
1642 flag_str = rte_get_rx_ol_flag_name(0);
1643 if (flag_str != NULL) {
1644 GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
1645 __func__, flag_str);
1646 }
1647
1648 return 0;
1649 fail:
1650 return -1;
1651 }
1652
1653 static int
1654 test_get_tx_ol_flag_name(void)
1655 {
1656 uint16_t i;
1657 const char *flag_str = NULL;
1658 const struct flag_name tx_flags[] = {
1659 VAL_NAME(PKT_TX_VLAN),
1660 VAL_NAME(PKT_TX_IP_CKSUM),
1661 VAL_NAME(PKT_TX_TCP_CKSUM),
1662 VAL_NAME(PKT_TX_SCTP_CKSUM),
1663 VAL_NAME(PKT_TX_UDP_CKSUM),
1664 VAL_NAME(PKT_TX_IEEE1588_TMST),
1665 VAL_NAME(PKT_TX_TCP_SEG),
1666 VAL_NAME(PKT_TX_IPV4),
1667 VAL_NAME(PKT_TX_IPV6),
1668 VAL_NAME(PKT_TX_OUTER_IP_CKSUM),
1669 VAL_NAME(PKT_TX_OUTER_IPV4),
1670 VAL_NAME(PKT_TX_OUTER_IPV6),
1671 VAL_NAME(PKT_TX_TUNNEL_VXLAN),
1672 VAL_NAME(PKT_TX_TUNNEL_GRE),
1673 VAL_NAME(PKT_TX_TUNNEL_IPIP),
1674 VAL_NAME(PKT_TX_TUNNEL_GENEVE),
1675 VAL_NAME(PKT_TX_TUNNEL_MPLSINUDP),
1676 VAL_NAME(PKT_TX_TUNNEL_VXLAN_GPE),
1677 VAL_NAME(PKT_TX_TUNNEL_IP),
1678 VAL_NAME(PKT_TX_TUNNEL_UDP),
1679 VAL_NAME(PKT_TX_QINQ),
1680 VAL_NAME(PKT_TX_MACSEC),
1681 VAL_NAME(PKT_TX_SEC_OFFLOAD),
1682 VAL_NAME(PKT_TX_UDP_SEG),
1683 VAL_NAME(PKT_TX_OUTER_UDP_CKSUM),
1684 };
1685
1686 /* Test case to check with valid flag */
1687 for (i = 0; i < RTE_DIM(tx_flags); i++) {
1688 flag_str = rte_get_tx_ol_flag_name(tx_flags[i].flag);
1689 if (flag_str == NULL)
1690 GOTO_FAIL("%s: Expected flagname = %s; received null\n",
1691 __func__, tx_flags[i].name);
1692 if (strcmp(flag_str, tx_flags[i].name) != 0)
1693 GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
1694 __func__, tx_flags[i].name, flag_str);
1695 }
1696 /* Test case to check with invalid flag */
1697 flag_str = rte_get_tx_ol_flag_name(0);
1698 if (flag_str != NULL) {
1699 GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
1700 __func__, flag_str);
1701 }
1702
1703 return 0;
1704 fail:
1705 return -1;
1706
1707 }
1708
1709 static int
1710 test_mbuf_validate_tx_offload(const char *test_name,
1711 struct rte_mempool *pktmbuf_pool,
1712 uint64_t ol_flags,
1713 uint16_t segsize,
1714 int expected_retval)
1715 {
1716 struct rte_mbuf *m = NULL;
1717 int ret = 0;
1718
1719 /* alloc a mbuf and do sanity check */
1720 m = rte_pktmbuf_alloc(pktmbuf_pool);
1721 if (m == NULL)
1722 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1723 if (rte_pktmbuf_pkt_len(m) != 0)
1724 GOTO_FAIL("%s: Bad packet length\n", __func__);
1725 rte_mbuf_sanity_check(m, 0);
1726 m->ol_flags = ol_flags;
1727 m->tso_segsz = segsize;
1728 ret = rte_validate_tx_offload(m);
1729 if (ret != expected_retval)
1730 GOTO_FAIL("%s(%s): expected ret val: %d; received: %d\n",
1731 __func__, test_name, expected_retval, ret);
1732 rte_pktmbuf_free(m);
1733 m = NULL;
1734 return 0;
1735 fail:
1736 if (m) {
1737 rte_pktmbuf_free(m);
1738 m = NULL;
1739 }
1740 return -1;
1741 }
1742
1743 static int
1744 test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
1745 {
1746 /* test to validate tx offload flags */
1747 uint64_t ol_flags = 0;
1748
1749 /* test to validate if IP checksum is counted only for IPV4 packet */
1750 /* set both IP checksum and IPV6 flags */
1751 ol_flags |= PKT_TX_IP_CKSUM;
1752 ol_flags |= PKT_TX_IPV6;
1753 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET",
1754 pktmbuf_pool,
1755 ol_flags, 0, -EINVAL) < 0)
1756 GOTO_FAIL("%s failed: IP cksum is set incorrect.\n", __func__);
1757 /* resetting ol_flags for next testcase */
1758 ol_flags = 0;
1759
1760 /* test to validate if IP type is set when required */
1761 ol_flags |= PKT_TX_L4_MASK;
1762 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1763 pktmbuf_pool,
1764 ol_flags, 0, -EINVAL) < 0)
1765 GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1766
1767 /* test if IP type is set when TCP SEG is on */
1768 ol_flags |= PKT_TX_TCP_SEG;
1769 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1770 pktmbuf_pool,
1771 ol_flags, 0, -EINVAL) < 0)
1772 GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1773
1774 ol_flags = 0;
1775 /* test to confirm IP type (IPV4/IPV6) is set */
1776 ol_flags = PKT_TX_L4_MASK;
1777 ol_flags |= PKT_TX_IPV6;
1778 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET",
1779 pktmbuf_pool,
1780 ol_flags, 0, 0) < 0)
1781 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1782
1783 ol_flags = 0;
1784 /* test to check TSO segment size is non-zero */
1785 ol_flags |= PKT_TX_IPV4;
1786 ol_flags |= PKT_TX_TCP_SEG;
1787 /* set 0 tso segment size */
1788 if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ",
1789 pktmbuf_pool,
1790 ol_flags, 0, -EINVAL) < 0)
1791 GOTO_FAIL("%s failed: tso segment size is null.\n", __func__);
1792
1793 /* retain IPV4 and PKT_TX_TCP_SEG mask */
1794 /* set valid tso segment size but IP CKSUM not set */
1795 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET",
1796 pktmbuf_pool,
1797 ol_flags, 512, -EINVAL) < 0)
1798 GOTO_FAIL("%s failed: IP CKSUM is not set.\n", __func__);
1799
1800 /* test to validate if IP checksum is set for TSO capability */
1801 /* retain IPV4, TCP_SEG, tso_seg size */
1802 ol_flags |= PKT_TX_IP_CKSUM;
1803 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET",
1804 pktmbuf_pool,
1805 ol_flags, 512, 0) < 0)
1806 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1807
1808 /* test to confirm TSO for IPV6 type */
1809 ol_flags = 0;
1810 ol_flags |= PKT_TX_IPV6;
1811 ol_flags |= PKT_TX_TCP_SEG;
1812 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET",
1813 pktmbuf_pool,
1814 ol_flags, 512, 0) < 0)
1815 GOTO_FAIL("%s failed: TSO req not met.\n", __func__);
1816
1817 ol_flags = 0;
1818 /* test if outer IP checksum set for non outer IPv4 packet */
1819 ol_flags |= PKT_TX_IPV6;
1820 ol_flags |= PKT_TX_OUTER_IP_CKSUM;
1821 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET",
1822 pktmbuf_pool,
1823 ol_flags, 512, -EINVAL) < 0)
1824 GOTO_FAIL("%s failed: Outer IP cksum set.\n", __func__);
1825
1826 ol_flags = 0;
1827 /* test to confirm outer IP checksum is set for outer IPV4 packet */
1828 ol_flags |= PKT_TX_OUTER_IP_CKSUM;
1829 ol_flags |= PKT_TX_OUTER_IPV4;
1830 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET",
1831 pktmbuf_pool,
1832 ol_flags, 512, 0) < 0)
1833 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1834
1835 ol_flags = 0;
1836 /* test to confirm if packets with no TX_OFFLOAD_MASK are skipped */
1837 if (test_mbuf_validate_tx_offload("MBUF_TEST_OL_MASK_NOT_SET",
1838 pktmbuf_pool,
1839 ol_flags, 512, 0) < 0)
1840 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1841 return 0;
1842 fail:
1843 return -1;
1844 }
1845
1846 /*
1847 * Test for allocating a bulk of mbufs,
1848 * using an array of valid allocation counts.
1849 */
1850 static int
1851 test_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1852 {
1853 int ret = 0;
1854 unsigned int idx, loop;
1855 unsigned int alloc_counts[] = {
1856 0,
1857 MEMPOOL_CACHE_SIZE - 1,
1858 MEMPOOL_CACHE_SIZE + 1,
1859 MEMPOOL_CACHE_SIZE * 1.5,
1860 MEMPOOL_CACHE_SIZE * 2,
1861 MEMPOOL_CACHE_SIZE * 2 - 1,
1862 MEMPOOL_CACHE_SIZE * 2 + 1,
1863 MEMPOOL_CACHE_SIZE,
1864 };
1865
1866 /* allocate a large array of mbuf pointers */
1867 struct rte_mbuf *mbufs[NB_MBUF] = { 0 };
1868 for (idx = 0; idx < RTE_DIM(alloc_counts); idx++) {
1869 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
1870 alloc_counts[idx]);
1871 if (ret == 0) {
1872 for (loop = 0; loop < alloc_counts[idx] &&
1873 mbufs[loop] != NULL; loop++)
1874 rte_pktmbuf_free(mbufs[loop]);
1875 } else {
1876 printf("%s: Bulk alloc failed count(%u); ret val(%d)\n",
1877 __func__, alloc_counts[idx], ret);
1878 return -1;
1879 }
1880 }
1881 return 0;
1882 }
1883
1884 /*
1885 * Negative testing for allocating a bulk of mbufs
1886 */
1887 static int
1888 test_neg_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1889 {
1890 int ret = 0;
1891 unsigned int idx, loop;
1892 unsigned int neg_alloc_counts[] = {
1893 MEMPOOL_CACHE_SIZE - NB_MBUF,
1894 NB_MBUF + 1,
1895 NB_MBUF * 8,
1896 UINT_MAX
1897 };
1898 struct rte_mbuf *mbufs[NB_MBUF * 8] = { 0 };
1899
1900 for (idx = 0; idx < RTE_DIM(neg_alloc_counts); idx++) {
1901 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
1902 neg_alloc_counts[idx]);
1903 if (ret == 0) {
1904 printf("%s: Bulk alloc must fail! count(%u); ret(%d)\n",
1905 __func__, neg_alloc_counts[idx], ret);
1906 for (loop = 0; loop < neg_alloc_counts[idx] &&
1907 mbufs[loop] != NULL; loop++)
1908 rte_pktmbuf_free(mbufs[loop]);
1909 return -1;
1910 }
1911 }
1912 return 0;
1913 }
1914
1915 /*
1916 * Test to read mbuf packet using rte_pktmbuf_read
1917 */
1918 static int
1919 test_pktmbuf_read(struct rte_mempool *pktmbuf_pool)
1920 {
1921 struct rte_mbuf *m = NULL;
1922 char *data = NULL;
1923 const char *data_copy = NULL;
1924 int off;
1925
1926 /* alloc a mbuf */
1927 m = rte_pktmbuf_alloc(pktmbuf_pool);
1928 if (m == NULL)
1929 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1930 if (rte_pktmbuf_pkt_len(m) != 0)
1931 GOTO_FAIL("%s: Bad packet length\n", __func__);
1932 rte_mbuf_sanity_check(m, 0);
1933
1934 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
1935 if (data == NULL)
1936 GOTO_FAIL("%s: Cannot append data\n", __func__);
1937 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN2)
1938 GOTO_FAIL("%s: Bad packet length\n", __func__);
1939 memset(data, 0xfe, MBUF_TEST_DATA_LEN2);
1940
1941 /* read the data from mbuf */
1942 data_copy = rte_pktmbuf_read(m, 0, MBUF_TEST_DATA_LEN2, NULL);
1943 if (data_copy == NULL)
1944 GOTO_FAIL("%s: Error in reading data!\n", __func__);
1945 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
1946 if (data_copy[off] != (char)0xfe)
1947 GOTO_FAIL("Data corrupted at offset %u", off);
1948 }
1949 rte_pktmbuf_free(m);
1950 m = NULL;
1951
1952 return 0;
1953 fail:
1954 if (m) {
1955 rte_pktmbuf_free(m);
1956 m = NULL;
1957 }
1958 return -1;
1959 }
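
/*
 * Illustrative sketch only (the example_* name is hypothetical, not part of
 * this test suite): rte_pktmbuf_read() returns a pointer straight into the
 * mbuf when the requested region sits in one segment, copies the region into
 * the caller-supplied buffer when it spans segments, and returns NULL when
 * off + len goes beyond the packet length.
 */
static __rte_unused int
example_pktmbuf_read_usage(const struct rte_mbuf *m)
{
	char scratch[MBUF_TEST_DATA_LEN2];
	const void *p;

	p = rte_pktmbuf_read(m, 0, sizeof(scratch), scratch);
	if (p == NULL)
		return -1; /* packet shorter than the requested length */
	/* p points either into the mbuf or at scratch, never a partial read */
	return p == (const void *)scratch ? 1 : 0;
}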
1960
1961 /*
1962 * Test to read mbuf packet data from offset
1963 */
1964 static int
1965 test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
1966 {
1967 struct rte_mbuf *m = NULL;
1968 struct rte_ether_hdr *hdr = NULL;
1969 char *data = NULL;
1970 const char *data_copy = NULL;
1971 unsigned int off;
1972 unsigned int hdr_len = sizeof(struct rte_ether_hdr);
1973
1974 /* alloc a mbuf */
1975 m = rte_pktmbuf_alloc(pktmbuf_pool);
1976 if (m == NULL)
1977 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1978
1979 if (rte_pktmbuf_pkt_len(m) != 0)
1980 GOTO_FAIL("%s: Bad packet length\n", __func__);
1981 rte_mbuf_sanity_check(m, 0);
1982
1983 /* prepend an ethernet header */
1984 hdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(m, hdr_len);
1985 if (hdr == NULL)
1986 GOTO_FAIL("%s: Cannot prepend header\n", __func__);
1987 if (rte_pktmbuf_pkt_len(m) != hdr_len)
1988 GOTO_FAIL("%s: Bad pkt length", __func__);
1989 if (rte_pktmbuf_data_len(m) != hdr_len)
1990 GOTO_FAIL("%s: Bad data length", __func__);
1991 memset(hdr, 0xde, hdr_len);
1992
1993 /* read mbuf header info from 0 offset */
1994 data_copy = rte_pktmbuf_read(m, 0, hdr_len, NULL);
1995 if (data_copy == NULL)
1996 GOTO_FAIL("%s: Error in reading header!\n", __func__);
1997 for (off = 0; off < hdr_len; off++) {
1998 if (data_copy[off] != (char)0xde)
1999 GOTO_FAIL("Header info corrupted at offset %u", off);
2000 }
2001
2002 /* append sample data after ethernet header */
2003 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
2004 if (data == NULL)
2005 GOTO_FAIL("%s: Cannot append data\n", __func__);
2006 if (rte_pktmbuf_pkt_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
2007 GOTO_FAIL("%s: Bad packet length\n", __func__);
2008 if (rte_pktmbuf_data_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
2009 GOTO_FAIL("%s: Bad data length\n", __func__);
2010 memset(data, 0xcc, MBUF_TEST_DATA_LEN2);
2011
2012 /* read mbuf data after header info */
2013 data_copy = rte_pktmbuf_read(m, hdr_len, MBUF_TEST_DATA_LEN2, NULL);
2014 if (data_copy == NULL)
2015 GOTO_FAIL("%s: Error in reading header data!\n", __func__);
2016 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
2017 if (data_copy[off] != (char)0xcc)
2018 GOTO_FAIL("Data corrupted at offset %u", off);
2019 }
2020
2021 /* partial reading of mbuf data */
2022 data_copy = rte_pktmbuf_read(m, hdr_len + 5, MBUF_TEST_DATA_LEN2 - 5,
2023 NULL);
2024 if (data_copy == NULL)
2025 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2026 if (strlen(data_copy) != MBUF_TEST_DATA_LEN2 - 5)
2027 GOTO_FAIL("%s: Incorrect data length!\n", __func__);
2028 for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
2029 if (data_copy[off] != (char)0xcc)
2030 GOTO_FAIL("Data corrupted at offset %u", off);
2031 }
2032
2033 /* read length greater than mbuf data_len */
2034 if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_data_len(m) + 1,
2035 NULL) != NULL)
2036 GOTO_FAIL("%s: Requested len is larger than mbuf data len!\n",
2037 __func__);
2038
2039 /* read length greater than mbuf pkt_len */
2040 if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_pkt_len(m) + 1,
2041 NULL) != NULL)
2042 GOTO_FAIL("%s: Requested len is larger than mbuf pkt len!\n",
2043 __func__);
2044
2045 /* read data of zero len from valid offset */
2046 data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
2047 if (data_copy == NULL)
2048 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2049 if (strlen(data_copy) != MBUF_TEST_DATA_LEN2)
2050 GOTO_FAIL("%s: Corrupted data content!\n", __func__);
2051 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
2052 if (data_copy[off] != (char)0xcc)
2053 GOTO_FAIL("Data corrupted at offset %u", off);
2054 }
2055
2056 /* read data of zero length from zero offset */
2057 data_copy = rte_pktmbuf_read(m, 0, 0, NULL);
2058 if (data_copy == NULL)
2059 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2060 /* check if the received address is the beginning of header info */
2061 if (hdr != (const struct rte_ether_hdr *)data_copy)
2062 GOTO_FAIL("%s: Corrupted data address!\n", __func__);
2063
2064 /* read data of max length from valid offset */
2065 data_copy = rte_pktmbuf_read(m, hdr_len, UINT_MAX, NULL);
2066 if (data_copy == NULL)
2067 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2068 /* check if the received address is the beginning of data segment */
2069 if (data_copy != data)
2070 GOTO_FAIL("%s: Corrupted data address!\n", __func__);
2071
2072 /* try to read from mbuf with max size offset */
2073 data_copy = rte_pktmbuf_read(m, UINT_MAX, 0, NULL);
2074 if (data_copy != NULL)
2075 GOTO_FAIL("%s: Read from an out-of-range offset should fail!\n", __func__);
2076
2077 /* try to read from mbuf with max size offset and len */
2078 data_copy = rte_pktmbuf_read(m, UINT_MAX, UINT_MAX, NULL);
2079 if (data_copy != NULL)
2080 GOTO_FAIL("%s: Read with out-of-range offset and len should fail!\n", __func__);
2081
2082 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2083
2084 rte_pktmbuf_free(m);
2085 m = NULL;
2086
2087 return 0;
2088 fail:
2089 if (m) {
2090 rte_pktmbuf_free(m);
2091 m = NULL;
2092 }
2093 return -1;
2094 }
2095
2096 struct test_case {
2097 unsigned int seg_count; /* number of entries used in seg_lengths[] */
2098 unsigned int flags; /* MBUF_NO_HEADER, MBUF_HEADER or MBUF_NEG_TEST_READ */
2099 uint32_t read_off; /* offset passed to rte_pktmbuf_read() */
2100 uint32_t read_len; /* length passed to rte_pktmbuf_read() */
2101 unsigned int seg_lengths[MBUF_MAX_SEG]; /* data length of each segment */
2102 };
2103
2104 /* create an mbuf chain with differently sized segments and fill it with
2105  * continuous data [0x00 0x01 0x02 ...] (rte_pktmbuf_chain() sketch below)
2106  */
2107 static struct rte_mbuf *
2108 create_packet(struct rte_mempool *pktmbuf_pool,
2109 struct test_case *test_data)
2110 {
2111 uint16_t i, ret, seg, seg_len = 0;
2112 uint32_t last_index = 0;
2113 unsigned int seg_lengths[MBUF_MAX_SEG];
2114 unsigned int hdr_len;
2115 struct rte_mbuf *pkt = NULL;
2116 struct rte_mbuf *pkt_seg = NULL;
2117 char *hdr = NULL;
2118 char *data = NULL;
2119
2120 memcpy(seg_lengths, test_data->seg_lengths,
2121 sizeof(unsigned int)*test_data->seg_count);
2122 for (seg = 0; seg < test_data->seg_count; seg++) {
2123 hdr_len = 0;
2124 seg_len = seg_lengths[seg];
2125 pkt_seg = rte_pktmbuf_alloc(pktmbuf_pool);
2126 if (pkt_seg == NULL)
2127 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2128 if (rte_pktmbuf_pkt_len(pkt_seg) != 0)
2129 GOTO_FAIL("%s: Bad packet length\n", __func__);
2130 rte_mbuf_sanity_check(pkt_seg, 0);
2131 /* Add header only for the first segment */
2132 if (test_data->flags == MBUF_HEADER && seg == 0) {
2133 hdr_len = sizeof(struct rte_ether_hdr);
2134 /* prepend a header and fill with dummy data */
2135 hdr = (char *)rte_pktmbuf_prepend(pkt_seg, hdr_len);
2136 if (hdr == NULL)
2137 GOTO_FAIL("%s: Cannot prepend header\n",
2138 __func__);
2139 if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len)
2140 GOTO_FAIL("%s: Bad pkt length", __func__);
2141 if (rte_pktmbuf_data_len(pkt_seg) != hdr_len)
2142 GOTO_FAIL("%s: Bad data length", __func__);
2143 for (i = 0; i < hdr_len; i++)
2144 hdr[i] = (last_index + i) % 0xffff;
2145 last_index += hdr_len;
2146 }
2147 /* skip appending segment with 0 length */
2148 if (seg_len == 0)
2149 continue;
2150 data = rte_pktmbuf_append(pkt_seg, seg_len);
2151 if (data == NULL)
2152 GOTO_FAIL("%s: Cannot append data segment\n", __func__);
2153 if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len + seg_len)
2154 GOTO_FAIL("%s: Bad packet segment length: %d\n",
2155 __func__, rte_pktmbuf_pkt_len(pkt_seg));
2156 if (rte_pktmbuf_data_len(pkt_seg) != hdr_len + seg_len)
2157 GOTO_FAIL("%s: Bad data length\n", __func__);
2158 for (i = 0; i < seg_len; i++)
2159 data[i] = (last_index + i) % 0xffff;
2160 /* to fill continuous data from one seg to another */
2161 last_index += i;
2162 /* create chained mbufs */
2163 if (seg == 0)
2164 pkt = pkt_seg;
2165 else {
2166 ret = rte_pktmbuf_chain(pkt, pkt_seg);
2167 if (ret != 0)
2168 GOTO_FAIL("%s:FAIL: Chained mbuf creation %d\n",
2169 __func__, ret);
2170 }
2171
2172 pkt_seg = pkt_seg->next;
2173 }
2174 return pkt;
2175 fail:
2176 if (pkt != NULL) {
2177 rte_pktmbuf_free(pkt);
2178 pkt = NULL;
2179 }
2180 if (pkt_seg != NULL) {
2181 rte_pktmbuf_free(pkt_seg);
2182 pkt_seg = NULL;
2183 }
2184 return NULL;
2185 }
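
/*
 * Illustrative sketch only (the example_* name is hypothetical): the
 * rte_pktmbuf_chain() contract relied upon by create_packet() above. The tail
 * mbuf is linked after the last segment of the head, its pkt_len and nb_segs
 * are folded into the head, and the whole chain is later released with a
 * single rte_pktmbuf_free(head). The call fails with -EOVERFLOW if the chain
 * would exceed the segment limit.
 */
static __rte_unused struct rte_mbuf *
example_chain_two_segments(struct rte_mempool *pool)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(pool);
	struct rte_mbuf *tail = rte_pktmbuf_alloc(pool);

	if (head == NULL || tail == NULL)
		goto err;
	if (rte_pktmbuf_append(head, MBUF_TEST_DATA_LEN2) == NULL ||
			rte_pktmbuf_append(tail, MBUF_TEST_DATA_LEN2) == NULL)
		goto err;
	if (rte_pktmbuf_chain(head, tail) != 0)
		goto err;
	/* head now owns both segments: pkt_len == 2 * MBUF_TEST_DATA_LEN2 */
	return head;
err:
	rte_pktmbuf_free(head);
	rte_pktmbuf_free(tail);
	return NULL;
}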
2186
2187 static int
2188 test_pktmbuf_read_from_chain(struct rte_mempool *pktmbuf_pool)
2189 {
2190 struct rte_mbuf *m;
2191 struct test_case test_cases[] = {
2192 {
2193 .seg_lengths = { 100, 100, 100 },
2194 .seg_count = 3,
2195 .flags = MBUF_NO_HEADER,
2196 .read_off = 0,
2197 .read_len = 300
2198 },
2199 {
2200 .seg_lengths = { 100, 125, 150 },
2201 .seg_count = 3,
2202 .flags = MBUF_NO_HEADER,
2203 .read_off = 99,
2204 .read_len = 201
2205 },
2206 {
2207 .seg_lengths = { 100, 100 },
2208 .seg_count = 2,
2209 .flags = MBUF_NO_HEADER,
2210 .read_off = 0,
2211 .read_len = 100
2212 },
2213 {
2214 .seg_lengths = { 100, 200 },
2215 .seg_count = 2,
2216 .flags = MBUF_HEADER,
2217 .read_off = sizeof(struct rte_ether_hdr),
2218 .read_len = 150
2219 },
2220 {
2221 .seg_lengths = { 1000, 100 },
2222 .seg_count = 2,
2223 .flags = MBUF_NO_HEADER,
2224 .read_off = 0,
2225 .read_len = 1000
2226 },
2227 {
2228 .seg_lengths = { 1024, 0, 100 },
2229 .seg_count = 3,
2230 .flags = MBUF_NO_HEADER,
2231 .read_off = 100,
2232 .read_len = 1001
2233 },
2234 {
2235 .seg_lengths = { 1000, 1, 1000 },
2236 .seg_count = 3,
2237 .flags = MBUF_NO_HEADER,
2238 .read_off = 1000,
2239 .read_len = 2
2240 },
2241 {
2242 .seg_lengths = { MBUF_TEST_DATA_LEN,
2243 MBUF_TEST_DATA_LEN2,
2244 MBUF_TEST_DATA_LEN3, 800, 10 },
2245 .seg_count = 5,
2246 .flags = MBUF_NEG_TEST_READ,
2247 .read_off = 1000,
2248 .read_len = MBUF_DATA_SIZE
2249 },
2250 };
2251
2252 uint32_t i, pos;
2253 const char *data_copy = NULL;
2254 char data_buf[MBUF_DATA_SIZE];
2255
2256 memset(data_buf, 0, MBUF_DATA_SIZE);
2257
2258 for (i = 0; i < RTE_DIM(test_cases); i++) {
2259 m = create_packet(pktmbuf_pool, &test_cases[i]);
2260 if (m == NULL)
2261 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2262
2263 data_copy = rte_pktmbuf_read(m, test_cases[i].read_off,
2264 test_cases[i].read_len, data_buf);
2265 if (test_cases[i].flags == MBUF_NEG_TEST_READ) {
2266 if (data_copy != NULL)
2267 GOTO_FAIL("%s: mbuf data read should fail!\n",
2268 __func__);
2269 else {
2270 rte_pktmbuf_free(m);
2271 m = NULL;
2272 continue;
2273 }
2274 }
2275 if (data_copy == NULL)
2276 GOTO_FAIL("%s: Error in reading packet data!\n",
2277 __func__);
2278 for (pos = 0; pos < test_cases[i].read_len; pos++) {
2279 if (data_copy[pos] !=
2280 (char)((test_cases[i].read_off + pos)
2281 % 0xffff))
2282 GOTO_FAIL("Data corrupted at offset %u is %2X",
2283 pos, data_copy[pos]);
2284 }
2285 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2286 rte_pktmbuf_free(m);
2287 m = NULL;
2288 }
2289 return 0;
2290
2291 fail:
2292 if (m != NULL) {
2293 rte_pktmbuf_free(m);
2294 m = NULL;
2295 }
2296 return -1;
2297 }
2298
2299 /* Define a free callback function to be used for the external buffer */
2300 static void
2301 ext_buf_free_callback_fn(void *addr __rte_unused, void *opaque)
2302 {
2303 void *ext_buf_addr = opaque;
2304
2305 if (ext_buf_addr == NULL) {
2306 printf("External buffer address is invalid\n");
2307 return;
2308 }
2309 rte_free(ext_buf_addr);
2310 ext_buf_addr = NULL;
2311 printf("External buffer freed via callback\n");
2312 }
2313
2314 /*
2315 * Test to initialize shared data in external buffer before attaching to mbuf
2316 * - Allocate mbuf with no data.
2317  * - Allocate an external buffer large enough to also accommodate the
2318  *   trailing rte_mbuf_ext_shared_info.
2319 * - Invoke pktmbuf_ext_shinfo_init_helper to initialize shared data.
2320 * - Invoke rte_pktmbuf_attach_extbuf to attach external buffer to the mbuf.
2321 * - Clone another mbuf and attach the same external buffer to it.
2322 * - Invoke rte_pktmbuf_detach_extbuf to detach the external buffer from mbuf.
2323 */
2324 static int
2325 test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
2326 {
2327 struct rte_mbuf *m = NULL;
2328 struct rte_mbuf *clone = NULL;
2329 struct rte_mbuf_ext_shared_info *ret_shinfo = NULL;
2330 rte_iova_t buf_iova;
2331 void *ext_buf_addr = NULL;
2332 uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
2333 sizeof(struct rte_mbuf_ext_shared_info);
2334
2335 /* alloc a mbuf */
2336 m = rte_pktmbuf_alloc(pktmbuf_pool);
2337 if (m == NULL)
2338 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2339 if (rte_pktmbuf_pkt_len(m) != 0)
2340 GOTO_FAIL("%s: Bad packet length\n", __func__);
2341 rte_mbuf_sanity_check(m, 0);
2342
2343 ext_buf_addr = rte_malloc("External buffer", buf_len,
2344 RTE_CACHE_LINE_SIZE);
2345 if (ext_buf_addr == NULL)
2346 GOTO_FAIL("%s: External buffer allocation failed\n", __func__);
2347
2348 ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len,
2349 ext_buf_free_callback_fn, ext_buf_addr);
2350 if (ret_shinfo == NULL)
2351 GOTO_FAIL("%s: Shared info initialization failed!\n", __func__);
2352
2353 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2354 GOTO_FAIL("%s: External refcount is not 1\n", __func__);
2355
2356 if (rte_mbuf_refcnt_read(m) != 1)
2357 GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);
2358
2359 buf_iova = rte_mem_virt2iova(ext_buf_addr);
2360 rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
2361 ret_shinfo);
2362 if (m->ol_flags != EXT_ATTACHED_MBUF)
2363 GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
2364 __func__);
2365
2366 /* allocate one more mbuf */
2367 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
2368 if (clone == NULL)
2369 GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
2370 if (rte_pktmbuf_pkt_len(clone) != 0)
2371 GOTO_FAIL("%s: Bad packet length\n", __func__);
2372
2373 /* attach the same external buffer to the cloned mbuf */
2374 rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
2375 ret_shinfo);
2376 if (clone->ol_flags != EXT_ATTACHED_MBUF)
2377 GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
2378 __func__);
2379
2380 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2381 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2382
2383 /* test to manually update ext_buf_ref_cnt from 2 to 3 */
2384 rte_mbuf_ext_refcnt_update(ret_shinfo, 1);
2385 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 3)
2386 GOTO_FAIL("%s: Update ext_buf ref_cnt failed\n", __func__);
2387
2388 /* reset the ext_refcnt before freeing the external buffer */
2389 rte_mbuf_ext_refcnt_set(ret_shinfo, 2);
2390 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2391 GOTO_FAIL("%s: set ext_buf ref_cnt failed\n", __func__);
2392
2393 /* detach the external buffer from mbufs */
2394 rte_pktmbuf_detach_extbuf(m);
2395 /* check if ref cnt is decremented */
2396 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2397 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2398
2399 rte_pktmbuf_detach_extbuf(clone);
2400 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 0)
2401 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2402
2403 rte_pktmbuf_free(m);
2404 m = NULL;
2405 rte_pktmbuf_free(clone);
2406 clone = NULL;
2407
2408 return 0;
2409
2410 fail:
2411 if (m) {
2412 rte_pktmbuf_free(m);
2413 m = NULL;
2414 }
2415 if (clone) {
2416 rte_pktmbuf_free(clone);
2417 clone = NULL;
2418 }
2419 if (ext_buf_addr != NULL) {
2420 rte_free(ext_buf_addr);
2421 ext_buf_addr = NULL;
2422 }
2423 return -1;
2424 }
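
/*
 * Illustrative sketch only (the example_* name is hypothetical): the typical
 * external-buffer lifecycle exercised above. The init helper carves the
 * shared info out of the tail of the buffer and shrinks *buf_len accordingly;
 * once attached, freeing the last mbuf referencing the buffer drops the
 * external refcount to zero and invokes the registered free callback.
 */
static __rte_unused int
example_attach_extbuf(struct rte_mempool *pool)
{
	struct rte_mbuf_ext_shared_info *shinfo;
	struct rte_mbuf *m;
	uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
		sizeof(struct rte_mbuf_ext_shared_info);
	void *ext_buf = rte_malloc("example_extbuf", buf_len,
		RTE_CACHE_LINE_SIZE);

	if (ext_buf == NULL)
		return -1;
	shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf, &buf_len,
		ext_buf_free_callback_fn, ext_buf);
	m = rte_pktmbuf_alloc(pool);
	if (shinfo == NULL || m == NULL) {
		rte_pktmbuf_free(m);
		rte_free(ext_buf);
		return -1;
	}
	rte_pktmbuf_attach_extbuf(m, ext_buf, rte_mem_virt2iova(ext_buf),
		buf_len, shinfo);
	rte_pktmbuf_free(m); /* last reference: the callback releases ext_buf */
	return 0;
}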
2425
2426 /*
2427 * Test the mbuf pool with pinned external data buffers
2428 * - Allocate memory zone for external buffer
2429 * - Create the mbuf pool with pinned external buffer
2430 * - Check the created pool with relevant mbuf pool unit tests
2431 */
2432 static int
2433 test_pktmbuf_ext_pinned_buffer(struct rte_mempool *std_pool)
2434 {
2435
2436 struct rte_pktmbuf_extmem ext_mem;
2437 struct rte_mempool *pinned_pool = NULL;
2438 const struct rte_memzone *mz = NULL;
2439
2440 printf("Test mbuf pool with external pinned data buffers\n");
2441
2442 /* Allocate memzone for the external data buffer */
2443 mz = rte_memzone_reserve("pinned_pool",
2444 NB_MBUF * MBUF_DATA_SIZE,
2445 SOCKET_ID_ANY,
2446 RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
2447 if (mz == NULL)
2448 GOTO_FAIL("%s: Memzone allocation failed\n", __func__);
2449
2450 /* Create the mbuf pool with pinned external data buffer */
2451 ext_mem.buf_ptr = mz->addr;
2452 ext_mem.buf_iova = mz->iova;
2453 ext_mem.buf_len = mz->len;
2454 ext_mem.elt_size = MBUF_DATA_SIZE;
2455
2456 pinned_pool = rte_pktmbuf_pool_create_extbuf("test_pinned_pool",
2457 NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
2458 MBUF_DATA_SIZE, SOCKET_ID_ANY,
2459 &ext_mem, 1);
2460 if (pinned_pool == NULL)
2461 GOTO_FAIL("%s: Mbuf pool with pinned external"
2462 " buffer creation failed\n", __func__);
2463 /* test multiple mbuf alloc */
2464 if (test_pktmbuf_pool(pinned_pool) < 0)
2465 GOTO_FAIL("%s: test_mbuf_pool(pinned) failed\n",
2466 __func__);
2467
2468 /* do it another time to check that all mbufs were freed */
2469 if (test_pktmbuf_pool(pinned_pool) < 0)
2470 GOTO_FAIL("%s: test_mbuf_pool(pinned) failed (2)\n",
2471 __func__);
2472
2473 /* test that the data pointer on a packet mbuf is set properly */
2474 if (test_pktmbuf_pool_ptr(pinned_pool) < 0)
2475 GOTO_FAIL("%s: test_pktmbuf_pool_ptr(pinned) failed\n",
2476 __func__);
2477
2478 /* test data manipulation in mbuf with non-ascii data */
2479 if (test_pktmbuf_with_non_ascii_data(pinned_pool) < 0)
2480 GOTO_FAIL("%s: test_pktmbuf_with_non_ascii_data(pinned)"
2481 " failed\n", __func__);
2482
2483 /* test free pktmbuf segment one by one */
2484 if (test_pktmbuf_free_segment(pinned_pool) < 0)
2485 GOTO_FAIL("%s: test_pktmbuf_free_segment(pinned) failed\n",
2486 __func__);
2487
2488 if (testclone_testupdate_testdetach(pinned_pool, std_pool) < 0)
2489 GOTO_FAIL("%s: testclone_and_testupdate(pinned) failed\n",
2490 __func__);
2491
2492 if (test_pktmbuf_copy(pinned_pool, std_pool) < 0)
2493 GOTO_FAIL("%s: test_pktmbuf_copy(pinned) failed\n",
2494 __func__);
2495
2496 if (test_failing_mbuf_sanity_check(pinned_pool) < 0)
2497 GOTO_FAIL("%s: test_failing_mbuf_sanity_check(pinned)"
2498 " failed\n", __func__);
2499
2500 if (test_mbuf_linearize_check(pinned_pool) < 0)
2501 GOTO_FAIL("%s: test_mbuf_linearize_check(pinned) failed\n",
2502 __func__);
2503
2504 /* test for allocating a bulk of mbufs with various sizes */
2505 if (test_pktmbuf_alloc_bulk(pinned_pool) < 0)
2506 GOTO_FAIL("%s: test_rte_pktmbuf_alloc_bulk(pinned) failed\n",
2507 __func__);
2508
2509 /* negative test: bulk allocation requests that must fail */
2510 if (test_neg_pktmbuf_alloc_bulk(pinned_pool) < 0)
2511 GOTO_FAIL("%s: test_neg_rte_pktmbuf_alloc_bulk(pinned)"
2512 " failed\n", __func__);
2513
2514 /* test to read mbuf packet */
2515 if (test_pktmbuf_read(pinned_pool) < 0)
2516 GOTO_FAIL("%s: test_rte_pktmbuf_read(pinned) failed\n",
2517 __func__);
2518
2519 /* test to read mbuf packet from offset */
2520 if (test_pktmbuf_read_from_offset(pinned_pool) < 0)
2521 GOTO_FAIL("%s: test_rte_pktmbuf_read_from_offset(pinned)"
2522 " failed\n", __func__);
2523
2524 /* test to read data from chain of mbufs with data segments */
2525 if (test_pktmbuf_read_from_chain(pinned_pool) < 0)
2526 GOTO_FAIL("%s: test_rte_pktmbuf_read_from_chain(pinned)"
2527 " failed\n", __func__);
2528
2529 RTE_SET_USED(std_pool);
2530 rte_mempool_free(pinned_pool);
2531 rte_memzone_free(mz);
2532 return 0;
2533
2534 fail:
2535 rte_mempool_free(pinned_pool);
2536 rte_memzone_free(mz);
2537 return -1;
2538 }
2539
2540 static int
2541 test_mbuf_dyn(struct rte_mempool *pktmbuf_pool)
2542 {
2543 const struct rte_mbuf_dynfield dynfield = {
2544 .name = "test-dynfield",
2545 .size = sizeof(uint8_t),
2546 .align = __alignof__(uint8_t),
2547 .flags = 0,
2548 };
2549 const struct rte_mbuf_dynfield dynfield2 = {
2550 .name = "test-dynfield2",
2551 .size = sizeof(uint16_t),
2552 .align = __alignof__(uint16_t),
2553 .flags = 0,
2554 };
2555 const struct rte_mbuf_dynfield dynfield3 = {
2556 .name = "test-dynfield3",
2557 .size = sizeof(uint8_t),
2558 .align = __alignof__(uint8_t),
2559 .flags = 0,
2560 };
2561 const struct rte_mbuf_dynfield dynfield_fail_big = {
2562 .name = "test-dynfield-fail-big",
2563 .size = 256,
2564 .align = 1,
2565 .flags = 0,
2566 };
2567 const struct rte_mbuf_dynfield dynfield_fail_align = {
2568 .name = "test-dynfield-fail-align",
2569 .size = 1,
2570 .align = 3,
2571 .flags = 0,
2572 };
2573 const struct rte_mbuf_dynflag dynflag = {
2574 .name = "test-dynflag",
2575 .flags = 0,
2576 };
2577 const struct rte_mbuf_dynflag dynflag2 = {
2578 .name = "test-dynflag2",
2579 .flags = 0,
2580 };
2581 const struct rte_mbuf_dynflag dynflag3 = {
2582 .name = "test-dynflag3",
2583 .flags = 0,
2584 };
2585 struct rte_mbuf *m = NULL;
2586 int offset, offset2, offset3;
2587 int flag, flag2, flag3;
2588 int ret;
2589
2590 printf("Test mbuf dynamic fields and flags\n");
2591 rte_mbuf_dyn_dump(stdout);
2592
2593 offset = rte_mbuf_dynfield_register(&dynfield);
2594 if (offset == -1)
2595 GOTO_FAIL("failed to register dynamic field, offset=%d: %s",
2596 offset, strerror(errno));
2597
2598 ret = rte_mbuf_dynfield_register(&dynfield);
2599 if (ret != offset)
2600 GOTO_FAIL("failed to lookup dynamic field, ret=%d: %s",
2601 ret, strerror(errno));
2602
2603 offset2 = rte_mbuf_dynfield_register(&dynfield2);
2604 if (offset2 == -1 || offset2 == offset || (offset2 & 1))
2605 GOTO_FAIL("failed to register dynamic field 2, offset2=%d: %s",
2606 offset2, strerror(errno));
2607
2608 offset3 = rte_mbuf_dynfield_register_offset(&dynfield3,
2609 offsetof(struct rte_mbuf, dynfield1[1]));
2610 if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) {
2611 if (rte_errno == EBUSY)
2612 printf("mbuf test error skipped: dynfield is busy\n");
2613 else
2614 GOTO_FAIL("failed to register dynamic field 3, offset="
2615 "%d: %s", offset3, strerror(errno));
2616 }
2617
2618 printf("dynfield: offset=%d, offset2=%d, offset3=%d\n",
2619 offset, offset2, offset3);
2620
2621 ret = rte_mbuf_dynfield_register(&dynfield_fail_big);
2622 if (ret != -1)
2623 GOTO_FAIL("dynamic field creation should fail (too big)");
2624
2625 ret = rte_mbuf_dynfield_register(&dynfield_fail_align);
2626 if (ret != -1)
2627 GOTO_FAIL("dynamic field creation should fail (bad alignment)");
2628
2629 ret = rte_mbuf_dynfield_register_offset(&dynfield_fail_align,
2630 offsetof(struct rte_mbuf, ol_flags));
2631 if (ret != -1)
2632 GOTO_FAIL("dynamic field creation should fail (not avail)");
2633
2634 flag = rte_mbuf_dynflag_register(&dynflag);
2635 if (flag == -1)
2636 GOTO_FAIL("failed to register dynamic flag, flag=%d: %s",
2637 flag, strerror(errno));
2638
2639 ret = rte_mbuf_dynflag_register(&dynflag);
2640 if (ret != flag)
2641 GOTO_FAIL("failed to lookup dynamic flag, ret=%d: %s",
2642 ret, strerror(errno));
2643
2644 flag2 = rte_mbuf_dynflag_register(&dynflag2);
2645 if (flag2 == -1 || flag2 == flag)
2646 GOTO_FAIL("failed to register dynamic flag 2, flag2=%d: %s",
2647 flag2, strerror(errno));
2648
2649 flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3,
2650 rte_bsf64(PKT_LAST_FREE));
2651 if (flag3 != rte_bsf64(PKT_LAST_FREE))
2652 GOTO_FAIL("failed to register dynamic flag 3, flag3=%d: %s",
2653 flag3, strerror(errno));
2654
2655 printf("dynflag: flag=%d, flag2=%d, flag3=%d\n", flag, flag2, flag3);
2656
2657 /* set, get dynamic field */
2658 m = rte_pktmbuf_alloc(pktmbuf_pool);
2659 if (m == NULL)
2660 GOTO_FAIL("Cannot allocate mbuf");
2661
2662 *RTE_MBUF_DYNFIELD(m, offset, uint8_t *) = 1;
2663 if (*RTE_MBUF_DYNFIELD(m, offset, uint8_t *) != 1)
2664 GOTO_FAIL("failed to read dynamic field");
2665 *RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) = 1000;
2666 if (*RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) != 1000)
2667 GOTO_FAIL("failed to read dynamic field");
2668
2669 /* set a dynamic flag */
2670 m->ol_flags |= (1ULL << flag);
2671
2672 rte_mbuf_dyn_dump(stdout);
2673 rte_pktmbuf_free(m);
2674 return 0;
2675 fail:
2676 rte_pktmbuf_free(m);
2677 return -1;
2678 }
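
/*
 * Illustrative sketch only (the names below are hypothetical, not part of the
 * test): the usual application pattern is to register a dynamic field once
 * (re-registering with identical parameters simply returns the same offset),
 * keep the returned offset, and then access the field per packet through
 * RTE_MBUF_DYNFIELD().
 */
static __rte_unused int
example_dynfield_usage(struct rte_mbuf *m)
{
	static const struct rte_mbuf_dynfield desc = {
		.name = "example-seqno",
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	int offset = rte_mbuf_dynfield_register(&desc);

	if (offset < 0)
		return -1; /* no suitable room left in struct rte_mbuf */

	*RTE_MBUF_DYNFIELD(m, offset, uint32_t *) = 42;
	return (int)*RTE_MBUF_DYNFIELD(m, offset, uint32_t *);
}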
2679
2680 static int
2681 test_mbuf(void)
2682 {
2683 int ret = -1;
2684 struct rte_mempool *pktmbuf_pool = NULL;
2685 struct rte_mempool *pktmbuf_pool2 = NULL;
2686
2687
2688 RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != RTE_CACHE_LINE_MIN_SIZE * 2);
2689
2690 /* create the pktmbuf pool used by most of the tests below */
2691 pktmbuf_pool = rte_pktmbuf_pool_create("test_pktmbuf_pool",
2692 NB_MBUF, MEMPOOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
2693 SOCKET_ID_ANY);
2694
2695 if (pktmbuf_pool == NULL) {
2696 printf("cannot allocate mbuf pool\n");
2697 goto err;
2698 }
2699
2700 /* test registration of dynamic fields and flags */
2701 if (test_mbuf_dyn(pktmbuf_pool) < 0) {
2702 printf("mbuf dynflag test failed\n");
2703 goto err;
2704 }
2705
2706 /* create a specific pktmbuf pool with a priv_size != 0 and no data
2707 * room size */
2708 pktmbuf_pool2 = rte_pktmbuf_pool_create("test_pktmbuf_pool2",
2709 NB_MBUF, MEMPOOL_CACHE_SIZE, MBUF2_PRIV_SIZE, 0,
2710 SOCKET_ID_ANY);
2711
2712 if (pktmbuf_pool2 == NULL) {
2713 printf("cannot allocate mbuf pool\n");
2714 goto err;
2715 }
2716
2717 /* test multiple mbuf alloc */
2718 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2719 printf("test_mbuf_pool() failed\n");
2720 goto err;
2721 }
2722
2723 /* do it another time to check that all mbufs were freed */
2724 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2725 printf("test_mbuf_pool() failed (2)\n");
2726 goto err;
2727 }
2728
2729 /* test bulk mbuf alloc and free */
2730 if (test_pktmbuf_pool_bulk() < 0) {
2731 printf("test_pktmbuf_pool_bulk() failed\n");
2732 goto err;
2733 }
2734
2735 /* test that the pointer to the data on a packet mbuf is set properly */
2736 if (test_pktmbuf_pool_ptr(pktmbuf_pool) < 0) {
2737 printf("test_pktmbuf_pool_ptr() failed\n");
2738 goto err;
2739 }
2740
2741 /* test data manipulation in mbuf */
2742 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2743 printf("test_one_mbuf() failed\n");
2744 goto err;
2745 }
2746
2747
2748 /*
2749 * do it another time, to check that allocation reinitializes
2750 * the mbuf correctly
2751 */
2752 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2753 printf("test_one_mbuf() failed (2)\n");
2754 goto err;
2755 }
2756
2757 if (test_pktmbuf_with_non_ascii_data(pktmbuf_pool) < 0) {
2758 printf("test_pktmbuf_with_non_ascii_data() failed\n");
2759 goto err;
2760 }
2761
2762 /* test free pktmbuf segment one by one */
2763 if (test_pktmbuf_free_segment(pktmbuf_pool) < 0) {
2764 printf("test_pktmbuf_free_segment() failed.\n");
2765 goto err;
2766 }
2767
2768 if (testclone_testupdate_testdetach(pktmbuf_pool, pktmbuf_pool) < 0) {
2769 printf("testclone_and_testupdate() failed \n");
2770 goto err;
2771 }
2772
2773 if (test_pktmbuf_copy(pktmbuf_pool, pktmbuf_pool) < 0) {
2774 printf("test_pktmbuf_copy() failed\n");
2775 goto err;
2776 }
2777
2778 if (test_attach_from_different_pool(pktmbuf_pool, pktmbuf_pool2) < 0) {
2779 printf("test_attach_from_different_pool() failed\n");
2780 goto err;
2781 }
2782
2783 if (test_refcnt_mbuf() < 0) {
2784 printf("test_refcnt_mbuf() failed \n");
2785 goto err;
2786 }
2787
2788 if (test_failing_mbuf_sanity_check(pktmbuf_pool) < 0) {
2789 printf("test_failing_mbuf_sanity_check() failed\n");
2790 goto err;
2791 }
2792
2793 if (test_mbuf_linearize_check(pktmbuf_pool) < 0) {
2794 printf("test_mbuf_linearize_check() failed\n");
2795 goto err;
2796 }
2797
2798 if (test_tx_offload() < 0) {
2799 printf("test_tx_offload() failed\n");
2800 goto err;
2801 }
2802
2803 if (test_get_rx_ol_flag_list() < 0) {
2804 printf("test_rte_get_rx_ol_flag_list() failed\n");
2805 goto err;
2806 }
2807
2808 if (test_get_tx_ol_flag_list() < 0) {
2809 printf("test_rte_get_tx_ol_flag_list() failed\n");
2810 goto err;
2811 }
2812
2813 if (test_get_rx_ol_flag_name() < 0) {
2814 printf("test_rte_get_rx_ol_flag_name() failed\n");
2815 goto err;
2816 }
2817
2818 if (test_get_tx_ol_flag_name() < 0) {
2819 printf("test_rte_get_tx_ol_flag_name() failed\n");
2820 goto err;
2821 }
2822
2823 if (test_mbuf_validate_tx_offload_one(pktmbuf_pool) < 0) {
2824 printf("test_mbuf_validate_tx_offload_one() failed\n");
2825 goto err;
2826 }
2827
2828 /* test for allocating a bulk of mbufs with various sizes */
2829 if (test_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2830 printf("test_rte_pktmbuf_alloc_bulk() failed\n");
2831 goto err;
2832 }
2833
2834 /* negative test: bulk allocation requests that must fail */
2835 if (test_neg_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2836 printf("test_neg_rte_pktmbuf_alloc_bulk() failed\n");
2837 goto err;
2838 }
2839
2840 /* test to read mbuf packet */
2841 if (test_pktmbuf_read(pktmbuf_pool) < 0) {
2842 printf("test_rte_pktmbuf_read() failed\n");
2843 goto err;
2844 }
2845
2846 /* test to read mbuf packet from offset */
2847 if (test_pktmbuf_read_from_offset(pktmbuf_pool) < 0) {
2848 printf("test_rte_pktmbuf_read_from_offset() failed\n");
2849 goto err;
2850 }
2851
2852 /* test to read data from chain of mbufs with data segments */
2853 if (test_pktmbuf_read_from_chain(pktmbuf_pool) < 0) {
2854 printf("test_rte_pktmbuf_read_from_chain() failed\n");
2855 goto err;
2856 }
2857
2858 /* test to initialize shared info. at the end of external buffer */
2859 if (test_pktmbuf_ext_shinfo_init_helper(pktmbuf_pool) < 0) {
2860 printf("test_pktmbuf_ext_shinfo_init_helper() failed\n");
2861 goto err;
2862 }
2863
2864 /* test the mbuf pool with pinned external data buffers */
2865 if (test_pktmbuf_ext_pinned_buffer(pktmbuf_pool) < 0) {
2866 printf("test_pktmbuf_ext_pinned_buffer() failed\n");
2867 goto err;
2868 }
2869
2870
2871 ret = 0;
2872 err:
2873 rte_mempool_free(pktmbuf_pool);
2874 rte_mempool_free(pktmbuf_pool2);
2875 return ret;
2876 }
2877 #undef GOTO_FAIL
2878
2879 REGISTER_TEST_COMMAND(mbuf_autotest, test_mbuf);
2880