/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include "test.h"

#include <unistd.h>
#include <string.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_distributor.h>
#include <rte_string_fns.h>

#define ITER_POWER 20 /* log 2 of how many iterations we do when timing. */
#define BURST 32
#define BIG_BATCH 1024

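/* BURST is the number of mbufs handed to rte_distributor_process() in one
 * call; BIG_BATCH sizes the large burst used to stress the returned-packets
 * path.
 */
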
struct worker_params {
	char name[64];
	struct rte_distributor *dist;
};

struct worker_params worker_params;

/* statics - all zero-initialized by default */
static volatile int quit;      /**< general quit variable for all threads */
static volatile int zero_quit; /**< var for when we just want thr0 to quit */
static volatile unsigned worker_idx;
struct worker_stats {
	volatile unsigned handled_packets;
} __rte_cache_aligned;
struct worker_stats worker_stats[RTE_MAX_LCORE];

/* returns the total count of the number of packets handled by the worker
 * functions given below.
 */
static inline unsigned
total_packet_count(void)
{
	unsigned i, count = 0;
	for (i = 0; i < worker_idx; i++)
		count += worker_stats[i].handled_packets;
	return count;
}

/* resets the packet counts for a new test */
static inline void
clear_packet_count(void)
{
	memset(&worker_stats, 0, sizeof(worker_stats));
}

/* this is the basic worker function for the sanity tests;
 * it does nothing but return packets and count them.
 */
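/* Note on the worker loop pattern used throughout this file:
 * rte_distributor_get_pkt() hands the previous burst back to the distributor
 * and collects a new one, which is why the same 8-entry buf array is passed
 * as both the receive array and the "old packets" array, with num giving the
 * count being returned. The call waits until the distributor thread assigns
 * more packets.
 */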
static int
handle_work(void *arg)
{
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *db = wp->dist;
	unsigned int count = 0, num = 0;
	unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
	int i;

	for (i = 0; i < 8; i++)
		buf[i] = NULL;
	num = rte_distributor_get_pkt(db, id, buf, buf, num);
	while (!quit) {
		__atomic_fetch_add(&worker_stats[id].handled_packets, num,
				__ATOMIC_RELAXED);
		count += num;
		num = rte_distributor_get_pkt(db, id, buf, buf, num);
	}
	__atomic_fetch_add(&worker_stats[id].handled_packets, num,
			__ATOMIC_RELAXED);
	count += num;
	rte_distributor_return_pkt(db, id, buf, num);
	return 0;
}

/* do basic sanity testing of the distributor. This test checks the following:
 * - send 32 packets through the distributor with the same tag and ensure they
 *   all go to the one worker
 * - send 32 packets through the distributor with two different tags and
 *   verify that they go equally to two different workers.
 * - send 32 packets with different tags through the distributor and
 *   verify that we get all packets back.
 * - send 1024 packets through the distributor, gathering the returned packets
 *   as we go. Then verify that we correctly got all 1024 pointers back again,
 *   not necessarily in the same order (as different flows).
 */
static int
sanity_test(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *db = wp->dist;
	struct rte_mbuf *bufs[BURST];
	struct rte_mbuf *returns[BURST*2];
	unsigned int i, count;
	unsigned int retries;

	printf("=== Basic distributor sanity tests ===\n");
	clear_packet_count();
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

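	/* the distributor treats the mbuf hash as an opaque flow tag and keeps
	 * all packets carrying the same tag on the same worker; hash.usr is
	 * the user-writable view of that hash field.
	 */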
	/* now set all hash values in all buffers to zero, so all pkts go to the
	 * one worker thread */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 0;

	rte_distributor_process(db, bufs, BURST);
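	/* drain pattern used throughout this file: flush so that in-flight
	 * packets are pushed through the workers, then collect everything the
	 * workers have handed back, repeating until the burst is accounted for
	 */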
	count = 0;
	do {
		rte_distributor_flush(db);
		count += rte_distributor_returned_pkts(db,
				returns, BURST*2);
	} while (count < BURST);

	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Sanity test with all zero hashes done.\n");

	/* pick two flows and check they go correctly */
	if (rte_lcore_count() >= 3) {
		clear_packet_count();
		/* alternate packets between two tags: 0 and 256 */
		for (i = 0; i < BURST; i++)
			bufs[i]->hash.usr = (i & 1) << 8;

		rte_distributor_process(db, bufs, BURST);
		count = 0;
		do {
			rte_distributor_flush(db);
			count += rte_distributor_returned_pkts(db,
					returns, BURST*2);
		} while (count < BURST);
		if (total_packet_count() != BURST) {
			printf("Line %d: Error, not all packets flushed. "
					"Expected %u, got %u\n",
					__LINE__, BURST, total_packet_count());
			return -1;
		}

		for (i = 0; i < rte_lcore_count() - 1; i++)
			printf("Worker %u handled %u packets\n", i,
					worker_stats[i].handled_packets);
		printf("Sanity test with two hash values done\n");
	}

	/* give a different hash value to each packet,
	 * so load gets distributed */
	clear_packet_count();
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = i+1;

	rte_distributor_process(db, bufs, BURST);
	count = 0;
	do {
		rte_distributor_flush(db);
		count += rte_distributor_returned_pkts(db,
				returns, BURST*2);
	} while (count < BURST);
	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Sanity test with non-zero hashes done\n");

	rte_mempool_put_bulk(p, (void *)bufs, BURST);

	/* sanity test with BIG_BATCH packets to ensure they all arrived back
	 * from the returned packets function */
	clear_packet_count();
	struct rte_mbuf *many_bufs[BIG_BATCH], *return_bufs[BIG_BATCH];
	unsigned num_returned = 0;

	/* flush out any remaining packets */
	rte_distributor_flush(db);
	rte_distributor_clear_returns(db);

	if (rte_mempool_get_bulk(p, (void *)many_bufs, BIG_BATCH) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}
	/* give each packet a distinct tag so the load spreads over workers */
	for (i = 0; i < BIG_BATCH; i++)
		many_bufs[i]->hash.usr = i << 2;

	printf("=== testing big burst (%s) ===\n", wp->name);
	for (i = 0; i < BIG_BATCH/BURST; i++) {
		rte_distributor_process(db,
				&many_bufs[i*BURST], BURST);
		count = rte_distributor_returned_pkts(db,
				&return_bufs[num_returned],
				BIG_BATCH - num_returned);
		num_returned += count;
	}
	rte_distributor_flush(db);
	count = rte_distributor_returned_pkts(db,
			&return_bufs[num_returned],
			BIG_BATCH - num_returned);
	num_returned += count;
	/* keep flushing and collecting, with a bounded number of retries so
	 * that a lost packet cannot hang the test */
	retries = 0;
	do {
		rte_distributor_flush(db);
		count = rte_distributor_returned_pkts(db,
				&return_bufs[num_returned],
				BIG_BATCH - num_returned);
		num_returned += count;
		retries++;
	} while ((num_returned < BIG_BATCH) && (retries < 100));

	if (num_returned != BIG_BATCH) {
		printf("line %d: Missing packets, expected %d, got %u\n",
				__LINE__, BIG_BATCH, num_returned);
		return -1;
	}

	/* big check - make sure all packets made it back */
	for (i = 0; i < BIG_BATCH; i++) {
		unsigned j;
		struct rte_mbuf *src = many_bufs[i];
		for (j = 0; j < BIG_BATCH; j++) {
			if (return_bufs[j] == src)
				break;
		}

		if (j == BIG_BATCH) {
			printf("Error: could not find source packet #%u\n", i);
			return -1;
		}
	}
	printf("Sanity test of returned packets done\n");

	rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);

	printf("\n");
	return 0;
}


/* to test that the distributor does not lose packets, we use this worker
 * function which frees mbufs when it gets them. The distributor thread does
 * the mbuf allocation. If the distributor drops packets we'll eventually run
 * out of mbufs.
 */
static int
handle_work_with_free_mbufs(void *arg)
{
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *d = wp->dist;
	unsigned int count = 0;
	unsigned int i;
	unsigned int num = 0;
	unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);

	for (i = 0; i < 8; i++)
		buf[i] = NULL;
	num = rte_distributor_get_pkt(d, id, buf, buf, num);
	while (!quit) {
		worker_stats[id].handled_packets += num;
		count += num;
		for (i = 0; i < num; i++)
			rte_pktmbuf_free(buf[i]);
		num = rte_distributor_get_pkt(d, id, buf, buf, num);
	}
	worker_stats[id].handled_packets += num;
	count += num;
	rte_distributor_return_pkt(d, id, buf, num);
	return 0;
}

/* Perform a sanity test of the distributor with a large number of packets,
 * where we allocate a new set of mbufs for each burst. The workers then
 * free the mbufs. This ensures that we don't have any packet leaks in the
 * library.
 */
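/* Note: (1 << ITER_POWER) is 2^20, roughly a million packets, sent through
 * in bursts of BURST; the tag (i + j) << 1 changes per packet so the load
 * spreads across all workers.
 */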
static int
sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	unsigned i;
	struct rte_mbuf *bufs[BURST];

	printf("=== Sanity test with mbuf alloc/free (%s) ===\n", wp->name);

	clear_packet_count();
	for (i = 0; i < ((1<<ITER_POWER)); i += BURST) {
		unsigned j;
		/* if the pool is empty, run the distributor with an empty
		 * burst so the workers can catch up and free their mbufs */
		while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
			rte_distributor_process(d, NULL, 0);
		for (j = 0; j < BURST; j++) {
			bufs[j]->hash.usr = (i+j) << 1;
			rte_mbuf_refcnt_set(bufs[j], 1);
		}

		rte_distributor_process(d, bufs, BURST);
	}

	rte_distributor_flush(d);

	rte_delay_us(10000);

	if (total_packet_count() < (1<<ITER_POWER)) {
		printf("Line %u: Packet count is incorrect, %u, expected %u\n",
				__LINE__, total_packet_count(),
				(1<<ITER_POWER));
		return -1;
	}

	printf("Sanity test with mbuf alloc/free passed\n\n");
	return 0;
}

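/* Worker used by the shutdown tests. It behaves like
 * handle_work_with_free_mbufs(), but worker zero additionally honours the
 * zero_quit flag so the tests can shut it down and later restart it to pick
 * up any packets left in its backlog.
 */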
static int
handle_work_for_shutdown_test(void *arg)
{
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *d = wp->dist;
	unsigned int count = 0;
	unsigned int num = 0;
	unsigned int i;
	unsigned int returned = 0;
	const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
			__ATOMIC_RELAXED);

	num = rte_distributor_get_pkt(d, id, buf, buf, num);

	/* wait for quit signal globally, or for worker zero, wait
	 * for zero_quit */
	while (!quit && !(id == 0 && zero_quit)) {
		worker_stats[id].handled_packets += num;
		count += num;
		for (i = 0; i < num; i++)
			rte_pktmbuf_free(buf[i]);
		num = rte_distributor_get_pkt(d, id, buf, buf, num);
	}
	worker_stats[id].handled_packets += num;
	count += num;
	returned = rte_distributor_return_pkt(d, id, buf, num);

	if (id == 0) {
		/* for worker zero, allow it to restart to pick up last packet
		 * when all workers are shutting down.
		 */
		while (zero_quit)
			usleep(100);

		num = rte_distributor_get_pkt(d, id, buf, buf, num);

		while (!quit) {
			worker_stats[id].handled_packets += num;
			count += num;
			/* free the packets received in this burst */
			for (i = 0; i < num; i++)
				rte_pktmbuf_free(buf[i]);
			num = rte_distributor_get_pkt(d, id, buf, buf, num);
		}
		returned = rte_distributor_return_pkt(d, id, buf, num);
		printf("Num returned = %d\n", returned);
	}
	return 0;
}


/* Perform a sanity test of the distributor with worker shutdown: queue two
 * bursts of packets on a single flow so one worker builds up a backlog, tell
 * that worker to quit, and verify that all packets are still delivered and
 * counted by the remaining workers.
 */
static int
sanity_test_with_worker_shutdown(struct worker_params *wp,
		struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	struct rte_mbuf *bufs[BURST];
	unsigned i;

	printf("=== Sanity test of worker shutdown ===\n");

	clear_packet_count();

	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

	/*
	 * Now set all hash values in all buffers to the same value so all
	 * pkts go to the one worker thread
	 */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 1;

	rte_distributor_process(d, bufs, BURST);
	rte_distributor_flush(d);

	/* at this point, we will have processed some packets and have a full
	 * backlog for the other ones at worker 0.
	 */

	/* get more buffers to queue up, again setting them to the same flow */
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 1;

	/* get worker zero to quit */
	zero_quit = 1;
	rte_distributor_process(d, bufs, BURST);

	/* flush the distributor */
	rte_distributor_flush(d);
	rte_delay_us(10000);

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);

	if (total_packet_count() != BURST * 2) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST * 2, total_packet_count());
		return -1;
	}

	printf("Sanity test with worker shutdown passed\n\n");
	return 0;
}

/* Test that the flush function is able to move packets between workers when
 * one worker shuts down.
 */
static int
test_flush_with_worker_shutdown(struct worker_params *wp,
		struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	struct rte_mbuf *bufs[BURST];
	unsigned i;

	printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name);

	clear_packet_count();
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

	/* now set all hash values in all buffers to zero, so all pkts go to the
	 * one worker thread */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 0;

	rte_distributor_process(d, bufs, BURST);
	/* at this point, we will have processed some packets and have a full
	 * backlog for the other ones at worker 0.
	 */

	/* get worker zero to quit */
	zero_quit = 1;

	/* flush the distributor */
	rte_distributor_flush(d);

	rte_delay_us(10000);

	zero_quit = 0;
	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);

	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	printf("Flush test with worker shutdown passed\n\n");
	return 0;
}

static int
test_error_distributor_create_name(void)
{
	struct rte_distributor *d = NULL;
	struct rte_distributor *db = NULL;
	char *name = NULL;

	d = rte_distributor_create(name, rte_socket_id(),
			rte_lcore_count() - 1,
			RTE_DIST_ALG_SINGLE);
	if (d != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with NULL name param\n");
		return -1;
	}

	db = rte_distributor_create(name, rte_socket_id(),
			rte_lcore_count() - 1,
			RTE_DIST_ALG_BURST);
	if (db != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with NULL name param\n");
		return -1;
	}

	return 0;
}


static int
test_error_distributor_create_numworkers(void)
{
	struct rte_distributor *ds = NULL;
	struct rte_distributor *db = NULL;

	ds = rte_distributor_create("test_numworkers", rte_socket_id(),
			RTE_MAX_LCORE + 10,
			RTE_DIST_ALG_SINGLE);
	if (ds != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with num_workers > MAX\n");
		return -1;
	}

	db = rte_distributor_create("test_numworkers", rte_socket_id(),
			RTE_MAX_LCORE + 10,
			RTE_DIST_ALG_BURST);
	if (db != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with num_workers > MAX\n");
		return -1;
	}

	return 0;
}

/* Useful function which ensures that all worker functions terminate */
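/* It clears zero_quit and sets the global quit flag, then pushes one packet
 * per worker (each with a distinct tag) through the distributor so that
 * every worker waiting in rte_distributor_get_pkt() receives work, notices
 * quit and exits; finally it waits for all worker lcores and resets the
 * state for the next test.
 */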
static void
quit_workers(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	const unsigned num_workers = rte_lcore_count() - 1;
	unsigned i;
	struct rte_mbuf *bufs[RTE_MAX_LCORE];
	rte_mempool_get_bulk(p, (void *)bufs, num_workers);

	zero_quit = 0;
	quit = 1;
	for (i = 0; i < num_workers; i++)
		bufs[i]->hash.usr = i << 1;
	rte_distributor_process(d, bufs, num_workers);

	rte_mempool_put_bulk(p, (void *)bufs, num_workers);

	rte_distributor_process(d, NULL, 0);
	rte_distributor_flush(d);
	rte_eal_mp_wait_lcore();
	quit = 0;
	worker_idx = 0;
}

static int
test_distributor(void)
{
	static struct rte_distributor *ds;
	static struct rte_distributor *db;
	static struct rte_distributor *dist[2];
	static struct rte_mempool *p;
	int i;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for distributor_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	}

	if (db == NULL) {
		db = rte_distributor_create("Test_dist_burst", rte_socket_id(),
				rte_lcore_count() - 1,
				RTE_DIST_ALG_BURST);
		if (db == NULL) {
			printf("Error creating burst distributor\n");
			return -1;
		}
	} else {
		rte_distributor_flush(db);
		rte_distributor_clear_returns(db);
	}

	if (ds == NULL) {
		ds = rte_distributor_create("Test_dist_single",
				rte_socket_id(),
				rte_lcore_count() - 1,
				RTE_DIST_ALG_SINGLE);
		if (ds == NULL) {
			printf("Error creating single distributor\n");
			return -1;
		}
	} else {
		rte_distributor_flush(ds);
		rte_distributor_clear_returns(ds);
	}

	/* size the pool at 511 mbufs per lcore, bumped to (2 * BIG_BATCH) - 1
	 * when that would not cover a BIG_BATCH burst */
	const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
			(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
	if (p == NULL) {
		p = rte_pktmbuf_pool_create("DT_MBUF_POOL", nb_bufs, BURST,
			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
		if (p == NULL) {
			printf("Error creating mempool\n");
			return -1;
		}
	}

	dist[0] = ds;
	dist[1] = db;

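	/* run the whole suite twice: pass 0 uses the single-packet
	 * distributor (dist[0] = ds), pass 1 the burst one (dist[1] = db);
	 * worker_params.name records which mode is under test */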
	for (i = 0; i < 2; i++) {
		worker_params.dist = dist[i];
		if (i)
			strlcpy(worker_params.name, "burst",
					sizeof(worker_params.name));
		else
			strlcpy(worker_params.name, "single",
					sizeof(worker_params.name));

		rte_eal_mp_remote_launch(handle_work,
				&worker_params, SKIP_MASTER);
		if (sanity_test(&worker_params, p) < 0)
			goto err;
		quit_workers(&worker_params, p);

		rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
				&worker_params, SKIP_MASTER);
		if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
			goto err;
		quit_workers(&worker_params, p);

		if (rte_lcore_count() > 2) {
			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
					&worker_params,
					SKIP_MASTER);
			if (sanity_test_with_worker_shutdown(&worker_params,
					p) < 0)
				goto err;
			quit_workers(&worker_params, p);

			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
					&worker_params,
					SKIP_MASTER);
			if (test_flush_with_worker_shutdown(&worker_params,
					p) < 0)
				goto err;
			quit_workers(&worker_params, p);
		} else {
			printf("Too few cores to run worker shutdown test\n");
		}
	}

	if (test_error_distributor_create_numworkers() == -1 ||
			test_error_distributor_create_name() == -1) {
		printf("rte_distributor_create parameter check tests failed\n");
		return -1;
	}

	return 0;

err:
	quit_workers(&worker_params, p);
	return -1;
}

REGISTER_TEST_COMMAND(distributor_autotest, test_distributor);