xref: /f-stack/dpdk/drivers/event/dlb/dlb_selftest.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <string.h>
7 #include <stdint.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/queue.h>
11 
12 #include <rte_memory.h>
13 #include <rte_memzone.h>
14 #include <rte_launch.h>
15 #include <rte_eal.h>
16 #include <rte_lcore.h>
17 #include <rte_debug.h>
18 #include <rte_cycles.h>
19 #include <rte_eventdev.h>
20 #include <rte_mempool.h>
21 #include <rte_mbuf.h>
22 
23 #include "dlb_priv.h"
24 #include "rte_pmd_dlb.h"
25 
/* Upper bounds on the ports/queues any single selftest may create. */
#define MAX_PORTS 32
#define MAX_QIDS 32
/* Sequence-number count used for ordered/atomic queue configuration. */
#define DEFAULT_NUM_SEQ_NUMS 32

/* Mempool shared by all tests; assigned into each test's state by init(). */
static struct rte_mempool *eventdev_func_mempool;
/* Identifier of the event device under test. */
static int evdev;

/* Per-test bookkeeping state. */
struct test {
	struct rte_mempool *mbuf_pool; /* pool events allocate mbufs from */
	int nb_qids;                   /* number of queues created so far */
};
37 
38 /* initialization and config */
39 static inline int
init(struct test * t,int nb_queues,int nb_ports)40 init(struct test *t, int nb_queues, int nb_ports)
41 {
42 	struct rte_event_dev_config config = {0};
43 	struct rte_event_dev_info info;
44 	int ret;
45 
46 	memset(t, 0, sizeof(*t));
47 
48 	t->mbuf_pool = eventdev_func_mempool;
49 
50 	if (rte_event_dev_info_get(evdev, &info)) {
51 		printf("%d: Error querying device info\n", __LINE__);
52 		return -1;
53 	}
54 
55 	config.nb_event_queues = nb_queues;
56 	config.nb_event_ports = nb_ports;
57 	config.nb_event_queue_flows = info.max_event_queue_flows;
58 	config.nb_events_limit = info.max_num_events;
59 	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
60 	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
61 	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
62 	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
63 
64 	ret = rte_event_dev_configure(evdev, &config);
65 	if (ret < 0)
66 		printf("%d: Error configuring device\n", __LINE__);
67 
68 	return ret;
69 }
70 
71 static inline int
create_ports(int num_ports)72 create_ports(int num_ports)
73 {
74 	int i;
75 
76 	if (num_ports > MAX_PORTS)
77 		return -1;
78 
79 	for (i = 0; i < num_ports; i++) {
80 		struct rte_event_port_conf conf;
81 
82 		if (rte_event_port_default_conf_get(evdev, i, &conf)) {
83 			printf("%d: Error querying default port conf\n",
84 			       __LINE__);
85 			return -1;
86 		}
87 
88 		if (rte_event_port_setup(evdev, i, &conf) < 0) {
89 			printf("%d: Error setting up port %d\n", __LINE__, i);
90 			return -1;
91 		}
92 	}
93 
94 	return 0;
95 }
96 
97 static inline int
create_lb_qids(struct test * t,int num_qids,uint32_t flags)98 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
99 {
100 	int i;
101 
102 	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
103 		struct rte_event_queue_conf conf;
104 
105 		if (rte_event_queue_default_conf_get(evdev, i, &conf)) {
106 			printf("%d: Error querying default queue conf\n",
107 			       __LINE__);
108 			return -1;
109 		}
110 
111 		conf.schedule_type = flags;
112 
113 		if (conf.schedule_type == RTE_SCHED_TYPE_PARALLEL)
114 			conf.nb_atomic_order_sequences = 0;
115 		else
116 			conf.nb_atomic_order_sequences = DEFAULT_NUM_SEQ_NUMS;
117 
118 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
119 			printf("%d: error creating qid %d\n", __LINE__, i);
120 			return -1;
121 		}
122 	}
123 
124 	t->nb_qids += num_qids;
125 	if (t->nb_qids > MAX_QIDS)
126 		return -1;
127 
128 	return 0;
129 }
130 
131 static inline int
create_atomic_qids(struct test * t,int num_qids)132 create_atomic_qids(struct test *t, int num_qids)
133 {
134 	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
135 }
136 
137 /* destruction */
138 static inline int
cleanup(void)139 cleanup(void)
140 {
141 	rte_event_dev_stop(evdev);
142 	return rte_event_dev_close(evdev);
143 };
144 
145 static inline int
enqueue_timeout(uint8_t port_id,struct rte_event * ev,uint64_t tmo_us)146 enqueue_timeout(uint8_t port_id, struct rte_event *ev, uint64_t tmo_us)
147 {
148 	const uint64_t start = rte_get_timer_cycles();
149 	const uint64_t ticks = (tmo_us * rte_get_timer_hz()) / 1E6;
150 
151 	while ((rte_get_timer_cycles() - start) < ticks) {
152 		if (rte_event_enqueue_burst(evdev, port_id, ev, 1) == 1)
153 			return 0;
154 
155 		if (rte_errno != -ENOSPC)
156 			return -1;
157 	}
158 
159 	return -1;
160 }
161 
162 static void
flush(uint8_t id __rte_unused,struct rte_event event,void * arg __rte_unused)163 flush(uint8_t id __rte_unused, struct rte_event event, void *arg __rte_unused)
164 {
165 	rte_pktmbuf_free(event.mbuf);
166 }
167 
168 static int
test_stop_flush(struct test * t)169 test_stop_flush(struct test *t) /* test to check we can properly flush events */
170 {
171 	struct rte_event ev;
172 	uint32_t dequeue_depth;
173 	unsigned int i, count;
174 	uint8_t queue_id;
175 
176 	ev.op = RTE_EVENT_OP_NEW;
177 
178 	if (init(t, 2, 1) < 0 ||
179 	    create_ports(1) < 0 ||
180 	    create_atomic_qids(t, 2) < 0) {
181 		printf("%d: Error initializing device\n", __LINE__);
182 		return -1;
183 	}
184 
185 	if (rte_event_port_link(evdev, 0, NULL, NULL, 0) != 2) {
186 		printf("%d: Error linking queues to the port\n", __LINE__);
187 		goto err;
188 	}
189 
190 	if (rte_event_dev_start(evdev) < 0) {
191 		printf("%d: Error with start call\n", __LINE__);
192 		goto err;
193 	}
194 
195 	/* Unlink queue 1 so the PMD's stop callback has to cleanup an unlinked
196 	 * queue.
197 	 */
198 	queue_id = 1;
199 
200 	if (rte_event_port_unlink(evdev, 0, &queue_id, 1) != 1) {
201 		printf("%d: Error unlinking queue 1 from port\n", __LINE__);
202 		goto err;
203 	}
204 
205 	if (t->mbuf_pool)
206 		count = rte_mempool_avail_count(t->mbuf_pool);
207 	else {
208 		printf("%d: mbuf_pool is NULL\n", __LINE__);
209 		goto err;
210 	}
211 
212 	if (rte_event_port_attr_get(evdev,
213 				    0,
214 				    RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
215 				    &dequeue_depth)) {
216 		printf("%d: Error retrieveing dequeue depth\n", __LINE__);
217 		goto err;
218 	}
219 
220 	/* Send QEs to queue 0 */
221 	for (i = 0; i < dequeue_depth + 1; i++) {
222 		ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
223 		ev.queue_id = 0;
224 		ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
225 
226 		if (enqueue_timeout(0, &ev, 1000)) {
227 			printf("%d: Error enqueuing events\n", __LINE__);
228 			goto err;
229 		}
230 	}
231 
232 	/* Send QEs to queue 1 */
233 	for (i = 0; i < dequeue_depth + 1; i++) {
234 		ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
235 		ev.queue_id = 1;
236 		ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
237 
238 		if (enqueue_timeout(0, &ev, 1000)) {
239 			printf("%d: Error enqueuing events\n", __LINE__);
240 			goto err;
241 		}
242 	}
243 
244 	/* Now the DLB is scheduling events from the port to the IQ, and at
245 	 * least one event should be remaining in each queue.
246 	 */
247 
248 	if (rte_event_dev_stop_flush_callback_register(evdev, flush, NULL)) {
249 		printf("%d: Error installing the flush callback\n", __LINE__);
250 		goto err;
251 	}
252 
253 	cleanup();
254 
255 	if (count != rte_mempool_avail_count(t->mbuf_pool)) {
256 		printf("%d: Error executing the flush callback\n", __LINE__);
257 		goto err;
258 	}
259 
260 	if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
261 		printf("%d: Error uninstalling the flush callback\n", __LINE__);
262 		goto err;
263 	}
264 
265 	return 0;
266 err:
267 	cleanup();
268 	return -1;
269 }
270 
/*
 * Verify single-link (directed) resource accounting and link restrictions:
 * with one single-link port/queue pair budgeted, a second directed port or
 * queue setup must fail, and directed resources must refuse to link with
 * load-balanced ones (and vice versa).
 *
 * Returns the rte_event_dev_close() status on success, -1 on failure.
 */
static int
test_single_link(void)
{
	struct rte_event_dev_config config = {0};
	struct rte_event_queue_conf queue_conf;
	struct rte_event_port_conf port_conf;
	struct rte_event_dev_info info;
	uint8_t queue_id;
	int ret;

	if (rte_event_dev_info_get(evdev, &info)) {
		printf("%d: Error querying device info\n", __LINE__);
		return -1;
	}

	/* 2 ports/queues total, but only ONE single-link pair budgeted */
	config.nb_event_queues = 2;
	config.nb_event_ports = 2;
	config.nb_single_link_event_port_queues = 1;
	config.nb_event_queue_flows = info.max_event_queue_flows;
	config.nb_events_limit = info.max_num_events;
	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}

	/* Create a directed port */
	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
		printf("%d: Error querying default port conf\n", __LINE__);
		goto err;
	}

	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;

	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
		printf("%d: port 0 setup expected to succeed\n", __LINE__);
		goto err;
	}

	/* Attempt to create another directed port - must fail, since only one
	 * single-link port was budgeted at configure time
	 */
	if (rte_event_port_setup(evdev, 1, &port_conf) == 0) {
		printf("%d: port 1 setup expected to fail\n", __LINE__);
		goto err;
	}

	port_conf.event_port_cfg = 0;

	/* Create a load-balanced port */
	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
		printf("%d: port 1 setup expected to succeed\n", __LINE__);
		goto err;
	}

	/* Create a directed queue */
	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
		printf("%d: Error querying default queue conf\n", __LINE__);
		goto err;
	}

	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;

	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
		printf("%d: queue 0 setup expected to succeed\n", __LINE__);
		goto err;
	}

	/* Attempt to create another directed queue - must fail, same budget
	 * restriction as the second directed port above
	 */
	if (rte_event_queue_setup(evdev, 1, &queue_conf) == 0) {
		printf("%d: queue 1 setup expected to fail\n", __LINE__);
		goto err;
	}

	/* Create a load-balanced queue */
	queue_conf.event_queue_cfg = 0;

	if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
		printf("%d: queue 1 setup expected to succeed\n", __LINE__);
		goto err;
	}

	/* Attempt to link directed and load-balanced resources - a directed
	 * port may not link an LB queue and an LB port may not link the
	 * directed queue
	 */
	queue_id = 1;
	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) == 1) {
		printf("%d: port 0 link expected to fail\n", __LINE__);
		goto err;
	}

	queue_id = 0;
	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) == 1) {
		printf("%d: port 1 link expected to fail\n", __LINE__);
		goto err;
	}

	/* Link ports to queues of matching type: P0->Q0 (DIR), P1->Q1 (LB) */
	queue_id = 0;
	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
		printf("%d: port 0 link expected to succeed\n", __LINE__);
		goto err;
	}

	queue_id = 1;
	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
		printf("%d: port 1 link expected to succeed\n", __LINE__);
		goto err;
	}

	return rte_event_dev_close(evdev);

err:
	rte_event_dev_close(evdev);
	return -1;
}
388 
/* Load-balanced resource counts the DLB PMD is expected to advertise. */
#define NUM_LDB_PORTS 64
#define NUM_LDB_QUEUES 128
391 
/*
 * Verify rte_event_dev_info_get() reports the expected number of
 * load-balanced ports and queues, both before and after configuring the
 * device with additional directed (single-link) resources - the reported
 * maximums must exclude directed ports/queues.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
test_info_get(void)
{
	struct rte_event_dev_config config = {0};
	struct rte_event_dev_info info;
	int ret;

	if (rte_event_dev_info_get(evdev, &info)) {
		printf("%d: Error querying device info\n", __LINE__);
		return -1;
	}

	if (info.max_event_ports != NUM_LDB_PORTS) {
		printf("%d: Got %u ports, expected %u\n",
		       __LINE__, info.max_event_ports, NUM_LDB_PORTS);
		goto err;
	}

	if (info.max_event_queues != NUM_LDB_QUEUES) {
		printf("%d: Got %u queues, expected %u\n",
		       __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
		goto err;
	}

	/* Use every LDB port, and back half of them with directed
	 * (single-link) queues in addition to the LDB queues
	 */
	config.nb_event_ports = info.max_event_ports;
	config.nb_event_queues = NUM_LDB_QUEUES + info.max_event_ports / 2;
	config.nb_single_link_event_port_queues = info.max_event_ports / 2;
	config.nb_event_queue_flows = info.max_event_queue_flows;
	config.nb_events_limit = info.max_num_events;
	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}

	if (rte_event_dev_info_get(evdev, &info)) {
		printf("%d: Error querying device info\n", __LINE__);
		goto err;
	}

	/* The DLB PMD only reports load-balanced ports and queues in its
	 * info_get function. Confirm that these values don't include the
	 * directed port or queue counts.
	 */

	if (info.max_event_ports != NUM_LDB_PORTS) {
		printf("%d: Got %u ports, expected %u\n",
		       __LINE__, info.max_event_ports, NUM_LDB_PORTS);
		goto err;
	}

	if (info.max_event_queues != NUM_LDB_QUEUES) {
		printf("%d: Got %u queues, expected %u\n",
		       __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
		goto err;
	}

	ret = rte_event_dev_close(evdev);
	if (ret) {
		printf("rte_event_dev_close err %d\n", ret);
		goto err;
	}

	return 0;

err:
	rte_event_dev_close(evdev);
	return -1;
}
466 
467 static int
test_reconfiguration_link(void)468 test_reconfiguration_link(void)
469 {
470 	struct rte_event_dev_config config = {0};
471 	struct rte_event_queue_conf queue_conf;
472 	struct rte_event_port_conf port_conf;
473 	struct rte_event_dev_info info;
474 	uint8_t queue_id;
475 	int ret, i;
476 
477 	if (rte_event_dev_info_get(evdev, &info)) {
478 		printf("%d: Error querying device info\n", __LINE__);
479 		return -1;
480 	}
481 
482 	config.nb_event_queues = 2;
483 	config.nb_event_ports = 2;
484 	config.nb_single_link_event_port_queues = 0;
485 	config.nb_event_queue_flows = info.max_event_queue_flows;
486 	config.nb_events_limit = info.max_num_events;
487 	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
488 	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
489 	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
490 	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
491 
492 	/* Configure the device with 2 LDB ports and 2 LDB queues */
493 	ret = rte_event_dev_configure(evdev, &config);
494 	if (ret < 0) {
495 		printf("%d: Error configuring device\n", __LINE__);
496 		return -1;
497 	}
498 
499 	/* Configure the ports and queues */
500 	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
501 		printf("%d: Error querying default port conf\n", __LINE__);
502 		goto err;
503 	}
504 
505 	for (i = 0; i < 2; i++) {
506 		if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
507 			printf("%d: port %d setup expected to succeed\n",
508 			       __LINE__, i);
509 			goto err;
510 		}
511 	}
512 
513 	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
514 		printf("%d: Error querying default queue conf\n", __LINE__);
515 		goto err;
516 	}
517 
518 	for (i = 0; i < 2; i++) {
519 		if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
520 			printf("%d: queue %d setup expected to succeed\n",
521 			       __LINE__, i);
522 			goto err;
523 		}
524 	}
525 
526 	/* Link P0->Q0 and P1->Q1 */
527 	for (i = 0; i < 2; i++) {
528 		queue_id = i;
529 
530 		if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
531 			printf("%d: port %d link expected to succeed\n",
532 			       __LINE__, i);
533 			goto err;
534 		}
535 	}
536 
537 	/* Start the device */
538 	if (rte_event_dev_start(evdev) < 0) {
539 		printf("%d: device start failed\n", __LINE__);
540 		goto err;
541 	}
542 
543 	/* Stop the device */
544 	rte_event_dev_stop(evdev);
545 
546 	/* Reconfigure device */
547 	ret = rte_event_dev_configure(evdev, &config);
548 	if (ret < 0) {
549 		printf("%d: Error re-configuring device\n", __LINE__);
550 		return -1;
551 	}
552 
553 	/* Configure P1 and Q1, leave P0 and Q0 to be configured by the PMD. */
554 	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
555 		printf("%d: port 1 setup expected to succeed\n",
556 		       __LINE__);
557 		goto err;
558 	}
559 
560 	if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
561 		printf("%d: queue 1 setup expected to succeed\n",
562 		       __LINE__);
563 		goto err;
564 	}
565 
566 	/* Link P0->Q0 and Q1 */
567 	for (i = 0; i < 2; i++) {
568 		queue_id = i;
569 
570 		if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
571 			printf("%d: P0->Q%d link expected to succeed\n",
572 			       __LINE__, i);
573 			goto err;
574 		}
575 	}
576 
577 	/* Link P1->Q0 and Q1 */
578 	for (i = 0; i < 2; i++) {
579 		queue_id = i;
580 
581 		if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
582 			printf("%d: P1->Q%d link expected to succeed\n",
583 			       __LINE__, i);
584 			goto err;
585 		}
586 	}
587 
588 	/* Start the device */
589 	if (rte_event_dev_start(evdev) < 0) {
590 		printf("%d: device start failed\n", __LINE__);
591 		goto err;
592 	}
593 
594 	/* Stop the device */
595 	rte_event_dev_stop(evdev);
596 
597 	/* Configure device with 2 DIR ports and 2 DIR queues */
598 	config.nb_single_link_event_port_queues = 2;
599 
600 	ret = rte_event_dev_configure(evdev, &config);
601 	if (ret < 0) {
602 		printf("%d: Error configuring device\n", __LINE__);
603 		return -1;
604 	}
605 
606 	/* Configure the ports and queues */
607 	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
608 
609 	for (i = 0; i < 2; i++) {
610 		if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
611 			printf("%d: port %d setup expected to succeed\n",
612 			       __LINE__, i);
613 			goto err;
614 		}
615 	}
616 
617 	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
618 
619 	for (i = 0; i < 2; i++) {
620 		if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
621 			printf("%d: queue %d setup expected to succeed\n",
622 			       __LINE__, i);
623 			goto err;
624 		}
625 	}
626 
627 	/* Link P0->Q0 and P1->Q1 */
628 	for (i = 0; i < 2; i++) {
629 		queue_id = i;
630 
631 		if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
632 			printf("%d: port %d link expected to succeed\n",
633 			       __LINE__, i);
634 			goto err;
635 		}
636 	}
637 
638 	/* Start the device */
639 	if (rte_event_dev_start(evdev) < 0) {
640 		printf("%d: device start failed\n", __LINE__);
641 		goto err;
642 	}
643 
644 	/* Stop the device */
645 	rte_event_dev_stop(evdev);
646 
647 	/* Reconfigure device */
648 	ret = rte_event_dev_configure(evdev, &config);
649 	if (ret < 0) {
650 		printf("%d: Error re-configuring device\n", __LINE__);
651 		return -1;
652 	}
653 
654 	/* Configure P1 and Q0, leave P0 and Q1 to be configured by the PMD. */
655 	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
656 		printf("%d: port 1 setup expected to succeed\n",
657 		       __LINE__);
658 		goto err;
659 	}
660 
661 	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
662 		printf("%d: queue 1 setup expected to succeed\n",
663 		       __LINE__);
664 		goto err;
665 	}
666 
667 	/* Link P0->Q1 */
668 	queue_id = 1;
669 
670 	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
671 		printf("%d: P0->Q%d link expected to succeed\n",
672 		       __LINE__, i);
673 		goto err;
674 	}
675 
676 	/* Link P1->Q0 */
677 	queue_id = 0;
678 
679 	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
680 		printf("%d: P1->Q%d link expected to succeed\n",
681 		       __LINE__, i);
682 		goto err;
683 	}
684 
685 	/* Start the device */
686 	if (rte_event_dev_start(evdev) < 0) {
687 		printf("%d: device start failed\n", __LINE__);
688 		goto err;
689 	}
690 
691 	rte_event_dev_stop(evdev);
692 
693 	config.nb_event_queues = 5;
694 	config.nb_event_ports = 5;
695 	config.nb_single_link_event_port_queues = 1;
696 
697 	ret = rte_event_dev_configure(evdev, &config);
698 	if (ret < 0) {
699 		printf("%d: Error re-configuring device\n", __LINE__);
700 		return -1;
701 	}
702 
703 	for (i = 0; i < config.nb_event_queues - 1; i++) {
704 		port_conf.event_port_cfg = 0;
705 		queue_conf.event_queue_cfg = 0;
706 
707 		if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
708 			printf("%d: port %d setup expected to succeed\n",
709 			       __LINE__, i);
710 			goto err;
711 		}
712 
713 		if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
714 			printf("%d: queue %d setup expected to succeed\n",
715 			       __LINE__, i);
716 			goto err;
717 		}
718 
719 		queue_id = i;
720 
721 		if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
722 			printf("%d: P%d->Q%d link expected to succeed\n",
723 			       __LINE__, i, i);
724 			goto err;
725 		}
726 	}
727 
728 	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
729 	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
730 
731 	if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
732 		printf("%d: port %d setup expected to succeed\n",
733 		       __LINE__, i);
734 		goto err;
735 	}
736 
737 	if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
738 		printf("%d: queue %d setup expected to succeed\n",
739 		       __LINE__, i);
740 		goto err;
741 	}
742 
743 	queue_id = i;
744 
745 	if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
746 		printf("%d: P%d->Q%d link expected to succeed\n",
747 		       __LINE__, i, i);
748 		goto err;
749 	}
750 
751 	/* Start the device */
752 	if (rte_event_dev_start(evdev) < 0) {
753 		printf("%d: device start failed\n", __LINE__);
754 		goto err;
755 	}
756 
757 	/* Stop the device */
758 	rte_event_dev_stop(evdev);
759 
760 	config.nb_event_ports += 1;
761 
762 	/* Reconfigure device with 1 more load-balanced port */
763 	ret = rte_event_dev_configure(evdev, &config);
764 	if (ret < 0) {
765 		printf("%d: Error re-configuring device\n", __LINE__);
766 		return -1;
767 	}
768 
769 	port_conf.event_port_cfg = 0;
770 
771 	/* Configure the new port */
772 	if (rte_event_port_setup(evdev, config.nb_event_ports - 1,
773 				 &port_conf) < 0) {
774 		printf("%d: port 1 setup expected to succeed\n",
775 		       __LINE__);
776 		goto err;
777 	}
778 
779 	/* Start the device */
780 	if (rte_event_dev_start(evdev) < 0) {
781 		printf("%d: device start failed\n", __LINE__);
782 		goto err;
783 	}
784 
785 	cleanup();
786 	return 0;
787 
788 err:
789 	cleanup();
790 	return -1;
791 }
792 
793 static int
test_load_balanced_traffic(void)794 test_load_balanced_traffic(void)
795 {
796 	uint64_t timeout;
797 	struct rte_event_dev_config config = {0};
798 	struct rte_event_queue_conf queue_conf;
799 	struct rte_event_port_conf port_conf;
800 	struct rte_event_dev_info info;
801 	struct rte_event ev;
802 	uint8_t queue_id;
803 	int ret;
804 
805 	if (rte_event_dev_info_get(evdev, &info)) {
806 		printf("%d: Error querying device info\n", __LINE__);
807 		return -1;
808 	}
809 
810 	config.nb_event_queues = 1;
811 	config.nb_event_ports = 1;
812 	config.nb_single_link_event_port_queues = 0;
813 	config.nb_event_queue_flows = info.max_event_queue_flows;
814 	config.nb_events_limit = info.max_num_events;
815 	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
816 	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
817 	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
818 	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
819 
820 	/* Configure the device with 1 LDB port and queue */
821 	ret = rte_event_dev_configure(evdev, &config);
822 	if (ret < 0) {
823 		printf("%d: Error configuring device\n", __LINE__);
824 		return -1;
825 	}
826 
827 	/* Configure the ports and queues */
828 	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
829 		printf("%d: Error querying default port conf\n", __LINE__);
830 		goto err;
831 	}
832 
833 	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
834 		printf("%d: port 0 setup expected to succeed\n",
835 		       __LINE__);
836 		goto err;
837 	}
838 
839 	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
840 		printf("%d: Error querying default queue conf\n", __LINE__);
841 		goto err;
842 	}
843 
844 	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
845 		printf("%d: queue 0 setup expected to succeed\n",
846 		       __LINE__);
847 		goto err;
848 	}
849 
850 	/* Link P0->Q0 */
851 	queue_id = 0;
852 
853 	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
854 		printf("%d: port 0 link expected to succeed\n",
855 		       __LINE__);
856 		goto err;
857 	}
858 
859 	/* Start the device */
860 	if (rte_event_dev_start(evdev) < 0) {
861 		printf("%d: device start failed\n", __LINE__);
862 		goto err;
863 	}
864 
865 	/* Enqueue 1 NEW event */
866 	ev.op = RTE_EVENT_OP_NEW;
867 	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
868 	ev.queue_id = 0;
869 	ev.priority = 0;
870 	ev.u64 = 0;
871 
872 	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
873 		printf("%d: NEW enqueue expected to succeed\n",
874 		       __LINE__);
875 		goto err;
876 	}
877 
878 	/* Dequeue and enqueue 1 FORWARD event */
879 	timeout = 0xFFFFFFFFF;
880 	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
881 		printf("%d: event dequeue expected to succeed\n",
882 		       __LINE__);
883 		goto err;
884 	}
885 
886 	ev.op = RTE_EVENT_OP_FORWARD;
887 
888 	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
889 		printf("%d: NEW enqueue expected to succeed\n",
890 		       __LINE__);
891 		goto err;
892 	}
893 
894 	/* Dequeue and enqueue 1 RELEASE operation */
895 	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
896 		printf("%d: event dequeue expected to succeed\n",
897 		       __LINE__);
898 		goto err;
899 	}
900 
901 	ev.op = RTE_EVENT_OP_RELEASE;
902 
903 	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
904 		printf("%d: NEW enqueue expected to succeed\n",
905 		       __LINE__);
906 		goto err;
907 	}
908 
909 	cleanup();
910 	return 0;
911 
912 err:
913 	cleanup();
914 	return -1;
915 }
916 
917 static int
test_directed_traffic(void)918 test_directed_traffic(void)
919 {
920 	uint64_t timeout;
921 	struct rte_event_dev_config config = {0};
922 	struct rte_event_queue_conf queue_conf;
923 	struct rte_event_port_conf port_conf;
924 	struct rte_event_dev_info info;
925 	struct rte_event ev;
926 	uint8_t queue_id;
927 	int ret;
928 
929 	if (rte_event_dev_info_get(evdev, &info)) {
930 		printf("%d: Error querying device info\n", __LINE__);
931 		return -1;
932 	}
933 
934 	config.nb_event_queues = 1;
935 	config.nb_event_ports = 1;
936 	config.nb_single_link_event_port_queues = 1;
937 	config.nb_event_queue_flows = info.max_event_queue_flows;
938 	config.nb_events_limit = info.max_num_events;
939 	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
940 	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
941 	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
942 	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
943 
944 	/* Configure the device with 1 DIR port and queue */
945 	ret = rte_event_dev_configure(evdev, &config);
946 	if (ret < 0) {
947 		printf("%d: Error configuring device\n", __LINE__);
948 		return -1;
949 	}
950 
951 	/* Configure the ports and queues */
952 	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
953 		printf("%d: Error querying default port conf\n", __LINE__);
954 		goto err;
955 	}
956 
957 	port_conf.event_port_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
958 
959 	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
960 		printf("%d: port 0 setup expected to succeed\n",
961 		       __LINE__);
962 		goto err;
963 	}
964 
965 	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
966 		printf("%d: Error querying default queue conf\n", __LINE__);
967 		goto err;
968 	}
969 
970 	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
971 
972 	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
973 		printf("%d: queue 0 setup expected to succeed\n",
974 		       __LINE__);
975 		goto err;
976 	}
977 
978 	/* Link P0->Q0 */
979 	queue_id = 0;
980 
981 	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
982 		printf("%d: port 0 link expected to succeed\n",
983 		       __LINE__);
984 		goto err;
985 	}
986 
987 	/* Start the device */
988 	if (rte_event_dev_start(evdev) < 0) {
989 		printf("%d: device start failed\n", __LINE__);
990 		goto err;
991 	}
992 
993 	/* Enqueue 1 NEW event */
994 	ev.op = RTE_EVENT_OP_NEW;
995 	ev.queue_id = 0;
996 	ev.priority = 0;
997 	ev.u64 = 0;
998 
999 	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1000 		printf("%d: NEW enqueue expected to succeed\n",
1001 		       __LINE__);
1002 		goto err;
1003 	}
1004 
1005 	/* Dequeue and enqueue 1 FORWARD event */
1006 	timeout = 0xFFFFFFFFF;
1007 	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1008 		printf("%d: event dequeue expected to succeed\n",
1009 		       __LINE__);
1010 		goto err;
1011 	}
1012 
1013 	if (ev.queue_id != 0) {
1014 		printf("%d: invalid dequeued event queue ID (%d)\n",
1015 		       __LINE__, ev.queue_id);
1016 		goto err;
1017 	}
1018 
1019 	ev.op = RTE_EVENT_OP_FORWARD;
1020 
1021 	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1022 		printf("%d: NEW enqueue expected to succeed\n",
1023 		       __LINE__);
1024 		goto err;
1025 	}
1026 
1027 	/* Dequeue and enqueue 1 RELEASE operation */
1028 	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1029 		printf("%d: event dequeue expected to succeed\n",
1030 		       __LINE__);
1031 		goto err;
1032 	}
1033 
1034 	ev.op = RTE_EVENT_OP_RELEASE;
1035 
1036 	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1037 		printf("%d: NEW enqueue expected to succeed\n",
1038 		       __LINE__);
1039 		goto err;
1040 	}
1041 
1042 	cleanup();
1043 	return 0;
1044 
1045 err:
1046 	cleanup();
1047 	return -1;
1048 }
1049 
/*
 * Verify deferred-scheduling token pop mode (DEFERRED_POP).
 *
 * Sets up one load-balanced queue linked to two ports (both in DEFERRED_POP
 * mode, port 0 with dequeue_depth 1), enqueues 128 NEW parallel events, then
 * checks that after port 0 pulls its initial allotment (2 events -- see the
 * reserved-token comment below), draining and releasing the remaining events
 * through port 1 does not cause any further events to be scheduled to port 0.
 *
 * Returns 0 on success, -1 on failure. Always calls cleanup() before
 * returning (cleanup() is defined elsewhere in this file).
 */
static int
test_deferred_sched(void)
{
	uint64_t timeout;
	struct rte_event_dev_config config = {0};
	struct rte_event_queue_conf queue_conf;
	struct rte_event_port_conf port_conf;
	struct rte_event_dev_info info;
	const int num_events = 128;
	struct rte_event ev;
	uint8_t queue_id;
	int ret, i;

	if (rte_event_dev_info_get(evdev, &info)) {
		printf("%d: Error querying device info\n", __LINE__);
		return -1;
	}

	/* Request device maximums for everything except the explicit
	 * 1-queue/2-port topology this test needs.
	 */
	config.nb_event_queues = 1;
	config.nb_event_ports = 2;
	config.nb_single_link_event_port_queues = 0;
	config.nb_event_queue_flows = info.max_event_queue_flows;
	config.nb_events_limit = info.max_num_events;
	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	/* Configure the device with 2 LDB ports and 1 queue */
	ret = rte_event_dev_configure(evdev, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}

	/* Both ports use the DLB PMD's deferred token pop mode */
	ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DEFERRED_POP);
	if (ret < 0) {
		printf("%d: Error setting deferred scheduling\n", __LINE__);
		goto err;
	}

	ret = rte_pmd_dlb_set_token_pop_mode(evdev, 1, DEFERRED_POP);
	if (ret < 0) {
		printf("%d: Error setting deferred scheduling\n", __LINE__);
		goto err;
	}

	/* Configure the ports and queues */
	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
		printf("%d: Error querying default port conf\n", __LINE__);
		goto err;
	}

	/* Minimal CQ depth so port 0's initial fill is easy to reason about */
	port_conf.dequeue_depth = 1;

	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
		printf("%d: port 0 setup expected to succeed\n",
		       __LINE__);
		goto err;
	}

	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
		printf("%d: port 1 setup expected to succeed\n",
		       __LINE__);
		goto err;
	}

	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
		printf("%d: Error querying default queue conf\n", __LINE__);
		goto err;
	}

	/* Parallel scheduling: no atomic flows or ordered sequence numbers */
	queue_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf.nb_atomic_order_sequences = 0;

	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
		printf("%d: queue 0 setup expected to succeed\n",
		       __LINE__);
		goto err;
	}

	/* Link P0->Q0 and P1->Q0 */
	queue_id = 0;

	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
		printf("%d: port 0 link expected to succeed\n",
		       __LINE__);
		goto err;
	}

	if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
		printf("%d: port 1 link expected to succeed\n",
		       __LINE__);
		goto err;
	}

	/* Start the device */
	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: device start failed\n", __LINE__);
		goto err;
	}

	/* Enqueue 128 NEW events */
	ev.op = RTE_EVENT_OP_NEW;
	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
	ev.queue_id = 0;
	ev.priority = 0;
	ev.u64 = 0;

	for (i = 0; i < num_events; i++) {
		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
			printf("%d: NEW enqueue expected to succeed\n",
			       __LINE__);
			goto err;
		}
	}

	/* Dequeue two events from port 0 (dequeue_depth * 2 due to the
	 * reserved token scheme)
	 */
	timeout = 0xFFFFFFFFF;
	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
		printf("%d: event dequeue expected to succeed\n",
		       __LINE__);
		goto err;
	}

	if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
		printf("%d: event dequeue expected to succeed\n",
		       __LINE__);
		goto err;
	}

	/* Dequeue (and release) all other events from port 1. Deferred
	 * scheduling ensures no other events are scheduled to port 0 without a
	 * subsequent rte_event_dequeue_burst() call.
	 */
	for (i = 0; i < num_events - 2; i++) {
		if (rte_event_dequeue_burst(evdev, 1, &ev, 1, timeout) != 1) {
			printf("%d: event dequeue expected to succeed\n",
			       __LINE__);
			goto err;
		}

		/* Release reuses the dequeued event's metadata in ev */
		ev.op = RTE_EVENT_OP_RELEASE;

		if (rte_event_enqueue_burst(evdev, 1, &ev, 1) != 1) {
			printf("%d: RELEASE enqueue expected to succeed\n",
			       __LINE__);
			goto err;
		}
	}

	cleanup();
	return 0;

err:
	cleanup();
	return -1;
}
1210 
1211 static int
test_delayed_pop(void)1212 test_delayed_pop(void)
1213 {
1214 	uint64_t timeout;
1215 	struct rte_event_dev_config config = {0};
1216 	struct rte_event_queue_conf queue_conf;
1217 	struct rte_event_port_conf port_conf;
1218 	struct rte_event_dev_info info;
1219 	int ret, i, num_events;
1220 	struct rte_event ev;
1221 	uint8_t queue_id;
1222 
1223 	if (rte_event_dev_info_get(evdev, &info)) {
1224 		printf("%d: Error querying device info\n", __LINE__);
1225 		return -1;
1226 	}
1227 
1228 	config.nb_event_queues = 1;
1229 	config.nb_event_ports = 1;
1230 	config.nb_single_link_event_port_queues = 0;
1231 	config.nb_event_queue_flows = info.max_event_queue_flows;
1232 	config.nb_events_limit = info.max_num_events;
1233 	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
1234 	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
1235 	config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
1236 	config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
1237 
1238 	/* Configure the device with 1 LDB port and queue */
1239 	ret = rte_event_dev_configure(evdev, &config);
1240 	if (ret < 0) {
1241 		printf("%d: Error configuring device\n", __LINE__);
1242 		return -1;
1243 	}
1244 
1245 	ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DELAYED_POP);
1246 	if (ret < 0) {
1247 		printf("%d: Error setting deferred scheduling\n", __LINE__);
1248 		goto err;
1249 	}
1250 
1251 	/* Configure the ports and queues */
1252 	if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
1253 		printf("%d: Error querying default port conf\n", __LINE__);
1254 		goto err;
1255 	}
1256 
1257 	port_conf.dequeue_depth = 16;
1258 	port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
1259 
1260 	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1261 		printf("%d: port 0 setup expected to succeed\n",
1262 		       __LINE__);
1263 		goto err;
1264 	}
1265 
1266 	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
1267 		printf("%d: Error querying default queue conf\n", __LINE__);
1268 		goto err;
1269 	}
1270 
1271 	if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
1272 		printf("%d: queue 0 setup expected to succeed\n",
1273 		       __LINE__);
1274 		goto err;
1275 	}
1276 
1277 	/* Link P0->Q0 */
1278 	queue_id = 0;
1279 
1280 	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1281 		printf("%d: port 0 link expected to succeed\n",
1282 		       __LINE__);
1283 		goto err;
1284 	}
1285 
1286 	/* Start the device */
1287 	if (rte_event_dev_start(evdev) < 0) {
1288 		printf("%d: device start failed\n", __LINE__);
1289 		goto err;
1290 	}
1291 
1292 	num_events = 2 * port_conf.dequeue_depth;
1293 
1294 	/* Enqueue 2 * dequeue_depth NEW events. Due to the PMD's reserved
1295 	 * token scheme, the port will initially behave as though its
1296 	 * dequeue_depth is twice the requested size.
1297 	 */
1298 	ev.op = RTE_EVENT_OP_NEW;
1299 	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1300 	ev.queue_id = 0;
1301 	ev.priority = 0;
1302 	ev.u64 = 0;
1303 
1304 	for (i = 0; i < num_events; i++) {
1305 		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1306 			printf("%d: NEW enqueue expected to succeed\n",
1307 			       __LINE__);
1308 			goto err;
1309 		}
1310 	}
1311 
1312 	/* Flush these events out of the CQ */
1313 	timeout = 0xFFFFFFFFF;
1314 
1315 	for (i = 0; i < num_events; i++) {
1316 		if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1317 			printf("%d: event dequeue expected to succeed\n",
1318 			       __LINE__);
1319 			goto err;
1320 		}
1321 	}
1322 
1323 	ev.op = RTE_EVENT_OP_RELEASE;
1324 
1325 	for (i = 0; i < num_events; i++) {
1326 		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1327 			printf("%d: RELEASE enqueue expected to succeed\n",
1328 			       __LINE__);
1329 			goto err;
1330 		}
1331 	}
1332 
1333 	/* Enqueue 2 * dequeue_depth NEW events again */
1334 	ev.op = RTE_EVENT_OP_NEW;
1335 	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
1336 	ev.queue_id = 0;
1337 	ev.priority = 0;
1338 	ev.u64 = 0;
1339 
1340 	for (i = 0; i < num_events; i++) {
1341 		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1342 			printf("%d: NEW enqueue expected to succeed\n",
1343 			       __LINE__);
1344 			goto err;
1345 		}
1346 	}
1347 
1348 	/* Dequeue dequeue_depth events but only release dequeue_depth - 1.
1349 	 * Delayed pop won't perform the pop and no more events will be
1350 	 * scheduled.
1351 	 */
1352 	for (i = 0; i < port_conf.dequeue_depth; i++) {
1353 		if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1354 			printf("%d: event dequeue expected to succeed\n",
1355 			       __LINE__);
1356 			goto err;
1357 		}
1358 	}
1359 
1360 	ev.op = RTE_EVENT_OP_RELEASE;
1361 
1362 	for (i = 0; i < port_conf.dequeue_depth - 1; i++) {
1363 		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1364 			printf("%d: RELEASE enqueue expected to succeed\n",
1365 			       __LINE__);
1366 			goto err;
1367 		}
1368 	}
1369 
1370 	timeout = 0x10000;
1371 
1372 	ret = rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout);
1373 	if (ret != 0) {
1374 		printf("%d: event dequeue expected to fail (ret = %d)\n",
1375 		       __LINE__, ret);
1376 		goto err;
1377 	}
1378 
1379 	/* Release one more event. This will trigger the token pop, and
1380 	 * another batch of events will be scheduled to the device.
1381 	 */
1382 	ev.op = RTE_EVENT_OP_RELEASE;
1383 
1384 	if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1385 		printf("%d: RELEASE enqueue expected to succeed\n",
1386 		       __LINE__);
1387 		goto err;
1388 	}
1389 
1390 	timeout = 0xFFFFFFFFF;
1391 
1392 	for (i = 0; i < port_conf.dequeue_depth; i++) {
1393 		if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1394 			printf("%d: event dequeue expected to succeed\n",
1395 			       __LINE__);
1396 			goto err;
1397 		}
1398 	}
1399 
1400 	cleanup();
1401 	return 0;
1402 
1403 err:
1404 	cleanup();
1405 	return -1;
1406 }
1407 
1408 static int
do_selftest(void)1409 do_selftest(void)
1410 {
1411 	struct test t;
1412 	int ret;
1413 
1414 	/* Only create mbuf pool once, reuse for each test run */
1415 	if (!eventdev_func_mempool) {
1416 		eventdev_func_mempool =
1417 			rte_pktmbuf_pool_create("EVENTDEV_DLB_SA_MBUF_POOL",
1418 						(1 << 12), /* 4k buffers */
1419 						32 /*MBUF_CACHE_SIZE*/,
1420 						0,
1421 						512, /* use very small mbufs */
1422 						rte_socket_id());
1423 		if (!eventdev_func_mempool) {
1424 			printf("ERROR creating mempool\n");
1425 			goto test_fail;
1426 		}
1427 	}
1428 	t.mbuf_pool = eventdev_func_mempool;
1429 
1430 	printf("*** Running Stop Flush test...\n");
1431 	ret = test_stop_flush(&t);
1432 	if (ret != 0) {
1433 		printf("ERROR - Stop Flush test FAILED.\n");
1434 		return ret;
1435 	}
1436 
1437 	printf("*** Running Single Link test...\n");
1438 	ret = test_single_link();
1439 	if (ret != 0) {
1440 		printf("ERROR - Single Link test FAILED.\n");
1441 
1442 		goto test_fail;
1443 	}
1444 
1445 	printf("*** Running Info Get test...\n");
1446 	ret = test_info_get();
1447 	if (ret != 0) {
1448 		printf("ERROR - Stop Flush test FAILED.\n");
1449 		return ret;
1450 	}
1451 
1452 	printf("*** Running Reconfiguration Link test...\n");
1453 	ret = test_reconfiguration_link();
1454 	if (ret != 0) {
1455 		printf("ERROR - Reconfiguration Link test FAILED.\n");
1456 
1457 		goto test_fail;
1458 	}
1459 
1460 	printf("*** Running Load-Balanced Traffic test...\n");
1461 	ret = test_load_balanced_traffic();
1462 	if (ret != 0) {
1463 		printf("ERROR - Load-Balanced Traffic test FAILED.\n");
1464 
1465 		goto test_fail;
1466 	}
1467 
1468 	printf("*** Running Directed Traffic test...\n");
1469 	ret = test_directed_traffic();
1470 	if (ret != 0) {
1471 		printf("ERROR - Directed Traffic test FAILED.\n");
1472 
1473 		goto test_fail;
1474 	}
1475 
1476 	printf("*** Running Deferred Scheduling test...\n");
1477 	ret = test_deferred_sched();
1478 	if (ret != 0) {
1479 		printf("ERROR - Deferred Scheduling test FAILED.\n");
1480 
1481 		goto test_fail;
1482 	}
1483 
1484 	printf("*** Running Delayed Pop test...\n");
1485 	ret = test_delayed_pop();
1486 	if (ret != 0) {
1487 		printf("ERROR - Delayed Pop test FAILED.\n");
1488 
1489 		goto test_fail;
1490 	}
1491 
1492 	return 0;
1493 
1494 test_fail:
1495 	return -1;
1496 }
1497 
1498 int
test_dlb_eventdev(void)1499 test_dlb_eventdev(void)
1500 {
1501 	const char *dlb_eventdev_name = "dlb_event";
1502 	uint8_t num_evdevs = rte_event_dev_count();
1503 	int i, ret = 0;
1504 	int found = 0, skipped = 0, passed = 0, failed = 0;
1505 	struct rte_event_dev_info info;
1506 
1507 	for (i = 0; found + skipped < num_evdevs && i < RTE_EVENT_MAX_DEVS;
1508 	     i++) {
1509 		ret = rte_event_dev_info_get(i, &info);
1510 		if (ret < 0)
1511 			continue;
1512 
1513 		/* skip non-dlb event devices */
1514 		if (strncmp(info.driver_name, dlb_eventdev_name,
1515 			    sizeof(*info.driver_name)) != 0) {
1516 			skipped++;
1517 			continue;
1518 		}
1519 
1520 		evdev = rte_event_dev_get_dev_id(info.driver_name);
1521 		if (evdev < 0) {
1522 			printf("Could not get dev_id for eventdev with name %s, i=%d\n",
1523 			       info.driver_name, i);
1524 			skipped++;
1525 			continue;
1526 		}
1527 		found++;
1528 		printf("Running selftest on eventdev %s\n", info.driver_name);
1529 		ret = do_selftest();
1530 		if (ret == 0) {
1531 			passed++;
1532 			printf("Selftest passed for eventdev %s\n",
1533 			       info.driver_name);
1534 		} else {
1535 			failed++;
1536 			printf("Selftest failed for eventdev %s, err=%d\n",
1537 			       info.driver_name, ret);
1538 		}
1539 	}
1540 
1541 	printf("Ran selftest on %d eventdevs, %d skipped, %d passed, %d failed\n",
1542 	       found, skipped, passed, failed);
1543 	return ret;
1544 }
1545