/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_test.h>

#include "ssovf_evdev.h"

#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS  (16 * 1024)

#define OCTEONTX_TEST_RUN(setup, teardown, test) \
	octeontx_test_run(setup, teardown, test, #test)

static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

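/*
 * Sequence numbers dequeued by the workers are recorded here and later
 * compared against the expected order by seqn_list_check().
 */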
static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];

static inline void
seqn_list_init(void)
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
}

static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return -1;

	seqn_list[seqn_list_index++] = val;
	rte_smp_wmb();
	return 0;
}

static inline int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
			return -1;
		}
	}
	return 0;
}

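/* Per-worker parameters handed to each launched lcore */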
struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};

static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		ssovf_log_dbg("%d: Eventdev %s not found - creating.",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			ssovf_log_dbg("Error creating eventdev %s",
					eventdev_name);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			ssovf_log_dbg("Error finding newly created eventdev");
			return -1;
		}
	}

	return 0;
}

static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}

static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

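/* Modes accepted by _eventdev_setup() */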
enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};

static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy a pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
			MAX_EVENTS,
			0 /*MBUF_CACHE_SIZE*/,
			0,
			512, /* Use very small mbufs */
			rte_socket_id());
	if (!eventdev_test_mempool) {
		ssovf_log_dbg("ERROR creating mempool");
		return -1;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"ERROR max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			ssovf_log_dbg(
				"test expects the unique priority per queue");
			return -ENOTSUP;
		}

		/* Configure event queues (0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
					i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}

	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	}
	/* Configure event ports */
	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
				i);
	}

	ret = rte_event_dev_start(evdev);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return 0;
}

static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}

static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}

static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
		uint32_t flow_id, uint8_t event_type,
		uint8_t sub_event_type, uint8_t sched_type,
		uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}

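/*
 * Allocate an mbuf per event, stamp the validation attributes and a sequence
 * number on it, and enqueue it as a NEW event on the given port.
 */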
static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		*rte_event_pmd_selftest_seqn(m) = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
				sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}

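/* Once a test has drained its events, no further dequeue should succeed */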
static inline int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events; try a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		RTE_TEST_ASSERT_SUCCESS(valid_event,
				"Unexpected valid event=%d",
				*rte_event_pmd_selftest_seqn(ev.mbuf));
	}
	return 0;
}

static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return -1;
	}
	return ret;
}

static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);
	return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);

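/*
 * Dequeue total_events from the given port, validating each event against the
 * attributes stored in its mbuf and, optionally, against a test specific
 * callback. Gives up if no event arrives within UINT16_MAX polls.
 */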
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			ssovf_log_dbg("Detected deadlock");
			return -1;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return -1;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			RTE_TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}

static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
			"index=%d != seqn=%d", index,
			*rte_event_pmd_selftest_seqn(ev->mbuf));
	return 0;
}

static inline int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type */,
			sched_type,
			0 /* queue */,
			0 /* port */,
			MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}

/*
 * Generate a prescribed number of events and spread them across the available
 * queues. On dequeue, verify the enqueued event attributes using a single
 * event port (port 0).
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

/*
 * Inject 0..MAX_EVENTS events over queues 0..queue_count-1 using a modulus
 * operation.
 *
 * For example, injecting 32 events over queues 0..7:
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ..
 * ..
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events come in the order
 * 0,8,16,24,1,9,17,25..,7,15,23,31 from queue 0 (highest priority) to
 * queue 7 (lowest priority).
 */
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	uint32_t range = MAX_EVENTS / queue_count;
	uint32_t expected_val = (index % range) * queue_count;

	expected_val += ev->queue_id;
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
			"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
			*rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val, range,
			queue_count, MAX_EVENTS);
	return 0;
}

static int
test_multi_queue_priority(void)
{
	uint8_t queue;
	struct rte_mbuf *m;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for the priority validation logic */
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	max_evts_roundoff = MAX_EVENTS / queue_count;
	max_evts_roundoff *= queue_count;

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		*rte_event_pmd_selftest_seqn(m) = i;
		queue = i % queue_count;
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
				0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}

static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}

static inline int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;
	RTE_SET_USED(count);

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			ssovf_log_dbg("\r%s: events %d", __func__,
					rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			ssovf_log_dbg(
				"%s: No schedules for seconds, deadlock (%d)",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return -1;
		}
	}
	rte_eal_mp_wait_lcore();
	return 0;
}

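/*
 * Launch main_worker on the first worker lcore and worker on the remaining
 * ones, each with its own port and a shared atomic event counter, then wait
 * for all of them to finish.
 */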
static inline int
launch_workers_and_wait(int (*main_worker)(void *),
		int (*worker)(void *), uint32_t total_events,
		uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return -1;

	ret = rte_event_dequeue_timeout_ticks(evdev,
			rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret) {
		free(param);
		return -1;
	}

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip main */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(main_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(worker, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}

/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
			worker_multi_port_fn, total_events,
			nr_ports, 0xff /* invalid */);
}

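/* Device stop flush callback: count the CPU events still held by the device */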
static void
flush(uint8_t dev_id, struct rte_event event, void *arg)
{
	unsigned int *count = arg;

	RTE_SET_USED(dev_id);
	if (event.event_type == RTE_EVENT_TYPE_CPU)
		*count = *count + 1;
}

static int
test_dev_stop_flush(void)
{
	unsigned int total_events = MAX_EVENTS, count = 0;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
	if (ret)
		return -2;
	rte_event_dev_stop(evdev);
	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
	if (ret)
		return -3;
	RTE_TEST_ASSERT_EQUAL(total_events, count,
			"count mismatch total_events=%d count=%d",
			total_events, count);
	return 0;
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
		struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			port, ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check correctness of link by checking
 * queue_id == x on dequeue on the specific port x
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;

	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");

	/* Unlink all connections created in eventdev_setup() */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0,
				"Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return -1;
	}

	/* Verify the events were generated from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return -1;
	}

	return 0;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
		struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
			"queue mismatch enq=%d deq=%d",
			port, ev->queue_id);
	return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, and verify the link connections on dequeue
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&nr_queues), "Queue count get failed");

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		ssovf_log_dbg("%s: Not enough ports to test ports=%d",
				__func__, nr_ports);
		return 0;
	}

	/* Unlink all connections created in eventdev_setup() */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
				port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link all even-numbered queues to port 0 and odd-numbered ones to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
				queue, port);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return -1;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return -1;
	ret = consume_events(1 /* port */, port1_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return -1;

	return 0;
}

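/*
 * Two stage flow-based pipeline worker: the stage is tracked in
 * sub_event_type. Stage 0 events are forwarded to stage 1 on an atomic flow;
 * stage 1 events have their sequence number recorded and are released.
 */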
static int
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.sub_event_type = %d",
					ev.sub_event_type);
			return -1;
		}
	}
	return 0;
}

static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
		uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Inject total_events events with sequence numbers 0..total_events-1 */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
			worker_flow_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event ordering was maintained */
		return seqn_list_check(total_events);
	}
	return 0;
}

/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_PARALLEL);
}

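/*
 * Two stage queue-based pipeline worker: the stage is tracked in queue_id.
 * Queue 0 events are forwarded to queue 1; queue 1 events have their
 * sequence number recorded and are released.
 */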
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 (group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
			if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
			return -1;
		}
	}

	return 0;
}

static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
		uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");

	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	if (queue_count < 2 || !nr_ports) {
		ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
				__func__, queue_count, nr_ports,
				rte_lcore_count() - 1);
		return 0;
	}

	/* Inject total_events events with sequence numbers 0..total_events-1 */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
			worker_group_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event ordering was maintained */
		return seqn_list_check(total_events);
	}
	return 0;
}

static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_PARALLEL);
}

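/*
 * Max stages pipeline worker: each event is forwarded through 256 stages
 * (sub_event_type 0..255), picking a random sched type on every hop, and is
 * released at the last stage.
 */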
static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

static int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Inject MAX_EVENTS events with sequence numbers 0..MAX_EVENTS-1 */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */,
		0 /* port */,
		MAX_EVENTS /* events */);
	if (ret)
		return -1;

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
			0xff /* invalid */);
}

/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
			worker_flow_based_pipeline_max_stages_rand_sched_type);
}

static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
			worker_queue_based_pipeline_max_stages_rand_sched_type);
}

static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
			worker_mixed_pipeline_max_stages_rand_sched_type);
}

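/*
 * Producer worker: enqueue NUM_PACKETS events on a single ordered flow
 * (queue 0) so that the consumer side can verify ingress ordering.
 */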
static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		*rte_event_pmd_selftest_seqn(m) = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}

static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint32_t nr_ports;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		ssovf_log_dbg("### Not enough cores for %s test.", __func__);
		return 0;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check whether the event ordering was maintained */
	return seqn_list_check(NUM_PACKETS);
}

/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
			worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
			worker_group_based_pipeline);
}

static void
octeontx_test_run(int (*setup)(void), void (*tdown)(void),
		int (*test)(void), const char *name)
{
	if (setup() < 0) {
		ssovf_log_selftest("Error setting up test %s", name);
		unsupported++;
	} else {
		if (test() < 0) {
			failed++;
			ssovf_log_selftest("%s Failed", name);
		} else {
			passed++;
			ssovf_log_selftest("%s Passed", name);
		}
	}

	total++;
	tdown();
}

int
test_eventdev_octeontx(void)
{
	testsuite_setup();

	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_dev_stop_flush);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);

	ssovf_log_selftest("Total tests : %d", total);
	ssovf_log_selftest("Passed : %d", passed);
	ssovf_log_selftest("Failed : %d", failed);
	ssovf_log_selftest("Not supported : %d", unsupported);

	testsuite_teardown();

	if (failed)
		return -1;

	return 0;
}