1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
3 */
4
5 #include <rte_common.h>
6 #include <rte_hexdump.h>
7 #include <rte_mbuf.h>
8 #include <rte_malloc.h>
9 #include <rte_memcpy.h>
10 #include <rte_eventdev.h>
11 #include <rte_dev.h>
12 #include <rte_bus_vdev.h>
13
14 #include "test.h"
15
16 #define TEST_DEV_ID 0
17
18 static int
testsuite_setup(void)19 testsuite_setup(void)
20 {
21 RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
22 uint8_t count;
23 count = rte_event_dev_count();
24 if (!count) {
25 printf("Failed to find a valid event device,"
26 " testing with event_skeleton device\n");
27 return rte_vdev_init("event_skeleton", NULL);
28 }
29 return TEST_SUCCESS;
30 }
31
static void
testsuite_teardown(void)
{
	/* Nothing to release; a vdev created in setup persists for the run. */
}
36
37 static int
test_eventdev_count(void)38 test_eventdev_count(void)
39 {
40 uint8_t count;
41 count = rte_event_dev_count();
42 TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
43 return TEST_SUCCESS;
44 }
45
46 static int
test_eventdev_get_dev_id(void)47 test_eventdev_get_dev_id(void)
48 {
49 int ret;
50 ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
51 TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
52 return TEST_SUCCESS;
53 }
54
55 static int
test_eventdev_socket_id(void)56 test_eventdev_socket_id(void)
57 {
58 int socket_id;
59 socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
60 TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
61 socket_id);
62 socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
63 TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);
64
65 return TEST_SUCCESS;
66 }
67
68 static int
test_eventdev_info_get(void)69 test_eventdev_info_get(void)
70 {
71 int ret;
72 struct rte_event_dev_info info;
73 ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
74 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
75 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
76 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
77 TEST_ASSERT(info.max_event_ports > 0,
78 "Not enough event ports %d", info.max_event_ports);
79 TEST_ASSERT(info.max_event_queues > 0,
80 "Not enough event queues %d", info.max_event_queues);
81 return TEST_SUCCESS;
82 }
83
84 static inline void
devconf_set_default_sane_values(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)85 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
86 struct rte_event_dev_info *info)
87 {
88 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
89 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
90 dev_conf->nb_event_ports = info->max_event_ports;
91 dev_conf->nb_event_queues = info->max_event_queues;
92 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
93 dev_conf->nb_event_port_dequeue_depth =
94 info->max_event_port_dequeue_depth;
95 dev_conf->nb_event_port_enqueue_depth =
96 info->max_event_port_enqueue_depth;
97 dev_conf->nb_event_port_enqueue_depth =
98 info->max_event_port_enqueue_depth;
99 dev_conf->nb_events_limit =
100 info->max_num_events;
101 }
102
/* Build a sane default config, let @fn push one field past its advertised
 * limit, then return the result of configuring the device with it.
 * NOTE(review): "ethdev" in the name looks like a typo for "eventdev";
 * kept as-is because it is referenced throughout this file.
 */
static int
test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info,
		void (*fn)(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info))
{
	devconf_set_default_sane_values(dev_conf, info);
	fn(dev_conf, info);
	return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
}
113
/* Push dequeue_timeout_ns one past the device maximum. */
static void
max_dequeue_limit(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
}
120
/* Push nb_events_limit one past the device maximum. */
static void
max_events_limit(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_events_limit = info->max_num_events + 1;
}
127
/* Push nb_event_ports one past the device maximum. */
static void
max_event_ports(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_event_ports = info->max_event_ports + 1;
}
134
/* Push nb_event_queues one past the device maximum. */
static void
max_event_queues(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_event_queues = info->max_event_queues + 1;
}
141
/* Push nb_event_queue_flows one past the device maximum. */
static void
max_event_queue_flows(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
}
148
/* Push nb_event_port_dequeue_depth one past the device maximum. */
static void
max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_event_port_dequeue_depth =
		info->max_event_port_dequeue_depth + 1;
}
156
/* Push nb_event_port_enqueue_depth one past the device maximum. */
static void
max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_event_port_enqueue_depth =
		info->max_event_port_enqueue_depth + 1;
}
164
165
/* Exercise rte_event_dev_configure(): reject NULL and per-field
 * over-limit configs, then configure, downsize, and restore the device.
 */
static int
test_eventdev_configure(void)
{
	int ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;

	/* NULL config pointer must be rejected. */
	ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	/* Check limits: each mutator pushes exactly one field past its
	 * advertised maximum; configure must fail with -EINVAL.
	 */
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
		 "Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_events_limit),
		 "Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_event_ports),
		 "Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_event_queues),
		 "Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
		 "Config negative test failed");

	/* Depth limits only apply to burst-capable devices. */
	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) {
		TEST_ASSERT_EQUAL(-EINVAL,
			test_ethdev_config_run(&dev_conf, &info,
				max_event_port_dequeue_depth),
			 "Config negative test failed");
		TEST_ASSERT_EQUAL(-EINVAL,
			test_ethdev_config_run(&dev_conf, &info,
				max_event_port_enqueue_depth),
			 "Config negative test failed");
	}

	/* Positive case */
	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	/* re-configure with fewer ports/queues (at least one of each) */
	devconf_set_default_sane_values(&dev_conf, &info);
	dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
	dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to re configure eventdev");

	/* re-configure back to max_event_queues and max_event_ports */
	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");

	return TEST_SUCCESS;

}
226
227 static int
eventdev_configure_setup(void)228 eventdev_configure_setup(void)
229 {
230 int ret;
231 struct rte_event_dev_config dev_conf;
232 struct rte_event_dev_info info;
233
234 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
235 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
236 devconf_set_default_sane_values(&dev_conf, &info);
237 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
238 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
239
240 return TEST_SUCCESS;
241 }
242
243 static int
test_eventdev_queue_default_conf_get(void)244 test_eventdev_queue_default_conf_get(void)
245 {
246 int i, ret;
247 struct rte_event_queue_conf qconf;
248
249 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
250 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
251
252 uint32_t queue_count;
253 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
254 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
255 "Queue count get failed");
256
257 for (i = 0; i < (int)queue_count; i++) {
258 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
259 &qconf);
260 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
261 }
262
263 return TEST_SUCCESS;
264 }
265
/* Exercise rte_event_queue_setup(): reject over-limit flow/sequence
 * counts and invalid queue ids, then set up every queue.
 */
static int
test_eventdev_queue_setup(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	/* Negative cases */
	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
	/* Too many atomic flows for an all-types queue must fail. */
	qconf.event_queue_cfg =	RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* Too many ordered sequences must fail as well. */
	qconf.nb_atomic_flows = info.max_event_queue_flows;
	qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
	qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* Queue id beyond the last valid index must be rejected. */
	ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
					&qconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* Positive case */
	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");

	/* NULL conf selects the default configuration for each queue. */
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	return TEST_SUCCESS;
}
312
313 static int
test_eventdev_queue_count(void)314 test_eventdev_queue_count(void)
315 {
316 int ret;
317 struct rte_event_dev_info info;
318
319 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
320 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
321
322 uint32_t queue_count;
323 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
324 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
325 "Queue count get failed");
326 TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
327 "Wrong queue count");
328
329 return TEST_SUCCESS;
330 }
331
/* Configure distinct per-queue priorities and verify the PRIORITY
 * attribute echoes them (or the normal default without QUEUE_QOS).
 */
static int
test_eventdev_queue_attr_priority(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	uint8_t priority;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");

	/* Assign a different (wrapping) priority to each queue. */
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
					&qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
		qconf.priority = i %  RTE_EVENT_DEV_PRIORITY_LOWEST;
		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	/* Read priorities back through the attribute interface. */
	for (i = 0; i < (int)queue_count; i++) {
		uint32_t tmp;
		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
				    RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
				    "Queue priority get failed");
		priority = tmp;

		/* Only QOS-capable devices honor per-queue priority;
		 * others report the normal default.
		 */
		if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			TEST_ASSERT_EQUAL(priority,
			 i %  RTE_EVENT_DEV_PRIORITY_LOWEST,
			 "Wrong priority value for queue%d", i);
		else
			TEST_ASSERT_EQUAL(priority,
			 RTE_EVENT_DEV_PRIORITY_NORMAL,
			 "Wrong priority value for queue%d", i);
	}

	return TEST_SUCCESS;
}
376
/* Verify the NB_ATOMIC_FLOWS attribute round-trips the configured value
 * on every queue; skip when the PMD reports no atomic-flow support.
 */
static int
test_eventdev_queue_attr_nb_atomic_flows(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	uint32_t nb_atomic_flows;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");

	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");

	if (qconf.nb_atomic_flows == 0)
		/* Assume PMD doesn't support atomic flows, return early */
		return -ENOTSUP;

	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;

	/* Apply the same atomic configuration to every queue. */
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	/* Attribute must echo the configured flow count for each queue. */
	for (i = 0; i < (int)queue_count; i++) {
		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
				    RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
				    &nb_atomic_flows),
				    "Queue nb_atomic_flows get failed");

		TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
				  "Wrong atomic flows value for queue%d", i);
	}

	return TEST_SUCCESS;
}
419
/* Verify the NB_ATOMIC_ORDER_SEQUENCES attribute round-trips the
 * configured value on every queue; skip when reordering is unsupported.
 */
static int
test_eventdev_queue_attr_nb_atomic_order_sequences(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	uint32_t nb_atomic_order_sequences;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");

	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");

	if (qconf.nb_atomic_order_sequences == 0)
		/* Assume PMD doesn't support reordering */
		return -ENOTSUP;

	qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;

	/* Apply the same ordered configuration to every queue. */
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	/* Attribute must echo the configured sequence count per queue. */
	for (i = 0; i < (int)queue_count; i++) {
		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
				 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
				 &nb_atomic_order_sequences),
				 "Queue nb_atomic_order_sequencess get failed");

		TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
				  qconf.nb_atomic_order_sequences,
				  "Wrong atomic order sequences value for queue%d",
				  i);
	}

	return TEST_SUCCESS;
}
464
/* Verify the EVENT_QUEUE_CFG attribute round-trips a single-link
 * configuration on every queue.
 */
static int
test_eventdev_queue_attr_event_queue_cfg(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	uint32_t event_queue_cfg;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");

	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");

	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;

	/* Configure every queue as single-link. */
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	/* Attribute must echo the configured cfg flags per queue. */
	for (i = 0; i < (int)queue_count; i++) {
		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
				    RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
				    &event_queue_cfg),
				    "Queue event_queue_cfg get failed");

		TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
				  "Wrong event_queue_cfg value for queue%d",
				  i);
	}

	return TEST_SUCCESS;
}
504
505 static int
test_eventdev_port_default_conf_get(void)506 test_eventdev_port_default_conf_get(void)
507 {
508 int i, ret;
509 struct rte_event_port_conf pconf;
510
511 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
512 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
513
514 uint32_t port_count;
515 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
516 RTE_EVENT_DEV_ATTR_PORT_COUNT,
517 &port_count), "Port count get failed");
518
519 ret = rte_event_port_default_conf_get(TEST_DEV_ID,
520 port_count + 1, NULL);
521 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
522
523 for (i = 0; i < (int)port_count; i++) {
524 ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
525 &pconf);
526 TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
527 }
528
529 return TEST_SUCCESS;
530 }
531
/* Exercise rte_event_port_setup(): reject over-limit thresholds/depths,
 * unsupported implicit-release disable, and bad port ids; then set up
 * every port.
 */
static int
test_eventdev_port_setup(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_port_conf pconf;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	/* Negative cases */
	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
	/* new_event_threshold beyond max_num_events must fail. */
	pconf.new_event_threshold = info.max_num_events + 1;
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* dequeue_depth beyond the device maximum must fail. */
	pconf.new_event_threshold = info.max_num_events;
	pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* enqueue_depth beyond the device maximum must fail. */
	pconf.dequeue_depth = info.max_event_port_dequeue_depth;
	pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* Disabling implicit release must fail when the device does not
	 * advertise that capability.
	 */
	if (!(info.event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		pconf.enqueue_depth = info.max_event_port_enqueue_depth;
		pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
		ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
		TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
		pconf.event_port_cfg = 0;
	}

	/* Port id beyond the last valid index must be rejected. */
	ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
					&pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* Positive case */
	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");

	/* NULL conf selects the default configuration for each port. */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
	}

	return TEST_SUCCESS;
}
590
591 static int
test_eventdev_port_attr_dequeue_depth(void)592 test_eventdev_port_attr_dequeue_depth(void)
593 {
594 int ret;
595 struct rte_event_dev_info info;
596 struct rte_event_port_conf pconf;
597
598 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
599 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
600
601 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
602 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
603 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
604 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
605
606 uint32_t value;
607 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
608 RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
609 0, "Call to get port dequeue depth failed");
610 TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
611 "Wrong port dequeue depth");
612
613 return TEST_SUCCESS;
614 }
615
616 static int
test_eventdev_port_attr_enqueue_depth(void)617 test_eventdev_port_attr_enqueue_depth(void)
618 {
619 int ret;
620 struct rte_event_dev_info info;
621 struct rte_event_port_conf pconf;
622
623 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
624 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
625
626 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
627 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
628 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
629 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
630
631 uint32_t value;
632 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
633 RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
634 0, "Call to get port enqueue depth failed");
635 TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
636 "Wrong port enqueue depth");
637
638 return TEST_SUCCESS;
639 }
640
641 static int
test_eventdev_port_attr_new_event_threshold(void)642 test_eventdev_port_attr_new_event_threshold(void)
643 {
644 int ret;
645 struct rte_event_dev_info info;
646 struct rte_event_port_conf pconf;
647
648 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
649 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
650
651 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
652 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
653 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
654 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
655
656 uint32_t value;
657 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
658 RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
659 0, "Call to get port new event threshold failed");
660 TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
661 "Wrong port new event threshold");
662
663 return TEST_SUCCESS;
664 }
665
666 static int
test_eventdev_port_count(void)667 test_eventdev_port_count(void)
668 {
669 int ret;
670 struct rte_event_dev_info info;
671
672 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
673 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
674
675 uint32_t port_count;
676 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
677 RTE_EVENT_DEV_ATTR_PORT_COUNT,
678 &port_count), "Port count get failed");
679 TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");
680
681 return TEST_SUCCESS;
682 }
683
684 static int
test_eventdev_timeout_ticks(void)685 test_eventdev_timeout_ticks(void)
686 {
687 int ret;
688 uint64_t timeout_ticks;
689
690 ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
691 if (ret != -ENOTSUP)
692 TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
693
694 return ret;
695 }
696
697
/* Full bring-up/tear-down cycle: configure, set up all queues and ports,
 * link port0 to every queue, start the device, then stop it.
 */
static int
test_eventdev_start_stop(void)
{
	int i, ret;

	ret = eventdev_configure_setup();
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");
	/* NULL conf selects the default configuration for each queue. */
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");

	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
	}

	/* NULL link list links port0 to all queues; return is the count. */
	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
			TEST_DEV_ID);

	ret = rte_event_dev_start(TEST_DEV_ID);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);

	rte_event_dev_stop(TEST_DEV_ID);
	return TEST_SUCCESS;
}
735
736
/* Setup hook for the link tests: configure, set up all queues/ports,
 * link port0 to every queue, and leave the device started.
 * Paired with eventdev_stop_device() as teardown.
 */
static int
eventdev_setup_device(void)
{
	int i, ret;

	ret = eventdev_configure_setup();
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");
	/* NULL conf selects the default configuration for each queue. */
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");

	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
	}

	/* NULL link list links port0 to all queues; return is the count. */
	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
			TEST_DEV_ID);

	ret = rte_event_dev_start(TEST_DEV_ID);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);

	return TEST_SUCCESS;
}
773
/* Teardown hook: stop the device started by eventdev_setup_device(). */
static void
eventdev_stop_device(void)
{
	rte_event_dev_stop(TEST_DEV_ID);
}
779
780 static int
test_eventdev_link(void)781 test_eventdev_link(void)
782 {
783 int ret, nb_queues, i;
784 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
785 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
786
787 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
788 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
789 TEST_DEV_ID);
790
791 uint32_t queue_count;
792 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
793 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
794 "Queue count get failed");
795 nb_queues = queue_count;
796 for (i = 0; i < nb_queues; i++) {
797 queues[i] = i;
798 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
799 }
800
801 ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
802 priorities, nb_queues);
803 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
804 TEST_DEV_ID, ret);
805 return TEST_SUCCESS;
806 }
807
808 static int
test_eventdev_unlink(void)809 test_eventdev_unlink(void)
810 {
811 int ret, nb_queues, i;
812 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
813
814 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
815 TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
816 TEST_DEV_ID);
817
818 uint32_t queue_count;
819 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
820 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
821 "Queue count get failed");
822 nb_queues = queue_count;
823 for (i = 0; i < nb_queues; i++)
824 queues[i] = i;
825
826 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
827 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
828 TEST_DEV_ID);
829
830 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
831 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
832 TEST_DEV_ID, ret);
833 return TEST_SUCCESS;
834 }
835
/* Exercise rte_event_port_links_get() against several link/unlink
 * shapes: none linked, all linked, one linked, and a partial unlink
 * (4 links, 2 unlinked) where the surviving links must be reported.
 */
static int
test_eventdev_link_get(void)
{
	int ret, i;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];

	/* link all queues */
	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
				 TEST_DEV_ID);

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");
	const int nb_queues = queue_count;
	for (i = 0; i < nb_queues; i++)
		queues[i] = i;

	/* With everything unlinked, links_get must report zero links. */
	ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
				 TEST_DEV_ID, ret);

	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);

	/* link all queues and get the links */
	for (i = 0; i < nb_queues; i++) {
		queues[i] = i;
		priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
	}
	ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
				 nb_queues);
	TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
				 TEST_DEV_ID, ret);
	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
				 TEST_DEV_ID, ret, nb_queues);
	/* unlink all*/
	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
				 TEST_DEV_ID, ret);
	/* link just one queue */
	queues[0] = 0;
	priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;

	ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
	TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
				 TEST_DEV_ID, ret);
	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
					TEST_DEV_ID, ret, 1);
	/* unlink the queue */
	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
	TEST_ASSERT(ret == 1, "Failed to unlink(device%d) ret=%d",
				 TEST_DEV_ID, ret);

	/* 4links and 2 unlinks */
	if (nb_queues >= 4) {
		for (i = 0; i < 4; i++) {
			queues[i] = i;
			priorities[i] = 0x40;
		}
		ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
					 4);
		TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
					 TEST_DEV_ID, ret);

		/* Unlink queues 0 and 1; queues 2 and 3 must remain. */
		for (i = 0; i < 2; i++)
			queues[i] = i;

		ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
		TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
					 TEST_DEV_ID, ret);
		ret = rte_event_port_links_get(TEST_DEV_ID, 0,
						queues, priorities);
		TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
						TEST_DEV_ID, ret, 2);
		TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
		TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
							ret, 0x40);
		TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
		TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
					ret, 0x40);
	}

	return TEST_SUCCESS;
}
925
/* A device must be stopped before it can be closed. */
static int
test_eventdev_close(void)
{
	rte_event_dev_stop(TEST_DEV_ID);
	return rte_event_dev_close(TEST_DEV_ID);
}
932
/* Ordered test-case table: query tests run with no setup, config/attr
 * tests against a freshly configured device (eventdev_configure_setup),
 * and link tests against a started device (eventdev_setup_device).
 */
static struct unit_test_suite eventdev_common_testsuite  = {
	.suite_name = "eventdev common code unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_count),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_get_dev_id),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_socket_id),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_info_get),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_configure),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_default_conf_get),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_setup),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_count),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_priority),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_nb_atomic_flows),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_nb_atomic_order_sequences),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_event_queue_cfg),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_default_conf_get),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_setup),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_dequeue_depth),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_enqueue_depth),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_new_event_threshold),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_count),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_timeout_ticks),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_start_stop),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_link),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_unlink),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_link_get),
		TEST_CASE_ST(eventdev_setup_device, NULL,
			test_eventdev_close),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
989
/* Entry point for the eventdev_common_autotest command. */
static int
test_eventdev_common(void)
{
	return unit_test_suite_runner(&eventdev_common_testsuite);
}
995
/* Run @pmd's selftest, creating the vdev with @opts on demand; report
 * TEST_SKIPPED when the driver is absent and cannot be instantiated.
 */
static int
test_eventdev_selftest_impl(const char *pmd, const char *opts)
{
	int ret = 0;

	if (rte_event_dev_get_dev_id(pmd) == -ENODEV)
		ret = rte_vdev_init(pmd, opts);
	if (ret)
		return TEST_SKIPPED;

	return rte_event_dev_selftest(rte_event_dev_get_dev_id(pmd));
}
1008
/* Selftest wrapper for the software eventdev PMD. */
static int
test_eventdev_selftest_sw(void)
{
	return test_eventdev_selftest_impl("event_sw", "");
}
1014
/* Selftest wrapper for the OCTEON TX eventdev PMD. */
static int
test_eventdev_selftest_octeontx(void)
{
	return test_eventdev_selftest_impl("event_octeontx", "");
}
1020
/* Selftest wrapper for the OCTEON TX2 eventdev PMD. */
static int
test_eventdev_selftest_octeontx2(void)
{
	return test_eventdev_selftest_impl("event_octeontx2", "");
}
1026
/* Selftest wrapper for the DPAA2 eventdev PMD. */
static int
test_eventdev_selftest_dpaa2(void)
{
	return test_eventdev_selftest_impl("event_dpaa2", "");
}
1032
/* Selftest wrapper for the Intel DLB eventdev PMD. */
static int
test_eventdev_selftest_dlb(void)
{
	return test_eventdev_selftest_impl("dlb_event", "");
}
1038
/* Selftest wrapper for the Intel DLB2 eventdev PMD. */
static int
test_eventdev_selftest_dlb2(void)
{
	return test_eventdev_selftest_impl("dlb2_event", "");
}
1044
/* Register the common testsuite and the per-PMD selftests with the
 * test application's command dispatcher.
 */
REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
		test_eventdev_selftest_octeontx);
REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,
		test_eventdev_selftest_octeontx2);
REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
REGISTER_TEST_COMMAND(eventdev_selftest_dlb, test_eventdev_selftest_dlb);
REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);
1054