1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
3 */
4
5 #include "test.h"
6
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_mbuf.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12
13 #ifdef RTE_EXEC_ENV_WINDOWS
14 static int
test_eventdev_common(void)15 test_eventdev_common(void)
16 {
17 printf("eventdev_common not supported on Windows, skipping test\n");
18 return TEST_SKIPPED;
19 }
20
21 #else
22
23 #include <rte_eventdev.h>
24 #include <rte_dev.h>
25 #include <rte_bus_vdev.h>
26
27 #define TEST_DEV_ID 0
28
29 static int
testsuite_setup(void)30 testsuite_setup(void)
31 {
32 RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
33 uint8_t count;
34 count = rte_event_dev_count();
35 if (!count) {
36 printf("Failed to find a valid event device,"
37 " testing with event_skeleton device\n");
38 return rte_vdev_init("event_skeleton", NULL);
39 }
40 return TEST_SUCCESS;
41 }
42
static void
testsuite_teardown(void)
{
	/* No per-suite cleanup required. */
}
47
48 static int
test_eventdev_count(void)49 test_eventdev_count(void)
50 {
51 uint8_t count;
52 count = rte_event_dev_count();
53 TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
54 return TEST_SUCCESS;
55 }
56
57 static int
test_eventdev_get_dev_id(void)58 test_eventdev_get_dev_id(void)
59 {
60 int ret;
61 ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
62 TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
63 return TEST_SUCCESS;
64 }
65
66 static int
test_eventdev_socket_id(void)67 test_eventdev_socket_id(void)
68 {
69 int socket_id;
70 socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
71 TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
72 socket_id);
73 socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
74 TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);
75
76 return TEST_SUCCESS;
77 }
78
79 static int
test_eventdev_info_get(void)80 test_eventdev_info_get(void)
81 {
82 int ret;
83 struct rte_event_dev_info info;
84 ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
85 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
86 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
87 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
88 TEST_ASSERT(info.max_event_ports > 0,
89 "Not enough event ports %d", info.max_event_ports);
90 TEST_ASSERT(info.max_event_queues > 0,
91 "Not enough event queues %d", info.max_event_queues);
92 return TEST_SUCCESS;
93 }
94
95 static inline void
devconf_set_default_sane_values(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)96 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
97 struct rte_event_dev_info *info)
98 {
99 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
100 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
101 dev_conf->nb_event_ports = info->max_event_ports;
102 dev_conf->nb_event_queues = info->max_event_queues;
103 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
104 dev_conf->nb_event_port_dequeue_depth =
105 info->max_event_port_dequeue_depth;
106 dev_conf->nb_event_port_enqueue_depth =
107 info->max_event_port_enqueue_depth;
108 dev_conf->nb_event_port_enqueue_depth =
109 info->max_event_port_enqueue_depth;
110 dev_conf->nb_events_limit =
111 info->max_num_events;
112 }
113
114 static int
test_ethdev_config_run(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info,void (* fn)(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info))115 test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
116 struct rte_event_dev_info *info,
117 void (*fn)(struct rte_event_dev_config *dev_conf,
118 struct rte_event_dev_info *info))
119 {
120 devconf_set_default_sane_values(dev_conf, info);
121 fn(dev_conf, info);
122 return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
123 }
124
125 static void
max_dequeue_limit(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)126 max_dequeue_limit(struct rte_event_dev_config *dev_conf,
127 struct rte_event_dev_info *info)
128 {
129 dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
130 }
131
132 static void
max_events_limit(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)133 max_events_limit(struct rte_event_dev_config *dev_conf,
134 struct rte_event_dev_info *info)
135 {
136 dev_conf->nb_events_limit = info->max_num_events + 1;
137 }
138
139 static void
max_event_ports(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)140 max_event_ports(struct rte_event_dev_config *dev_conf,
141 struct rte_event_dev_info *info)
142 {
143 dev_conf->nb_event_ports = info->max_event_ports + 1;
144 }
145
146 static void
max_event_queues(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)147 max_event_queues(struct rte_event_dev_config *dev_conf,
148 struct rte_event_dev_info *info)
149 {
150 dev_conf->nb_event_queues = info->max_event_queues + 1;
151 }
152
153 static void
max_event_queue_flows(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)154 max_event_queue_flows(struct rte_event_dev_config *dev_conf,
155 struct rte_event_dev_info *info)
156 {
157 dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
158 }
159
160 static void
max_event_port_dequeue_depth(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)161 max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
162 struct rte_event_dev_info *info)
163 {
164 dev_conf->nb_event_port_dequeue_depth =
165 info->max_event_port_dequeue_depth + 1;
166 }
167
168 static void
max_event_port_enqueue_depth(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)169 max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
170 struct rte_event_dev_info *info)
171 {
172 dev_conf->nb_event_port_enqueue_depth =
173 info->max_event_port_enqueue_depth + 1;
174 }
175
176
177 static int
test_eventdev_configure(void)178 test_eventdev_configure(void)
179 {
180 int ret;
181 struct rte_event_dev_config dev_conf;
182 struct rte_event_dev_info info;
183 ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
184 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
185
186 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
187 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
188
189 /* Check limits */
190 TEST_ASSERT_EQUAL(-EINVAL,
191 test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
192 "Config negative test failed");
193 TEST_ASSERT_EQUAL(-EINVAL,
194 test_ethdev_config_run(&dev_conf, &info, max_events_limit),
195 "Config negative test failed");
196 TEST_ASSERT_EQUAL(-EINVAL,
197 test_ethdev_config_run(&dev_conf, &info, max_event_ports),
198 "Config negative test failed");
199 TEST_ASSERT_EQUAL(-EINVAL,
200 test_ethdev_config_run(&dev_conf, &info, max_event_queues),
201 "Config negative test failed");
202 TEST_ASSERT_EQUAL(-EINVAL,
203 test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
204 "Config negative test failed");
205
206 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) {
207 TEST_ASSERT_EQUAL(-EINVAL,
208 test_ethdev_config_run(&dev_conf, &info,
209 max_event_port_dequeue_depth),
210 "Config negative test failed");
211 TEST_ASSERT_EQUAL(-EINVAL,
212 test_ethdev_config_run(&dev_conf, &info,
213 max_event_port_enqueue_depth),
214 "Config negative test failed");
215 }
216
217 /* Positive case */
218 devconf_set_default_sane_values(&dev_conf, &info);
219 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
220 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
221
222 /* re-configure */
223 devconf_set_default_sane_values(&dev_conf, &info);
224 dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
225 dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
226 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
227 TEST_ASSERT_SUCCESS(ret, "Failed to re configure eventdev");
228
229 /* re-configure back to max_event_queues and max_event_ports */
230 devconf_set_default_sane_values(&dev_conf, &info);
231 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
232 TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");
233
234 return TEST_SUCCESS;
235
236 }
237
238 static int
eventdev_configure_setup(void)239 eventdev_configure_setup(void)
240 {
241 int ret;
242 struct rte_event_dev_config dev_conf;
243 struct rte_event_dev_info info;
244
245 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
246 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
247 devconf_set_default_sane_values(&dev_conf, &info);
248 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
249 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
250
251 return TEST_SUCCESS;
252 }
253
254 static int
test_eventdev_queue_default_conf_get(void)255 test_eventdev_queue_default_conf_get(void)
256 {
257 int i, ret;
258 struct rte_event_queue_conf qconf;
259
260 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
261 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
262
263 uint32_t queue_count;
264 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
265 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
266 "Queue count get failed");
267
268 for (i = 0; i < (int)queue_count; i++) {
269 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
270 &qconf);
271 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
272 }
273
274 return TEST_SUCCESS;
275 }
276
277 static int
test_eventdev_queue_setup(void)278 test_eventdev_queue_setup(void)
279 {
280 int i, ret;
281 struct rte_event_dev_info info;
282 struct rte_event_queue_conf qconf;
283
284 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
285 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
286
287 /* Negative cases */
288 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
289 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
290 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
291 qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
292 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
293 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
294
295 qconf.nb_atomic_flows = info.max_event_queue_flows;
296 qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
297 qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
298 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
299 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
300
301 ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
302 &qconf);
303 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
304
305 /* Positive case */
306 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
307 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
308 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
309 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");
310
311 uint32_t queue_count;
312 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
313 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
314 "Queue count get failed");
315
316 for (i = 0; i < (int)queue_count; i++) {
317 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
318 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
319 }
320
321 return TEST_SUCCESS;
322 }
323
324 static int
test_eventdev_queue_count(void)325 test_eventdev_queue_count(void)
326 {
327 int ret;
328 struct rte_event_dev_info info;
329
330 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
331 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
332
333 uint32_t queue_count;
334 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
335 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
336 "Queue count get failed");
337 TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
338 "Wrong queue count");
339
340 return TEST_SUCCESS;
341 }
342
343 static int
test_eventdev_queue_attr_priority(void)344 test_eventdev_queue_attr_priority(void)
345 {
346 int i, ret;
347 struct rte_event_dev_info info;
348 struct rte_event_queue_conf qconf;
349 uint8_t priority;
350
351 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
352 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
353
354 uint32_t queue_count;
355 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
356 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
357 "Queue count get failed");
358
359 for (i = 0; i < (int)queue_count; i++) {
360 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
361 &qconf);
362 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
363 qconf.priority = i % RTE_EVENT_DEV_PRIORITY_LOWEST;
364 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
365 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
366 }
367
368 for (i = 0; i < (int)queue_count; i++) {
369 uint32_t tmp;
370 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
371 RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
372 "Queue priority get failed");
373 priority = tmp;
374
375 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
376 TEST_ASSERT_EQUAL(priority,
377 i % RTE_EVENT_DEV_PRIORITY_LOWEST,
378 "Wrong priority value for queue%d", i);
379 else
380 TEST_ASSERT_EQUAL(priority,
381 RTE_EVENT_DEV_PRIORITY_NORMAL,
382 "Wrong priority value for queue%d", i);
383 }
384
385 return TEST_SUCCESS;
386 }
387
388 static int
test_eventdev_queue_attr_priority_runtime(void)389 test_eventdev_queue_attr_priority_runtime(void)
390 {
391 uint32_t queue_count, queue_req, prio, deq_cnt;
392 struct rte_event_queue_conf qconf;
393 struct rte_event_port_conf pconf;
394 struct rte_event_dev_info info;
395 struct rte_event event = {
396 .op = RTE_EVENT_OP_NEW,
397 .event_type = RTE_EVENT_TYPE_CPU,
398 .sched_type = RTE_SCHED_TYPE_ATOMIC,
399 .u64 = 0xbadbadba,
400 };
401 int i, ret;
402
403 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
404 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
405
406 if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
407 return TEST_SKIPPED;
408
409 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
410 TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
411 &queue_count),
412 "Queue count get failed");
413
414 /* Need at least 2 queues to test LOW and HIGH priority. */
415 TEST_ASSERT(queue_count > 1, "Not enough event queues, needed 2");
416 queue_req = 2;
417
418 for (i = 0; i < (int)queue_count; i++) {
419 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
420 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
421 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
422 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
423 }
424
425 ret = rte_event_queue_attr_set(TEST_DEV_ID, 0,
426 RTE_EVENT_QUEUE_ATTR_PRIORITY,
427 RTE_EVENT_DEV_PRIORITY_LOWEST);
428 if (ret == -ENOTSUP)
429 return TEST_SKIPPED;
430 TEST_ASSERT_SUCCESS(ret, "Queue0 priority set failed");
431
432 ret = rte_event_queue_attr_set(TEST_DEV_ID, 1,
433 RTE_EVENT_QUEUE_ATTR_PRIORITY,
434 RTE_EVENT_DEV_PRIORITY_HIGHEST);
435 if (ret == -ENOTSUP)
436 return TEST_SKIPPED;
437 TEST_ASSERT_SUCCESS(ret, "Queue1 priority set failed");
438
439 /* Setup event port 0 */
440 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
441 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
442 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
443 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
444 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
445 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
446 TEST_DEV_ID);
447
448 ret = rte_event_dev_start(TEST_DEV_ID);
449 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
450
451 for (i = 0; i < (int)queue_req; i++) {
452 event.queue_id = i;
453 while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &event, 1) != 1)
454 rte_pause();
455 }
456
457 prio = RTE_EVENT_DEV_PRIORITY_HIGHEST;
458 deq_cnt = 0;
459 while (deq_cnt < queue_req) {
460 uint32_t queue_prio;
461
462 if (rte_event_dequeue_burst(TEST_DEV_ID, 0, &event, 1, 0) == 0)
463 continue;
464
465 ret = rte_event_queue_attr_get(TEST_DEV_ID, event.queue_id,
466 RTE_EVENT_QUEUE_ATTR_PRIORITY,
467 &queue_prio);
468 if (ret == -ENOTSUP)
469 return TEST_SKIPPED;
470
471 TEST_ASSERT_SUCCESS(ret, "Queue priority get failed");
472 TEST_ASSERT(queue_prio >= prio,
473 "Received event from a lower priority queue first");
474 prio = queue_prio;
475 deq_cnt++;
476 }
477
478 return TEST_SUCCESS;
479 }
480
481 static int
test_eventdev_queue_attr_weight_runtime(void)482 test_eventdev_queue_attr_weight_runtime(void)
483 {
484 struct rte_event_queue_conf qconf;
485 struct rte_event_dev_info info;
486 uint32_t queue_count;
487 int i, ret;
488
489 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
490 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
491
492 if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
493 return TEST_SKIPPED;
494
495 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
496 TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
497 &queue_count),
498 "Queue count get failed");
499
500 for (i = 0; i < (int)queue_count; i++) {
501 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
502 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
503 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
504 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
505 }
506
507 for (i = 0; i < (int)queue_count; i++) {
508 uint32_t get_val;
509 uint64_t set_val;
510
511 set_val = i % RTE_EVENT_QUEUE_WEIGHT_HIGHEST;
512 ret = rte_event_queue_attr_set(
513 TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_WEIGHT, set_val);
514 if (ret == -ENOTSUP)
515 return TEST_SKIPPED;
516
517 TEST_ASSERT_SUCCESS(ret, "Queue weight set failed");
518
519 ret = rte_event_queue_attr_get(
520 TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_WEIGHT, &get_val);
521 if (ret == -ENOTSUP)
522 return TEST_SKIPPED;
523
524 TEST_ASSERT_SUCCESS(ret, "Queue weight get failed");
525 TEST_ASSERT_EQUAL(get_val, set_val,
526 "Wrong weight value for queue%d", i);
527 }
528
529 return TEST_SUCCESS;
530 }
531
532 static int
test_eventdev_queue_attr_affinity_runtime(void)533 test_eventdev_queue_attr_affinity_runtime(void)
534 {
535 struct rte_event_queue_conf qconf;
536 struct rte_event_dev_info info;
537 uint32_t queue_count;
538 int i, ret;
539
540 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
541 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
542
543 if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
544 return TEST_SKIPPED;
545
546 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
547 TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
548 &queue_count),
549 "Queue count get failed");
550
551 for (i = 0; i < (int)queue_count; i++) {
552 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
553 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
554 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
555 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
556 }
557
558 for (i = 0; i < (int)queue_count; i++) {
559 uint32_t get_val;
560 uint64_t set_val;
561
562 set_val = i % RTE_EVENT_QUEUE_AFFINITY_HIGHEST;
563 ret = rte_event_queue_attr_set(
564 TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_AFFINITY, set_val);
565 if (ret == -ENOTSUP)
566 return TEST_SKIPPED;
567
568 TEST_ASSERT_SUCCESS(ret, "Queue affinity set failed");
569
570 ret = rte_event_queue_attr_get(
571 TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_AFFINITY, &get_val);
572 if (ret == -ENOTSUP)
573 return TEST_SKIPPED;
574
575 TEST_ASSERT_SUCCESS(ret, "Queue affinity get failed");
576 TEST_ASSERT_EQUAL(get_val, set_val,
577 "Wrong affinity value for queue%d", i);
578 }
579
580 return TEST_SUCCESS;
581 }
582
583 static int
test_eventdev_queue_attr_nb_atomic_flows(void)584 test_eventdev_queue_attr_nb_atomic_flows(void)
585 {
586 int i, ret;
587 struct rte_event_dev_info info;
588 struct rte_event_queue_conf qconf;
589 uint32_t nb_atomic_flows;
590
591 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
592 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
593
594 uint32_t queue_count;
595 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
596 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
597 "Queue count get failed");
598
599 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
600 TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
601
602 if (qconf.nb_atomic_flows == 0)
603 /* Assume PMD doesn't support atomic flows, return early */
604 return -ENOTSUP;
605
606 qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
607
608 for (i = 0; i < (int)queue_count; i++) {
609 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
610 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
611 }
612
613 for (i = 0; i < (int)queue_count; i++) {
614 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
615 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
616 &nb_atomic_flows),
617 "Queue nb_atomic_flows get failed");
618
619 TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
620 "Wrong atomic flows value for queue%d", i);
621 }
622
623 return TEST_SUCCESS;
624 }
625
626 static int
test_eventdev_queue_attr_nb_atomic_order_sequences(void)627 test_eventdev_queue_attr_nb_atomic_order_sequences(void)
628 {
629 int i, ret;
630 struct rte_event_dev_info info;
631 struct rte_event_queue_conf qconf;
632 uint32_t nb_atomic_order_sequences;
633
634 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
635 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
636
637 uint32_t queue_count;
638 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
639 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
640 "Queue count get failed");
641
642 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
643 TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
644
645 if (qconf.nb_atomic_order_sequences == 0)
646 /* Assume PMD doesn't support reordering */
647 return -ENOTSUP;
648
649 qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
650
651 for (i = 0; i < (int)queue_count; i++) {
652 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
653 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
654 }
655
656 for (i = 0; i < (int)queue_count; i++) {
657 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
658 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
659 &nb_atomic_order_sequences),
660 "Queue nb_atomic_order_sequencess get failed");
661
662 TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
663 qconf.nb_atomic_order_sequences,
664 "Wrong atomic order sequences value for queue%d",
665 i);
666 }
667
668 return TEST_SUCCESS;
669 }
670
671 static int
test_eventdev_queue_attr_event_queue_cfg(void)672 test_eventdev_queue_attr_event_queue_cfg(void)
673 {
674 int i, ret;
675 struct rte_event_dev_info info;
676 struct rte_event_queue_conf qconf;
677 uint32_t event_queue_cfg;
678
679 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
680 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
681
682 uint32_t queue_count;
683 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
684 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
685 "Queue count get failed");
686
687 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
688 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
689
690 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
691
692 for (i = 0; i < (int)queue_count; i++) {
693 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
694 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
695 }
696
697 for (i = 0; i < (int)queue_count; i++) {
698 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
699 RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
700 &event_queue_cfg),
701 "Queue event_queue_cfg get failed");
702
703 TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
704 "Wrong event_queue_cfg value for queue%d",
705 i);
706 }
707
708 return TEST_SUCCESS;
709 }
710
711 static int
test_eventdev_port_default_conf_get(void)712 test_eventdev_port_default_conf_get(void)
713 {
714 int i, ret;
715 struct rte_event_port_conf pconf;
716
717 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
718 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
719
720 uint32_t port_count;
721 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
722 RTE_EVENT_DEV_ATTR_PORT_COUNT,
723 &port_count), "Port count get failed");
724
725 ret = rte_event_port_default_conf_get(TEST_DEV_ID,
726 port_count + 1, NULL);
727 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
728
729 for (i = 0; i < (int)port_count; i++) {
730 ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
731 &pconf);
732 TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
733 }
734
735 return TEST_SUCCESS;
736 }
737
738 static int
test_eventdev_port_setup(void)739 test_eventdev_port_setup(void)
740 {
741 int i, ret;
742 struct rte_event_dev_info info;
743 struct rte_event_port_conf pconf;
744
745 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
746 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
747
748 /* Negative cases */
749 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
750 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
751 pconf.new_event_threshold = info.max_num_events + 1;
752 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
753 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
754
755 pconf.new_event_threshold = info.max_num_events;
756 pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
757 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
758 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
759
760 pconf.dequeue_depth = info.max_event_port_dequeue_depth;
761 pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
762 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
763 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
764
765 if (!(info.event_dev_cap &
766 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
767 pconf.enqueue_depth = info.max_event_port_enqueue_depth;
768 pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
769 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
770 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
771 pconf.event_port_cfg = 0;
772 }
773
774 ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
775 &pconf);
776 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
777
778 /* Positive case */
779 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
780 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
781 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
782 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
783
784 uint32_t port_count;
785 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
786 RTE_EVENT_DEV_ATTR_PORT_COUNT,
787 &port_count), "Port count get failed");
788
789 for (i = 0; i < (int)port_count; i++) {
790 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
791 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
792 }
793
794 return TEST_SUCCESS;
795 }
796
797 static int
test_eventdev_port_attr_dequeue_depth(void)798 test_eventdev_port_attr_dequeue_depth(void)
799 {
800 int ret;
801 struct rte_event_dev_info info;
802 struct rte_event_port_conf pconf;
803
804 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
805 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
806
807 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
808 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
809 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
810 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
811
812 uint32_t value;
813 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
814 RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
815 0, "Call to get port dequeue depth failed");
816 TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
817 "Wrong port dequeue depth");
818
819 return TEST_SUCCESS;
820 }
821
822 static int
test_eventdev_port_attr_enqueue_depth(void)823 test_eventdev_port_attr_enqueue_depth(void)
824 {
825 int ret;
826 struct rte_event_dev_info info;
827 struct rte_event_port_conf pconf;
828
829 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
830 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
831
832 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
833 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
834 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
835 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
836
837 uint32_t value;
838 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
839 RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
840 0, "Call to get port enqueue depth failed");
841 TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
842 "Wrong port enqueue depth");
843
844 return TEST_SUCCESS;
845 }
846
847 static int
test_eventdev_port_attr_new_event_threshold(void)848 test_eventdev_port_attr_new_event_threshold(void)
849 {
850 int ret;
851 struct rte_event_dev_info info;
852 struct rte_event_port_conf pconf;
853
854 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
855 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
856
857 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
858 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
859 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
860 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
861
862 uint32_t value;
863 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
864 RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
865 0, "Call to get port new event threshold failed");
866 TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
867 "Wrong port new event threshold");
868
869 return TEST_SUCCESS;
870 }
871
872 static int
test_eventdev_port_count(void)873 test_eventdev_port_count(void)
874 {
875 int ret;
876 struct rte_event_dev_info info;
877
878 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
879 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
880
881 uint32_t port_count;
882 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
883 RTE_EVENT_DEV_ATTR_PORT_COUNT,
884 &port_count), "Port count get failed");
885 TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");
886
887 return TEST_SUCCESS;
888 }
889
890 static int
test_eventdev_timeout_ticks(void)891 test_eventdev_timeout_ticks(void)
892 {
893 int ret;
894 uint64_t timeout_ticks;
895
896 ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
897 if (ret != -ENOTSUP)
898 TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
899
900 return ret;
901 }
902
903
904 static int
test_eventdev_start_stop(void)905 test_eventdev_start_stop(void)
906 {
907 int i, ret;
908
909 ret = eventdev_configure_setup();
910 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
911
912 uint32_t queue_count;
913 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
914 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
915 "Queue count get failed");
916 for (i = 0; i < (int)queue_count; i++) {
917 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
918 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
919 }
920
921 uint32_t port_count;
922 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
923 RTE_EVENT_DEV_ATTR_PORT_COUNT,
924 &port_count), "Port count get failed");
925
926 for (i = 0; i < (int)port_count; i++) {
927 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
928 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
929 }
930
931 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
932 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
933 TEST_DEV_ID);
934
935 ret = rte_event_dev_start(TEST_DEV_ID);
936 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
937
938 rte_event_dev_stop(TEST_DEV_ID);
939 return TEST_SUCCESS;
940 }
941
942
943 static int
eventdev_setup_device(void)944 eventdev_setup_device(void)
945 {
946 int i, ret;
947
948 ret = eventdev_configure_setup();
949 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
950
951 uint32_t queue_count;
952 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
953 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
954 "Queue count get failed");
955 for (i = 0; i < (int)queue_count; i++) {
956 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
957 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
958 }
959
960 uint32_t port_count;
961 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
962 RTE_EVENT_DEV_ATTR_PORT_COUNT,
963 &port_count), "Port count get failed");
964
965 for (i = 0; i < (int)port_count; i++) {
966 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
967 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
968 }
969
970 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
971 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
972 TEST_DEV_ID);
973
974 ret = rte_event_dev_start(TEST_DEV_ID);
975 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
976
977 return TEST_SUCCESS;
978 }
979
/* Teardown fixture paired with eventdev_setup_device(): stop the device. */
static void
eventdev_stop_device(void)
{
	rte_event_dev_stop(TEST_DEV_ID);
}
985
986 static int
test_eventdev_link(void)987 test_eventdev_link(void)
988 {
989 int ret, nb_queues, i;
990 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
991 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
992
993 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
994 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
995 TEST_DEV_ID);
996
997 uint32_t queue_count;
998 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
999 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1000 "Queue count get failed");
1001 nb_queues = queue_count;
1002 for (i = 0; i < nb_queues; i++) {
1003 queues[i] = i;
1004 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
1005 }
1006
1007 ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
1008 priorities, nb_queues);
1009 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
1010 TEST_DEV_ID, ret);
1011 return TEST_SUCCESS;
1012 }
1013
1014 static int
test_eventdev_unlink(void)1015 test_eventdev_unlink(void)
1016 {
1017 int ret, nb_queues, i;
1018 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1019
1020 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
1021 TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
1022 TEST_DEV_ID);
1023
1024 uint32_t queue_count;
1025 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
1026 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1027 "Queue count get failed");
1028 nb_queues = queue_count;
1029 for (i = 0; i < nb_queues; i++)
1030 queues[i] = i;
1031
1032 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
1033 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
1034 TEST_DEV_ID);
1035
1036 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
1037 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
1038 TEST_DEV_ID, ret);
1039 return TEST_SUCCESS;
1040 }
1041
/*
 * Exercise rte_event_port_links_get() on port 0 through a sequence of
 * link/unlink operations:
 *  - links_get on a fully unlinked port must report 0 links
 *  - linking all queues must be reported back in full
 *  - linking a single queue must report exactly one link
 *  - after linking 4 queues and unlinking the first 2, the remaining
 *    links (queues 2 and 3) must be reported with priority intact
 */
static int
test_eventdev_link_get(void)
{
	int ret, i;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];

	/* link all queues */
	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
			TEST_DEV_ID);

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");
	const int nb_queues = queue_count;
	for (i = 0; i < nb_queues; i++)
		queues[i] = i;

	/* Unlink everything; links_get must now report zero links. */
	ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
			TEST_DEV_ID, ret);

	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);

	/* link all queues and get the links */
	for (i = 0; i < nb_queues; i++) {
		queues[i] = i;
		priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
	}
	ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
			nb_queues);
	TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
			TEST_DEV_ID, ret);
	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
			TEST_DEV_ID, ret, nb_queues);
	/* unlink all*/
	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
			TEST_DEV_ID, ret);
	/* link just one queue */
	queues[0] = 0;
	priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;

	ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
	TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
			TEST_DEV_ID, ret);
	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
			TEST_DEV_ID, ret, 1);
	/* unlink the queue */
	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
	TEST_ASSERT(ret == 1, "Failed to unlink(device%d) ret=%d",
			TEST_DEV_ID, ret);

	/* 4links and 2 unlinks */
	if (nb_queues >= 4) {
		for (i = 0; i < 4; i++) {
			queues[i] = i;
			priorities[i] = 0x40;
		}
		ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
				4);
		TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
				TEST_DEV_ID, ret);

		/* Unlink queues 0 and 1; queues 2 and 3 must survive. */
		for (i = 0; i < 2; i++)
			queues[i] = i;

		ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
		TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
				TEST_DEV_ID, ret);
		ret = rte_event_port_links_get(TEST_DEV_ID, 0,
				queues, priorities);
		TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
				TEST_DEV_ID, ret, 2);
		TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
		TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
				ret, 0x40);
		TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
		TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
				ret, 0x40);
	}

	return TEST_SUCCESS;
}
1131
1132 static int
test_eventdev_close(void)1133 test_eventdev_close(void)
1134 {
1135 rte_event_dev_stop(TEST_DEV_ID);
1136 return rte_event_dev_close(TEST_DEV_ID);
1137 }
1138
/*
 * Common eventdev API testsuite. Attribute/config cases reconfigure the
 * device themselves via the eventdev_configure_setup fixture; the
 * link/unlink/close cases need a started device and use the
 * eventdev_setup_device / eventdev_stop_device fixture pair. Order
 * matters: test_eventdev_close runs last and closes the device.
 */
static struct unit_test_suite eventdev_common_testsuite = {
	.suite_name = "eventdev common code unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_count),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_get_dev_id),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_socket_id),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_info_get),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_configure),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_default_conf_get),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_setup),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_count),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_priority),
		TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device,
			test_eventdev_queue_attr_priority_runtime),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_weight_runtime),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_affinity_runtime),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_nb_atomic_flows),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_nb_atomic_order_sequences),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_event_queue_cfg),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_default_conf_get),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_setup),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_dequeue_depth),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_enqueue_depth),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_new_event_threshold),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_count),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_timeout_ticks),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_start_stop),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_link),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_unlink),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_link_get),
		TEST_CASE_ST(eventdev_setup_device, NULL,
			test_eventdev_close),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
1201
/* Entry point for the eventdev_common_autotest command: run the suite. */
static int
test_eventdev_common(void)
{
	return unit_test_suite_runner(&eventdev_common_testsuite);
}
1207
1208 static int
test_eventdev_selftest_impl(const char * pmd,const char * opts)1209 test_eventdev_selftest_impl(const char *pmd, const char *opts)
1210 {
1211 int ret = 0;
1212
1213 if (rte_event_dev_get_dev_id(pmd) == -ENODEV)
1214 ret = rte_vdev_init(pmd, opts);
1215 if (ret)
1216 return TEST_SKIPPED;
1217
1218 return rte_event_dev_selftest(rte_event_dev_get_dev_id(pmd));
1219 }
1220
/* Selftest wrapper for the software (event_sw) PMD. */
static int
test_eventdev_selftest_sw(void)
{
	return test_eventdev_selftest_impl("event_sw", "");
}
1226
/* Selftest wrapper for the Marvell OCTEON TX (event_octeontx) PMD. */
static int
test_eventdev_selftest_octeontx(void)
{
	return test_eventdev_selftest_impl("event_octeontx", "");
}
1232
/* Selftest wrapper for the NXP DPAA2 (event_dpaa2) PMD. */
static int
test_eventdev_selftest_dpaa2(void)
{
	return test_eventdev_selftest_impl("event_dpaa2", "");
}
1238
/* Selftest wrapper for the Intel DLB2 (dlb2_event) PMD. */
static int
test_eventdev_selftest_dlb2(void)
{
	return test_eventdev_selftest_impl("dlb2_event", "");
}
1244
/* Selftest wrapper for the Marvell CN9K (event_cn9k) PMD. */
static int
test_eventdev_selftest_cn9k(void)
{
	return test_eventdev_selftest_impl("event_cn9k", "");
}
1250
/* Selftest wrapper for the Marvell CN10K (event_cn10k) PMD. */
static int
test_eventdev_selftest_cn10k(void)
{
	return test_eventdev_selftest_impl("event_cn10k", "");
}
1256
1257 #endif /* !RTE_EXEC_ENV_WINDOWS */
1258
/* The common testsuite is registered on every platform (a stub that
 * skips is compiled in on Windows); the per-driver selftests depend on
 * vdev support and are only registered on non-Windows builds.
 */
REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);

#ifndef RTE_EXEC_ENV_WINDOWS
REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
		test_eventdev_selftest_octeontx);
REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);
REGISTER_TEST_COMMAND(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k);
REGISTER_TEST_COMMAND(eventdev_selftest_cn10k, test_eventdev_selftest_cn10k);

#endif /* !RTE_EXEC_ENV_WINDOWS */
1271