/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_bus_vdev.h>

#include <rte_event_eth_rx_adapter.h>

#include "test.h"

#define MAX_NUM_RX_QUEUE 64
#define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
#define MBUF_CACHE_SIZE 512
#define MBUF_PRIV_SIZE 0
#define TEST_INST_ID 0
#define TEST_DEV_ID 0
#define TEST_ETHDEV_ID 0

struct event_eth_rx_adapter_test_params {
	struct rte_mempool *mp;
	uint16_t rx_rings, tx_rings;
	uint32_t caps;
	int rx_intr_port_inited;
	uint16_t rx_intr_port;
};

static struct event_eth_rx_adapter_test_params default_params;
static bool event_dev_created;
static bool eth_dev_created;

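/*
 * Common port initialization: configure Rx/Tx queues, start the port,
 * print its MAC address and enable promiscuous mode. The number of Rx
 * queues is capped at MAX_NUM_RX_QUEUE.
 */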
static inline int
port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
		struct rte_mempool *mp)
{
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = rte_eth_dev_configure(port, 0, 0, port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0)
		return retval;

	default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
					MAX_NUM_RX_QUEUE);
	default_params.tx_rings = 1;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, default_params.rx_rings,
				default_params.tx_rings, port_conf);
	if (retval != 0)
		return retval;

	for (q = 0; q < default_params.rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mp);
		if (retval < 0)
			return retval;
	}

	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < default_params.tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), NULL);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct rte_ether_addr addr;
	retval = rte_eth_macaddr_get(port, &addr);
	if (retval < 0)
		return retval;
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	retval = rte_eth_promiscuous_enable(port);
	if (retval != 0)
		return retval;

	return 0;
}

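/*
 * Variant of port initialization with per-queue Rx interrupts enabled
 * (intr_conf.rxq = 1); port_init() below uses the same defaults without
 * interrupts.
 */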
static inline int
port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
{
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_NONE,
		},
		.intr_conf = {
			.rxq = 1,
		},
	};

	return port_init_common(port, &port_conf_default, mp);
}

static inline int
port_init(uint16_t port, struct rte_mempool *mp)
{
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_NONE,
		},
	};

	return port_init_common(port, &port_conf_default, mp);
}

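/*
 * Create the packet mempool and pick the first port whose adapter
 * capabilities do not include an internal port; that port is used by the
 * Rx interrupt tests. A port that has the internal port capability is
 * stopped again and the search continues.
 */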
static int
init_port_rx_intr(int num_ports)
{
	int retval;
	uint16_t portid;
	int err;

	default_params.mp = rte_pktmbuf_pool_create("packet_pool",
			NB_MBUFS,
			MBUF_CACHE_SIZE,
			MBUF_PRIV_SIZE,
			RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());
	if (!default_params.mp)
		return -ENOMEM;

	RTE_ETH_FOREACH_DEV(portid) {
		retval = port_init_rx_intr(portid, default_params.mp);
		if (retval)
			continue;
		err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
				&default_params.caps);
		if (err)
			continue;
		if (!(default_params.caps &
				RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			default_params.rx_intr_port_inited = 1;
			default_params.rx_intr_port = portid;
			return 0;
		}
		retval = rte_eth_dev_stop(portid);
		TEST_ASSERT(retval == 0, "Failed to stop port %u: %d\n",
				portid, retval);
	}
	return 0;
}

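/*
 * Initialize all probed eth ports in poll mode. The "packet_pool" mempool
 * is reused if it already exists, since this function may be called again
 * after additional vdev ports have been created.
 */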
static int
init_ports(int num_ports)
{
	uint16_t portid;
	int retval;

	struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");

	if (ptr == NULL)
		default_params.mp = rte_pktmbuf_pool_create("packet_pool",
				NB_MBUFS,
				MBUF_CACHE_SIZE,
				MBUF_PRIV_SIZE,
				RTE_MBUF_DEFAULT_BUF_SIZE,
				rte_socket_id());
	else
		default_params.mp = ptr;

	if (!default_params.mp)
		return -ENOMEM;

	RTE_ETH_FOREACH_DEV(portid) {
		retval = port_init(portid, default_params.mp);
		if (retval)
			return retval;
	}

	return 0;
}

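/*
 * Suite setup: create an event_skeleton vdev if no event device is
 * present, configure event device 0 with a single queue and port, create
 * a net_null vdev if no eth device is present, then initialize the ports
 * and query the adapter capabilities for TEST_ETHDEV_ID.
 */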
static int
testsuite_setup(void)
{
	int err;
	uint8_t count;
	struct rte_event_dev_info dev_info;

	count = rte_event_dev_count();
	if (!count) {
		printf("Failed to find a valid event device,"
				" testing with event_skeleton device\n");
		err = rte_vdev_init("event_skeleton", NULL);
		TEST_ASSERT(err == 0, "Failed to create event_skeleton. err=%d",
				err);
		event_dev_created = true;
	}

	struct rte_event_dev_config config = {
		.nb_event_queues = 1,
		.nb_event_ports = 1,
	};

	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	config.nb_event_queue_flows = dev_info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth;
	config.nb_events_limit =
			dev_info.max_num_events;
	err = rte_event_dev_configure(TEST_DEV_ID, &config);
	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
			err);

	count = rte_eth_dev_count_total();
	if (!count) {
		printf("Testing with net_null device\n");
		err = rte_vdev_init("net_null", NULL);
		TEST_ASSERT(err == 0, "Failed to create net_null. err=%d",
				err);
		eth_dev_created = true;
	}

	/*
	 * eth devices like octeontx use event device to receive packets
	 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
	 * call init_ports after rte_event_dev_configure
	 */
	err = init_ports(rte_eth_dev_count_total());
	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);

	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
			&default_params.caps);
	TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
			err);

	return err;
}

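/*
 * Same setup as above, but the ports are configured with Rx interrupts so
 * the interrupt-mode queue add/del test can run on a suitable port.
 */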
static int
testsuite_setup_rx_intr(void)
{
	int err;
	uint8_t count;
	struct rte_event_dev_info dev_info;

	count = rte_event_dev_count();
	if (!count) {
		printf("Failed to find a valid event device,"
				" testing with event_skeleton device\n");
		err = rte_vdev_init("event_skeleton", NULL);
		TEST_ASSERT(err == 0, "Failed to create event_skeleton. err=%d",
				err);
		event_dev_created = true;
	}

	struct rte_event_dev_config config = {
		.nb_event_queues = 1,
		.nb_event_ports = 1,
	};

	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	config.nb_event_queue_flows = dev_info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth;
	config.nb_events_limit =
			dev_info.max_num_events;

	err = rte_event_dev_configure(TEST_DEV_ID, &config);
	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
			err);

	count = rte_eth_dev_count_total();
	if (!count) {
		printf("Testing with net_null device\n");
		err = rte_vdev_init("net_null", NULL);
		TEST_ASSERT(err == 0, "Failed to create net_null. err=%d",
				err);
		eth_dev_created = true;
	}

	/*
	 * eth devices like octeontx use event device to receive packets
	 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
	 * call init_ports after rte_event_dev_configure
	 */
	err = init_port_rx_intr(rte_eth_dev_count_total());
	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);

	if (!default_params.rx_intr_port_inited)
		return 0;

	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
			default_params.rx_intr_port,
			&default_params.caps);
	TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);

	return err;
}

static void
testsuite_teardown(void)
{
	int err;
	uint32_t i;
	RTE_ETH_FOREACH_DEV(i)
		rte_eth_dev_stop(i);

	if (eth_dev_created) {
		err = rte_vdev_uninit("net_null");
		if (err)
			printf("Failed to delete net_null. err=%d", err);
		eth_dev_created = false;
	}

	rte_mempool_free(default_params.mp);
	if (event_dev_created) {
		err = rte_vdev_uninit("event_skeleton");
		if (err)
			printf("Failed to delete event_skeleton. err=%d", err);
		event_dev_created = false;
	}

	memset(&default_params, 0, sizeof(default_params));
}

static void
testsuite_teardown_rx_intr(void)
{
	int err;
	if (!default_params.rx_intr_port_inited)
		return;

	rte_eth_dev_stop(default_params.rx_intr_port);
	if (eth_dev_created) {
		err = rte_vdev_uninit("net_null");
		if (err)
			printf("Failed to delete net_null. err=%d", err);
		eth_dev_created = false;
	}
	rte_mempool_free(default_params.mp);
	if (event_dev_created) {
		err = rte_vdev_uninit("event_skeleton");
		if (err)
			printf("Failed to delete event_skeleton. err=%d", err);
		event_dev_created = false;
	}

	memset(&default_params, 0, sizeof(default_params));
}

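/*
 * Per-test setup: create Rx adapter instance TEST_INST_ID on event device
 * TEST_DEV_ID using the device's maximum port limits; adapter_free()
 * releases it after each test.
 */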
static int
adapter_create(void)
{
	int err;
	struct rte_event_dev_info dev_info;
	struct rte_event_port_conf rx_p_conf;

	memset(&rx_p_conf, 0, sizeof(rx_p_conf));

	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	rx_p_conf.new_event_threshold = dev_info.max_num_events;
	rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
	rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
	err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
			&rx_p_conf);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	return err;
}

static void
adapter_free(void)
{
	rte_event_eth_rx_adapter_free(TEST_INST_ID);
}

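/*
 * Exercise create/free error handling: a NULL port config, a duplicate
 * create, a double free and a free of an unknown adapter id are all
 * expected to fail with the documented error codes.
 */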
static int
adapter_create_free(void)
{
	int err;

	struct rte_event_port_conf rx_p_conf = {
		.dequeue_depth = 8,
		.enqueue_depth = 8,
		.new_event_threshold = 1200,
	};

	err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
			NULL);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
			&rx_p_conf);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
			TEST_DEV_ID, &rx_p_conf);
	TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);

	err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);

	err = rte_event_eth_rx_adapter_free(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);

	return TEST_SUCCESS;
}

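/*
 * Add and delete Rx queues, covering single-queue and all-queue (-1)
 * requests, an invalid ethdev id and an invalid adapter id. Which
 * combinations are legal depends on the MULTI_EVENTQ capability.
 */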
static int
adapter_queue_add_del(void)
{
	int err;
	struct rte_event ev;
	uint32_t cap;

	struct rte_event_eth_rx_adapter_queue_conf queue_config;

	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
			&cap);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.priority = 0;

	queue_config.rx_queue_flags = 0;
	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
		ev.flow_id = 1;
		queue_config.rx_queue_flags =
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
	}
	queue_config.ev = ev;
	queue_config.servicing_weight = 1;

	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
			rte_eth_dev_count_total(),
			-1, &queue_config);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
				TEST_ETHDEV_ID, 0,
				&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
				TEST_ETHDEV_ID, 0);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
				TEST_ETHDEV_ID,
				-1,
				&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
				TEST_ETHDEV_ID,
				-1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	} else {
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
				TEST_ETHDEV_ID,
				0,
				&queue_config);
		TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
				TEST_ETHDEV_ID, -1,
				&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
				TEST_ETHDEV_ID, 0);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
				TEST_ETHDEV_ID, -1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
				TEST_ETHDEV_ID, -1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
			&queue_config);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}

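/*
 * Fill the port table up to RTE_MAX_ETHPORTS with net_null vdevs, add and
 * delete Rx adapter queues on every port, then remove the extra vdevs.
 */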
static int
adapter_multi_eth_add_del(void)
{
	int err;
	struct rte_event ev;

	uint16_t port_index, port_index_base, drv_id = 0;
	char driver_name[50];

	struct rte_event_eth_rx_adapter_queue_conf queue_config;

	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.priority = 0;

	queue_config.rx_queue_flags = 0;
	queue_config.ev = ev;
	queue_config.servicing_weight = 1;

	/* stop the eth devices created so far */
	port_index = 0;
	for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
		err = rte_eth_dev_stop(port_index);
		TEST_ASSERT(err == 0, "Failed to stop port %u: %d\n",
				port_index, err);
	}

	/* create vdev ports until RTE_MAX_ETHPORTS is reached */
	port_index = rte_eth_dev_count_total();
	port_index_base = port_index;
	for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
		snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
				drv_id);
		err = rte_vdev_init(driver_name, NULL);
		TEST_ASSERT(err == 0, "Failed driver %s got %d",
				driver_name, err);
		drv_id += 1;
	}

	err = init_ports(rte_eth_dev_count_total());
	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);

	/* eth_rx_adapter_queue_add for n ports */
	port_index = 0;
	for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
				port_index, -1,
				&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	/* eth_rx_adapter_queue_del n ports */
	port_index = 0;
	for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
				port_index, -1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	/* delete vdev ports */
	for (drv_id = 0, port_index = port_index_base;
			port_index < RTE_MAX_ETHPORTS;
			drv_id += 1, port_index += 1) {
		snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
				drv_id);
		err = rte_vdev_uninit(driver_name);
		TEST_ASSERT(err == 0, "Failed driver %s got %d",
				driver_name, err);
	}

	return TEST_SUCCESS;
}

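/*
 * Queue add/del in interrupt mode (servicing_weight == 0), including a
 * switch of the queues from interrupt to poll mode. Runs only when a port
 * without an internal adapter port was found during setup.
 */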
static int
adapter_intr_queue_add_del(void)
{
	int err;
	struct rte_event ev;
	uint32_t cap;
	uint16_t eth_port;
	struct rte_event_eth_rx_adapter_queue_conf queue_config;

	if (!default_params.rx_intr_port_inited)
		return 0;

	eth_port = default_params.rx_intr_port;
	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.priority = 0;

	queue_config.rx_queue_flags = 0;
	queue_config.ev = ev;

	/* weight = 0 => interrupt mode */
	queue_config.servicing_weight = 0;

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
		/* add queue 0 */
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
				TEST_ETHDEV_ID, 0,
				&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	/* add all queues */
	queue_config.servicing_weight = 0;
	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
			TEST_ETHDEV_ID,
			-1,
			&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
		/* del queue 0 */
		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
				TEST_ETHDEV_ID,
				0);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	/* del remaining queues */
	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
			TEST_ETHDEV_ID,
			-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* add all queues */
	queue_config.servicing_weight = 0;
	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
			TEST_ETHDEV_ID,
			-1,
			&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* intr -> poll mode queue */
	queue_config.servicing_weight = 1;

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
				TEST_ETHDEV_ID,
				0,
				&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
			TEST_ETHDEV_ID,
			-1,
			&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* del queues */
	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
			TEST_ETHDEV_ID,
			-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	return TEST_SUCCESS;
}

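/*
 * Start and stop the adapter both with and without configured queues, and
 * verify that an invalid adapter id is rejected.
 */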
static int
adapter_start_stop(void)
{
	int err;
	struct rte_event ev;

	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.priority = 0;

	struct rte_event_eth_rx_adapter_queue_conf queue_config;

	queue_config.rx_queue_flags = 0;
	if (default_params.caps &
			RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
		ev.flow_id = 1;
		queue_config.rx_queue_flags =
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
	}

	queue_config.ev = ev;
	queue_config.servicing_weight = 1;

	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
			-1, &queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
			-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_start(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_rx_adapter_stop(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}

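/*
 * Statistics retrieval: a NULL stats pointer and an invalid adapter id
 * must return -EINVAL, a valid query must succeed.
 */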
static int
adapter_stats(void)
{
	int err;
	struct rte_event_eth_rx_adapter_stats stats;

	err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_stats_get(1, &stats);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}

static struct unit_test_suite event_eth_rx_tests = {
	.suite_name = "rx event eth adapter test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL, adapter_create_free),
		TEST_CASE_ST(adapter_create, adapter_free,
				adapter_queue_add_del),
		TEST_CASE_ST(adapter_create, adapter_free,
				adapter_multi_eth_add_del),
		TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
		TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static struct unit_test_suite event_eth_rx_intr_tests = {
	.suite_name = "rx intr event eth adapter test suite",
	.setup = testsuite_setup_rx_intr,
	.teardown = testsuite_teardown_rx_intr,
	.unit_test_cases = {
		TEST_CASE_ST(adapter_create, adapter_free,
				adapter_intr_queue_add_del),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_event_eth_rx_adapter_common(void)
{
	return unit_test_suite_runner(&event_eth_rx_tests);
}

static int
test_event_eth_rx_intr_adapter_common(void)
{
	return unit_test_suite_runner(&event_eth_rx_intr_tests);
}

REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
		test_event_eth_rx_adapter_common);
REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
		test_event_eth_rx_intr_adapter_common);
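
/*
 * Note: these autotests are normally run from the dpdk-test application,
 * e.g. by entering "event_eth_rx_adapter_autotest" or
 * "event_eth_rx_intr_adapter_autotest" at the RTE>> prompt; the exact
 * invocation depends on the DPDK build and test setup.
 */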