/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#include <rte_bus_vdev.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ring.h>
#include <rte_eventdev.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_service.h>

#include "test.h"

#ifdef RTE_EXEC_ENV_WINDOWS
static int
test_event_eth_tx_adapter_common(void)
{
	printf("event_eth_tx_adapter not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}

#else

#define MAX_NUM_QUEUE RTE_PMD_RING_MAX_RX_RINGS
#define TEST_INST_ID 0
#define TEST_DEV_ID 0
#define SOCKET0 0
#define RING_SIZE 256
#define ETH_NAME_LEN 32
#define NUM_ETH_PAIR 1
#define NUM_ETH_DEV (2 * NUM_ETH_PAIR)
#define NB_MBUF 512
#define PAIR_PORT_INDEX(p) ((p) + NUM_ETH_PAIR)
#define PORT(p) default_params.port[(p)]
#define TEST_ETHDEV_ID PORT(0)
#define TEST_ETHDEV_PAIR_ID PORT(PAIR_PORT_INDEX(0))

#define EDEV_RETRY 0xffff

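/*
 * Shared test state: the mbuf pool, the rings backing the ring PMD ports
 * and the ethdev port ids created for the test.
 */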
struct event_eth_tx_adapter_test_params {
	struct rte_mempool *mp;
	uint16_t rx_rings, tx_rings;
	struct rte_ring *r[NUM_ETH_DEV][MAX_NUM_QUEUE];
	int port[NUM_ETH_DEV];
};

static int event_dev_delete;
static struct event_eth_tx_adapter_test_params default_params;
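
/*
 * Service ids used by the data path test: eid holds the event device's
 * scheduling service id and is left at ~0ULL when the device does not
 * need a scheduling service; tid holds the TX adapter's service id.
 */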
static uint64_t eid = ~0ULL;
static uint32_t tid;

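/*
 * Configure an ethdev with MAX_NUM_QUEUE RX/TX queues, start it, print
 * its MAC address and enable promiscuous mode.
 */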
static inline int
port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
		struct rte_mempool *mp)
{
	const uint16_t rx_ring_size = RING_SIZE, tx_ring_size = RING_SIZE;
	int retval;
	uint16_t q;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	default_params.rx_rings = MAX_NUM_QUEUE;
	default_params.tx_rings = MAX_NUM_QUEUE;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, default_params.rx_rings,
			default_params.tx_rings, port_conf);
	if (retval != 0)
		return retval;

	for (q = 0; q < default_params.rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mp);
		if (retval < 0)
			return retval;
	}

	for (q = 0; q < default_params.tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), NULL);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct rte_ether_addr addr;
	retval = rte_eth_macaddr_get(port, &addr);
	if (retval < 0)
		return retval;
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port, RTE_ETHER_ADDR_BYTES(&addr));

	/* Enable RX in promiscuous mode for the Ethernet device. */
	retval = rte_eth_promiscuous_enable(port);
	if (retval != 0)
		return retval;

	return 0;
}

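/* Initialize a port with the default (zeroed) device configuration. */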
static inline int
port_init(uint16_t port, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = { 0 };
	return port_init_common(port, &conf, mp);
}

#define RING_NAME_LEN 20
#define DEV_NAME_LEN 20

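/*
 * Create the mbuf pool, the backing rings and the ring PMD port pair
 * used to loop transmitted packets back to the test.
 */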
static int
init_ports(void)
{
	char ring_name[ETH_NAME_LEN];
	unsigned int i, j;
	struct rte_ring * const *c1;
	struct rte_ring * const *c2;
	int err;

	if (!default_params.mp)
		default_params.mp = rte_pktmbuf_pool_create("mbuf_pool",
				NB_MBUF, 32,
				0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

	if (!default_params.mp)
		return -ENOMEM;

	for (i = 0; i < NUM_ETH_DEV; i++) {
		for (j = 0; j < MAX_NUM_QUEUE; j++) {
			snprintf(ring_name, sizeof(ring_name), "R%u%u", i, j);
			default_params.r[i][j] = rte_ring_create(ring_name,
					RING_SIZE,
					SOCKET0,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
			TEST_ASSERT((default_params.r[i][j] != NULL),
					"Failed to allocate ring");
		}
	}

	/*
	 * Create pairs of pseudo-Ethernet ports whose traffic is switched
	 * between them: packets sent on one port of a pair are read back
	 * from the other port, and vice versa.
	 */
	for (i = 0; i < NUM_ETH_PAIR; i++) {
		char dev_name[DEV_NAME_LEN];
		int p;

		c1 = default_params.r[i];
		c2 = default_params.r[PAIR_PORT_INDEX(i)];

		snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i, i + NUM_ETH_PAIR);
		p = rte_eth_from_rings(dev_name, c1, MAX_NUM_QUEUE,
				c2, MAX_NUM_QUEUE, SOCKET0);
		TEST_ASSERT(p >= 0, "Port creation failed %s", dev_name);
		err = port_init(p, default_params.mp);
		TEST_ASSERT(err == 0, "Port init failed %s", dev_name);
		default_params.port[i] = p;

		snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i + NUM_ETH_PAIR, i);
		p = rte_eth_from_rings(dev_name, c2, MAX_NUM_QUEUE,
				c1, MAX_NUM_QUEUE, SOCKET0);
		TEST_ASSERT(p >= 0, "Port creation failed %s", dev_name);
		err = port_init(p, default_params.mp);
		TEST_ASSERT(err == 0, "Port init failed %s", dev_name);
		default_params.port[PAIR_PORT_INDEX(i)] = p;
	}

	return 0;
}

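/* Stop and remove the ring PMD ports and free their backing rings. */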
static void
deinit_ports(void)
{
	uint16_t i, j;
	char name[ETH_NAME_LEN];

	for (i = 0; i < RTE_DIM(default_params.port); i++) {
		rte_eth_dev_stop(default_params.port[i]);
		rte_eth_dev_get_name_by_port(default_params.port[i], name);
		rte_vdev_uninit(name);
		for (j = 0; j < RTE_DIM(default_params.r[i]); j++)
			rte_ring_free(default_params.r[i][j]);
	}
}

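/*
 * Suite setup: create the test ports and, if no event device is present,
 * instantiate the event_sw0 software event device.
 */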
static int
testsuite_setup(void)
{
	const char *vdev_name = "event_sw0";

	int err = init_ports();
	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);

	if (rte_event_dev_count() == 0) {
		printf("No valid event device found,"
			" testing with the event_sw0 device\n");
		err = rte_vdev_init(vdev_name, NULL);
		TEST_ASSERT(err == 0, "vdev %s creation failed %d\n",
				vdev_name, err);
		event_dev_delete = 1;
	}
	return err;
}

#define DEVICE_ID_SIZE 64

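/* Suite teardown: release ports, the mempool and any event device we created. */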
static void
testsuite_teardown(void)
{
	deinit_ports();
	rte_mempool_free(default_params.mp);
	default_params.mp = NULL;
	if (event_dev_delete)
		rte_vdev_uninit("event_sw0");
}

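/*
 * Configure the event device with a single queue and port, link them, and
 * create TX adapter instance TEST_INST_ID on top of it.
 */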
static int
tx_adapter_create(void)
{
	int err;
	struct rte_event_dev_info dev_info;
	struct rte_event_port_conf tx_p_conf;
	uint8_t priority;
	uint8_t queue_id;

	struct rte_event_dev_config config = {
		.nb_event_queues = 1,
		.nb_event_ports = 1,
	};

	struct rte_event_queue_conf wkr_q_conf = {
		.schedule_type = RTE_SCHED_TYPE_ORDERED,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
	};

	memset(&tx_p_conf, 0, sizeof(tx_p_conf));
	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT(err == 0, "Failed to get event device info %d\n", err);
	config.nb_event_queue_flows = dev_info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth;
	config.nb_events_limit =
			dev_info.max_num_events;

	err = rte_event_dev_configure(TEST_DEV_ID, &config);
	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
			err);

	queue_id = 0;
	err = rte_event_queue_setup(TEST_DEV_ID, 0, &wkr_q_conf);
	TEST_ASSERT(err == 0, "Event queue setup failed %d\n", err);

	err = rte_event_port_setup(TEST_DEV_ID, 0, NULL);
	TEST_ASSERT(err == 0, "Event port setup failed %d\n", err);

	priority = RTE_EVENT_DEV_PRIORITY_LOWEST;
	err = rte_event_port_link(TEST_DEV_ID, 0, &queue_id, &priority, 1);
	TEST_ASSERT(err == 1, "Error linking port %s\n",
			rte_strerror(rte_errno));

	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	tx_p_conf.new_event_threshold = dev_info.max_num_events;
	tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
	tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
	err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
					&tx_p_conf);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	return err;
}

static void
tx_adapter_free(void)
{
	rte_event_eth_tx_adapter_free(TEST_INST_ID);
}

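/* Exercise adapter create/free, including the invalid-argument paths. */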
static int
tx_adapter_create_free(void)
{
	int err;
	struct rte_event_dev_info dev_info;
	struct rte_event_port_conf tx_p_conf;

	memset(&tx_p_conf, 0, sizeof(tx_p_conf));
	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	tx_p_conf.new_event_threshold = dev_info.max_num_events;
	tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
	tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;

	err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
					NULL);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
					&tx_p_conf);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_create(TEST_INST_ID,
					TEST_DEV_ID, &tx_p_conf);
	TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);

	err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);

	err = rte_event_eth_tx_adapter_free(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);

	return TEST_SUCCESS;
}

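/*
 * Exercise queue add/del for a single queue (queue 0), for all queues
 * (queue id -1) and for invalid device/adapter ids.
 */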
static int
tx_adapter_queue_add_del(void)
{
	int err;
	uint32_t cap;

	err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
						&cap);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
					rte_eth_dev_count_total(),
					-1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
					TEST_ETHDEV_ID,
					0);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
					TEST_ETHDEV_ID,
					-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
					TEST_ETHDEV_ID,
					0);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
					TEST_ETHDEV_ID,
					-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
					TEST_ETHDEV_ID,
					-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_add(1, TEST_ETHDEV_ID, -1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}

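/* Exercise adapter start/stop with and without queues added. */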
static int
tx_adapter_start_stop(void)
{
	int err;

	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_stop(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_stop(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_start(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_stop(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}

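/*
 * Enqueue one mbuf to the event device destined for (port, tx_queue_id),
 * run the scheduler and adapter services, and verify that the same mbuf
 * is read back from the paired ring port.
 */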
static int
tx_adapter_single(uint16_t port, uint16_t tx_queue_id,
		struct rte_mbuf *m, uint8_t qid,
		uint8_t sched_type)
{
	struct rte_event event;
	struct rte_mbuf *r;
	int ret;
	unsigned int l;

	event.queue_id = qid;
	event.op = RTE_EVENT_OP_NEW;
	event.event_type = RTE_EVENT_TYPE_CPU;
	event.sched_type = sched_type;
	event.mbuf = m;

	m->port = port;
	rte_event_eth_tx_adapter_txq_set(m, tx_queue_id);

	l = 0;
	while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &event, 1) != 1) {
		l++;
		if (l > EDEV_RETRY)
			break;
	}

	TEST_ASSERT(l < EDEV_RETRY, "Unable to enqueue to eventdev");

	l = 0;
	while (l++ < EDEV_RETRY) {
		if (eid != ~0ULL) {
			ret = rte_service_run_iter_on_app_lcore(eid, 0);
			TEST_ASSERT(ret == 0, "failed to run service %d", ret);
		}

		ret = rte_service_run_iter_on_app_lcore(tid, 0);
		TEST_ASSERT(ret == 0, "failed to run service %d", ret);

		if (rte_eth_rx_burst(TEST_ETHDEV_PAIR_ID, tx_queue_id,
				&r, 1)) {
			TEST_ASSERT_EQUAL(r, m, "mbuf comparison failed"
					" expected %p received %p", m, r);
			return 0;
		}
	}

	TEST_ASSERT(0, "Failed to receive packet");
	return -1;
}

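/*
 * Data path test for the service-based adapter: reconfigure the event
 * device with an extra single-link queue for the adapter port, start the
 * required services, transmit mbufs on every queue and verify the counts
 * reported by the adapter statistics.
 */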
static int
tx_adapter_service(void)
{
	struct rte_event_eth_tx_adapter_stats stats;
	uint32_t i;
	int err;
	uint8_t ev_port, ev_qid;
	struct rte_mbuf bufs[RING_SIZE];
	struct rte_mbuf *pbufs[RING_SIZE];
	struct rte_event_dev_info dev_info;
	struct rte_event_dev_config dev_conf;
	struct rte_event_queue_conf qconf;
	uint32_t qcnt, pcnt;
	uint16_t q;
	int internal_port;
	uint32_t cap;

	memset(&dev_conf, 0, sizeof(dev_conf));
	err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
						&cap);
	TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);

	internal_port = !!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
	if (internal_port)
		return TEST_SUCCESS;

	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_event_port_get(TEST_INST_ID,
						&ev_port);
	TEST_ASSERT_SUCCESS(err, "Failed to get event port %d", err);

	err = rte_event_dev_attr_get(TEST_DEV_ID, RTE_EVENT_DEV_ATTR_PORT_COUNT,
					&pcnt);
	TEST_ASSERT_SUCCESS(err, "Port count get failed");

	err = rte_event_dev_attr_get(TEST_DEV_ID,
					RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &qcnt);
	TEST_ASSERT_SUCCESS(err, "Queue count get failed");

	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT_SUCCESS(err, "Dev info failed");

	dev_conf.nb_event_queue_flows = dev_info.max_event_queue_flows;
	dev_conf.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	dev_conf.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth;
	dev_conf.nb_events_limit =
			dev_info.max_num_events;
	dev_conf.nb_event_queues = qcnt + 1;
	dev_conf.nb_event_ports = pcnt;
	err = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
			err);

	ev_qid = qcnt;
	qconf.nb_atomic_flows = dev_info.max_event_queue_flows;
	qconf.nb_atomic_order_sequences = 32;
	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	err = rte_event_queue_setup(TEST_DEV_ID, ev_qid, &qconf);
	TEST_ASSERT_SUCCESS(err, "Failed to setup queue %u", ev_qid);

	/*
	 * Set up the ports again so that the newly added queue is visible
	 * to them.
	 */
	for (i = 0; i < pcnt; i++) {
		int n_links;
		uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
		uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];

		if (i == ev_port)
			continue;

		n_links = rte_event_port_links_get(TEST_DEV_ID, i, queues,
						priorities);
		TEST_ASSERT(n_links > 0, "Failed to get port links %d\n",
				n_links);
		err = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT(err == 0, "Failed to setup port err %d\n", err);
		err = rte_event_port_link(TEST_DEV_ID, i, queues, priorities,
					n_links);
		TEST_ASSERT(n_links == err, "Failed to link all queues"
				" err %s\n", rte_strerror(rte_errno));
	}

	err = rte_event_port_link(TEST_DEV_ID, ev_port, &ev_qid, NULL, 1);
	TEST_ASSERT(err == 1, "Failed to link queue port %u",
			ev_port);

	err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	if (!(dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
		err = rte_event_dev_service_id_get(TEST_DEV_ID,
						(uint32_t *)&eid);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_service_runstate_set(eid, 1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_service_set_runstate_mapped_check(eid, 0);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	err = rte_event_eth_tx_adapter_service_id_get(TEST_INST_ID, &tid);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_service_runstate_set(tid, 1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_service_set_runstate_mapped_check(tid, 0);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_dev_start(TEST_DEV_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	for (q = 0; q < MAX_NUM_QUEUE; q++) {
		for (i = 0; i < RING_SIZE; i++) {
			pbufs[i] = &bufs[i];
			err = tx_adapter_single(TEST_ETHDEV_ID, q, pbufs[i],
						ev_qid,
						RTE_SCHED_TYPE_ORDERED);
			TEST_ASSERT(err == 0, "Expected 0 got %d", err);
		}
		for (i = 0; i < RING_SIZE; i++) {
			TEST_ASSERT_EQUAL(pbufs[i], &bufs[i],
					"Error: received data does not match"
					" that transmitted");
		}
	}

	err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, NULL);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	TEST_ASSERT_EQUAL(stats.tx_packets, MAX_NUM_QUEUE * RING_SIZE,
			"stats.tx_packets expected %u got %"PRIu64,
			MAX_NUM_QUEUE * RING_SIZE,
			stats.tx_packets);

	err = rte_event_eth_tx_adapter_stats_reset(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	TEST_ASSERT_EQUAL(stats.tx_packets, 0,
			"stats.tx_packets expected %u got %"PRIu64,
			0,
			stats.tx_packets);

	err = rte_event_eth_tx_adapter_stats_get(1, &stats);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	rte_event_dev_stop(TEST_DEV_ID);

	return TEST_SUCCESS;
}

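/*
 * Verify that queues of Ethernet devices created after the adapter
 * (eth_null vdevs here) can be added to and deleted from the adapter.
 */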
static int
tx_adapter_dynamic_device(void)
{
	uint16_t port_id = rte_eth_dev_count_avail();
	const char *null_dev[2] = { "eth_null0", "eth_null1" };
	struct rte_eth_conf dev_conf;
	int ret;
	size_t i;

	memset(&dev_conf, 0, sizeof(dev_conf));
	for (i = 0; i < RTE_DIM(null_dev); i++) {
		ret = rte_vdev_init(null_dev[i], NULL);
		TEST_ASSERT_SUCCESS(ret, "%s Port creation failed %d",
				null_dev[i], ret);

		if (i == 0) {
			ret = tx_adapter_create();
			TEST_ASSERT_SUCCESS(ret, "Adapter create failed %d",
					ret);
		}

		ret = rte_eth_dev_configure(port_id + i, MAX_NUM_QUEUE,
					MAX_NUM_QUEUE, &dev_conf);
		TEST_ASSERT_SUCCESS(ret, "Failed to configure device %d", ret);

		ret = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
					port_id + i, 0);
		TEST_ASSERT_SUCCESS(ret, "Failed to add queues %d", ret);
	}

	for (i = 0; i < RTE_DIM(null_dev); i++) {
		ret = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
					port_id + i, -1);
		TEST_ASSERT_SUCCESS(ret, "Failed to delete queues %d", ret);
	}

	tx_adapter_free();

	for (i = 0; i < RTE_DIM(null_dev); i++)
		rte_vdev_uninit(null_dev[i]);

	return TEST_SUCCESS;
}

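/*
 * Test cases: TEST_CASE_ST runs the given setup/teardown (adapter
 * create/free) around each case that needs an adapter instance.
 */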
static struct unit_test_suite event_eth_tx_tests = {
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.suite_name = "tx event eth adapter test suite",
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL, tx_adapter_create_free),
		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
				tx_adapter_queue_add_del),
		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
				tx_adapter_start_stop),
		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
				tx_adapter_service),
		TEST_CASE_ST(NULL, NULL, tx_adapter_dynamic_device),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_event_eth_tx_adapter_common(void)
{
	return unit_test_suite_runner(&event_eth_tx_tests);
}

#endif /* !RTE_EXEC_ENV_WINDOWS */

REGISTER_TEST_COMMAND(event_eth_tx_adapter_autotest,
		test_event_eth_tx_adapter_common);