/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 */
#include <rte_spinlock.h>
#include <rte_service_component.h>
#include <rte_ethdev.h>

#include "rte_eventdev_pmd.h"
#include "rte_eventdev_trace.h"
#include "rte_event_eth_tx_adapter.h"

#define TXA_BATCH_SIZE 32
#define TXA_SERVICE_NAME_LEN 32
#define TXA_MEM_NAME_LEN 32
#define TXA_FLUSH_THRESHOLD 1024
#define TXA_RETRY_CNT 100
#define TXA_MAX_NB_TX 128
#define TXA_INVALID_DEV_ID INT32_C(-1)
#define TXA_INVALID_SERVICE_ID INT64_C(-1)

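/*
 * txa_evdev() maps an adapter ID to its underlying event device via
 * txa_dev_id_array; the txa_dev_*() macros below resolve the eventdev
 * PMD's eth_tx_adapter ops. A PMD may leave any of these ops NULL, in
 * which case the callers fall back to the service-based implementation.
 */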
#define txa_evdev(id) (&rte_eventdevs[txa_dev_id_array[(id)]])

#define txa_dev_caps_get(id) txa_evdev((id))->dev_ops->eth_tx_adapter_caps_get

#define txa_dev_adapter_create(t) txa_evdev(t)->dev_ops->eth_tx_adapter_create

#define txa_dev_adapter_create_ext(t) \
			txa_evdev(t)->dev_ops->eth_tx_adapter_create

#define txa_dev_adapter_free(t) txa_evdev(t)->dev_ops->eth_tx_adapter_free

#define txa_dev_queue_add(id) txa_evdev(id)->dev_ops->eth_tx_adapter_queue_add

#define txa_dev_queue_del(t) txa_evdev(t)->dev_ops->eth_tx_adapter_queue_del

#define txa_dev_start(t) txa_evdev(t)->dev_ops->eth_tx_adapter_start

#define txa_dev_stop(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stop

#define txa_dev_stats_reset(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stats_reset

#define txa_dev_stats_get(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stats_get

#define RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \
do { \
	if (!txa_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Tx adapter id = %d", id); \
		return retval; \
	} \
} while (0)

#define TXA_CHECK_OR_ERR_RET(id) \
do { \
	int ret; \
	RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET((id), -EINVAL); \
	ret = txa_init(); \
	if (ret != 0) \
		return ret; \
	if (!txa_adapter_exist((id))) \
		return -EINVAL; \
} while (0)

#define TXA_CHECK_TXQ(dev, queue) \
do { \
	if ((dev)->data->nb_tx_queues == 0) { \
		RTE_EDEV_LOG_ERR("No tx queues configured"); \
		return -EINVAL; \
	} \
	if ((queue) != -1 && \
		(uint16_t)(queue) >= (dev)->data->nb_tx_queues) { \
		RTE_EDEV_LOG_ERR("Invalid tx queue_id %" PRIu16, \
				(uint16_t)(queue)); \
		return -EINVAL; \
	} \
} while (0)

/* Tx retry callback structure */
struct txa_retry {
	/* Ethernet port id */
	uint16_t port_id;
	/* Tx queue */
	uint16_t tx_queue;
	/* Adapter ID */
	uint8_t id;
};

/* Per queue structure */
struct txa_service_queue_info {
	/* Queue has been added */
	uint8_t added;
	/* Retry callback argument */
	struct txa_retry txa_retry;
	/* Tx buffer */
	struct rte_eth_dev_tx_buffer *tx_buf;
};

/* PMD private structure */
struct txa_service_data {
	/* Max mbufs processed in any service function invocation */
	uint32_t max_nb_tx;
	/* Number of Tx queues in adapter */
	uint32_t nb_queues;
	/* Synchronization with data path */
	rte_spinlock_t tx_lock;
	/* Event port ID */
	uint8_t port_id;
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Highest port id supported + 1 */
	uint16_t dev_count;
	/* Loop count to flush Tx buffers */
	int loop_cnt;
	/* Per ethernet device structure */
	struct txa_service_ethdev *txa_ethdev;
	/* Statistics */
	struct rte_event_eth_tx_adapter_stats stats;
	/* Adapter Identifier */
	uint8_t id;
	/* Conf arg must be freed */
	uint8_t conf_free;
	/* Configuration callback */
	rte_event_eth_tx_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* socket id */
	int socket_id;
	/* Per adapter EAL service */
	int64_t service_id;
	/* Memory allocation name */
	char mem_name[TXA_MEM_NAME_LEN];
} __rte_cache_aligned;

/* Per eth device structure */
struct txa_service_ethdev {
	/* Pointer to ethernet device */
	struct rte_eth_dev *dev;
	/* Number of queues added */
	uint16_t nb_queues;
	/* PMD specific queue data */
	void *queues;
};

/* Array of adapter instances, initialized with event device id
 * when adapter is created
 */
static int *txa_dev_id_array;

/* Array of pointers to service implementation data */
static struct txa_service_data **txa_service_data_array;

static int32_t txa_service_func(void *args);
static int txa_service_adapter_create_ext(uint8_t id,
			struct rte_eventdev *dev,
			rte_event_eth_tx_adapter_conf_cb conf_cb,
			void *conf_arg);
static int txa_service_queue_del(uint8_t id,
				const struct rte_eth_dev *dev,
				int32_t tx_queue_id);

static int
txa_adapter_exist(uint8_t id)
{
	return txa_dev_id_array[id] != TXA_INVALID_DEV_ID;
}

static inline int
txa_valid_id(uint8_t id)
{
	return id < RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE;
}

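/* Look up the named memzone, reserving it on first use, so that all
 * processes resolve the name to the same shared array.
 */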
static void *
txa_memzone_array_get(const char *name, unsigned int elt_size, int nb_elems)
{
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = elt_size * nb_elems;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone"
					" name = %s err = %"
					PRId32, name, rte_errno);
			return NULL;
		}
	}

	return mz->addr;
}

static int
txa_dev_id_array_init(void)
{
	if (txa_dev_id_array == NULL) {
		int i;

		txa_dev_id_array = txa_memzone_array_get("txa_adapter_array",
					sizeof(int),
					RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE);
		if (txa_dev_id_array == NULL)
			return -ENOMEM;

		for (i = 0; i < RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE; i++)
			txa_dev_id_array[i] = TXA_INVALID_DEV_ID;
	}

	return 0;
}

static int
txa_init(void)
{
	return txa_dev_id_array_init();
}

static int
txa_service_data_init(void)
{
	if (txa_service_data_array == NULL) {
		txa_service_data_array =
			txa_memzone_array_get("txa_service_data_array",
					sizeof(*txa_service_data_array),
					RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE);
		if (txa_service_data_array == NULL)
			return -ENOMEM;
	}

	return 0;
}

static inline struct txa_service_data *
txa_service_id_to_data(uint8_t id)
{
	return txa_service_data_array[id];
}

static inline struct txa_service_queue_info *
txa_service_queue(struct txa_service_data *txa, uint16_t port_id,
		uint16_t tx_queue_id)
{
	struct txa_service_queue_info *tqi;

	if (unlikely(txa->txa_ethdev == NULL || txa->dev_count < port_id + 1))
		return NULL;

	tqi = txa->txa_ethdev[port_id].queues;

	return likely(tqi != NULL) ? tqi + tx_queue_id : NULL;
}

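/* Default configuration callback: grow the event device's port count by
 * one, set up the new port with the rte_event_port_conf supplied to
 * rte_event_eth_tx_adapter_create() and hand it back as the adapter's
 * event port. The device is stopped and restarted around the
 * reconfiguration if it was running.
 */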
static int
txa_service_conf_cb(uint8_t __rte_unused id, uint8_t dev_id,
		struct rte_event_eth_tx_adapter_conf *conf, void *arg)
{
	int ret;
	struct rte_eventdev *dev;
	struct rte_event_port_conf *pc;
	struct rte_event_dev_config dev_conf;
	int started;
	uint8_t port_id;

	pc = arg;
	dev = &rte_eventdevs[dev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;

	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u",
						dev_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	pc->event_port_cfg = 0;
	ret = rte_event_port_setup(dev_id, port_id, pc);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u",
					port_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb_tx = TXA_MAX_NB_TX;
	if (started)
		ret = rte_event_dev_start(dev_id);
	return ret;
}

static int
txa_service_ethdev_alloc(struct txa_service_data *txa)
{
	struct txa_service_ethdev *txa_ethdev;
	uint16_t i, dev_count;

	dev_count = rte_eth_dev_count_avail();
	if (txa->txa_ethdev && dev_count == txa->dev_count)
		return 0;

	txa_ethdev = rte_zmalloc_socket(txa->mem_name,
					dev_count * sizeof(*txa_ethdev),
					0,
					txa->socket_id);
	if (txa_ethdev == NULL) {
		RTE_EDEV_LOG_ERR("Failed to alloc txa::txa_ethdev");
		return -ENOMEM;
	}

	if (txa->dev_count)
		memcpy(txa_ethdev, txa->txa_ethdev,
			txa->dev_count * sizeof(*txa_ethdev));

	RTE_ETH_FOREACH_DEV(i) {
		if (i == dev_count)
			break;
		txa_ethdev[i].dev = &rte_eth_devices[i];
	}

	txa->txa_ethdev = txa_ethdev;
	txa->dev_count = dev_count;
	return 0;
}

static int
txa_service_queue_array_alloc(struct txa_service_data *txa,
			uint16_t port_id)
{
	struct txa_service_queue_info *tqi;
	uint16_t nb_queue;
	int ret;

	ret = txa_service_ethdev_alloc(txa);
	if (ret != 0)
		return ret;

	if (txa->txa_ethdev[port_id].queues)
		return 0;

	nb_queue = txa->txa_ethdev[port_id].dev->data->nb_tx_queues;
	tqi = rte_zmalloc_socket(txa->mem_name,
				nb_queue *
				sizeof(struct txa_service_queue_info), 0,
				txa->socket_id);
	if (tqi == NULL)
		return -ENOMEM;
	txa->txa_ethdev[port_id].queues = tqi;
	return 0;
}

static void
txa_service_queue_array_free(struct txa_service_data *txa,
			uint16_t port_id)
{
	struct txa_service_ethdev *txa_ethdev;
	struct txa_service_queue_info *tqi;

	if (txa->txa_ethdev == NULL)
		return;

	txa_ethdev = &txa->txa_ethdev[port_id];
	if (txa_ethdev->nb_queues != 0)
		return;

	tqi = txa_ethdev->queues;
	txa_ethdev->queues = NULL;
	rte_free(tqi);

	if (txa->nb_queues == 0) {
		rte_free(txa->txa_ethdev);
		txa->txa_ethdev = NULL;
	}
}

static void
txa_service_unregister(struct txa_service_data *txa)
{
	if (txa->service_id != TXA_INVALID_SERVICE_ID) {
		rte_service_component_runstate_set(txa->service_id, 0);
		while (rte_service_may_be_active(txa->service_id))
			rte_pause();
		rte_service_component_unregister(txa->service_id);
	}
	txa->service_id = TXA_INVALID_SERVICE_ID;
}

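/* Register the adapter's EAL service (if not already registered) and run
 * the configuration callback to obtain the event port and max_nb_tx.
 */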
static int
txa_service_register(struct txa_service_data *txa)
{
	int ret;
	struct rte_service_spec service;
	struct rte_event_eth_tx_adapter_conf conf;

	if (txa->service_id != TXA_INVALID_SERVICE_ID)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, TXA_SERVICE_NAME_LEN, "txa_%d", txa->id);
	service.socket_id = txa->socket_id;
	service.callback = txa_service_func;
	service.callback_userdata = txa;
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service,
					(uint32_t *)&txa->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %"
				PRId32, service.name, ret);
		return ret;
	}

	ret = txa->conf_cb(txa->id, txa->eventdev_id, &conf, txa->conf_arg);
	if (ret) {
		txa_service_unregister(txa);
		return ret;
	}

	rte_service_component_runstate_set(txa->service_id, 1);
	txa->port_id = conf.event_port_id;
	txa->max_nb_tx = conf.max_nb_tx;
	return 0;
}

static struct rte_eth_dev_tx_buffer *
txa_service_tx_buf_alloc(struct txa_service_data *txa,
			const struct rte_eth_dev *dev)
{
	struct rte_eth_dev_tx_buffer *tb;
	uint16_t port_id;

	port_id = dev->data->port_id;
	tb = rte_zmalloc_socket(txa->mem_name,
				RTE_ETH_TX_BUFFER_SIZE(TXA_BATCH_SIZE),
				0,
				rte_eth_dev_socket_id(port_id));
	if (tb == NULL)
		RTE_EDEV_LOG_ERR("Failed to allocate memory for tx buffer");
	return tb;
}

static int
txa_service_is_queue_added(struct txa_service_data *txa,
			const struct rte_eth_dev *dev,
			uint16_t tx_queue_id)
{
	struct txa_service_queue_info *tqi;

	tqi = txa_service_queue(txa, dev->data->port_id, tx_queue_id);
	return tqi && tqi->added;
}

static int
txa_service_ctrl(uint8_t id, int start)
{
	int ret;
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	if (txa->service_id == TXA_INVALID_SERVICE_ID)
		return 0;

	ret = rte_service_runstate_set(txa->service_id, start);
	if (ret == 0 && !start) {
		while (rte_service_may_be_active(txa->service_id))
			rte_pause();
	}
	return ret;
}

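/* Tx buffer error callback: retry transmission of the unsent mbufs up to
 * TXA_RETRY_CNT times, then drop whatever is still pending and account
 * for it in the adapter statistics.
 */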
static void
txa_service_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
			void *userdata)
{
	struct txa_retry *tr;
	struct txa_service_data *data;
	struct rte_event_eth_tx_adapter_stats *stats;
	uint16_t sent = 0;
	unsigned int retry = 0;
	uint16_t i, n;

	tr = (struct txa_retry *)(uintptr_t)userdata;
	data = txa_service_id_to_data(tr->id);
	stats = &data->stats;

	do {
		n = rte_eth_tx_burst(tr->port_id, tr->tx_queue,
				&pkts[sent], unsent - sent);

		sent += n;
	} while (sent != unsent && retry++ < TXA_RETRY_CNT);

	for (i = sent; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	stats->tx_retry += retry;
	stats->tx_packets += sent;
	stats->tx_dropped += unsent - sent;
}

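/* Buffer the mbufs carried by n events to their destination Tx queues.
 * The destination queue is read from the mbuf with
 * rte_event_eth_tx_adapter_txq_get(); mbufs destined for queues that were
 * never added to the adapter are dropped.
 */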
static void
txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
	uint32_t n)
{
	uint32_t i;
	uint16_t nb_tx;
	struct rte_event_eth_tx_adapter_stats *stats;

	stats = &txa->stats;

	nb_tx = 0;
	for (i = 0; i < n; i++) {
		struct rte_mbuf *m;
		uint16_t port;
		uint16_t queue;
		struct txa_service_queue_info *tqi;

		m = ev[i].mbuf;
		port = m->port;
		queue = rte_event_eth_tx_adapter_txq_get(m);

		tqi = txa_service_queue(txa, port, queue);
		if (unlikely(tqi == NULL || !tqi->added)) {
			rte_pktmbuf_free(m);
			continue;
		}

		nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
	}

	stats->tx_packets += nb_tx;
}

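/* Service function: dequeue up to max_nb_tx events per invocation from the
 * adapter's event port and buffer them for transmission. Every
 * TXA_FLUSH_THRESHOLD invocations, flush the partially filled Tx buffers of
 * all added queues so that low-rate queues do not hold packets indefinitely.
 */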
static int32_t
txa_service_func(void *args)
{
	struct txa_service_data *txa = args;
	uint8_t dev_id;
	uint8_t port;
	uint16_t n;
	uint32_t nb_tx, max_nb_tx;
	struct rte_event ev[TXA_BATCH_SIZE];

	dev_id = txa->eventdev_id;
	max_nb_tx = txa->max_nb_tx;
	port = txa->port_id;

	if (txa->nb_queues == 0)
		return 0;

	if (!rte_spinlock_trylock(&txa->tx_lock))
		return 0;

	for (nb_tx = 0; nb_tx < max_nb_tx; nb_tx += n) {

		n = rte_event_dequeue_burst(dev_id, port, ev, RTE_DIM(ev), 0);
		if (!n)
			break;
		txa_service_tx(txa, ev, n);
	}

	if ((txa->loop_cnt++ & (TXA_FLUSH_THRESHOLD - 1)) == 0) {

		struct txa_service_ethdev *tdi;
		struct txa_service_queue_info *tqi;
		struct rte_eth_dev *dev;
		uint16_t i;

		tdi = txa->txa_ethdev;
		nb_tx = 0;

		RTE_ETH_FOREACH_DEV(i) {
			uint16_t q;

			if (i == txa->dev_count)
				break;

			dev = tdi[i].dev;
			if (tdi[i].nb_queues == 0)
				continue;
			for (q = 0; q < dev->data->nb_tx_queues; q++) {

				tqi = txa_service_queue(txa, i, q);
				if (unlikely(tqi == NULL || !tqi->added))
					continue;

				nb_tx += rte_eth_tx_buffer_flush(i, q,
							tqi->tx_buf);
			}
		}

		txa->stats.tx_packets += nb_tx;
	}
	rte_spinlock_unlock(&txa->tx_lock);
	return 0;
}

static int
txa_service_adapter_create(uint8_t id, struct rte_eventdev *dev,
			struct rte_event_port_conf *port_conf)
{
	struct txa_service_data *txa;
	struct rte_event_port_conf *cb_conf;
	int ret;

	cb_conf = rte_malloc(NULL, sizeof(*cb_conf), 0);
	if (cb_conf == NULL)
		return -ENOMEM;

	*cb_conf = *port_conf;
	ret = txa_service_adapter_create_ext(id, dev, txa_service_conf_cb,
					cb_conf);
	if (ret) {
		rte_free(cb_conf);
		return ret;
	}

	txa = txa_service_id_to_data(id);
	txa->conf_free = 1;
	return ret;
}

static int
txa_service_adapter_create_ext(uint8_t id, struct rte_eventdev *dev,
			rte_event_eth_tx_adapter_conf_cb conf_cb,
			void *conf_arg)
{
	struct txa_service_data *txa;
	int socket_id;
	char mem_name[TXA_MEM_NAME_LEN];
	int ret;

	if (conf_cb == NULL)
		return -EINVAL;

	socket_id = dev->data->socket_id;
	snprintf(mem_name, TXA_MEM_NAME_LEN,
		"rte_event_eth_txa_%d",
		id);

	ret = txa_service_data_init();
	if (ret != 0)
		return ret;

	txa = rte_zmalloc_socket(mem_name,
				sizeof(*txa),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (txa == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for tx adapter");
		return -ENOMEM;
	}

	txa->id = id;
	txa->eventdev_id = dev->data->dev_id;
	txa->socket_id = socket_id;
	strncpy(txa->mem_name, mem_name, TXA_MEM_NAME_LEN);
	txa->conf_cb = conf_cb;
	txa->conf_arg = conf_arg;
	txa->service_id = TXA_INVALID_SERVICE_ID;
	rte_spinlock_init(&txa->tx_lock);
	txa_service_data_array[id] = txa;

	return 0;
}

static int
txa_service_event_port_get(uint8_t id, uint8_t *port)
{
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	if (txa->service_id == TXA_INVALID_SERVICE_ID)
		return -ENODEV;

	*port = txa->port_id;
	return 0;
}

static int
txa_service_adapter_free(uint8_t id)
{
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	if (txa->nb_queues) {
		RTE_EDEV_LOG_ERR("%" PRIu32 " Tx queues not deleted",
				txa->nb_queues);
		return -EBUSY;
	}

	if (txa->conf_free)
		rte_free(txa->conf_arg);
	rte_free(txa);
	return 0;
}

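/* Add a Tx queue to the service-based adapter. A tx_queue_id of -1 adds
 * every queue of the device that has not been added yet; if one of those
 * additions fails, the queues added by that call are rolled back.
 */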
static int
txa_service_queue_add(uint8_t id,
		__rte_unused struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t tx_queue_id)
{
	struct txa_service_data *txa;
	struct txa_service_ethdev *tdi;
	struct txa_service_queue_info *tqi;
	struct rte_eth_dev_tx_buffer *tb;
	struct txa_retry *txa_retry;
	int ret = 0;

	txa = txa_service_id_to_data(id);

	if (tx_queue_id == -1) {
		int nb_queues;
		uint16_t i, j;
		uint16_t *qdone;

		nb_queues = eth_dev->data->nb_tx_queues;
		if (txa->dev_count > eth_dev->data->port_id) {
			tdi = &txa->txa_ethdev[eth_dev->data->port_id];
			nb_queues -= tdi->nb_queues;
		}

		qdone = rte_zmalloc(txa->mem_name,
				nb_queues * sizeof(*qdone), 0);
		if (qdone == NULL)
			return -ENOMEM;
		j = 0;
		for (i = 0; i < nb_queues; i++) {
			if (txa_service_is_queue_added(txa, eth_dev, i))
				continue;
			ret = txa_service_queue_add(id, dev, eth_dev, i);
			if (ret == 0)
				qdone[j++] = i;
			else
				break;
		}

		if (i != nb_queues) {
			for (i = 0; i < j; i++)
				txa_service_queue_del(id, eth_dev, qdone[i]);
		}
		rte_free(qdone);
		return ret;
	}

	ret = txa_service_register(txa);
	if (ret)
		return ret;

	rte_spinlock_lock(&txa->tx_lock);

	if (txa_service_is_queue_added(txa, eth_dev, tx_queue_id)) {
		rte_spinlock_unlock(&txa->tx_lock);
		return 0;
	}

	ret = txa_service_queue_array_alloc(txa, eth_dev->data->port_id);
	if (ret)
		goto err_unlock;

	tb = txa_service_tx_buf_alloc(txa, eth_dev);
	if (tb == NULL) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	tdi = &txa->txa_ethdev[eth_dev->data->port_id];
	tqi = txa_service_queue(txa, eth_dev->data->port_id, tx_queue_id);

	txa_retry = &tqi->txa_retry;
	txa_retry->id = txa->id;
	txa_retry->port_id = eth_dev->data->port_id;
	txa_retry->tx_queue = tx_queue_id;

	rte_eth_tx_buffer_init(tb, TXA_BATCH_SIZE);
	rte_eth_tx_buffer_set_err_callback(tb,
		txa_service_buffer_retry, txa_retry);

	tqi->tx_buf = tb;
	tqi->added = 1;
	tdi->nb_queues++;
	txa->nb_queues++;
	rte_spinlock_unlock(&txa->tx_lock);
	return 0;

err_unlock:
	if (txa->nb_queues == 0) {
		txa_service_queue_array_free(txa,
					eth_dev->data->port_id);
		txa_service_unregister(txa);
	}

	rte_spinlock_unlock(&txa->tx_lock);
	return ret;
}

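/* Delete a Tx queue from the service-based adapter. A tx_queue_id of -1
 * deletes all queues previously added for this device.
 */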
static int
txa_service_queue_del(uint8_t id,
		const struct rte_eth_dev *dev,
		int32_t tx_queue_id)
{
	struct txa_service_data *txa;
	struct txa_service_queue_info *tqi;
	struct rte_eth_dev_tx_buffer *tb;
	uint16_t port_id;

	txa = txa_service_id_to_data(id);
	port_id = dev->data->port_id;

	if (tx_queue_id == -1) {
		uint16_t i, q, nb_queues;
		int ret = 0;

		if (txa->txa_ethdev == NULL)
			return 0;
		nb_queues = txa->txa_ethdev[port_id].nb_queues;
		if (nb_queues == 0)
			return 0;

		i = 0;
		q = 0;
		tqi = txa->txa_ethdev[port_id].queues;

		while (i < nb_queues) {

			if (tqi[q].added) {
				ret = txa_service_queue_del(id, dev, q);
				if (ret != 0)
					break;
				i++;
			}
			q++;
		}
		return ret;
	}

	tqi = txa_service_queue(txa, port_id, tx_queue_id);
	if (tqi == NULL || !tqi->added)
		return 0;

	tb = tqi->tx_buf;
	tqi->added = 0;
	tqi->tx_buf = NULL;
	rte_free(tb);
	txa->nb_queues--;
	txa->txa_ethdev[port_id].nb_queues--;

	txa_service_queue_array_free(txa, port_id);
	return 0;
}

static int
txa_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	if (txa->service_id == TXA_INVALID_SERVICE_ID)
		return -ESRCH;

	if (service_id == NULL)
		return -EINVAL;

	*service_id = txa->service_id;
	return 0;
}

static int
txa_service_start(uint8_t id)
{
	return txa_service_ctrl(id, 1);
}

static int
txa_service_stats_get(uint8_t id,
		struct rte_event_eth_tx_adapter_stats *stats)
{
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	*stats = txa->stats;
	return 0;
}

static int
txa_service_stats_reset(uint8_t id)
{
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	memset(&txa->stats, 0, sizeof(txa->stats));
	return 0;
}

static int
txa_service_stop(uint8_t id)
{
	return txa_service_ctrl(id, 0);
}

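/*
 * A minimal usage sketch; the adapter/device/port identifiers and the
 * event port configuration values below are illustrative only:
 *
 *	struct rte_event_port_conf pc = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *	uint8_t txa_id = 0;
 *
 *	ret = rte_event_eth_tx_adapter_create(txa_id, dev_id, &pc);
 *	if (ret == 0)
 *		ret = rte_event_eth_tx_adapter_queue_add(txa_id, eth_port, -1);
 *	if (ret == 0)
 *		ret = rte_event_eth_tx_adapter_start(txa_id);
 */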
int
rte_event_eth_tx_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	int ret;

	if (port_conf == NULL)
		return -EINVAL;

	RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	ret = txa_init();
	if (ret != 0)
		return ret;

	if (txa_adapter_exist(id))
		return -EEXIST;

	txa_dev_id_array[id] = dev_id;
	if (txa_dev_adapter_create(id))
		ret = txa_dev_adapter_create(id)(id, dev);

	if (ret != 0) {
		txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
		return ret;
	}

	ret = txa_service_adapter_create(id, dev, port_conf);
	if (ret != 0) {
		if (txa_dev_adapter_free(id))
			txa_dev_adapter_free(id)(id, dev);
		txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
		return ret;
	}
	rte_eventdev_trace_eth_tx_adapter_create(id, dev_id, NULL, port_conf,
		ret);
	txa_dev_id_array[id] = dev_id;
	return 0;
}

int
rte_event_eth_tx_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_eth_tx_adapter_conf_cb conf_cb,
				void *conf_arg)
{
	struct rte_eventdev *dev;
	int ret;

	RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	ret = txa_init();
	if (ret != 0)
		return ret;

	if (txa_adapter_exist(id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];

	txa_dev_id_array[id] = dev_id;
	if (txa_dev_adapter_create_ext(id))
		ret = txa_dev_adapter_create_ext(id)(id, dev);

	if (ret != 0) {
		txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
		return ret;
	}

	ret = txa_service_adapter_create_ext(id, dev, conf_cb, conf_arg);
	if (ret != 0) {
		if (txa_dev_adapter_free(id))
			txa_dev_adapter_free(id)(id, dev);
		txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
		return ret;
	}

	rte_eventdev_trace_eth_tx_adapter_create(id, dev_id, conf_cb, conf_arg,
		ret);
	txa_dev_id_array[id] = dev_id;
	return 0;
}

int
rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
	TXA_CHECK_OR_ERR_RET(id);

	return txa_service_event_port_get(id, event_port_id);
}

int
rte_event_eth_tx_adapter_free(uint8_t id)
{
	int ret;

	TXA_CHECK_OR_ERR_RET(id);

	ret = txa_dev_adapter_free(id) ?
		txa_dev_adapter_free(id)(id, txa_evdev(id)) :
		0;

	if (ret == 0)
		ret = txa_service_adapter_free(id);
	txa_dev_id_array[id] = TXA_INVALID_DEV_ID;

	rte_eventdev_trace_eth_tx_adapter_free(id, ret);
	return ret;
}

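/* Depending on the capabilities the event device PMD reports for this
 * ethernet device, a queue is either handed to the PMD's own queue add/del
 * ops (RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT) or managed by the
 * software service implementation above.
 */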
int
rte_event_eth_tx_adapter_queue_add(uint8_t id,
				uint16_t eth_dev_id,
				int32_t queue)
{
	struct rte_eth_dev *eth_dev;
	int ret;
	uint32_t caps;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
	TXA_CHECK_OR_ERR_RET(id);

	eth_dev = &rte_eth_devices[eth_dev_id];
	TXA_CHECK_TXQ(eth_dev, queue);

	caps = 0;
	if (txa_dev_caps_get(id))
		txa_dev_caps_get(id)(txa_evdev(id), eth_dev, &caps);

	if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
		ret = txa_dev_queue_add(id) ?
			txa_dev_queue_add(id)(id,
					txa_evdev(id),
					eth_dev,
					queue) : 0;
	else
		ret = txa_service_queue_add(id, txa_evdev(id), eth_dev, queue);

	rte_eventdev_trace_eth_tx_adapter_queue_add(id, eth_dev_id, queue,
		ret);
	return ret;
}

int
rte_event_eth_tx_adapter_queue_del(uint8_t id,
				uint16_t eth_dev_id,
				int32_t queue)
{
	struct rte_eth_dev *eth_dev;
	int ret;
	uint32_t caps;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
	TXA_CHECK_OR_ERR_RET(id);

	eth_dev = &rte_eth_devices[eth_dev_id];

	caps = 0;

	if (txa_dev_caps_get(id))
		txa_dev_caps_get(id)(txa_evdev(id), eth_dev, &caps);

	if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
		ret = txa_dev_queue_del(id) ?
			txa_dev_queue_del(id)(id, txa_evdev(id),
					eth_dev,
					queue) : 0;
	else
		ret = txa_service_queue_del(id, eth_dev, queue);

	rte_eventdev_trace_eth_tx_adapter_queue_del(id, eth_dev_id, queue,
		ret);
	return ret;
}

int
rte_event_eth_tx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	TXA_CHECK_OR_ERR_RET(id);

	return txa_service_id_get(id, service_id);
}

int
rte_event_eth_tx_adapter_start(uint8_t id)
{
	int ret;

	TXA_CHECK_OR_ERR_RET(id);

	ret = txa_dev_start(id) ? txa_dev_start(id)(id, txa_evdev(id)) : 0;
	if (ret == 0)
		ret = txa_service_start(id);
	rte_eventdev_trace_eth_tx_adapter_start(id, ret);
	return ret;
}

int
rte_event_eth_tx_adapter_stats_get(uint8_t id,
				struct rte_event_eth_tx_adapter_stats *stats)
{
	int ret;

	TXA_CHECK_OR_ERR_RET(id);

	if (stats == NULL)
		return -EINVAL;

	*stats = (struct rte_event_eth_tx_adapter_stats){0};

	ret = txa_dev_stats_get(id) ?
			txa_dev_stats_get(id)(id, txa_evdev(id), stats) : 0;

	/* Fold in the service component's stats if a service instance
	 * exists for this adapter.
	 */
	if (ret == 0 && txa_service_id_get(id, NULL) != -ESRCH) {
		if (txa_dev_stats_get(id)) {
			struct rte_event_eth_tx_adapter_stats service_stats;

			ret = txa_service_stats_get(id, &service_stats);
			if (ret == 0) {
				stats->tx_retry += service_stats.tx_retry;
				stats->tx_packets += service_stats.tx_packets;
				stats->tx_dropped += service_stats.tx_dropped;
			}
		} else
			ret = txa_service_stats_get(id, stats);
	}

	return ret;
}

int
rte_event_eth_tx_adapter_stats_reset(uint8_t id)
{
	int ret;

	TXA_CHECK_OR_ERR_RET(id);

	ret = txa_dev_stats_reset(id) ?
		txa_dev_stats_reset(id)(id, txa_evdev(id)) : 0;
	if (ret == 0)
		ret = txa_service_stats_reset(id);
	return ret;
}

int
rte_event_eth_tx_adapter_stop(uint8_t id)
{
	int ret;

	TXA_CHECK_OR_ERR_RET(id);

	ret = txa_dev_stop(id) ? txa_dev_stop(id)(id, txa_evdev(id)) : 0;
	if (ret == 0)
		ret = txa_service_stop(id);
	rte_eventdev_trace_eth_tx_adapter_stop(id, ret);
	return ret;
}