/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_bitmap.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_malloc.h>
#include <stdbool.h>

#include "event_helper.h"
#include "ipsec-secgw.h"

#define DEFAULT_VECTOR_SIZE 16
#define DEFAULT_VECTOR_TMO 102400
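/*
 * Default event vector attributes: DEFAULT_VECTOR_SIZE is in packets and
 * DEFAULT_VECTOR_TMO is in nanoseconds. Both are applied to
 * ext_params.vector_size and vector_tmo_ns in eh_conf_init() and act as
 * fallbacks when the application does not configure vectorization.
 */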

static volatile bool eth_core_running;

static int
eh_get_enabled_cores(struct rte_bitmap *eth_core_mask)
{
	int i, count = 0;

	RTE_LCORE_FOREACH(i) {
		/* Check if this core is enabled in core mask */
		if (rte_bitmap_get(eth_core_mask, i)) {
			/* Found enabled core */
			count++;
		}
	}
	return count;
}

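/*
 * Return the next lcore reserved as an eth core (adapter service core),
 * cycling through the eth core mask. The static prev_core makes
 * successive calls hand out different eth cores, so Rx and Tx adapters
 * get distributed across the reserved cores.
 */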
static inline unsigned int
eh_get_next_eth_core(struct eventmode_conf *em_conf)
{
	static unsigned int prev_core = -1;
	unsigned int next_core;

	/*
	 * Make sure we have at least one eth core running, else the following
	 * logic would lead to an infinite loop.
	 */
	if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) {
		EH_LOG_ERR("No enabled eth core found");
		return RTE_MAX_LCORE;
	}

	/* Only some cores are marked as eth cores, skip others */
	do {
		/* Get the next core */
		next_core = rte_get_next_lcore(prev_core, 0, 1);

		/* Check if we have reached max lcores */
		if (next_core == RTE_MAX_LCORE)
			return next_core;

		/* Update prev_core */
		prev_core = next_core;
	} while (!(rte_bitmap_get(em_conf->eth_core_mask, next_core)));

	return next_core;
}

static inline unsigned int
eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core)
{
	unsigned int next_core;

	/* Get next active core skipping cores reserved as eth cores */
	do {
		/* Get the next core */
		next_core = rte_get_next_lcore(prev_core, 0, 0);

		/* Check if we have reached max lcores */
		if (next_core == RTE_MAX_LCORE)
			return next_core;

		prev_core = next_core;
	} while (rte_bitmap_get(em_conf->eth_core_mask, next_core));

	return next_core;
}

static struct eventdev_params *
eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id)
{
	int i;

	for (i = 0; i < em_conf->nb_eventdev; i++) {
		if (em_conf->eventdev_config[i].eventdev_id == eventdev_id)
			break;
	}

	/* No match */
	if (i == em_conf->nb_eventdev)
		return NULL;

	return &(em_conf->eventdev_config[i]);
}

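/*
 * When an eventdev/ethdev pair reports the INTERNAL_PORT adapter
 * capability, packets move between the ethdev and the eventdev without a
 * service (eth) core running the adapter. The helpers below report
 * whether *all* eth devices have this capability for a given event
 * device.
 */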
static inline bool
eh_dev_has_rx_internal_port(uint8_t eventdev_id)
{
	bool flag = true;
	int j, ret;

	RTE_ETH_FOREACH_DEV(j) {
		uint32_t caps = 0;

		ret = rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);
		if (ret < 0)
			return false;

		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
			flag = false;
	}
	return flag;
}

static inline bool
eh_dev_has_tx_internal_port(uint8_t eventdev_id)
{
	bool flag = true;
	int j, ret;

	RTE_ETH_FOREACH_DEV(j) {
		uint32_t caps = 0;

		ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);
		if (ret < 0)
			return false;

		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			flag = false;
	}
	return flag;
}

static inline bool
eh_dev_has_burst_mode(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
			true : false;
}

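/*
 * Build a default config for the (single) event device: one event queue
 * per eth device plus one queue reserved for Tx, capped by what the
 * device supports, and one event port per lcore, capped the same way.
 */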
static int
eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
{
	int lcore_count, nb_eventdev, nb_eth_dev, ret;
	struct eventdev_params *eventdev_config;
	struct rte_event_dev_info dev_info;

	/* Get the number of event devices */
	nb_eventdev = rte_event_dev_count();
	if (nb_eventdev == 0) {
		EH_LOG_ERR("No event devices detected");
		return -EINVAL;
	}

	if (nb_eventdev != 1) {
		EH_LOG_ERR("Event mode does not support multiple event devices. "
			   "Please provide only one event device.");
		return -EINVAL;
	}

	/* Get the number of eth devs */
	nb_eth_dev = rte_eth_dev_count_avail();
	if (nb_eth_dev == 0) {
		EH_LOG_ERR("No eth devices detected");
		return -EINVAL;
	}

	/* Get the number of lcores */
	lcore_count = rte_lcore_count();

	/* Read event device info */
	ret = rte_event_dev_info_get(0, &dev_info);
	if (ret < 0) {
		EH_LOG_ERR("Failed to read event device info %d", ret);
		return ret;
	}

	/* Check if enough ports are available */
	if (dev_info.max_event_ports < 2) {
		EH_LOG_ERR("Not enough event ports available");
		return -EINVAL;
	}

	/* Get the first event dev conf */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Save number of queues & ports available */
	eventdev_config->eventdev_id = 0;
	eventdev_config->nb_eventqueue = dev_info.max_event_queues;
	eventdev_config->nb_eventport = dev_info.max_event_ports;
	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;

	/* Check if there are more queues than required */
	if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
		/* One queue is reserved for Tx */
		eventdev_config->nb_eventqueue = nb_eth_dev + 1;
	}

	/* Check if there are more ports than required */
	if (eventdev_config->nb_eventport > lcore_count) {
		/* One port per lcore is enough */
		eventdev_config->nb_eventport = lcore_count;
	}

	/* Update the number of event devices */
	em_conf->nb_eventdev++;

	return 0;
}

static void
eh_do_capability_check(struct eventmode_conf *em_conf)
{
	struct eventdev_params *eventdev_config;
	int all_internal_ports = 1;
	uint32_t eventdev_id;
	int i;

	for (i = 0; i < em_conf->nb_eventdev; i++) {

		/* Get the event dev conf */
		eventdev_config = &(em_conf->eventdev_config[i]);
		eventdev_id = eventdev_config->eventdev_id;

		/* Check if event device has internal port for Rx & Tx */
		if (eh_dev_has_rx_internal_port(eventdev_id) &&
		    eh_dev_has_tx_internal_port(eventdev_id)) {
			eventdev_config->all_internal_ports = 1;
		} else {
			all_internal_ports = 0;
		}
	}

	/*
	 * If Rx & Tx internal ports are supported by all event devices then
	 * eth cores won't be required. Override the eth core mask requested
	 * and decrement number of event queues by one as it won't be needed
	 * for Tx.
	 */
	if (all_internal_ports) {
		rte_bitmap_reset(em_conf->eth_core_mask);
		for (i = 0; i < em_conf->nb_eventdev; i++)
			em_conf->eventdev_config[i].nb_eventqueue--;
	}
}

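/*
 * Default link config: event ports are assigned 1:1 to worker lcores
 * (skipping the reserved eth cores), and every event queue is linked to
 * every port via the "all_ev_queue_to_ev_port" mode.
 */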
static int
eh_set_default_conf_link(struct eventmode_conf *em_conf)
{
	struct eventdev_params *eventdev_config;
	struct eh_event_link_info *link;
	unsigned int lcore_id = -1;
	int i, link_index;

	/*
	 * Create a 1:1 mapping from event ports to cores. If the number
	 * of event ports is less than the number of cores, some cores won't
	 * run any worker. If there are more event ports, then some ports
	 * won't be used.
	 */

	/*
	 * The event queue-port mapping is done according to the link. Since
	 * we are falling back to the default link config, enable the
	 * "all_ev_queue_to_ev_port" mode flag, which maps all queues to
	 * each port.
	 */
	em_conf->ext_params.all_ev_queue_to_ev_port = 1;

	/* Get first event dev conf */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Loop through the ports */
	for (i = 0; i < eventdev_config->nb_eventport; i++) {

		/* Get next active core id */
		lcore_id = eh_get_next_active_core(em_conf,
				lcore_id);

		if (lcore_id == RTE_MAX_LCORE) {
			/* Reached max cores */
			return 0;
		}

		/* Save the current combination as one link */

		/* Get the index */
		link_index = em_conf->nb_link;

		/* Get the corresponding link */
		link = &(em_conf->link[link_index]);

		/* Save link */
		link->eventdev_id = eventdev_config->eventdev_id;
		link->event_port_id = i;
		link->lcore_id = lcore_id;

		/*
		 * Don't set eventq_id as by default all queues
		 * need to be mapped to the port, which is controlled
		 * by the operating mode.
		 */

		/* Update number of links */
		em_conf->nb_link++;
	}

	return 0;
}

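/*
 * Default Rx adapter config: one adapter on the first event device with
 * one connection per enabled eth port. With enough event queues each eth
 * port gets its own queue (port i -> queue i); otherwise all ports feed
 * event queue 0. An ethdev_rx_qid of -1 adds every Rx queue of the port.
 */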
static int
eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
{
	struct rx_adapter_connection_info *conn;
	struct eventdev_params *eventdev_config;
	struct rx_adapter_conf *adapter;
	bool rx_internal_port = true;
	bool single_ev_queue = false;
	int nb_eventqueue;
	uint32_t caps = 0;
	int eventdev_id;
	int nb_eth_dev;
	int adapter_id;
	int conn_id;
	int ret;
	int i;

	/* Create one adapter with eth queues mapped to event queue(s) */

	if (em_conf->nb_eventdev == 0) {
		EH_LOG_ERR("No event devs registered");
		return -EINVAL;
	}

	/* Get the number of eth devs */
	nb_eth_dev = rte_eth_dev_count_avail();

	/* Use the first event dev */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Get eventdev ID */
	eventdev_id = eventdev_config->eventdev_id;
	adapter_id = 0;

	/* Get adapter conf */
	adapter = &(em_conf->rx_adapter[adapter_id]);

	/* Set adapter conf */
	adapter->eventdev_id = eventdev_id;
	adapter->adapter_id = adapter_id;

	/*
	 * If the event device does not have internal ports for passing
	 * packets then one queue is reserved for the Tx path
	 */
	nb_eventqueue = eventdev_config->all_internal_ports ?
			eventdev_config->nb_eventqueue :
			eventdev_config->nb_eventqueue - 1;

	/*
	 * Map all queues of eth device (port) to an event queue. If there
	 * are more event queues than eth ports then create 1:1 mapping.
	 * Otherwise map all eth ports to a single event queue.
	 */
	if (nb_eth_dev > nb_eventqueue)
		single_ev_queue = true;

	for (i = 0; i < nb_eth_dev; i++) {

		/* Use only the ports enabled */
		if ((em_conf->eth_portmask & (1 << i)) == 0)
			continue;

		/* Get the connection id */
		conn_id = adapter->nb_connections;

		/* Get the connection */
		conn = &(adapter->conn[conn_id]);

		/* Set mapping between eth ports & event queues */
		conn->ethdev_id = i;
		conn->eventq_id = single_ev_queue ? 0 : i;

		/* Add all Rx queues of the eth port to the event queue */
		conn->ethdev_rx_qid = -1;

		/* Get Rx adapter capabilities */
		ret = rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);
		if (ret < 0) {
			EH_LOG_ERR("Failed to get event device %d eth rx adapter"
				   " capabilities for port %d", eventdev_id, i);
			return ret;
		}
		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
			rx_internal_port = false;

		/* Update the number of connections */
		adapter->nb_connections++;

	}

	if (rx_internal_port) {
		/* Rx core is not required */
		adapter->rx_core_id = -1;
	} else {
		/* Rx core is required */
		adapter->rx_core_id = eh_get_next_eth_core(em_conf);
	}

	/* We have set up one adapter */
	em_conf->nb_rx_adapter = 1;

	return 0;
}

static int
eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
{
	struct tx_adapter_connection_info *conn;
	struct eventdev_params *eventdev_config;
	struct tx_adapter_conf *tx_adapter;
	bool tx_internal_port = true;
	uint32_t caps = 0;
	int eventdev_id;
	int adapter_id;
	int nb_eth_dev;
	int conn_id;
	int ret;
	int i;

	/*
	 * Create one Tx adapter and add all Tx queues of all enabled eth
	 * devices to it.
	 */

	if (em_conf->nb_eventdev == 0) {
		EH_LOG_ERR("No event devs registered");
		return -EINVAL;
	}

	/* Get the number of eth devs */
	nb_eth_dev = rte_eth_dev_count_avail();

	/* Use the first event dev */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Get eventdev ID */
	eventdev_id = eventdev_config->eventdev_id;
	adapter_id = 0;

	/* Get adapter conf */
	tx_adapter = &(em_conf->tx_adapter[adapter_id]);

	/* Set adapter conf */
	tx_adapter->eventdev_id = eventdev_id;
	tx_adapter->adapter_id = adapter_id;

	/*
	 * One eth device (port) is one connection. Map all Tx queues
	 * of the device to the Tx adapter.
	 */
	for (i = 0; i < nb_eth_dev; i++) {

		/* Use only the ports enabled */
		if ((em_conf->eth_portmask & (1 << i)) == 0)
			continue;

		/* Get the connection id */
		conn_id = tx_adapter->nb_connections;

		/* Get the connection */
		conn = &(tx_adapter->conn[conn_id]);

		/* Add ethdev to connections */
		conn->ethdev_id = i;

		/* Add all eth tx queues to adapter */
		conn->ethdev_tx_qid = -1;

		/* Get Tx adapter capabilities */
		ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
		if (ret < 0) {
			EH_LOG_ERR("Failed to get event device %d eth tx adapter"
				   " capabilities for port %d", eventdev_id, i);
			return ret;
		}
		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			tx_internal_port = false;

		/* Update the number of connections */
		tx_adapter->nb_connections++;
	}

	if (tx_internal_port) {
		/* Tx core is not required */
		tx_adapter->tx_core_id = -1;
	} else {
		/* Tx core is required */
		tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);

		/*
		 * Use one event queue per adapter for submitting packets
		 * for Tx. Reserve the last available queue (queue numbers
		 * start at 0).
		 */
		tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
	}

	/* We have set up one adapter */
	em_conf->nb_tx_adapter = 1;
	return 0;
}

static int
eh_validate_conf(struct eventmode_conf *em_conf)
{
	int ret;

	/*
	 * Check if event devs are specified. Else probe the event devices
	 * and initialize the config with all ports & queues available
	 */
	if (em_conf->nb_eventdev == 0) {
		ret = eh_set_default_conf_eventdev(em_conf);
		if (ret != 0)
			return ret;
	}

	/* Perform capability check for the selected event devices */
	eh_do_capability_check(em_conf);

	/*
	 * Check if links are specified. Else generate a default config for
	 * the event ports used.
	 */
	if (em_conf->nb_link == 0) {
		ret = eh_set_default_conf_link(em_conf);
		if (ret != 0)
			return ret;
	}

	/*
	 * Check if rx adapters are specified. Else generate a default config
	 * with one rx adapter and all eth queues mapped to event queues.
	 */
	if (em_conf->nb_rx_adapter == 0) {
		ret = eh_set_default_conf_rx_adapter(em_conf);
		if (ret != 0)
			return ret;
	}

	/*
	 * Check if tx adapters are specified. Else generate a default config
	 * with one tx adapter.
	 */
	if (em_conf->nb_tx_adapter == 0) {
		ret = eh_set_default_conf_tx_adapter(em_conf);
		if (ret != 0)
			return ret;
	}

	return 0;
}

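/*
 * Bring up each event device: configure it with defaults derived from
 * rte_event_dev_info_get(), set up its queues (the last queue is atomic
 * when it is reserved for eth Tx) and ports, then apply the queue-port
 * links requested in the eventmode config.
 */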
static int
eh_initialize_eventdev(struct eventmode_conf *em_conf)
{
	struct rte_event_queue_conf eventq_conf = {0};
	struct rte_event_dev_info evdev_default_conf;
	struct rte_event_dev_config eventdev_conf;
	struct eventdev_params *eventdev_config;
	int nb_eventdev = em_conf->nb_eventdev;
	struct eh_event_link_info *link;
	uint8_t *queue = NULL;
	uint8_t eventdev_id;
	int nb_eventqueue;
	uint8_t i, j;
	int ret;

	for (i = 0; i < nb_eventdev; i++) {

		/* Get eventdev config */
		eventdev_config = &(em_conf->eventdev_config[i]);

		/* Get event dev ID */
		eventdev_id = eventdev_config->eventdev_id;

		/* Get the number of queues */
		nb_eventqueue = eventdev_config->nb_eventqueue;

		/* Reset the default conf */
		memset(&evdev_default_conf, 0,
		       sizeof(struct rte_event_dev_info));

		/* Get default conf of eventdev */
		ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
		if (ret < 0) {
			EH_LOG_ERR(
				"Error in getting event device info[devID:%d]",
				eventdev_id);
			return ret;
		}

		memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config));
		eventdev_conf.nb_events_limit =
				evdev_default_conf.max_num_events;
		eventdev_conf.nb_event_queues = nb_eventqueue;
		eventdev_conf.nb_event_ports =
				eventdev_config->nb_eventport;
		eventdev_conf.nb_event_queue_flows =
				evdev_default_conf.max_event_queue_flows;
		eventdev_conf.nb_event_port_dequeue_depth =
				evdev_default_conf.max_event_port_dequeue_depth;
		eventdev_conf.nb_event_port_enqueue_depth =
				evdev_default_conf.max_event_port_enqueue_depth;

		/* Configure event device */
		ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
		if (ret < 0) {
			EH_LOG_ERR("Error in configuring event device");
			return ret;
		}

		/* Configure event queues */
		for (j = 0; j < nb_eventqueue; j++) {

			memset(&eventq_conf, 0,
			       sizeof(struct rte_event_queue_conf));

			/* Per event dev queues can be ATQ or SINGLE LINK */
			eventq_conf.event_queue_cfg =
					eventdev_config->ev_queue_mode;
			/*
			 * All queues use the schedule type selected for the
			 * application stage. If the event device does not
			 * have internal ports, the last queue is reserved
			 * for the final eth Tx stage and is made atomic.
			 */
			if (!eventdev_config->all_internal_ports &&
			    j == nb_eventqueue-1) {
				eventq_conf.schedule_type =
					RTE_SCHED_TYPE_ATOMIC;
			} else {
				eventq_conf.schedule_type =
					em_conf->ext_params.sched_type;
			}

			/* Set max atomic flows to 1024 */
			eventq_conf.nb_atomic_flows = 1024;
			eventq_conf.nb_atomic_order_sequences = 1024;

			/* Setup the queue */
			ret = rte_event_queue_setup(eventdev_id, j,
						    &eventq_conf);
			if (ret < 0) {
				EH_LOG_ERR("Failed to setup event queue %d",
					   ret);
				return ret;
			}
		}

		/* Configure event ports */
		for (j = 0; j < eventdev_config->nb_eventport; j++) {
			ret = rte_event_port_setup(eventdev_id, j, NULL);
			if (ret < 0) {
				EH_LOG_ERR("Failed to setup event port %d",
					   ret);
				return ret;
			}
		}
	}

	/* Link event queues to event ports */
	for (j = 0; j < em_conf->nb_link; j++) {

		/* Get link info */
		link = &(em_conf->link[j]);

		/* Get event dev ID */
		eventdev_id = link->eventdev_id;

		/*
		 * If the "all_ev_queue_to_ev_port" flag is set, all
		 * queues need to be mapped to the port.
		 */
		if (em_conf->ext_params.all_ev_queue_to_ev_port)
			queue = NULL;
		else
			queue = &(link->eventq_id);

		/* Link queue to port */
		ret = rte_event_port_link(eventdev_id, link->event_port_id,
					  queue, NULL, 1);
		if (ret < 0) {
			EH_LOG_ERR("Failed to link event port %d", ret);
			return ret;
		}
	}

	return 0;
}

static int
eh_start_eventdev(struct eventmode_conf *em_conf)
{
	struct eventdev_params *eventdev_config;
	int nb_eventdev = em_conf->nb_eventdev;
	int i, ret;

	/* Start event devices */
	for (i = 0; i < nb_eventdev; i++) {

		/* Get eventdev config */
		eventdev_config = &(em_conf->eventdev_config[i]);

		ret = rte_event_dev_start(eventdev_config->eventdev_id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to start event device %d, %d",
				   i, ret);
			return ret;
		}
	}
	return 0;
}

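/*
 * Validate the requested event vector attributes against what the
 * eventdev/ethdev pair supports: size within [min_sz, max_sz] (a power
 * of two when log2_sz is set) and timeout within
 * [min_timeout_ns, max_timeout_ns].
 */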
static int
eh_event_vector_limits_validate(struct eventmode_conf *em_conf,
				uint8_t ev_dev_id, uint8_t ethdev_id)
{
	struct rte_event_eth_rx_adapter_vector_limits limits = {0};
	uint16_t vector_size = em_conf->ext_params.vector_size;
	int ret;

	ret = rte_event_eth_rx_adapter_vector_limits_get(ev_dev_id, ethdev_id,
							 &limits);
	if (ret) {
		EH_LOG_ERR("failed to get vector limits");
		return ret;
	}

	if (vector_size < limits.min_sz || vector_size > limits.max_sz) {
		EH_LOG_ERR("Vector size [%d] not within limits min[%d] max[%d]",
			   vector_size, limits.min_sz, limits.max_sz);
		return -EINVAL;
	}

	if (limits.log2_sz && !rte_is_power_of_2(vector_size)) {
		EH_LOG_ERR("Vector size [%d] not power of 2", vector_size);
		return -EINVAL;
	}

	if (em_conf->vector_tmo_ns > limits.max_timeout_ns ||
	    em_conf->vector_tmo_ns < limits.min_timeout_ns) {
		EH_LOG_ERR("Vector timeout [%" PRIu64
			   "] not within limits max[%" PRIu64
			   "] min[%" PRIu64 "]",
			   em_conf->vector_tmo_ns,
			   limits.max_timeout_ns,
			   limits.min_timeout_ns);
		return -EINVAL;
	}
	return 0;
}

static int
eh_rx_adapter_configure(struct eventmode_conf *em_conf,
			struct rx_adapter_conf *adapter)
{
	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
	struct rte_event_dev_info evdev_default_conf = {0};
	struct rte_event_port_conf port_conf = {0};
	struct rx_adapter_connection_info *conn;
	uint32_t service_id, socket_id, nb_elem;
	struct rte_mempool *vector_pool = NULL;
	uint32_t lcore_id = rte_lcore_id();
	int ret, portid, nb_ports = 0;
	uint8_t eventdev_id;
	int j;

	/* Get event dev ID */
	eventdev_id = adapter->eventdev_id;

	/* Get default configuration of event dev */
	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to get event dev info %d", ret);
		return ret;
	}

	RTE_ETH_FOREACH_DEV(portid)
		if ((em_conf->eth_portmask & (1 << portid)))
			nb_ports++;

	if (em_conf->ext_params.event_vector) {
		socket_id = rte_lcore_to_socket_id(lcore_id);

		if (em_conf->vector_pool_sz) {
			nb_elem = em_conf->vector_pool_sz;
		} else {
			nb_elem = (nb_bufs_in_pool /
				   em_conf->ext_params.vector_size) + 1;
			if (per_port_pool)
				nb_elem = nb_ports * nb_elem;
		}

		vector_pool = rte_event_vector_pool_create(
				"vector_pool", nb_elem, 0,
				em_conf->ext_params.vector_size,
				socket_id);
		if (vector_pool == NULL) {
			EH_LOG_ERR("failed to create event vector pool");
			return -ENOMEM;
		}
	}
	/* Setup port conf */
	port_conf.new_event_threshold = 1200;
	port_conf.dequeue_depth =
			evdev_default_conf.max_event_port_dequeue_depth;
	port_conf.enqueue_depth =
			evdev_default_conf.max_event_port_enqueue_depth;

	/* Create Rx adapter */
	ret = rte_event_eth_rx_adapter_create(adapter->adapter_id,
					      adapter->eventdev_id, &port_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to create rx adapter %d", ret);
		return ret;
	}

	/* Setup various connections in the adapter */
	for (j = 0; j < adapter->nb_connections; j++) {
		/* Get connection */
		conn = &(adapter->conn[j]);

		/* Setup queue conf */
		queue_conf.ev.queue_id = conn->eventq_id;
		queue_conf.ev.sched_type = em_conf->ext_params.sched_type;
		queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;

		if (em_conf->ext_params.event_vector) {
			ret = eh_event_vector_limits_validate(em_conf,
							      eventdev_id,
							      conn->ethdev_id);
			if (ret)
				return ret;

			queue_conf.vector_sz = em_conf->ext_params.vector_size;
			queue_conf.vector_timeout_ns = em_conf->vector_tmo_ns;
			queue_conf.vector_mp = vector_pool;
			queue_conf.rx_queue_flags =
				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
		}

		/* Add queue to the adapter */
		ret = rte_event_eth_rx_adapter_queue_add(adapter->adapter_id,
				conn->ethdev_id, conn->ethdev_rx_qid,
				&queue_conf);
		if (ret < 0) {
			EH_LOG_ERR("Failed to add eth queue to rx adapter %d",
				   ret);
			return ret;
		}
	}

	/* Get the service ID used by rx adapter */
	ret = rte_event_eth_rx_adapter_service_id_get(adapter->adapter_id,
						      &service_id);
	if (ret != -ESRCH && ret < 0) {
		EH_LOG_ERR("Failed to get service id used by rx adapter %d",
			   ret);
		return ret;
	}

	/*
	 * -ESRCH means the adapter uses an internal port and has no
	 * service; only disable the runstate check when a service exists.
	 */
	if (ret != -ESRCH)
		rte_service_set_runstate_mapped_check(service_id, 0);

	/* Start adapter */
	ret = rte_event_eth_rx_adapter_start(adapter->adapter_id);
	if (ret < 0) {
		EH_LOG_ERR("Failed to start rx adapter %d", ret);
		return ret;
	}

	return 0;
}

static int
eh_initialize_rx_adapter(struct eventmode_conf *em_conf)
{
	struct rx_adapter_conf *adapter;
	int i, ret;

	/* Configure rx adapters */
	for (i = 0; i < em_conf->nb_rx_adapter; i++) {
		adapter = &(em_conf->rx_adapter[i]);
		ret = eh_rx_adapter_configure(em_conf, adapter);
		if (ret < 0) {
			EH_LOG_ERR("Failed to configure rx adapter %d", ret);
			return ret;
		}
	}
	return 0;
}

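/*
 * Main loop of an eth (service) core: collect the service IDs of every
 * Rx/Tx adapter mapped to this lcore and keep running those services
 * until eh_stop_worker_eth_core() clears eth_core_running.
 */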
static int32_t
eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id)
{
	uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE];
	struct rx_adapter_conf *rx_adapter;
	struct tx_adapter_conf *tx_adapter;
	int service_count = 0;
	int adapter_id;
	int32_t ret;
	int i;

	EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id);

	/*
	 * Parse adapter config to check which of all Rx adapters need
	 * to be handled by this core.
	 */
	for (i = 0; i < conf->nb_rx_adapter; i++) {
		/* Check if we have exceeded the max allowed */
		if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
			EH_LOG_ERR(
			      "Exceeded the max allowed adapters per rx core");
			break;
		}

		rx_adapter = &(conf->rx_adapter[i]);
		if (rx_adapter->rx_core_id != lcore_id)
			continue;

		/* Adapter is handled by this core */
		adapter_id = rx_adapter->adapter_id;

		/* Get the service ID for the adapters */
		ret = rte_event_eth_rx_adapter_service_id_get(adapter_id,
				&(service_id[service_count]));

		if (ret != -ESRCH && ret < 0) {
			EH_LOG_ERR(
				"Failed to get service id used by rx adapter");
			return ret;
		}

		/* Update service count */
		service_count++;
	}

	/*
	 * Parse adapter config to see which of all Tx adapters need
	 * to be handled by this core.
	 */
	for (i = 0; i < conf->nb_tx_adapter; i++) {
		/* Check if we have exceeded the max allowed */
		if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) {
			EH_LOG_ERR(
				"Exceeded the max allowed adapters per tx core");
			break;
		}

		tx_adapter = &conf->tx_adapter[i];
		if (tx_adapter->tx_core_id != lcore_id)
			continue;

		/* Adapter is handled by this core */
		adapter_id = tx_adapter->adapter_id;

		/* Get the service ID for the adapters */
		ret = rte_event_eth_tx_adapter_service_id_get(adapter_id,
				&(service_id[service_count]));

		if (ret != -ESRCH && ret < 0) {
			EH_LOG_ERR(
				"Failed to get service id used by tx adapter");
			return ret;
		}

		/* Update service count */
		service_count++;
	}

	eth_core_running = true;

	while (eth_core_running) {
		for (i = 0; i < service_count; i++) {
			/* Initiate adapter service */
			rte_service_run_iter_on_app_lcore(service_id[i], 0);
		}
	}

	return 0;
}

static int32_t
eh_stop_worker_eth_core(void)
{
	if (eth_core_running) {
		EH_LOG_INFO("Stopping eth cores");
		eth_core_running = false;
	}
	return 0;
}

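/*
 * Workers are matched by comparing the capability word of each
 * registered worker against the capabilities of the event device tied
 * to this lcore's links. A minimal sketch of how an application might
 * register workers (the worker function name is hypothetical):
 *
 *	static struct eh_app_worker_params wrkrs[] = {
 *		{ .cap.burst = EH_RX_TYPE_BURST,
 *		  .cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT,
 *		  .worker_thread = app_worker_burst_tx_internal },
 *	};
 *	eh_launch_worker(conf, wrkrs, RTE_DIM(wrkrs));
 */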
static struct eh_app_worker_params *
eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
	       struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param)
{
	struct eh_app_worker_params curr_conf = { {{0} }, NULL};
	struct eh_event_link_info *link = NULL;
	struct eh_app_worker_params *tmp_wrkr;
	struct eventmode_conf *em_conf;
	uint8_t eventdev_id;
	int i;

	/* Get eventmode config */
	em_conf = conf->mode_params;

	/*
	 * Use event device from the first lcore-event link.
	 *
	 * Assumption: All lcore-event links tied to a core are using the
	 * same event device. In other words, one core would be polling on
	 * queues of a single event device only.
	 */

	/* Get a link for this lcore */
	for (i = 0; i < em_conf->nb_link; i++) {
		link = &(em_conf->link[i]);
		if (link->lcore_id == lcore_id)
			break;
	}

	/* No links at all, or no link matching this lcore */
	if (link == NULL || i == em_conf->nb_link) {
		EH_LOG_ERR("No valid link found for lcore %d", lcore_id);
		return NULL;
	}

	/* Get event dev ID */
	eventdev_id = link->eventdev_id;

	/* Populate the curr_conf with the capabilities */

	/* Check for Tx internal port */
	if (eh_dev_has_tx_internal_port(eventdev_id))
		curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	else
		curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;

	/* Check for burst mode */
	if (eh_dev_has_burst_mode(eventdev_id))
		curr_conf.cap.burst = EH_RX_TYPE_BURST;
	else
		curr_conf.cap.burst = EH_RX_TYPE_NON_BURST;

	curr_conf.cap.ipsec_mode = conf->ipsec_mode;

	/* Parse the passed list and see if we have matching capabilities */

	/* Initialize the pointer used to traverse the list */
	tmp_wrkr = app_wrkrs;

	for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) {

		/* Skip this if capabilities are not matching */
		if (tmp_wrkr->cap.u64 != curr_conf.cap.u64)
			continue;

		/* If the checks pass, we have a match */
		return tmp_wrkr;
	}

	return NULL;
}

static int
eh_verify_match_worker(struct eh_app_worker_params *match_wrkr)
{
	/* Verify registered worker */
	if (match_wrkr->worker_thread == NULL) {
		EH_LOG_ERR("No worker registered");
		return 0;
	}

	/* Success */
	return 1;
}

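/*
 * Copy all links configured for the given lcore into a freshly allocated
 * array and return the number of links copied. The caller owns the
 * returned memory and must free() it (eh_launch_worker() does so after
 * the worker returns).
 */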
static uint8_t
eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf,
			 struct eh_event_link_info **links)
{
	struct eh_event_link_info *link_cache;
	struct eventmode_conf *em_conf = NULL;
	struct eh_event_link_info *link;
	uint8_t lcore_nb_link = 0;
	size_t single_link_size;
	size_t cache_size;
	int index = 0;
	int i;

	if (conf == NULL || links == NULL) {
		EH_LOG_ERR("Invalid args");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	if (em_conf == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get the number of links registered for this lcore */
	for (i = 0; i < em_conf->nb_link; i++) {

		/* Get link */
		link = &(em_conf->link[i]);

		/* Check if we have a link intended for this lcore */
		if (link->lcore_id == lcore_id) {

			/* Update the number of links for this core */
			lcore_nb_link++;

		}
	}

	/* Compute size of one entry to be copied */
	single_link_size = sizeof(struct eh_event_link_info);

	/* Compute size of the buffer required */
	cache_size = lcore_nb_link * sizeof(struct eh_event_link_info);

	/* Allocate memory for caching the links */
	link_cache = calloc(1, cache_size);
	if (link_cache == NULL) {
		EH_LOG_ERR("Failed to allocate memory for links");
		return 0;
	}

	/* Cache the links registered for this lcore */
	for (i = 0; i < em_conf->nb_link; i++) {

		/* Get link */
		link = &(em_conf->link[i]);

		/* Check if we have a link intended for this lcore */
		if (link->lcore_id == lcore_id) {

			/* Cache the link */
			memcpy(&link_cache[index], link, single_link_size);

			/* Update index */
			index++;
		}
	}

	/* Update the links for application to use the cached links */
	*links = link_cache;

	/* Return the number of cached links */
	return lcore_nb_link;
}

static int
eh_tx_adapter_configure(struct eventmode_conf *em_conf,
			struct tx_adapter_conf *adapter)
{
	struct rte_event_dev_info evdev_default_conf = {0};
	struct rte_event_port_conf port_conf = {0};
	struct tx_adapter_connection_info *conn;
	struct eventdev_params *eventdev_config;
	uint8_t tx_port_id = 0;
	uint8_t eventdev_id;
	uint32_t service_id;
	int ret, j;

	/* Get event dev ID */
	eventdev_id = adapter->eventdev_id;

	/* Get event device conf */
	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
	if (eventdev_config == NULL) {
		EH_LOG_ERR("Failed to read eventdev config");
		return -EINVAL;
	}

	/* Create Tx adapter */

	/* Get default configuration of event dev */
	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to get event dev info %d", ret);
		return ret;
	}

	/* Setup port conf */
	port_conf.new_event_threshold =
			evdev_default_conf.max_num_events;
	port_conf.dequeue_depth =
			evdev_default_conf.max_event_port_dequeue_depth;
	port_conf.enqueue_depth =
			evdev_default_conf.max_event_port_enqueue_depth;

	/* Create adapter */
	ret = rte_event_eth_tx_adapter_create(adapter->adapter_id,
					      adapter->eventdev_id, &port_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to create tx adapter %d", ret);
		return ret;
	}

	/* Setup various connections in the adapter */
	for (j = 0; j < adapter->nb_connections; j++) {

		/* Get connection */
		conn = &(adapter->conn[j]);

		/* Add queue to the adapter */
		ret = rte_event_eth_tx_adapter_queue_add(adapter->adapter_id,
				conn->ethdev_id, conn->ethdev_tx_qid);
		if (ret < 0) {
			EH_LOG_ERR("Failed to add eth queue to tx adapter %d",
				   ret);
			return ret;
		}
	}

	/*
	 * Check if Tx core is assigned. If Tx core is not assigned then
	 * the adapter has an internal port for submitting Tx packets and
	 * Tx event queue & port setup is not required
	 */
	if (adapter->tx_core_id == (uint32_t) (-1)) {
		/* Internal port is present */
		goto skip_tx_queue_port_setup;
	}

	/* Setup Tx queue & port */

	/* Get event port used by the adapter */
	ret = rte_event_eth_tx_adapter_event_port_get(
			adapter->adapter_id, &tx_port_id);
	if (ret) {
		EH_LOG_ERR("Failed to get tx adapter port id %d", ret);
		return ret;
	}

	/*
	 * The Tx event queue is reserved for the Tx adapter. Unlink this
	 * queue from all other ports.
	 */
	for (j = 0; j < eventdev_config->nb_eventport; j++) {
		rte_event_port_unlink(eventdev_id, j,
				      &(adapter->tx_ev_queue), 1);
	}

	/* Link Tx event queue to Tx port */
	ret = rte_event_port_link(eventdev_id, tx_port_id,
				  &(adapter->tx_ev_queue), NULL, 1);
	if (ret != 1) {
		EH_LOG_ERR("Failed to link event queue to port");
		return ret;
	}

	/* Get the service ID used by Tx adapter */
	ret = rte_event_eth_tx_adapter_service_id_get(adapter->adapter_id,
						      &service_id);
	if (ret != -ESRCH && ret < 0) {
		EH_LOG_ERR("Failed to get service id used by tx adapter %d",
			   ret);
		return ret;
	}

	/* Disable the runstate check only when a service exists */
	if (ret != -ESRCH)
		rte_service_set_runstate_mapped_check(service_id, 0);

skip_tx_queue_port_setup:
	/* Start adapter */
	ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
	if (ret < 0) {
		EH_LOG_ERR("Failed to start tx adapter %d", ret);
		return ret;
	}

	return 0;
}

static int
eh_initialize_tx_adapter(struct eventmode_conf *em_conf)
{
	struct tx_adapter_conf *adapter;
	int i, ret;

	/* Configure Tx adapters */
	for (i = 0; i < em_conf->nb_tx_adapter; i++) {
		adapter = &(em_conf->tx_adapter[i]);
		ret = eh_tx_adapter_configure(em_conf, adapter);
		if (ret < 0) {
			EH_LOG_ERR("Failed to configure tx adapter %d", ret);
			return ret;
		}
	}
	return 0;
}

static void
eh_display_operating_mode(struct eventmode_conf *em_conf)
{
	char sched_types[][32] = {
		"RTE_SCHED_TYPE_ORDERED",
		"RTE_SCHED_TYPE_ATOMIC",
		"RTE_SCHED_TYPE_PARALLEL",
	};
	EH_LOG_INFO("Operating mode:");

	EH_LOG_INFO("\tScheduling type: \t%s",
		    sched_types[em_conf->ext_params.sched_type]);

	EH_LOG_INFO("");
}

static void
eh_display_event_dev_conf(struct eventmode_conf *em_conf)
{
	char queue_mode[][32] = {
		"",
		"ATQ (ALL TYPE QUEUE)",
		"SINGLE LINK",
	};
	char print_buf[256] = { 0 };
	int i;

	EH_LOG_INFO("Event Device Configuration:");

	for (i = 0; i < em_conf->nb_eventdev; i++) {
		sprintf(print_buf,
			"\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d",
			em_conf->eventdev_config[i].eventdev_id,
			em_conf->eventdev_config[i].nb_eventqueue,
			em_conf->eventdev_config[i].nb_eventport);
		sprintf(print_buf + strlen(print_buf),
			"\tQueue mode: %s",
			queue_mode[em_conf->eventdev_config[i].ev_queue_mode]);
		EH_LOG_INFO("%s", print_buf);
	}
	EH_LOG_INFO("");
}

static void
eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
{
	int nb_rx_adapter = em_conf->nb_rx_adapter;
	struct rx_adapter_connection_info *conn;
	struct rx_adapter_conf *adapter;
	char print_buf[256] = { 0 };
	int i, j;

	EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter);

	for (i = 0; i < nb_rx_adapter; i++) {
		adapter = &(em_conf->rx_adapter[i]);
		sprintf(print_buf,
			"\tRx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
			adapter->adapter_id,
			adapter->nb_connections,
			adapter->eventdev_id);
		if (adapter->rx_core_id == (uint32_t)-1)
			sprintf(print_buf + strlen(print_buf),
				"\tRx core: %-2s", "[INTERNAL PORT]");
		else if (adapter->rx_core_id == RTE_MAX_LCORE)
			sprintf(print_buf + strlen(print_buf),
				"\tRx core: %-2s", "[NONE]");
		else
			sprintf(print_buf + strlen(print_buf),
				"\tRx core: %-2d", adapter->rx_core_id);

		EH_LOG_INFO("%s", print_buf);

		for (j = 0; j < adapter->nb_connections; j++) {
			conn = &(adapter->conn[j]);

			sprintf(print_buf,
				"\t\tEthdev ID: %-2d", conn->ethdev_id);

			if (conn->ethdev_rx_qid == -1)
				sprintf(print_buf + strlen(print_buf),
					"\tEth rx queue: %-2s", "ALL");
			else
				sprintf(print_buf + strlen(print_buf),
					"\tEth rx queue: %-2d",
					conn->ethdev_rx_qid);

			sprintf(print_buf + strlen(print_buf),
				"\tEvent queue: %-2d", conn->eventq_id);
			EH_LOG_INFO("%s", print_buf);
		}
	}
	EH_LOG_INFO("");
}

static void
eh_display_tx_adapter_conf(struct eventmode_conf *em_conf)
{
	int nb_tx_adapter = em_conf->nb_tx_adapter;
	struct tx_adapter_connection_info *conn;
	struct tx_adapter_conf *adapter;
	char print_buf[256] = { 0 };
	int i, j;

	EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter);

	for (i = 0; i < nb_tx_adapter; i++) {
		adapter = &(em_conf->tx_adapter[i]);
		sprintf(print_buf,
			"\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
			adapter->adapter_id,
			adapter->nb_connections,
			adapter->eventdev_id);
		if (adapter->tx_core_id == (uint32_t)-1)
			sprintf(print_buf + strlen(print_buf),
				"\tTx core: %-2s", "[INTERNAL PORT]");
		else if (adapter->tx_core_id == RTE_MAX_LCORE)
			sprintf(print_buf + strlen(print_buf),
				"\tTx core: %-2s", "[NONE]");
		else
			sprintf(print_buf + strlen(print_buf),
				"\tTx core: %-2d,\tInput event queue: %-2d",
				adapter->tx_core_id, adapter->tx_ev_queue);

		EH_LOG_INFO("%s", print_buf);

		for (j = 0; j < adapter->nb_connections; j++) {
			conn = &(adapter->conn[j]);

			sprintf(print_buf,
				"\t\tEthdev ID: %-2d", conn->ethdev_id);

			if (conn->ethdev_tx_qid == -1)
				sprintf(print_buf + strlen(print_buf),
					"\tEth tx queue: %-2s", "ALL");
			else
				sprintf(print_buf + strlen(print_buf),
					"\tEth tx queue: %-2d",
					conn->ethdev_tx_qid);
			EH_LOG_INFO("%s", print_buf);
		}
	}
	EH_LOG_INFO("");
}

static void
eh_display_link_conf(struct eventmode_conf *em_conf)
{
	struct eh_event_link_info *link;
	char print_buf[256] = { 0 };
	int i;

	EH_LOG_INFO("Links configured: %d", em_conf->nb_link);

	for (i = 0; i < em_conf->nb_link; i++) {
		link = &(em_conf->link[i]);

		sprintf(print_buf,
			"\tEvent dev ID: %-2d\tEvent port: %-2d",
			link->eventdev_id,
			link->event_port_id);

		if (em_conf->ext_params.all_ev_queue_to_ev_port)
			sprintf(print_buf + strlen(print_buf),
				"Event queue: %-2s\t", "ALL");
		else
			sprintf(print_buf + strlen(print_buf),
				"Event queue: %-2d\t", link->eventq_id);

		sprintf(print_buf + strlen(print_buf),
			"Lcore: %-2d", link->lcore_id);
		EH_LOG_INFO("%s", print_buf);
	}
	EH_LOG_INFO("");
}

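/*
 * Public entry points. A minimal sketch of the expected call sequence
 * from an application (error handling omitted; ipsec-secgw drives this
 * from its main() and worker launch path):
 *
 *	struct eh_conf *conf = eh_conf_init();
 *	conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
 *	eh_devs_init(conf);
 *	... launch eh_launch_worker() on each lcore ...
 *	eh_devs_uninit(conf);
 *	eh_conf_uninit(conf);
 */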
struct eh_conf *
eh_conf_init(void)
{
	struct eventmode_conf *em_conf = NULL;
	struct eh_conf *conf = NULL;
	unsigned int eth_core_id;
	void *bitmap = NULL;
	uint32_t nb_bytes;

	/* Allocate memory for config */
	conf = calloc(1, sizeof(struct eh_conf));
	if (conf == NULL) {
		EH_LOG_ERR("Failed to allocate memory for eventmode helper "
			   "config");
		return NULL;
	}

	/* Set default conf */

	/* Packet transfer mode: poll */
	conf->mode = EH_PKT_TRANSFER_MODE_POLL;
	conf->ipsec_mode = EH_IPSEC_MODE_TYPE_APP;

	/* Keep all ethernet ports enabled by default */
	conf->eth_portmask = -1;

	/* Allocate memory for event mode params */
	conf->mode_params = calloc(1, sizeof(struct eventmode_conf));
	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Failed to allocate memory for event mode params");
		goto free_conf;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Allocate and initialize bitmap for eth cores */
	nb_bytes = rte_bitmap_get_memory_footprint(RTE_MAX_LCORE);
	if (!nb_bytes) {
		EH_LOG_ERR("Failed to get bitmap footprint");
		goto free_em_conf;
	}

	bitmap = rte_zmalloc("event-helper-ethcore-bitmap", nb_bytes,
			     RTE_CACHE_LINE_SIZE);
	if (!bitmap) {
		EH_LOG_ERR("Failed to allocate memory for eth cores bitmap");
		goto free_em_conf;
	}

	em_conf->eth_core_mask = rte_bitmap_init(RTE_MAX_LCORE, bitmap,
						 nb_bytes);
	if (!em_conf->eth_core_mask) {
		EH_LOG_ERR("Failed to initialize bitmap");
		goto free_bitmap;
	}

	/* Set schedule type as not set */
	em_conf->ext_params.sched_type = SCHED_TYPE_NOT_SET;

	/* Set two cores as eth cores for Rx & Tx */

	/* Use first core other than main core as Rx core */
	eth_core_id = rte_get_next_lcore(0,	/* curr core */
					 1,	/* skip main core */
					 0	/* wrap */);

	rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);

	/* Use next core as Tx core */
	eth_core_id = rte_get_next_lcore(eth_core_id,	/* curr core */
					 1,		/* skip main core */
					 0		/* wrap */);

	rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);

	/* Set default vector attributes */
	em_conf->ext_params.vector_size = DEFAULT_VECTOR_SIZE;
	em_conf->vector_tmo_ns = DEFAULT_VECTOR_TMO;

	return conf;

free_bitmap:
	rte_free(bitmap);
free_em_conf:
	free(em_conf);
free_conf:
	free(conf);
	return NULL;
}

void
eh_conf_uninit(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf = NULL;

	if (!conf || !conf->mode_params)
		return;

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Free eventmode configuration memory */
	rte_free(em_conf->eth_core_mask);
	free(em_conf);
	free(conf);
}

void
eh_display_conf(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return;
	}

	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return;
	}

	/* Get eventmode conf */
	em_conf = (struct eventmode_conf *)(conf->mode_params);

	/* Display user exposed operating modes */
	eh_display_operating_mode(em_conf);

	/* Display event device conf */
	eh_display_event_dev_conf(em_conf);

	/* Display Rx adapter conf */
	eh_display_rx_adapter_conf(em_conf);

	/* Display Tx adapter conf */
	eh_display_tx_adapter_conf(em_conf);

	/* Display event-lcore link */
	eh_display_link_conf(em_conf);
}

int32_t
eh_devs_init(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;
	uint16_t port_id;
	int ret;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return 0;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Eventmode conf needs the eth portmask */
	em_conf->eth_portmask = conf->eth_portmask;

	/* Validate the requested config */
	ret = eh_validate_conf(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to validate the requested config %d", ret);
		return ret;
	}

	/* Display the current configuration */
	eh_display_conf(conf);

	/* Stop eth devices before setting up adapter */
	RTE_ETH_FOREACH_DEV(port_id) {

		/* Use only the ports enabled */
		if ((conf->eth_portmask & (1 << port_id)) == 0)
			continue;

		ret = rte_eth_dev_stop(port_id);
		if (ret != 0) {
			EH_LOG_ERR("Failed to stop port %u, err: %d",
				   port_id, ret);
			return ret;
		}
	}

	/* Setup eventdev */
	ret = eh_initialize_eventdev(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize event dev %d", ret);
		return ret;
	}

	/* Setup Rx adapter */
	ret = eh_initialize_rx_adapter(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize rx adapter %d", ret);
		return ret;
	}

	/* Setup Tx adapter */
	ret = eh_initialize_tx_adapter(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize tx adapter %d", ret);
		return ret;
	}

	/* Start eventdev */
	ret = eh_start_eventdev(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to start event dev %d", ret);
		return ret;
	}

	/* Start eth devices after setting up adapter */
	RTE_ETH_FOREACH_DEV(port_id) {

		/* Use only the ports enabled */
		if ((conf->eth_portmask & (1 << port_id)) == 0)
			continue;

		ret = rte_eth_dev_start(port_id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to start eth dev %d, %d",
				   port_id, ret);
			return ret;
		}
	}

	return 0;
}

int32_t
eh_devs_uninit(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;
	int ret, i, j;
	uint16_t id;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return 0;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Stop and release rx adapters */
	for (i = 0; i < em_conf->nb_rx_adapter; i++) {

		id = em_conf->rx_adapter[i].adapter_id;
		ret = rte_event_eth_rx_adapter_stop(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to stop rx adapter %d", ret);
			return ret;
		}

		for (j = 0; j < em_conf->rx_adapter[i].nb_connections; j++) {

			ret = rte_event_eth_rx_adapter_queue_del(id,
				em_conf->rx_adapter[i].conn[j].ethdev_id, -1);
			if (ret < 0) {
				EH_LOG_ERR(
					"Failed to remove rx adapter queues %d",
					ret);
				return ret;
			}
		}

		ret = rte_event_eth_rx_adapter_free(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to free rx adapter %d", ret);
			return ret;
		}
	}

	/* Stop and release event devices */
	for (i = 0; i < em_conf->nb_eventdev; i++) {

		id = em_conf->eventdev_config[i].eventdev_id;
		rte_event_dev_stop(id);

		ret = rte_event_dev_close(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to close event dev %d, %d", id, ret);
			return ret;
		}
	}

	/* Stop and release tx adapters */
	for (i = 0; i < em_conf->nb_tx_adapter; i++) {

		id = em_conf->tx_adapter[i].adapter_id;
		ret = rte_event_eth_tx_adapter_stop(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to stop tx adapter %d", ret);
			return ret;
		}

		for (j = 0; j < em_conf->tx_adapter[i].nb_connections; j++) {

			ret = rte_event_eth_tx_adapter_queue_del(id,
				em_conf->tx_adapter[i].conn[j].ethdev_id, -1);
			if (ret < 0) {
				EH_LOG_ERR(
					"Failed to remove tx adapter queues %d",
					ret);
				return ret;
			}
		}

		ret = rte_event_eth_tx_adapter_free(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to free tx adapter %d", ret);
			return ret;
		}
	}

	return 0;
}

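/*
 * Per-lcore entry point. Lcores in the eth core mask run the adapter
 * services; all other lcores are matched against the workers registered
 * by the application and run the first worker whose capabilities match
 * the event device.
 */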
void
eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr,
		 uint8_t nb_wrkr_param)
{
	struct eh_app_worker_params *match_wrkr;
	struct eh_event_link_info *links = NULL;
	struct eventmode_conf *em_conf;
	uint32_t lcore_id;
	uint8_t nb_links;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return;
	}

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Check if this is an eth core */
	if (rte_bitmap_get(em_conf->eth_core_mask, lcore_id)) {
		eh_start_worker_eth_core(em_conf, lcore_id);
		return;
	}

	if (app_wrkr == NULL || nb_wrkr_param == 0) {
		EH_LOG_ERR("Invalid args");
		return;
	}

	/*
	 * This is a regular worker thread. The application registers
	 * multiple workers with various capabilities. Run the worker
	 * that matches the capabilities of the configured event
	 * device.
	 */

	/* Get the first matching worker for the event device */
	match_wrkr = eh_find_worker(lcore_id, conf, app_wrkr, nb_wrkr_param);
	if (match_wrkr == NULL) {
		EH_LOG_ERR("Failed to match worker registered for lcore %d",
			   lcore_id);
		goto clean_and_exit;
	}

	/* Verify sanity of the matched worker */
	if (eh_verify_match_worker(match_wrkr) != 1) {
		EH_LOG_ERR("Failed to validate the matched worker");
		goto clean_and_exit;
	}

	/* Get worker links */
	nb_links = eh_get_event_lcore_links(lcore_id, conf, &links);

	/* Launch the worker thread */
	match_wrkr->worker_thread(links, nb_links);

	/* Free links info memory */
	free(links);

clean_and_exit:

	/* Flag eth_cores to stop, if started */
	eh_stop_worker_eth_core();
}

uint8_t
eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id)
{
	struct eventdev_params *eventdev_config;
	struct eventmode_conf *em_conf;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Get event device conf */
	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);

	if (eventdev_config == NULL) {
		EH_LOG_ERR("Failed to read eventdev config");
		return -EINVAL;
	}

	/*
	 * The last queue is reserved to be used as atomic queue for the
	 * last stage (eth packet tx stage)
	 */
	return eventdev_config->nb_eventqueue - 1;
}