/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"

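/*
 * Generic worker loop: dequeue one event at a time, run the stage work on
 * it, and forward it to the next queue in the pipeline.
 */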
static __rte_always_inline int
worker_generic(void *arg)
{
	struct rte_event ev;

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				&ev, 1, 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received++;

		/* The first worker stage does classification */
		if (ev.queue_id == cdata.qid[0])
			ev.flow_id = ev.mbuf->hash.rss
					% cdata.num_fids;

		ev.queue_id = cdata.next_qid[ev.queue_id];
		ev.op = RTE_EVENT_OP_FORWARD;
		ev.sched_type = cdata.queue_type;

		work();

		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
			rte_pause();
		sent++;
	}

	if (!cdata.quiet)
		printf(" worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}

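/*
 * Burst worker: same stage processing as worker_generic(), but dequeues and
 * enqueues up to BATCH_SIZE events per iteration to amortize the per-call
 * overhead of the eventdev API.
 */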
static int
worker_generic_burst(void *arg)
{
	struct rte_event events[BATCH_SIZE];

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {
		uint16_t i;

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				events, RTE_DIM(events), 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {

			/* The first worker stage does classification */
			if (events[i].queue_id == cdata.qid[0])
				events[i].flow_id = events[i].mbuf->hash.rss
						% cdata.num_fids;

			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
			events[i].sched_type = cdata.queue_type;

			work();
		}
		uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
				events, nb_rx);
		while (nb_tx < nb_rx && !fdata->done)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
							events + nb_tx,
							nb_rx - nb_tx);
		sent += nb_tx;
	}

	if (!cdata.quiet)
		printf(" worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}

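/*
 * Configure event device 0 for the generic pipeline: one load-balanced queue
 * per worker stage plus a SINGLE_LINK queue feeding the TX adapter, and one
 * event port per worker linked to every stage queue.
 */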
static int
setup_eventdev_generic(struct worker_data *worker_data)
{
	const uint8_t dev_id = 0;
	/* +1 stages is for a SINGLE_LINK TX stage */
	const uint8_t nb_queues = cdata.num_stages + 1;
	const uint8_t nb_ports = cdata.num_workers;
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_single_link_event_port_queues = 1,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};
	struct rte_event_queue_conf tx_q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};

	struct port_link worker_queues[MAX_NUM_STAGES];
	uint8_t disable_implicit_release;
	unsigned int i;

	int ret, ndev = rte_event_dev_count();
	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}

	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	disable_implicit_release = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);

	wkr_p_conf.event_port_cfg = disable_implicit_release ?
		RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;

	if (dev_info.max_num_events < config.nb_events_limit)
		config.nb_events_limit = dev_info.max_num_events;
	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}

	/* Q creation - one load balanced per pipeline stage */
	printf(" Stages:\n");
	for (i = 0; i < cdata.num_stages; i++) {
		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.qid[i] = i;
		cdata.next_qid[i] = i+1;
		worker_queues[i].queue_id = i;
		if (cdata.enable_queue_priorities) {
			/* calculate priority stepping for each stage, leaving
			 * headroom of 1 for the SINGLE_LINK TX below
			 */
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
		}

		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}
	printf("\n");

	/* final queue for sending to TX core */
	if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
		printf("%d: error creating qid %d\n", __LINE__, i);
		return -1;
	}
	cdata.tx_queue_id = i;

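	/* Clamp the worker port config to the limits the device reports. */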
	if (wkr_p_conf.new_event_threshold > config.nb_events_limit)
		wkr_p_conf.new_event_threshold = config.nb_events_limit;
	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];
		w->dev_id = dev_id;
		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		uint32_t s;
		for (s = 0; s < cdata.num_stages; s++) {
			if (rte_event_port_link(dev_id, i,
					&worker_queues[s].queue_id,
					&worker_queues[s].priority,
					1) != 1) {
				printf("%d: error creating link for port %d\n",
						__LINE__, i);
				return -1;
			}
		}
		w->port_id = i;
	}

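	/*
	 * Software event devices expose scheduling as a service; fetch its
	 * service ID so the scheduler core can run it. -ESRCH means the
	 * device needs no service core.
	 */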
	ret = rte_event_dev_service_id_get(dev_id,
			&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID for sw eventdev\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);

	return dev_id;
}

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_rxconf rx_conf;
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP |
					  ETH_RSS_TCP |
					  ETH_RSS_UDP,
			}
		}
	};
	const uint16_t rx_rings = 1, tx_rings = 1;
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	struct rte_eth_conf port_conf = port_conf_default;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));
		return retval;
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	rx_conf = dev_info.default_rxconf;
	rx_conf.offloads = port_conf.rxmode.offloads;

	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf_default.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support, "
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				port,
				port_conf_default.rx_adv_conf.rss_conf.rss_hf,
				port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), &rx_conf,
				mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf_default.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	/* Display the port MAC address. */
	struct rte_ether_addr addr;
	retval = rte_eth_macaddr_get(port, &addr);
	if (retval != 0) {
		printf("Failed to get MAC address (port %u): %s\n",
				port, rte_strerror(-retval));
		return retval;
	}

	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	retval = rte_eth_promiscuous_enable(port);
	if (retval != 0)
		return retval;

	return 0;
}

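/*
 * Create one shared mbuf pool and initialize every available ethdev port
 * with it via port_init().
 */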
static int
init_ports(uint16_t num_ports)
{
	uint16_t portid;

	if (!cdata.num_mbuf)
		cdata.num_mbuf = 16384 * num_ports;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
			/* mbufs */ cdata.num_mbuf,
			/* cache_size */ 512,
			/* priv_size */ 0,
			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mp) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
					portid);

	return 0;
}

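/*
 * Set up the ethdev ports and the event Rx/Tx adapters: every ethdev Rx
 * queue feeds the first stage queue, and the Tx adapter drains the final
 * SINGLE_LINK queue back out to the NICs.
 */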
static void
init_adapters(uint16_t nb_ports)
{
	int i;
	int ret;
	uint8_t tx_port_id = 0;
	uint8_t evdev_id = 0;
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(evdev_id, &dev_info);

	struct rte_event_port_conf adptr_p_conf = {
		.dequeue_depth = cdata.worker_cq_depth,
		.enqueue_depth = 64,
		.new_event_threshold = 4096,
	};

	if (adptr_p_conf.new_event_threshold > dev_info.max_num_events)
		adptr_p_conf.new_event_threshold = dev_info.max_num_events;
	if (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
		adptr_p_conf.dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
		adptr_p_conf.enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	init_ports(nb_ports);
	/* Create one adapter for all the ethernet ports. */
	ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create tx adapter[%d]",
				cdata.tx_adapter_id);

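	/*
	 * Inject received packets into the first stage queue; queue id -1
	 * below adds every Rx/Tx queue of each ethdev port to the adapters.
	 */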
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
	memset(&queue_conf, 0, sizeof(queue_conf));
	queue_conf.ev.sched_type = cdata.queue_type;
	queue_conf.ev.queue_id = cdata.qid[0];

	for (i = 0; i < nb_ports; i++) {
		ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
				-1, &queue_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Rx adapter");

		ret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,
				-1);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Tx adapter");
	}

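	/*
	 * Link the Tx adapter's event port to the SINGLE_LINK Tx queue so
	 * events forwarded by the last worker stage reach the adapter.
	 */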
	ret = rte_event_eth_tx_adapter_event_port_get(cdata.tx_adapter_id,
			&tx_port_id);
	if (ret)
		rte_exit(EXIT_FAILURE,
				"Failed to get Tx adapter port id");
	ret = rte_event_port_link(evdev_id, tx_port_id, &cdata.tx_queue_id,
			NULL, 1);
	if (ret != 1)
		rte_exit(EXIT_FAILURE,
				"Unable to link Tx adapter port to Tx queue");

	ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
			&fdata->rxadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Rx adapter\n");
	}
	rte_service_runstate_set(fdata->rxadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);

	ret = rte_event_eth_tx_adapter_service_id_get(cdata.tx_adapter_id,
			&fdata->txadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Tx adapter\n");
	}
	rte_service_runstate_set(fdata->txadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->txadptr_service_id, 0);

	ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
				cdata.tx_adapter_id);

	if (rte_event_dev_start(evdev_id) < 0)
		rte_exit(EXIT_FAILURE, "Error starting eventdev");
}

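/*
 * Sanity-check the core masks against the event device and Rx adapter
 * capabilities: dedicated Rx and scheduler cores are only required when the
 * device cannot do that work internally.
 */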
static void
generic_opt_check(void)
{
	int i;
	int ret;
	uint32_t cap = 0;
	uint8_t rx_needed = 0;
	uint8_t sched_needed = 0;
	struct rte_event_dev_info eventdev_info;

	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
	rte_event_dev_info_get(0, &eventdev_info);

	if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
		rte_exit(EXIT_FAILURE,
				"Event dev doesn't support all type queues\n");
	sched_needed = !(eventdev_info.event_dev_cap &
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"failed to get event rx adapter capabilities");
		rx_needed |=
			!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (cdata.worker_lcore_mask == 0 ||
			(rx_needed && cdata.rx_lcore_mask == 0) ||
			(cdata.tx_lcore_mask == 0) ||
			(sched_needed && cdata.sched_lcore_mask == 0)) {
		printf("Core part of pipeline was not assigned any cores. "
			"This will stall the pipeline, please check core masks "
			"(use -h for details on setting core masks):\n"
			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n",
			cdata.rx_lcore_mask, cdata.tx_lcore_mask,
			cdata.sched_lcore_mask,
			cdata.worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}

	if (!sched_needed)
		memset(fdata->sched_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
	if (!rx_needed)
		memset(fdata->rx_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
}

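/*
 * Plug the generic worker, eventdev setup, adapter setup and option-check
 * callbacks into the application's setup_data, selecting the burst or
 * single-event worker loop as requested.
 */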
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
	if (burst) {
		caps->worker = worker_generic_burst;
	} else {
		caps->worker = worker_generic;
	}

	caps->adptr_setup = init_adapters;
	caps->scheduler = schedule_devices;
	caps->evdev_setup = setup_eventdev_generic;
	caps->check_opt = generic_opt_check;
}