1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
3 */
4
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 #include <stdint.h>
11 #include <inttypes.h>
12
13 #include <rte_string_fns.h>
14 #include <rte_log.h>
15 #include <rte_dev.h>
16 #include <rte_memzone.h>
17 #include <rte_eal.h>
18 #include <rte_common.h>
19 #include <rte_malloc.h>
20 #include <rte_errno.h>
21 #include <ethdev_driver.h>
22 #include <rte_cryptodev.h>
23 #include <cryptodev_pmd.h>
24 #include <rte_telemetry.h>
25
26 #include "rte_eventdev.h"
27 #include "eventdev_pmd.h"
28 #include "eventdev_trace.h"
29
30 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
31
32 struct rte_eventdev *rte_eventdevs = rte_event_devices;
33
34 static struct rte_eventdev_global eventdev_globals = {
35 .nb_devs = 0
36 };
37
38 /* Public fastpath APIs. */
39 struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
40
41 /* Event dev north bound API implementation */
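/*
 * Typical application call order, as enforced by the dev_started checks below:
 *   rte_event_dev_configure() -> rte_event_queue_setup() ->
 *   rte_event_port_setup() -> rte_event_port_link() -> rte_event_dev_start()
 * Reconfiguration of queues, ports or the device requires the device to be
 * stopped first.
 */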
42
43 uint8_t
44 rte_event_dev_count(void)
45 {
46 return eventdev_globals.nb_devs;
47 }
48
49 int
50 rte_event_dev_get_dev_id(const char *name)
51 {
52 int i;
53 uint8_t cmp;
54
55 if (!name)
56 return -EINVAL;
57
58 for (i = 0; i < eventdev_globals.nb_devs; i++) {
59 cmp = (strncmp(rte_event_devices[i].data->name, name,
60 RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
61 (rte_event_devices[i].dev ? (strncmp(
62 rte_event_devices[i].dev->driver->name, name,
63 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
64 if (cmp && (rte_event_devices[i].attached ==
65 RTE_EVENTDEV_ATTACHED))
66 return i;
67 }
68 return -ENODEV;
69 }
70
71 int
72 rte_event_dev_socket_id(uint8_t dev_id)
73 {
74 struct rte_eventdev *dev;
75
76 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
77 dev = &rte_eventdevs[dev_id];
78
79 return dev->data->socket_id;
80 }
81
82 int
83 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
84 {
85 struct rte_eventdev *dev;
86
87 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
88 dev = &rte_eventdevs[dev_id];
89
90 if (dev_info == NULL)
91 return -EINVAL;
92
93 memset(dev_info, 0, sizeof(struct rte_event_dev_info));
94
95 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
96 (*dev->dev_ops->dev_infos_get)(dev, dev_info);
97
98 dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
99
100 dev_info->dev = dev->dev;
101 return 0;
102 }
103
104 int
105 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
106 uint32_t *caps)
107 {
108 struct rte_eventdev *dev;
109
110 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
111 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
112
113 dev = &rte_eventdevs[dev_id];
114
115 if (caps == NULL)
116 return -EINVAL;
117
118 if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
119 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
120 else
121 *caps = 0;
122
123 return dev->dev_ops->eth_rx_adapter_caps_get ?
124 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
125 &rte_eth_devices[eth_port_id],
126 caps)
127 : 0;
128 }
129
130 int
131 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
132 {
133 struct rte_eventdev *dev;
134 const struct event_timer_adapter_ops *ops;
135
136 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
137
138 dev = &rte_eventdevs[dev_id];
139
140 if (caps == NULL)
141 return -EINVAL;
142 *caps = 0;
143
144 return dev->dev_ops->timer_adapter_caps_get ?
145 (*dev->dev_ops->timer_adapter_caps_get)(dev,
146 0,
147 caps,
148 &ops)
149 : 0;
150 }
151
152 int
153 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
154 uint32_t *caps)
155 {
156 struct rte_eventdev *dev;
157 struct rte_cryptodev *cdev;
158
159 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
160 if (!rte_cryptodev_is_valid_dev(cdev_id))
161 return -EINVAL;
162
163 dev = &rte_eventdevs[dev_id];
164 cdev = rte_cryptodev_pmd_get_dev(cdev_id);
165
166 if (caps == NULL)
167 return -EINVAL;
168
169 if (dev->dev_ops->crypto_adapter_caps_get == NULL)
170 *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
171 else
172 *caps = 0;
173
174 return dev->dev_ops->crypto_adapter_caps_get ?
175 (*dev->dev_ops->crypto_adapter_caps_get)
176 (dev, cdev, caps) : 0;
177 }
178
179 int
180 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
181 uint32_t *caps)
182 {
183 struct rte_eventdev *dev;
184 struct rte_eth_dev *eth_dev;
185
186 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
187 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
188
189 dev = &rte_eventdevs[dev_id];
190 eth_dev = &rte_eth_devices[eth_port_id];
191
192 if (caps == NULL)
193 return -EINVAL;
194
195 if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
196 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
197 else
198 *caps = 0;
199
200 return dev->dev_ops->eth_tx_adapter_caps_get ?
201 (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
202 eth_dev,
203 caps)
204 : 0;
205 }
206
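/*
 * Resize the event queue array: queues at index >= nb_queues are released
 * via the PMD queue_release op, and the config of any newly added queues is
 * zeroed. Passing nb_queues == 0 releases every queue.
 */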
207 static inline int
208 event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
209 {
210 uint8_t old_nb_queues = dev->data->nb_queues;
211 struct rte_event_queue_conf *queues_cfg;
212 unsigned int i;
213
214 RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
215 dev->data->dev_id);
216
217 if (nb_queues != 0) {
218 queues_cfg = dev->data->queues_cfg;
219 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
220
221 for (i = nb_queues; i < old_nb_queues; i++)
222 (*dev->dev_ops->queue_release)(dev, i);
223
224
225 if (nb_queues > old_nb_queues) {
226 uint8_t new_qs = nb_queues - old_nb_queues;
227
228 memset(queues_cfg + old_nb_queues, 0,
229 sizeof(queues_cfg[0]) * new_qs);
230 }
231 } else {
232 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
233
234 for (i = nb_queues; i < old_nb_queues; i++)
235 (*dev->dev_ops->queue_release)(dev, i);
236 }
237
238 dev->data->nb_queues = nb_queues;
239 return 0;
240 }
241
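/* Sentinel stored in links_map[] for port/queue slots that have no link. */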
242 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
243
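/*
 * Resize the event port array: ports at index >= nb_ports are released via
 * the PMD port_release op; newly added port slots and configs are zeroed and
 * their links_map entries are marked invalid. Passing nb_ports == 0 releases
 * every port.
 */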
244 static inline int
245 event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
246 {
247 uint8_t old_nb_ports = dev->data->nb_ports;
248 void **ports;
249 uint16_t *links_map;
250 struct rte_event_port_conf *ports_cfg;
251 unsigned int i;
252
253 RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
254 dev->data->dev_id);
255
256 if (nb_ports != 0) { /* re-config */
257 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
258
259 ports = dev->data->ports;
260 ports_cfg = dev->data->ports_cfg;
261 links_map = dev->data->links_map;
262
263 for (i = nb_ports; i < old_nb_ports; i++)
264 (*dev->dev_ops->port_release)(ports[i]);
265
266 if (nb_ports > old_nb_ports) {
267 uint8_t new_ps = nb_ports - old_nb_ports;
268 unsigned int old_links_map_end =
269 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
270 unsigned int links_map_end =
271 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
272
273 memset(ports + old_nb_ports, 0,
274 sizeof(ports[0]) * new_ps);
275 memset(ports_cfg + old_nb_ports, 0,
276 sizeof(ports_cfg[0]) * new_ps);
277 for (i = old_links_map_end; i < links_map_end; i++)
278 links_map[i] =
279 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
280 }
281 } else {
282 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
283
284 ports = dev->data->ports;
285 for (i = nb_ports; i < old_nb_ports; i++) {
286 (*dev->dev_ops->port_release)(ports[i]);
287 ports[i] = NULL;
288 }
289 }
290
291 dev->data->nb_ports = nb_ports;
292 return 0;
293 }
294
295 int
296 rte_event_dev_configure(uint8_t dev_id,
297 const struct rte_event_dev_config *dev_conf)
298 {
299 struct rte_event_dev_info info;
300 struct rte_eventdev *dev;
301 int diag;
302
303 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
304 dev = &rte_eventdevs[dev_id];
305
306 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
307 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
308
309 if (dev->data->dev_started) {
310 RTE_EDEV_LOG_ERR(
311 "device %d must be stopped to allow configuration", dev_id);
312 return -EBUSY;
313 }
314
315 if (dev_conf == NULL)
316 return -EINVAL;
317
318 (*dev->dev_ops->dev_infos_get)(dev, &info);
319
320 /* Check dequeue_timeout_ns value is in limit */
321 if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
322 if (dev_conf->dequeue_timeout_ns &&
323 (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
324 || dev_conf->dequeue_timeout_ns >
325 info.max_dequeue_timeout_ns)) {
326 RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
327 " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
328 dev_id, dev_conf->dequeue_timeout_ns,
329 info.min_dequeue_timeout_ns,
330 info.max_dequeue_timeout_ns);
331 return -EINVAL;
332 }
333 }
334
335 /* Check nb_events_limit is in limit */
336 if (dev_conf->nb_events_limit > info.max_num_events) {
337 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
338 dev_id, dev_conf->nb_events_limit, info.max_num_events);
339 return -EINVAL;
340 }
341
342 /* Check nb_event_queues is in limit */
343 if (!dev_conf->nb_event_queues) {
344 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
345 dev_id);
346 return -EINVAL;
347 }
348 if (dev_conf->nb_event_queues > info.max_event_queues +
349 info.max_single_link_event_port_queue_pairs) {
350 RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
351 dev_id, dev_conf->nb_event_queues,
352 info.max_event_queues,
353 info.max_single_link_event_port_queue_pairs);
354 return -EINVAL;
355 }
356 if (dev_conf->nb_event_queues -
357 dev_conf->nb_single_link_event_port_queues >
358 info.max_event_queues) {
359 RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
360 dev_id, dev_conf->nb_event_queues,
361 dev_conf->nb_single_link_event_port_queues,
362 info.max_event_queues);
363 return -EINVAL;
364 }
365 if (dev_conf->nb_single_link_event_port_queues >
366 dev_conf->nb_event_queues) {
367 RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
368 dev_id,
369 dev_conf->nb_single_link_event_port_queues,
370 dev_conf->nb_event_queues);
371 return -EINVAL;
372 }
373
374 /* Check nb_event_ports is in limit */
375 if (!dev_conf->nb_event_ports) {
376 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
377 return -EINVAL;
378 }
379 if (dev_conf->nb_event_ports > info.max_event_ports +
380 info.max_single_link_event_port_queue_pairs) {
381 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
382 dev_id, dev_conf->nb_event_ports,
383 info.max_event_ports,
384 info.max_single_link_event_port_queue_pairs);
385 return -EINVAL;
386 }
387 if (dev_conf->nb_event_ports -
388 dev_conf->nb_single_link_event_port_queues
389 > info.max_event_ports) {
390 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
391 dev_id, dev_conf->nb_event_ports,
392 dev_conf->nb_single_link_event_port_queues,
393 info.max_event_ports);
394 return -EINVAL;
395 }
396
397 if (dev_conf->nb_single_link_event_port_queues >
398 dev_conf->nb_event_ports) {
399 RTE_EDEV_LOG_ERR(
400 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
401 dev_id,
402 dev_conf->nb_single_link_event_port_queues,
403 dev_conf->nb_event_ports);
404 return -EINVAL;
405 }
406
407 /* Check nb_event_queue_flows is in limit */
408 if (!dev_conf->nb_event_queue_flows) {
409 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
410 return -EINVAL;
411 }
412 if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
413 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
414 dev_id, dev_conf->nb_event_queue_flows,
415 info.max_event_queue_flows);
416 return -EINVAL;
417 }
418
419 /* Check nb_event_port_dequeue_depth is in limit */
420 if (!dev_conf->nb_event_port_dequeue_depth) {
421 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
422 dev_id);
423 return -EINVAL;
424 }
425 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
426 (dev_conf->nb_event_port_dequeue_depth >
427 info.max_event_port_dequeue_depth)) {
428 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
429 dev_id, dev_conf->nb_event_port_dequeue_depth,
430 info.max_event_port_dequeue_depth);
431 return -EINVAL;
432 }
433
434 /* Check nb_event_port_enqueue_depth is in limit */
435 if (!dev_conf->nb_event_port_enqueue_depth) {
436 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
437 dev_id);
438 return -EINVAL;
439 }
440 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
441 (dev_conf->nb_event_port_enqueue_depth >
442 info.max_event_port_enqueue_depth)) {
443 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
444 dev_id, dev_conf->nb_event_port_enqueue_depth,
445 info.max_event_port_enqueue_depth);
446 return -EINVAL;
447 }
448
449 /* Copy the dev_conf parameter into the dev structure */
450 memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
451
452 /* Setup new number of queues and reconfigure device. */
453 diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
454 if (diag != 0) {
455 RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
456 diag);
457 return diag;
458 }
459
460 /* Setup new number of ports and reconfigure device. */
461 diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
462 if (diag != 0) {
463 event_dev_queue_config(dev, 0);
464 RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
465 diag);
466 return diag;
467 }
468
469 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
470
471 /* Configure the device */
472 diag = (*dev->dev_ops->dev_configure)(dev);
473 if (diag != 0) {
474 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
475 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
476 event_dev_queue_config(dev, 0);
477 event_dev_port_config(dev, 0);
478 }
479
480 dev->data->event_dev_cap = info.event_dev_cap;
481 rte_eventdev_trace_configure(dev_id, dev_conf, diag);
482 return diag;
483 }
484
485 static inline int
486 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
487 {
488 if (queue_id < dev->data->nb_queues && queue_id <
489 RTE_EVENT_MAX_QUEUES_PER_DEV)
490 return 1;
491 else
492 return 0;
493 }
494
495 int
496 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
497 struct rte_event_queue_conf *queue_conf)
498 {
499 struct rte_eventdev *dev;
500
501 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
502 dev = &rte_eventdevs[dev_id];
503
504 if (queue_conf == NULL)
505 return -EINVAL;
506
507 if (!is_valid_queue(dev, queue_id)) {
508 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
509 return -EINVAL;
510 }
511
512 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
513 memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
514 (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
515 return 0;
516 }
517
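/*
 * Return 1 when nb_atomic_flows applies to this queue config: the queue is
 * not single-link and can schedule atomic events (ALL_TYPES or
 * RTE_SCHED_TYPE_ATOMIC).
 */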
518 static inline int
519 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
520 {
521 if (queue_conf &&
522 !(queue_conf->event_queue_cfg &
523 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
524 ((queue_conf->event_queue_cfg &
525 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
526 (queue_conf->schedule_type
527 == RTE_SCHED_TYPE_ATOMIC)
528 ))
529 return 1;
530 else
531 return 0;
532 }
533
534 static inline int
535 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
536 {
537 if (queue_conf &&
538 !(queue_conf->event_queue_cfg &
539 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
540 ((queue_conf->event_queue_cfg &
541 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
542 (queue_conf->schedule_type
543 == RTE_SCHED_TYPE_ORDERED)
544 ))
545 return 1;
546 else
547 return 0;
548 }
549
550
551 int
552 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
553 const struct rte_event_queue_conf *queue_conf)
554 {
555 struct rte_eventdev *dev;
556 struct rte_event_queue_conf def_conf;
557
558 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
559 dev = &rte_eventdevs[dev_id];
560
561 if (!is_valid_queue(dev, queue_id)) {
562 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
563 return -EINVAL;
564 }
565
566 /* Check nb_atomic_flows limit */
567 if (is_valid_atomic_queue_conf(queue_conf)) {
568 if (queue_conf->nb_atomic_flows == 0 ||
569 queue_conf->nb_atomic_flows >
570 dev->data->dev_conf.nb_event_queue_flows) {
571 RTE_EDEV_LOG_ERR(
572 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
573 dev_id, queue_id, queue_conf->nb_atomic_flows,
574 dev->data->dev_conf.nb_event_queue_flows);
575 return -EINVAL;
576 }
577 }
578
579 /* Check nb_atomic_order_sequences limit */
580 if (is_valid_ordered_queue_conf(queue_conf)) {
581 if (queue_conf->nb_atomic_order_sequences == 0 ||
582 queue_conf->nb_atomic_order_sequences >
583 dev->data->dev_conf.nb_event_queue_flows) {
584 RTE_EDEV_LOG_ERR(
585 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
586 dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
587 dev->data->dev_conf.nb_event_queue_flows);
588 return -EINVAL;
589 }
590 }
591
592 if (dev->data->dev_started) {
593 RTE_EDEV_LOG_ERR(
594 "device %d must be stopped to allow queue setup", dev_id);
595 return -EBUSY;
596 }
597
598 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
599
600 if (queue_conf == NULL) {
601 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
602 -ENOTSUP);
603 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
604 queue_conf = &def_conf;
605 }
606
607 dev->data->queues_cfg[queue_id] = *queue_conf;
608 rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
609 return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
610 }
611
612 static inline int
613 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
614 {
615 if (port_id < dev->data->nb_ports)
616 return 1;
617 else
618 return 0;
619 }
620
621 int
622 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
623 struct rte_event_port_conf *port_conf)
624 {
625 struct rte_eventdev *dev;
626
627 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
628 dev = &rte_eventdevs[dev_id];
629
630 if (port_conf == NULL)
631 return -EINVAL;
632
633 if (!is_valid_port(dev, port_id)) {
634 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
635 return -EINVAL;
636 }
637
638 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
639 memset(port_conf, 0, sizeof(struct rte_event_port_conf));
640 (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
641 return 0;
642 }
643
644 int
645 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
646 const struct rte_event_port_conf *port_conf)
647 {
648 struct rte_eventdev *dev;
649 struct rte_event_port_conf def_conf;
650 int diag;
651
652 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
653 dev = &rte_eventdevs[dev_id];
654
655 if (!is_valid_port(dev, port_id)) {
656 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
657 return -EINVAL;
658 }
659
660 /* Check new_event_threshold limit */
661 if ((port_conf && !port_conf->new_event_threshold) ||
662 (port_conf && port_conf->new_event_threshold >
663 dev->data->dev_conf.nb_events_limit)) {
664 RTE_EDEV_LOG_ERR(
665 "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
666 dev_id, port_id, port_conf->new_event_threshold,
667 dev->data->dev_conf.nb_events_limit);
668 return -EINVAL;
669 }
670
671 /* Check dequeue_depth limit */
672 if ((port_conf && !port_conf->dequeue_depth) ||
673 (port_conf && port_conf->dequeue_depth >
674 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
675 RTE_EDEV_LOG_ERR(
676 "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
677 dev_id, port_id, port_conf->dequeue_depth,
678 dev->data->dev_conf.nb_event_port_dequeue_depth);
679 return -EINVAL;
680 }
681
682 /* Check enqueue_depth limit */
683 if ((port_conf && !port_conf->enqueue_depth) ||
684 (port_conf && port_conf->enqueue_depth >
685 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
686 RTE_EDEV_LOG_ERR(
687 "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
688 dev_id, port_id, port_conf->enqueue_depth,
689 dev->data->dev_conf.nb_event_port_enqueue_depth);
690 return -EINVAL;
691 }
692
693 if (port_conf &&
694 (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
695 !(dev->data->event_dev_cap &
696 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
697 RTE_EDEV_LOG_ERR(
698 "dev%d port%d Implicit release disable not supported",
699 dev_id, port_id);
700 return -EINVAL;
701 }
702
703 if (dev->data->dev_started) {
704 RTE_EDEV_LOG_ERR(
705 "device %d must be stopped to allow port setup", dev_id);
706 return -EBUSY;
707 }
708
709 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
710
711 if (port_conf == NULL) {
712 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
713 -ENOTSUP);
714 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
715 port_conf = &def_conf;
716 }
717
718 dev->data->ports_cfg[port_id] = *port_conf;
719
720 diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
721
722 /* Unlink all the queues from this port (default state after setup) */
723 if (!diag)
724 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
725
726 rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
727 if (diag < 0)
728 return diag;
729
730 return 0;
731 }
732
733 void
734 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
735 rte_eventdev_port_flush_t release_cb, void *args)
736 {
737 struct rte_eventdev *dev;
738
739 RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
740 dev = &rte_eventdevs[dev_id];
741
742 if (!is_valid_port(dev, port_id)) {
743 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
744 return;
745 }
746
747 if (dev->dev_ops->port_quiesce)
748 (*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
749 release_cb, args);
750 }
751
752 int
753 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
754 uint32_t *attr_value)
755 {
756 struct rte_eventdev *dev;
757
758 if (!attr_value)
759 return -EINVAL;
760 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
761 dev = &rte_eventdevs[dev_id];
762
763 switch (attr_id) {
764 case RTE_EVENT_DEV_ATTR_PORT_COUNT:
765 *attr_value = dev->data->nb_ports;
766 break;
767 case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
768 *attr_value = dev->data->nb_queues;
769 break;
770 case RTE_EVENT_DEV_ATTR_STARTED:
771 *attr_value = dev->data->dev_started;
772 break;
773 default:
774 return -EINVAL;
775 }
776
777 return 0;
778 }
779
780 int
781 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
782 uint32_t *attr_value)
783 {
784 struct rte_eventdev *dev;
785
786 if (!attr_value)
787 return -EINVAL;
788
789 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
790 dev = &rte_eventdevs[dev_id];
791 if (!is_valid_port(dev, port_id)) {
792 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
793 return -EINVAL;
794 }
795
796 switch (attr_id) {
797 case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
798 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
799 break;
800 case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
801 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
802 break;
803 case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
804 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
805 break;
806 case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
807 {
808 uint32_t config;
809
810 config = dev->data->ports_cfg[port_id].event_port_cfg;
811 *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
812 break;
813 }
814 default:
815 return -EINVAL;
816 }
817 return 0;
818 }
819
820 int
821 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
822 uint32_t *attr_value)
823 {
824 struct rte_event_queue_conf *conf;
825 struct rte_eventdev *dev;
826
827 if (!attr_value)
828 return -EINVAL;
829
830 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
831 dev = &rte_eventdevs[dev_id];
832 if (!is_valid_queue(dev, queue_id)) {
833 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
834 return -EINVAL;
835 }
836
837 conf = &dev->data->queues_cfg[queue_id];
838
839 switch (attr_id) {
840 case RTE_EVENT_QUEUE_ATTR_PRIORITY:
841 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
842 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
843 *attr_value = conf->priority;
844 break;
845 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
846 *attr_value = conf->nb_atomic_flows;
847 break;
848 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
849 *attr_value = conf->nb_atomic_order_sequences;
850 break;
851 case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
852 *attr_value = conf->event_queue_cfg;
853 break;
854 case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
855 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
856 return -EOVERFLOW;
857
858 *attr_value = conf->schedule_type;
859 break;
860 case RTE_EVENT_QUEUE_ATTR_WEIGHT:
861 *attr_value = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
862 if (dev->dev_ops->queue_attr_get)
863 return (*dev->dev_ops->queue_attr_get)(
864 dev, queue_id, attr_id, attr_value);
865 break;
866 case RTE_EVENT_QUEUE_ATTR_AFFINITY:
867 *attr_value = RTE_EVENT_QUEUE_AFFINITY_LOWEST;
868 if (dev->dev_ops->queue_attr_get)
869 return (*dev->dev_ops->queue_attr_get)(
870 dev, queue_id, attr_id, attr_value);
871 break;
872 default:
873 return -EINVAL;
874 }
875 return 0;
876 }
877
878 int
879 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
880 uint64_t attr_value)
881 {
882 struct rte_eventdev *dev;
883
884 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
885 dev = &rte_eventdevs[dev_id];
886 if (!is_valid_queue(dev, queue_id)) {
887 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
888 return -EINVAL;
889 }
890
891 if (!(dev->data->event_dev_cap &
892 RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)) {
893 RTE_EDEV_LOG_ERR(
894 "Device %" PRIu8 "does not support changing queue attributes at runtime",
895 dev_id);
896 return -ENOTSUP;
897 }
898
899 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_attr_set, -ENOTSUP);
900 return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id,
901 attr_value);
902 }
903
904 int
905 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
906 const uint8_t queues[], const uint8_t priorities[],
907 uint16_t nb_links)
908 {
909 struct rte_eventdev *dev;
910 uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
911 uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
912 uint16_t *links_map;
913 int i, diag;
914
915 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
916 dev = &rte_eventdevs[dev_id];
917
918 if (*dev->dev_ops->port_link == NULL) {
919 RTE_EDEV_LOG_ERR("Function not supported\n");
920 rte_errno = ENOTSUP;
921 return 0;
922 }
923
924 if (!is_valid_port(dev, port_id)) {
925 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
926 rte_errno = EINVAL;
927 return 0;
928 }
929
930 if (queues == NULL) {
931 for (i = 0; i < dev->data->nb_queues; i++)
932 queues_list[i] = i;
933
934 queues = queues_list;
935 nb_links = dev->data->nb_queues;
936 }
937
938 if (priorities == NULL) {
939 for (i = 0; i < nb_links; i++)
940 priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
941
942 priorities = priorities_list;
943 }
944
945 for (i = 0; i < nb_links; i++)
946 if (queues[i] >= dev->data->nb_queues) {
947 rte_errno = EINVAL;
948 return 0;
949 }
950
951 diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
952 queues, priorities, nb_links);
953 if (diag < 0)
954 return diag;
955
956 links_map = dev->data->links_map;
957 /* Point links_map at this port's region of the links table */
958 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
959 for (i = 0; i < diag; i++)
960 links_map[queues[i]] = (uint8_t)priorities[i];
961
962 rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
963 return diag;
964 }
965
966 int
967 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
968 uint8_t queues[], uint16_t nb_unlinks)
969 {
970 struct rte_eventdev *dev;
971 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
972 int i, diag, j;
973 uint16_t *links_map;
974
975 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
976 dev = &rte_eventdevs[dev_id];
977
978 if (*dev->dev_ops->port_unlink == NULL) {
979 RTE_EDEV_LOG_ERR("Function not supported");
980 rte_errno = ENOTSUP;
981 return 0;
982 }
983
984 if (!is_valid_port(dev, port_id)) {
985 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
986 rte_errno = EINVAL;
987 return 0;
988 }
989
990 links_map = dev->data->links_map;
991 /* Point links_map at this port's region of the links table */
992 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
993
994 if (queues == NULL) {
995 j = 0;
996 for (i = 0; i < dev->data->nb_queues; i++) {
997 if (links_map[i] !=
998 EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
999 all_queues[j] = i;
1000 j++;
1001 }
1002 }
1003 queues = all_queues;
1004 } else {
1005 for (j = 0; j < nb_unlinks; j++) {
1006 if (links_map[queues[j]] ==
1007 EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
1008 break;
1009 }
1010 }
1011
1012 nb_unlinks = j;
1013 for (i = 0; i < nb_unlinks; i++)
1014 if (queues[i] >= dev->data->nb_queues) {
1015 rte_errno = EINVAL;
1016 return 0;
1017 }
1018
1019 diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
1020 queues, nb_unlinks);
1021
1022 if (diag < 0)
1023 return diag;
1024
1025 for (i = 0; i < diag; i++)
1026 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1027
1028 rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
1029 return diag;
1030 }
1031
1032 int
1033 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
1034 {
1035 struct rte_eventdev *dev;
1036
1037 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1038 dev = &rte_eventdevs[dev_id];
1039 if (!is_valid_port(dev, port_id)) {
1040 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1041 return -EINVAL;
1042 }
1043
1044 /* Return 0 if the PMD does not implement unlinks in progress.
1045 * This allows PMDs which handle unlink synchronously to not implement
1046 * this function at all.
1047 */
1048 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
1049
1050 return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1051 dev->data->ports[port_id]);
1052 }
1053
1054 int
1055 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1056 uint8_t queues[], uint8_t priorities[])
1057 {
1058 struct rte_eventdev *dev;
1059 uint16_t *links_map;
1060 int i, count = 0;
1061
1062 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1063 dev = &rte_eventdevs[dev_id];
1064 if (!is_valid_port(dev, port_id)) {
1065 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1066 return -EINVAL;
1067 }
1068
1069 links_map = dev->data->links_map;
1070 /* Point links_map at this port's region of the links table */
1071 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1072 for (i = 0; i < dev->data->nb_queues; i++) {
1073 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1074 queues[count] = i;
1075 priorities[count] = (uint8_t)links_map[i];
1076 ++count;
1077 }
1078 }
1079 return count;
1080 }
1081
1082 int
1083 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1084 uint64_t *timeout_ticks)
1085 {
1086 struct rte_eventdev *dev;
1087
1088 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1089 dev = &rte_eventdevs[dev_id];
1090 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1091
1092 if (timeout_ticks == NULL)
1093 return -EINVAL;
1094
1095 return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1096 }
1097
1098 int
1099 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1100 {
1101 struct rte_eventdev *dev;
1102
1103 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1104 dev = &rte_eventdevs[dev_id];
1105
1106 if (service_id == NULL)
1107 return -EINVAL;
1108
1109 if (dev->data->service_inited)
1110 *service_id = dev->data->service_id;
1111
1112 return dev->data->service_inited ? 0 : -ESRCH;
1113 }
1114
1115 int
1116 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1117 {
1118 struct rte_eventdev *dev;
1119
1120 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1121 dev = &rte_eventdevs[dev_id];
1122 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1123 if (f == NULL)
1124 return -EINVAL;
1125
1126 (*dev->dev_ops->dump)(dev, f);
1127 return 0;
1128
1129 }
1130
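/* Calling the driver's xstats_get_names op with no output buffers returns
 * only the number of xstats available for the given mode and queue/port id.
 */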
1131 static int
1132 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1133 uint8_t queue_port_id)
1134 {
1135 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1136 if (dev->dev_ops->xstats_get_names != NULL)
1137 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1138 queue_port_id,
1139 NULL, NULL, 0);
1140 return 0;
1141 }
1142
1143 int
1144 rte_event_dev_xstats_names_get(uint8_t dev_id,
1145 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1146 struct rte_event_dev_xstats_name *xstats_names,
1147 unsigned int *ids, unsigned int size)
1148 {
1149 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1150 const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1151 queue_port_id);
1152 if (xstats_names == NULL || cnt_expected_entries < 0 ||
1153 (int)size < cnt_expected_entries)
1154 return cnt_expected_entries;
1155
1156 /* dev_id checked above */
1157 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1158
1159 if (dev->dev_ops->xstats_get_names != NULL)
1160 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1161 queue_port_id, xstats_names, ids, size);
1162
1163 return -ENOTSUP;
1164 }
1165
1166 /* retrieve eventdev extended statistics */
1167 int
1168 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1169 uint8_t queue_port_id, const unsigned int ids[],
1170 uint64_t values[], unsigned int n)
1171 {
1172 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1173 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1174
1175 /* implemented by the driver */
1176 if (dev->dev_ops->xstats_get != NULL)
1177 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1178 ids, values, n);
1179 return -ENOTSUP;
1180 }
1181
1182 uint64_t
1183 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1184 unsigned int *id)
1185 {
1186 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1187 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1188 unsigned int temp = -1;
1189
1190 if (id != NULL)
1191 *id = (unsigned int)-1;
1192 else
1193 id = &temp; /* ensure driver never gets a NULL value */
1194
1195 /* implemented by driver */
1196 if (dev->dev_ops->xstats_get_by_name != NULL)
1197 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1198 return -ENOTSUP;
1199 }
1200
1201 int rte_event_dev_xstats_reset(uint8_t dev_id,
1202 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1203 const uint32_t ids[], uint32_t nb_ids)
1204 {
1205 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1206 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1207
1208 if (dev->dev_ops->xstats_reset != NULL)
1209 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1210 ids, nb_ids);
1211 return -ENOTSUP;
1212 }
1213
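/* Offset of the mbuf dynamic field used by PMD self-tests to carry a
 * sequence number; stays -1 until registered in rte_event_dev_selftest().
 */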
1214 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1215
1216 int rte_event_dev_selftest(uint8_t dev_id)
1217 {
1218 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1219 static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1220 .name = "rte_event_pmd_selftest_seqn_dynfield",
1221 .size = sizeof(rte_event_pmd_selftest_seqn_t),
1222 .align = __alignof__(rte_event_pmd_selftest_seqn_t),
1223 };
1224 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1225
1226 if (dev->dev_ops->dev_selftest != NULL) {
1227 rte_event_pmd_selftest_seqn_dynfield_offset =
1228 rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1229 if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1230 return -ENOMEM;
1231 return (*dev->dev_ops->dev_selftest)();
1232 }
1233 return -ENOTSUP;
1234 }
1235
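/*
 * Each event vector mempool element is laid out as a struct rte_event_vector
 * header followed by storage for nb_elem pointer-sized entries.
 */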
1236 struct rte_mempool *
1237 rte_event_vector_pool_create(const char *name, unsigned int n,
1238 unsigned int cache_size, uint16_t nb_elem,
1239 int socket_id)
1240 {
1241 const char *mp_ops_name;
1242 struct rte_mempool *mp;
1243 unsigned int elt_sz;
1244 int ret;
1245
1246 if (!nb_elem) {
1247 RTE_LOG(ERR, EVENTDEV,
1248 "Invalid number of elements=%d requested\n", nb_elem);
1249 rte_errno = EINVAL;
1250 return NULL;
1251 }
1252
1253 elt_sz =
1254 sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1255 mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1256 0);
1257 if (mp == NULL)
1258 return NULL;
1259
1260 mp_ops_name = rte_mbuf_best_mempool_ops();
1261 ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1262 if (ret != 0) {
1263 RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
1264 goto err;
1265 }
1266
1267 ret = rte_mempool_populate_default(mp);
1268 if (ret < 0)
1269 goto err;
1270
1271 return mp;
1272 err:
1273 rte_mempool_free(mp);
1274 rte_errno = -ret;
1275 return NULL;
1276 }
1277
1278 int
1279 rte_event_dev_start(uint8_t dev_id)
1280 {
1281 struct rte_eventdev *dev;
1282 int diag;
1283
1284 RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1285
1286 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1287 dev = &rte_eventdevs[dev_id];
1288 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1289
1290 if (dev->data->dev_started != 0) {
1291 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already started",
1292 dev_id);
1293 return 0;
1294 }
1295
1296 diag = (*dev->dev_ops->dev_start)(dev);
1297 rte_eventdev_trace_start(dev_id, diag);
1298 if (diag == 0)
1299 dev->data->dev_started = 1;
1300 else
1301 return diag;
1302
1303 event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
1304
1305 return 0;
1306 }
1307
1308 int
1309 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1310 eventdev_stop_flush_t callback, void *userdata)
1311 {
1312 struct rte_eventdev *dev;
1313
1314 RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1315
1316 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1317 dev = &rte_eventdevs[dev_id];
1318
1319 dev->dev_ops->dev_stop_flush = callback;
1320 dev->data->dev_stop_flush_arg = userdata;
1321
1322 return 0;
1323 }
1324
1325 void
1326 rte_event_dev_stop(uint8_t dev_id)
1327 {
1328 struct rte_eventdev *dev;
1329
1330 RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1331
1332 RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1333 dev = &rte_eventdevs[dev_id];
1334 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1335
1336 if (dev->data->dev_started == 0) {
1337 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already stopped",
1338 dev_id);
1339 return;
1340 }
1341
1342 dev->data->dev_started = 0;
1343 (*dev->dev_ops->dev_stop)(dev);
1344 rte_eventdev_trace_stop(dev_id);
1345 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1346 }
1347
1348 int
1349 rte_event_dev_close(uint8_t dev_id)
1350 {
1351 struct rte_eventdev *dev;
1352
1353 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1354 dev = &rte_eventdevs[dev_id];
1355 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1356
1357 /* Device must be stopped before it can be closed */
1358 if (dev->data->dev_started == 1) {
1359 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1360 dev_id);
1361 return -EBUSY;
1362 }
1363
1364 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1365 rte_eventdev_trace_close(dev_id);
1366 return (*dev->dev_ops->dev_close)(dev);
1367 }
1368
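/*
 * Device data lives in a memzone named "rte_eventdev_data_<dev_id>" so that
 * secondary processes can attach to it: the primary process reserves and
 * zeroes the zone, secondaries only look it up.
 */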
1369 static inline int
1370 eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1371 int socket_id)
1372 {
1373 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1374 const struct rte_memzone *mz;
1375 int n;
1376
1377 /* Generate memzone name */
1378 n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1379 if (n >= (int)sizeof(mz_name))
1380 return -EINVAL;
1381
1382 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1383 mz = rte_memzone_reserve(mz_name,
1384 sizeof(struct rte_eventdev_data),
1385 socket_id, 0);
1386 } else
1387 mz = rte_memzone_lookup(mz_name);
1388
1389 if (mz == NULL)
1390 return -ENOMEM;
1391
1392 *data = mz->addr;
1393 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1394 memset(*data, 0, sizeof(struct rte_eventdev_data));
1395 for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
1396 RTE_EVENT_MAX_QUEUES_PER_DEV;
1397 n++)
1398 (*data)->links_map[n] =
1399 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1400 }
1401
1402 return 0;
1403 }
1404
1405 static inline uint8_t
1406 eventdev_find_free_device_index(void)
1407 {
1408 uint8_t dev_id;
1409
1410 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1411 if (rte_eventdevs[dev_id].attached ==
1412 RTE_EVENTDEV_DETACHED)
1413 return dev_id;
1414 }
1415 return RTE_EVENT_MAX_DEVS;
1416 }
1417
1418 struct rte_eventdev *
1419 rte_event_pmd_allocate(const char *name, int socket_id)
1420 {
1421 struct rte_eventdev *eventdev;
1422 uint8_t dev_id;
1423
1424 if (rte_event_pmd_get_named_dev(name) != NULL) {
1425 RTE_EDEV_LOG_ERR("Event device with name %s already "
1426 "allocated!", name);
1427 return NULL;
1428 }
1429
1430 dev_id = eventdev_find_free_device_index();
1431 if (dev_id == RTE_EVENT_MAX_DEVS) {
1432 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1433 return NULL;
1434 }
1435
1436 eventdev = &rte_eventdevs[dev_id];
1437
1438 if (eventdev->data == NULL) {
1439 struct rte_eventdev_data *eventdev_data = NULL;
1440
1441 int retval =
1442 eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
1443
1444 if (retval < 0 || eventdev_data == NULL)
1445 return NULL;
1446
1447 eventdev->data = eventdev_data;
1448
1449 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1450
1451 strlcpy(eventdev->data->name, name,
1452 RTE_EVENTDEV_NAME_MAX_LEN);
1453
1454 eventdev->data->dev_id = dev_id;
1455 eventdev->data->socket_id = socket_id;
1456 eventdev->data->dev_started = 0;
1457 }
1458
1459 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1460 eventdev_globals.nb_devs++;
1461 }
1462
1463 return eventdev;
1464 }
1465
1466 int
1467 rte_event_pmd_release(struct rte_eventdev *eventdev)
1468 {
1469 int ret;
1470 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1471 const struct rte_memzone *mz;
1472
1473 if (eventdev == NULL)
1474 return -EINVAL;
1475
1476 event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
1477 eventdev->attached = RTE_EVENTDEV_DETACHED;
1478 eventdev_globals.nb_devs--;
1479
1480 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1481 rte_free(eventdev->data->dev_private);
1482
1483 /* Generate memzone name */
1484 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1485 eventdev->data->dev_id);
1486 if (ret >= (int)sizeof(mz_name))
1487 return -EINVAL;
1488
1489 mz = rte_memzone_lookup(mz_name);
1490 if (mz == NULL)
1491 return -ENOMEM;
1492
1493 ret = rte_memzone_free(mz);
1494 if (ret)
1495 return ret;
1496 }
1497
1498 eventdev->data = NULL;
1499 return 0;
1500 }
1501
1502 void
1503 event_dev_probing_finish(struct rte_eventdev *eventdev)
1504 {
1505 if (eventdev == NULL)
1506 return;
1507
1508 event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
1509 eventdev);
1510 }
1511
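/* Telemetry callbacks for the /eventdev/* commands registered below in
 * eventdev_init_telemetry().
 */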
1512 static int
1513 handle_dev_list(const char *cmd __rte_unused,
1514 const char *params __rte_unused,
1515 struct rte_tel_data *d)
1516 {
1517 uint8_t dev_id;
1518 int ndev = rte_event_dev_count();
1519
1520 if (ndev < 1)
1521 return -1;
1522
1523 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1524 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1525 if (rte_eventdevs[dev_id].attached ==
1526 RTE_EVENTDEV_ATTACHED)
1527 rte_tel_data_add_array_int(d, dev_id);
1528 }
1529
1530 return 0;
1531 }
1532
1533 static int
1534 handle_port_list(const char *cmd __rte_unused,
1535 const char *params,
1536 struct rte_tel_data *d)
1537 {
1538 int i;
1539 uint8_t dev_id;
1540 struct rte_eventdev *dev;
1541 char *end_param;
1542
1543 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1544 return -1;
1545
1546 dev_id = strtoul(params, &end_param, 10);
1547 if (*end_param != '\0')
1548 RTE_EDEV_LOG_DEBUG(
1549 "Extra parameters passed to eventdev telemetry command, ignoring");
1550
1551 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1552 dev = &rte_eventdevs[dev_id];
1553
1554 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1555 for (i = 0; i < dev->data->nb_ports; i++)
1556 rte_tel_data_add_array_int(d, i);
1557
1558 return 0;
1559 }
1560
1561 static int
1562 handle_queue_list(const char *cmd __rte_unused,
1563 const char *params,
1564 struct rte_tel_data *d)
1565 {
1566 int i;
1567 uint8_t dev_id;
1568 struct rte_eventdev *dev;
1569 char *end_param;
1570
1571 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1572 return -1;
1573
1574 dev_id = strtoul(params, &end_param, 10);
1575 if (*end_param != '\0')
1576 RTE_EDEV_LOG_DEBUG(
1577 "Extra parameters passed to eventdev telemetry command, ignoring");
1578
1579 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1580 dev = &rte_eventdevs[dev_id];
1581
1582 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1583 for (i = 0; i < dev->data->nb_queues; i++)
1584 rte_tel_data_add_array_int(d, i);
1585
1586 return 0;
1587 }
1588
1589 static int
1590 handle_queue_links(const char *cmd __rte_unused,
1591 const char *params,
1592 struct rte_tel_data *d)
1593 {
1594 int i, ret, port_id = 0;
1595 char *end_param;
1596 uint8_t dev_id;
1597 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1598 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1599 const char *p_param;
1600
1601 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1602 return -1;
1603
1604 /* Get dev ID from parameter string */
1605 dev_id = strtoul(params, &end_param, 10);
1606 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1607
1608 p_param = strtok(end_param, ",");
1609 if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1610 return -1;
1611
1612 port_id = strtoul(p_param, &end_param, 10);
1613 p_param = strtok(NULL, "\0");
1614 if (p_param != NULL)
1615 RTE_EDEV_LOG_DEBUG(
1616 "Extra parameters passed to eventdev telemetry command, ignoring");
1617
1618 ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1619 if (ret < 0)
1620 return -1;
1621
1622 rte_tel_data_start_dict(d);
1623 for (i = 0; i < ret; i++) {
1624 char qid_name[32];
1625
1626 snprintf(qid_name, 31, "qid_%u", queues[i]);
1627 rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
1628 }
1629
1630 return 0;
1631 }
1632
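/*
 * Helper for the xstats telemetry handlers: query the number of xstats, fetch
 * their names, ids and values from the driver, then add them to the telemetry
 * dictionary as name -> value pairs.
 */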
1633 static int
1634 eventdev_build_telemetry_data(int dev_id,
1635 enum rte_event_dev_xstats_mode mode,
1636 int port_queue_id,
1637 struct rte_tel_data *d)
1638 {
1639 struct rte_event_dev_xstats_name *xstat_names;
1640 unsigned int *ids;
1641 uint64_t *values;
1642 int i, ret, num_xstats;
1643
1644 num_xstats = rte_event_dev_xstats_names_get(dev_id,
1645 mode,
1646 port_queue_id,
1647 NULL,
1648 NULL,
1649 0);
1650
1651 if (num_xstats < 0)
1652 return -1;
1653
1654 /* use one malloc for names */
1655 xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1656 * num_xstats);
1657 if (xstat_names == NULL)
1658 return -1;
1659
1660 ids = malloc((sizeof(unsigned int)) * num_xstats);
1661 if (ids == NULL) {
1662 free(xstat_names);
1663 return -1;
1664 }
1665
1666 values = malloc((sizeof(uint64_t)) * num_xstats);
1667 if (values == NULL) {
1668 free(xstat_names);
1669 free(ids);
1670 return -1;
1671 }
1672
1673 ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1674 xstat_names, ids, num_xstats);
1675 if (ret < 0 || ret > num_xstats) {
1676 free(xstat_names);
1677 free(ids);
1678 free(values);
1679 return -1;
1680 }
1681
1682 ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1683 ids, values, num_xstats);
1684 if (ret < 0 || ret > num_xstats) {
1685 free(xstat_names);
1686 free(ids);
1687 free(values);
1688 return -1;
1689 }
1690
1691 rte_tel_data_start_dict(d);
1692 for (i = 0; i < num_xstats; i++)
1693 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
1694 values[i]);
1695
1696 free(xstat_names);
1697 free(ids);
1698 free(values);
1699 return 0;
1700 }
1701
1702 static int
1703 handle_dev_xstats(const char *cmd __rte_unused,
1704 const char *params,
1705 struct rte_tel_data *d)
1706 {
1707 int dev_id;
1708 enum rte_event_dev_xstats_mode mode;
1709 char *end_param;
1710
1711 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1712 return -1;
1713
1714 /* Get dev ID from parameter string */
1715 dev_id = strtoul(params, &end_param, 10);
1716 if (*end_param != '\0')
1717 RTE_EDEV_LOG_DEBUG(
1718 "Extra parameters passed to eventdev telemetry command, ignoring");
1719
1720 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1721
1722 mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1723 return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1724 }
1725
1726 static int
1727 handle_port_xstats(const char *cmd __rte_unused,
1728 const char *params,
1729 struct rte_tel_data *d)
1730 {
1731 int dev_id;
1732 int port_queue_id = 0;
1733 enum rte_event_dev_xstats_mode mode;
1734 char *end_param;
1735 const char *p_param;
1736
1737 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1738 return -1;
1739
1740 /* Get dev ID from parameter string */
1741 dev_id = strtoul(params, &end_param, 10);
1742 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1743
1744 p_param = strtok(end_param, ",");
1745 mode = RTE_EVENT_DEV_XSTATS_PORT;
1746
1747 if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1748 return -1;
1749
1750 port_queue_id = strtoul(p_param, &end_param, 10);
1751
1752 p_param = strtok(NULL, "\0");
1753 if (p_param != NULL)
1754 RTE_EDEV_LOG_DEBUG(
1755 "Extra parameters passed to eventdev telemetry command, ignoring");
1756
1757 return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1758 }
1759
1760 static int
1761 handle_queue_xstats(const char *cmd __rte_unused,
1762 const char *params,
1763 struct rte_tel_data *d)
1764 {
1765 int dev_id;
1766 int port_queue_id = 0;
1767 enum rte_event_dev_xstats_mode mode;
1768 char *end_param;
1769 const char *p_param;
1770
1771 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1772 return -1;
1773
1774 /* Get dev ID from parameter string */
1775 dev_id = strtoul(params, &end_param, 10);
1776 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1777
1778 p_param = strtok(end_param, ",");
1779 mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1780
1781 if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1782 return -1;
1783
1784 port_queue_id = strtoul(p_param, &end_param, 10);
1785
1786 p_param = strtok(NULL, "\0");
1787 if (p_param != NULL)
1788 RTE_EDEV_LOG_DEBUG(
1789 "Extra parameters passed to eventdev telemetry command, ignoring");
1790
1791 return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1792 }
1793
1794 RTE_INIT(eventdev_init_telemetry)
1795 {
1796 rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
1797 "Returns list of available eventdevs. Takes no parameters");
1798 rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
1799 "Returns list of available ports. Parameter: DevID");
1800 rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
1801 "Returns list of available queues. Parameter: DevID");
1802
1803 rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
1804 "Returns stats for an eventdev. Parameter: DevID");
1805 rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
1806 "Returns stats for an eventdev port. Params: DevID,PortID");
1807 rte_telemetry_register_cmd("/eventdev/queue_xstats",
1808 handle_queue_xstats,
1809 "Returns stats for an eventdev queue. Params: DevID,QueueID");
1810 rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
1811 "Returns links for an eventdev port. Params: DevID,QueueID");
1812 }
1813