/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_eventdev_trace.h"

static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = rte_event_devices;

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return eventdev_globals.nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;
	uint8_t cmp;

	if (!name)
		return -EINVAL;

	for (i = 0; i < eventdev_globals.nb_devs; i++) {
		cmp = (strncmp(rte_event_devices[i].data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
			(rte_event_devices[i].dev ? (strncmp(
				rte_event_devices[i].dev->driver->name, name,
					 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
		if (cmp && (rte_event_devices[i].attached ==
					RTE_EVENTDEV_ATTACHED))
			return i;
	}
	return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}
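
/*
 * Illustrative usage sketch (not part of this library): typical
 * application-side device discovery built on the calls above. The vdev name
 * "event_sw0" is only an example and error handling is omitted.
 *
 *	struct rte_event_dev_info info;
 *	int id = rte_event_dev_get_dev_id("event_sw0");
 *
 *	if (id >= 0 && rte_event_dev_info_get(id, &info) == 0)
 *		printf("eventdev %d: up to %u queues, socket %d\n", id,
 *		       info.max_event_queues, rte_event_dev_socket_id(id));
 */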

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
				: 0;
}

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
	struct rte_eventdev *dev;
	const struct rte_event_timer_adapter_ops *ops;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->timer_adapter_caps_get ?
				(*dev->dev_ops->timer_adapter_caps_get)(dev,
									0,
									caps,
									&ops)
				: 0;
}

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_cryptodev *cdev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];
	cdev = rte_cryptodev_pmd_get_dev(cdev_id);

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->crypto_adapter_caps_get ?
		(*dev->dev_ops->crypto_adapter_caps_get)
		(dev, cdev, caps) : -ENOTSUP;
}

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_eth_dev *eth_dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];
	eth_dev = &rte_eth_devices[eth_port_id];

	if (caps == NULL)
		return -EINVAL;

	*caps = 0;

	return dev->dev_ops->eth_tx_adapter_caps_get ?
			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
								eth_dev,
								caps)
			: 0;
}
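
/*
 * Illustrative sketch (application side): the capability queries above are
 * normally consulted before creating an adapter, e.g. to learn whether an
 * ethdev Rx port can inject events directly or must be driven by a service
 * core. Ethernet port 0 is only an example.
 *
 *	uint32_t caps = 0;
 *	int needs_service_core = 0;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, 0, &caps) == 0)
 *		needs_service_core =
 *			!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
 */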

static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
					"nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re allocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
						" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_cfg = queues_cfg;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store port configurations */
		dev->data->ports_cfg =
			rte_zmalloc_socket("eventdev->ports_cfg",
			sizeof(dev->data->ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_cfg == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}
		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
			dev->data->links_map[i] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_cfg */
		ports_cfg = rte_realloc(ports_cfg,
			sizeof(ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}

		dev->data->ports = ports;
		dev->data->ports_cfg = ports_cfg;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				 info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_queues,
				 info.max_event_queues,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues -
			dev_conf->nb_single_link_event_port_queues >
			info.max_event_queues) {
		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
				 dev_id, dev_conf->nb_event_queues,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_queues);
		return -EINVAL;
	}
	if (dev_conf->nb_single_link_event_port_queues >
			dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_ports,
				 info.max_event_ports,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports -
			dev_conf->nb_single_link_event_port_queues
			> info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
				 dev_id, dev_conf->nb_event_ports,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_ports);
		return -EINVAL;
	}

	if (dev_conf->nb_single_link_event_port_queues >
	    dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR(
				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		 (dev_conf->nb_event_port_dequeue_depth >
			 info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			 info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
	return diag;
}
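
/*
 * Illustrative sketch (application side): a minimal configuration derived
 * from the limits validated above; the queue/port counts are examples only
 * and error handling is omitted.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 2;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_panic("eventdev %d configure failed\n", dev_id);
 */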

static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}


int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
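
/*
 * Illustrative sketch (application side): queues are usually set up from the
 * driver defaults, overriding only what the application cares about. The
 * atomic schedule type is just an example; "q" stands for the queue id.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, q, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	if (rte_event_queue_setup(dev_id, q, &qconf) < 0)
 *		rte_panic("queue setup failed\n");
 */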

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				 dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (port_conf &&
	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port (default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
	if (diag < 0)
		return diag;

	return 0;
}
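
/*
 * Illustrative sketch (application side): ports follow the same pattern as
 * queues; passing NULL instead of &pconf selects the driver defaults, as the
 * code above shows. "p" stands for the port id.
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, p, &pconf);
 *	if (rte_event_port_setup(dev_id, p, &pconf) < 0)
 *		rte_panic("port setup failed\n");
 */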

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
	{
		uint32_t config;

		config = dev->data->ports_cfg[port_id].event_port_cfg;
		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
		break;
	}
	default:
		return -EINVAL;
	};
	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	default:
		return -EINVAL;
	};
	return 0;
}

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_link == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
	return diag;
}
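
/*
 * Illustrative sketch (application side): passing NULL for both arrays links
 * every configured queue to the port at normal priority, mirroring the
 * defaults filled in above. "p" stands for the port id and
 * "nb_event_queues_configured" is a placeholder for the queue count used at
 * rte_event_dev_configure() time.
 *
 *	int n = rte_event_port_link(dev_id, p, NULL, NULL, 0);
 *
 *	if (n != nb_event_queues_configured)
 *		rte_panic("port link failed: %s\n", rte_strerror(rte_errno));
 */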

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag, j;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	nb_unlinks = j;
	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
	return diag;
}

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Return 0 if the PMD does not implement unlinks in progress.
	 * This allows PMDs which handle unlink synchronously to not implement
	 * this function at all.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);

	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
			dev->data->ports[port_id]);
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}

int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
	if (f == NULL)
		return -EINVAL;

	(*dev->dev_ops->dump)(dev, f);
	return 0;

}

static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}
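
/*
 * Illustrative sketch (application side): the usual two-step pattern for the
 * xstats calls above; a first call with NULL arrays returns the number of
 * statistics, which then sizes the real query. Error handling is omitted.
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	struct rte_event_dev_xstats_name *names = calloc(n, sizeof(*names));
 *	unsigned int *ids = calloc(n, sizeof(*ids));
 *	uint64_t *vals = calloc(n, sizeof(*vals));
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *			0, names, ids, n);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *			ids, vals, n);
 */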

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}

int rte_event_pmd_selftest_seqn_dynfield_offset = -1;

int rte_event_dev_selftest(uint8_t dev_id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
		.name = "rte_event_pmd_selftest_seqn_dynfield",
		.size = sizeof(rte_event_pmd_selftest_seqn_t),
		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
	};
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->dev_selftest != NULL) {
		rte_event_pmd_selftest_seqn_dynfield_offset =
			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
			return -ENOMEM;
		return (*dev->dev_ops->dev_selftest)();
	}
	return -ENOTSUP;
}

int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_eventdev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		eventdev_stop_flush_t callback, void *userdata)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	dev->dev_ops->dev_stop_flush = callback;
	dev->data->dev_stop_flush_arg = userdata;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
	rte_eventdev_trace_stop(dev_id);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	rte_eventdev_trace_close(dev_id);
	return (*dev->dev_ops->dev_close)(dev);
}
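
/*
 * Illustrative sketch (application side): device lifecycle around the calls
 * above. Registering a stop-flush callback is optional; it lets the
 * application reclaim events still held inside the scheduler when the
 * device is stopped. "flush_cb" is a placeholder for an application-defined
 * eventdev_stop_flush_t function.
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
 *	rte_event_dev_start(dev_id);
 *	...	enqueue/dequeue from worker lcores	...
 *	rte_event_dev_stop(dev_id);
 *	rte_event_dev_close(dev_id);
 */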

static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}

static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

static uint16_t
rte_event_tx_adapter_enqueue(__rte_unused void *port,
			__rte_unused struct rte_event ev[],
			__rte_unused uint16_t nb_events)
{
	rte_errno = ENOTSUP;
	return 0;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
	eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {

			strlcpy(eventdev->data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN);

			eventdev->data->dev_id = dev_id;
			eventdev->data->socket_id = socket_id;
			eventdev->data->dev_started = 0;
		}

		eventdev->attached = RTE_EVENTDEV_ATTACHED;
		eventdev_globals.nb_devs++;
	}

	return eventdev;
}
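
/*
 * Note for PMD authors: a driver's probe path is expected to obtain its
 * device slot via rte_event_pmd_allocate() (typically through the
 * bus-specific PMD helpers), fill in dev_ops and the fast-path function
 * pointers, and release the slot with rte_event_pmd_release() on removal.
 */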

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}


static int
handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	uint8_t dev_id;
	int ndev = rte_event_dev_count();

	if (ndev < 1)
		return -1;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_ATTACHED)
			rte_tel_data_add_array_int(d, dev_id);
	}

	return 0;
}

static int
handle_port_list(const char *cmd __rte_unused,
		 const char *params,
		 struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_ports; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_list(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_queues; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_links(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int i, ret, port_id = 0;
	char *end_param;
	uint8_t dev_id;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_id = strtoul(p_param, &end_param, 10);
	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	for (i = 0; i < ret; i++) {
		char qid_name[32];

		snprintf(qid_name, 31, "qid_%u", queues[i]);
		rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
	}

	return 0;
}

static int
eventdev_build_telemetry_data(int dev_id,
			      enum rte_event_dev_xstats_mode mode,
			      int port_queue_id,
			      struct rte_tel_data *d)
{
	struct rte_event_dev_xstats_name *xstat_names;
	unsigned int *ids;
	uint64_t *values;
	int i, ret, num_xstats;

	num_xstats = rte_event_dev_xstats_names_get(dev_id,
						    mode,
						    port_queue_id,
						    NULL,
						    NULL,
						    0);

	if (num_xstats < 0)
		return -1;

	/* use one malloc for names */
	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
			     * num_xstats);
	if (xstat_names == NULL)
		return -1;

	ids = malloc((sizeof(unsigned int)) * num_xstats);
	if (ids == NULL) {
		free(xstat_names);
		return -1;
	}

	values = malloc((sizeof(uint64_t)) * num_xstats);
	if (values == NULL) {
		free(xstat_names);
		free(ids);
		return -1;
	}

	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
					     xstat_names, ids, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
				       ids, values, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
					  values[i]);

	free(xstat_names);
	free(ids);
	free(values);
	return 0;
}

static int
handle_dev_xstats(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int dev_id;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
}

static int
handle_port_xstats(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_PORT;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

static int
handle_queue_xstats(const char *cmd __rte_unused,
		    const char *params,
		    struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_QUEUE;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

RTE_INIT(eventdev_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
			"Returns list of available eventdevs. Takes no parameters");
	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
			"Returns list of available ports. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
			"Returns list of available queues. Parameter: DevID");

	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
			"Returns stats for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
			"Returns stats for an eventdev port. Params: DevID,PortID");
	rte_telemetry_register_cmd("/eventdev/queue_xstats",
			handle_queue_xstats,
			"Returns stats for an eventdev queue. Params: DevID,QueueID");
	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
			"Returns links for an eventdev port. Params: DevID,PortID");
}
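
/*
 * Illustrative telemetry usage: with the telemetry socket enabled, the
 * endpoints registered above can be queried from a client such as
 * usertools/dpdk-telemetry.py, for example:
 *
 *	--> /eventdev/dev_list
 *	--> /eventdev/port_xstats,0,1	(device 0, port 1)
 *	--> /eventdev/queue_links,0,0	(device 0, port 0)
 */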