xref: /dpdk/lib/eventdev/rte_eventdev.c (revision 7da008df)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4 
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 #include <stdint.h>
11 #include <inttypes.h>
12 
13 #include <rte_string_fns.h>
14 #include <rte_log.h>
15 #include <rte_dev.h>
16 #include <rte_memzone.h>
17 #include <rte_eal.h>
18 #include <rte_common.h>
19 #include <rte_malloc.h>
20 #include <rte_errno.h>
21 #include <ethdev_driver.h>
22 #include <rte_cryptodev.h>
23 #include <cryptodev_pmd.h>
24 #include <rte_telemetry.h>
25 
26 #include "rte_eventdev.h"
27 #include "eventdev_pmd.h"
28 #include "eventdev_trace.h"
29 
30 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
31 
32 struct rte_eventdev *rte_eventdevs = rte_event_devices;
33 
34 static struct rte_eventdev_global eventdev_globals = {
35 	.nb_devs		= 0
36 };
37 
38 /* Public fastpath APIs. */
39 struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
40 
41 /* Event dev northbound API implementation */
42 
43 uint8_t
44 rte_event_dev_count(void)
45 {
46 	return eventdev_globals.nb_devs;
47 }
48 
49 int
50 rte_event_dev_get_dev_id(const char *name)
51 {
52 	int i;
53 	uint8_t cmp;
54 
55 	if (!name)
56 		return -EINVAL;
57 
58 	for (i = 0; i < eventdev_globals.nb_devs; i++) {
59 		cmp = (strncmp(rte_event_devices[i].data->name, name,
60 				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
61 			(rte_event_devices[i].dev ? (strncmp(
62 				rte_event_devices[i].dev->driver->name, name,
63 					 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
64 		if (cmp && (rte_event_devices[i].attached ==
65 					RTE_EVENTDEV_ATTACHED))
66 			return i;
67 	}
68 	return -ENODEV;
69 }
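
/*
 * Usage sketch (editorial addition, not part of the upstream file): looking
 * up a device ID by name before calling the rest of the API. The name
 * "event_sw0" is only an example of a vdev an application might create.
 *
 *	int dev_id = rte_event_dev_get_dev_id("event_sw0");
 *
 *	if (dev_id < 0)
 *		rte_exit(EXIT_FAILURE, "eventdev event_sw0 not found\n");
 */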
70 
71 int
72 rte_event_dev_socket_id(uint8_t dev_id)
73 {
74 	struct rte_eventdev *dev;
75 
76 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
77 	dev = &rte_eventdevs[dev_id];
78 
79 	return dev->data->socket_id;
80 }
81 
82 int
83 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
84 {
85 	struct rte_eventdev *dev;
86 
87 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
88 	dev = &rte_eventdevs[dev_id];
89 
90 	if (dev_info == NULL)
91 		return -EINVAL;
92 
93 	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
94 
95 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
96 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
97 
98 	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
99 
100 	dev_info->dev = dev->dev;
101 	return 0;
102 }
103 
104 int
105 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
106 				uint32_t *caps)
107 {
108 	struct rte_eventdev *dev;
109 
110 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
111 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
112 
113 	dev = &rte_eventdevs[dev_id];
114 
115 	if (caps == NULL)
116 		return -EINVAL;
117 
118 	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
119 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
120 	else
121 		*caps = 0;
122 
123 	return dev->dev_ops->eth_rx_adapter_caps_get ?
124 				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
125 						&rte_eth_devices[eth_port_id],
126 						caps)
127 				: 0;
128 }
129 
130 int
131 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
132 {
133 	struct rte_eventdev *dev;
134 	const struct event_timer_adapter_ops *ops;
135 
136 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
137 
138 	dev = &rte_eventdevs[dev_id];
139 
140 	if (caps == NULL)
141 		return -EINVAL;
142 	*caps = 0;
143 
144 	return dev->dev_ops->timer_adapter_caps_get ?
145 				(*dev->dev_ops->timer_adapter_caps_get)(dev,
146 									0,
147 									caps,
148 									&ops)
149 				: 0;
150 }
151 
152 int
153 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
154 				  uint32_t *caps)
155 {
156 	struct rte_eventdev *dev;
157 	struct rte_cryptodev *cdev;
158 
159 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
160 	if (!rte_cryptodev_is_valid_dev(cdev_id))
161 		return -EINVAL;
162 
163 	dev = &rte_eventdevs[dev_id];
164 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
165 
166 	if (caps == NULL)
167 		return -EINVAL;
168 
169 	if (dev->dev_ops->crypto_adapter_caps_get == NULL)
170 		*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
171 	else
172 		*caps = 0;
173 
174 	return dev->dev_ops->crypto_adapter_caps_get ?
175 		(*dev->dev_ops->crypto_adapter_caps_get)
176 		(dev, cdev, caps) : 0;
177 }
178 
179 int
180 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
181 				uint32_t *caps)
182 {
183 	struct rte_eventdev *dev;
184 	struct rte_eth_dev *eth_dev;
185 
186 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
187 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
188 
189 	dev = &rte_eventdevs[dev_id];
190 	eth_dev = &rte_eth_devices[eth_port_id];
191 
192 	if (caps == NULL)
193 		return -EINVAL;
194 
195 	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
196 		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
197 	else
198 		*caps = 0;
199 
200 	return dev->dev_ops->eth_tx_adapter_caps_get ?
201 			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
202 								eth_dev,
203 								caps)
204 			: 0;
205 }
206 
207 static inline int
208 event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
209 {
210 	uint8_t old_nb_queues = dev->data->nb_queues;
211 	struct rte_event_queue_conf *queues_cfg;
212 	unsigned int i;
213 
214 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
215 			 dev->data->dev_id);
216 
217 	if (nb_queues != 0) {
218 		queues_cfg = dev->data->queues_cfg;
219 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
220 
221 		for (i = nb_queues; i < old_nb_queues; i++)
222 			(*dev->dev_ops->queue_release)(dev, i);
223 
224 
225 		if (nb_queues > old_nb_queues) {
226 			uint8_t new_qs = nb_queues - old_nb_queues;
227 
228 			memset(queues_cfg + old_nb_queues, 0,
229 				sizeof(queues_cfg[0]) * new_qs);
230 		}
231 	} else {
232 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
233 
234 		for (i = nb_queues; i < old_nb_queues; i++)
235 			(*dev->dev_ops->queue_release)(dev, i);
236 	}
237 
238 	dev->data->nb_queues = nb_queues;
239 	return 0;
240 }
241 
242 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
243 
244 static inline int
245 event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
246 {
247 	uint8_t old_nb_ports = dev->data->nb_ports;
248 	void **ports;
249 	uint16_t *links_map;
250 	struct rte_event_port_conf *ports_cfg;
251 	unsigned int i;
252 
253 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
254 			 dev->data->dev_id);
255 
256 	if (nb_ports != 0) { /* re-config */
257 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
258 
259 		ports = dev->data->ports;
260 		ports_cfg = dev->data->ports_cfg;
261 		links_map = dev->data->links_map;
262 
263 		for (i = nb_ports; i < old_nb_ports; i++)
264 			(*dev->dev_ops->port_release)(ports[i]);
265 
266 		if (nb_ports > old_nb_ports) {
267 			uint8_t new_ps = nb_ports - old_nb_ports;
268 			unsigned int old_links_map_end =
269 				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
270 			unsigned int links_map_end =
271 				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
272 
273 			memset(ports + old_nb_ports, 0,
274 				sizeof(ports[0]) * new_ps);
275 			memset(ports_cfg + old_nb_ports, 0,
276 				sizeof(ports_cfg[0]) * new_ps);
277 			for (i = old_links_map_end; i < links_map_end; i++)
278 				links_map[i] =
279 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
280 		}
281 	} else {
282 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
283 
284 		ports = dev->data->ports;
285 		for (i = nb_ports; i < old_nb_ports; i++) {
286 			(*dev->dev_ops->port_release)(ports[i]);
287 			ports[i] = NULL;
288 		}
289 	}
290 
291 	dev->data->nb_ports = nb_ports;
292 	return 0;
293 }
294 
295 int
296 rte_event_dev_configure(uint8_t dev_id,
297 			const struct rte_event_dev_config *dev_conf)
298 {
299 	struct rte_event_dev_info info;
300 	struct rte_eventdev *dev;
301 	int diag;
302 
303 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
304 	dev = &rte_eventdevs[dev_id];
305 
306 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
307 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
308 
309 	if (dev->data->dev_started) {
310 		RTE_EDEV_LOG_ERR(
311 		    "device %d must be stopped to allow configuration", dev_id);
312 		return -EBUSY;
313 	}
314 
315 	if (dev_conf == NULL)
316 		return -EINVAL;
317 
318 	(*dev->dev_ops->dev_infos_get)(dev, &info);
319 
320 	/* Check that dequeue_timeout_ns is within the supported range */
321 	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
322 		if (dev_conf->dequeue_timeout_ns &&
323 		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
324 			|| dev_conf->dequeue_timeout_ns >
325 				 info.max_dequeue_timeout_ns)) {
326 			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
327 			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
328 			dev_id, dev_conf->dequeue_timeout_ns,
329 			info.min_dequeue_timeout_ns,
330 			info.max_dequeue_timeout_ns);
331 			return -EINVAL;
332 		}
333 	}
334 
335 	/* Check that nb_events_limit is within the supported range */
336 	if (dev_conf->nb_events_limit > info.max_num_events) {
337 		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
338 		dev_id, dev_conf->nb_events_limit, info.max_num_events);
339 		return -EINVAL;
340 	}
341 
342 	/* Check that nb_event_queues is within the supported range */
343 	if (!dev_conf->nb_event_queues) {
344 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
345 					dev_id);
346 		return -EINVAL;
347 	}
348 	if (dev_conf->nb_event_queues > info.max_event_queues +
349 			info.max_single_link_event_port_queue_pairs) {
350 		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
351 				 dev_id, dev_conf->nb_event_queues,
352 				 info.max_event_queues,
353 				 info.max_single_link_event_port_queue_pairs);
354 		return -EINVAL;
355 	}
356 	if (dev_conf->nb_event_queues -
357 			dev_conf->nb_single_link_event_port_queues >
358 			info.max_event_queues) {
359 		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
360 				 dev_id, dev_conf->nb_event_queues,
361 				 dev_conf->nb_single_link_event_port_queues,
362 				 info.max_event_queues);
363 		return -EINVAL;
364 	}
365 	if (dev_conf->nb_single_link_event_port_queues >
366 			dev_conf->nb_event_queues) {
367 		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
368 				 dev_id,
369 				 dev_conf->nb_single_link_event_port_queues,
370 				 dev_conf->nb_event_queues);
371 		return -EINVAL;
372 	}
373 
374 	/* Check that nb_event_ports is within the supported range */
375 	if (!dev_conf->nb_event_ports) {
376 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
377 		return -EINVAL;
378 	}
379 	if (dev_conf->nb_event_ports > info.max_event_ports +
380 			info.max_single_link_event_port_queue_pairs) {
381 		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
382 				 dev_id, dev_conf->nb_event_ports,
383 				 info.max_event_ports,
384 				 info.max_single_link_event_port_queue_pairs);
385 		return -EINVAL;
386 	}
387 	if (dev_conf->nb_event_ports -
388 			dev_conf->nb_single_link_event_port_queues
389 			> info.max_event_ports) {
390 		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
391 				 dev_id, dev_conf->nb_event_ports,
392 				 dev_conf->nb_single_link_event_port_queues,
393 				 info.max_event_ports);
394 		return -EINVAL;
395 	}
396 
397 	if (dev_conf->nb_single_link_event_port_queues >
398 	    dev_conf->nb_event_ports) {
399 		RTE_EDEV_LOG_ERR(
400 				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
401 				 dev_id,
402 				 dev_conf->nb_single_link_event_port_queues,
403 				 dev_conf->nb_event_ports);
404 		return -EINVAL;
405 	}
406 
407 	/* Check that nb_event_queue_flows is within the supported range */
408 	if (!dev_conf->nb_event_queue_flows) {
409 		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
410 		return -EINVAL;
411 	}
412 	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
413 		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
414 		dev_id, dev_conf->nb_event_queue_flows,
415 		info.max_event_queue_flows);
416 		return -EINVAL;
417 	}
418 
419 	/* Check that nb_event_port_dequeue_depth is within the supported range */
420 	if (!dev_conf->nb_event_port_dequeue_depth) {
421 		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
422 					dev_id);
423 		return -EINVAL;
424 	}
425 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
426 		 (dev_conf->nb_event_port_dequeue_depth >
427 			 info.max_event_port_dequeue_depth)) {
428 		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
429 		dev_id, dev_conf->nb_event_port_dequeue_depth,
430 		info.max_event_port_dequeue_depth);
431 		return -EINVAL;
432 	}
433 
434 	/* Check that nb_event_port_enqueue_depth is within the supported range */
435 	if (!dev_conf->nb_event_port_enqueue_depth) {
436 		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
437 					dev_id);
438 		return -EINVAL;
439 	}
440 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
441 		(dev_conf->nb_event_port_enqueue_depth >
442 			 info.max_event_port_enqueue_depth)) {
443 		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
444 		dev_id, dev_conf->nb_event_port_enqueue_depth,
445 		info.max_event_port_enqueue_depth);
446 		return -EINVAL;
447 	}
448 
449 	/* Copy the dev_conf parameter into the dev structure */
450 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
451 
452 	/* Setup new number of queues and reconfigure device. */
453 	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
454 	if (diag != 0) {
455 		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
456 				 diag);
457 		return diag;
458 	}
459 
460 	/* Setup new number of ports and reconfigure device. */
461 	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
462 	if (diag != 0) {
463 		event_dev_queue_config(dev, 0);
464 		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
465 				 diag);
466 		return diag;
467 	}
468 
469 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
470 
471 	/* Configure the device */
472 	diag = (*dev->dev_ops->dev_configure)(dev);
473 	if (diag != 0) {
474 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
475 		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
476 		event_dev_queue_config(dev, 0);
477 		event_dev_port_config(dev, 0);
478 	}
479 
480 	dev->data->event_dev_cap = info.event_dev_cap;
481 	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
482 	return diag;
483 }
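
/*
 * Usage sketch (editorial addition): a minimal configuration flow that
 * derives the limits validated above from rte_event_dev_info_get() and then
 * configures one queue and one port. The dev_id variable and all sizes are
 * illustrative.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_exit(EXIT_FAILURE, "eventdev configure failed\n");
 */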
484 
485 static inline int
486 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
487 {
488 	if (queue_id < dev->data->nb_queues && queue_id <
489 				RTE_EVENT_MAX_QUEUES_PER_DEV)
490 		return 1;
491 	else
492 		return 0;
493 }
494 
495 int
496 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
497 				 struct rte_event_queue_conf *queue_conf)
498 {
499 	struct rte_eventdev *dev;
500 
501 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
502 	dev = &rte_eventdevs[dev_id];
503 
504 	if (queue_conf == NULL)
505 		return -EINVAL;
506 
507 	if (!is_valid_queue(dev, queue_id)) {
508 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
509 		return -EINVAL;
510 	}
511 
512 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
513 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
514 	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
515 	return 0;
516 }
517 
518 static inline int
519 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
520 {
521 	if (queue_conf &&
522 		!(queue_conf->event_queue_cfg &
523 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
524 		((queue_conf->event_queue_cfg &
525 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
526 		(queue_conf->schedule_type
527 			== RTE_SCHED_TYPE_ATOMIC)
528 		))
529 		return 1;
530 	else
531 		return 0;
532 }
533 
534 static inline int
535 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
536 {
537 	if (queue_conf &&
538 		!(queue_conf->event_queue_cfg &
539 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
540 		((queue_conf->event_queue_cfg &
541 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
542 		(queue_conf->schedule_type
543 			== RTE_SCHED_TYPE_ORDERED)
544 		))
545 		return 1;
546 	else
547 		return 0;
548 }
549 
550 
551 int
552 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
553 		      const struct rte_event_queue_conf *queue_conf)
554 {
555 	struct rte_eventdev *dev;
556 	struct rte_event_queue_conf def_conf;
557 
558 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
559 	dev = &rte_eventdevs[dev_id];
560 
561 	if (!is_valid_queue(dev, queue_id)) {
562 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
563 		return -EINVAL;
564 	}
565 
566 	/* Check nb_atomic_flows limit */
567 	if (is_valid_atomic_queue_conf(queue_conf)) {
568 		if (queue_conf->nb_atomic_flows == 0 ||
569 		    queue_conf->nb_atomic_flows >
570 			dev->data->dev_conf.nb_event_queue_flows) {
571 			RTE_EDEV_LOG_ERR(
572 		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
573 			dev_id, queue_id, queue_conf->nb_atomic_flows,
574 			dev->data->dev_conf.nb_event_queue_flows);
575 			return -EINVAL;
576 		}
577 	}
578 
579 	/* Check nb_atomic_order_sequences limit */
580 	if (is_valid_ordered_queue_conf(queue_conf)) {
581 		if (queue_conf->nb_atomic_order_sequences == 0 ||
582 		    queue_conf->nb_atomic_order_sequences >
583 			dev->data->dev_conf.nb_event_queue_flows) {
584 			RTE_EDEV_LOG_ERR(
585 		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
586 			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
587 			dev->data->dev_conf.nb_event_queue_flows);
588 			return -EINVAL;
589 		}
590 	}
591 
592 	if (dev->data->dev_started) {
593 		RTE_EDEV_LOG_ERR(
594 		    "device %d must be stopped to allow queue setup", dev_id);
595 		return -EBUSY;
596 	}
597 
598 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
599 
600 	if (queue_conf == NULL) {
601 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
602 					-ENOTSUP);
603 		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
604 		queue_conf = &def_conf;
605 	}
606 
607 	dev->data->queues_cfg[queue_id] = *queue_conf;
608 	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
609 	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
610 }
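
/*
 * Usage sketch (editorial addition): setting up each configured queue from
 * the PMD default configuration and overriding only the schedule type.
 * dev_id and cfg refer to the configuration sketch above.
 *
 *	struct rte_event_queue_conf qconf;
 *	uint8_t q;
 *
 *	for (q = 0; q < cfg.nb_event_queues; q++) {
 *		rte_event_queue_default_conf_get(dev_id, q, &qconf);
 *		qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *		if (rte_event_queue_setup(dev_id, q, &qconf) < 0)
 *			rte_exit(EXIT_FAILURE, "queue %u setup failed\n", q);
 *	}
 */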
611 
612 static inline int
613 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
614 {
615 	if (port_id < dev->data->nb_ports)
616 		return 1;
617 	else
618 		return 0;
619 }
620 
621 int
622 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
623 				 struct rte_event_port_conf *port_conf)
624 {
625 	struct rte_eventdev *dev;
626 
627 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
628 	dev = &rte_eventdevs[dev_id];
629 
630 	if (port_conf == NULL)
631 		return -EINVAL;
632 
633 	if (!is_valid_port(dev, port_id)) {
634 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
635 		return -EINVAL;
636 	}
637 
638 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
639 	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
640 	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
641 	return 0;
642 }
643 
644 int
645 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
646 		     const struct rte_event_port_conf *port_conf)
647 {
648 	struct rte_eventdev *dev;
649 	struct rte_event_port_conf def_conf;
650 	int diag;
651 
652 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
653 	dev = &rte_eventdevs[dev_id];
654 
655 	if (!is_valid_port(dev, port_id)) {
656 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
657 		return -EINVAL;
658 	}
659 
660 	/* Check new_event_threshold limit */
661 	if ((port_conf && !port_conf->new_event_threshold) ||
662 			(port_conf && port_conf->new_event_threshold >
663 				 dev->data->dev_conf.nb_events_limit)) {
664 		RTE_EDEV_LOG_ERR(
665 		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
666 			dev_id, port_id, port_conf->new_event_threshold,
667 			dev->data->dev_conf.nb_events_limit);
668 		return -EINVAL;
669 	}
670 
671 	/* Check dequeue_depth limit */
672 	if ((port_conf && !port_conf->dequeue_depth) ||
673 			(port_conf && port_conf->dequeue_depth >
674 		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
675 		RTE_EDEV_LOG_ERR(
676 		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
677 			dev_id, port_id, port_conf->dequeue_depth,
678 			dev->data->dev_conf.nb_event_port_dequeue_depth);
679 		return -EINVAL;
680 	}
681 
682 	/* Check enqueue_depth limit */
683 	if ((port_conf && !port_conf->enqueue_depth) ||
684 			(port_conf && port_conf->enqueue_depth >
685 		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
686 		RTE_EDEV_LOG_ERR(
687 		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
688 			dev_id, port_id, port_conf->enqueue_depth,
689 			dev->data->dev_conf.nb_event_port_enqueue_depth);
690 		return -EINVAL;
691 	}
692 
693 	if (port_conf &&
694 	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
695 	    !(dev->data->event_dev_cap &
696 	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
697 		RTE_EDEV_LOG_ERR(
698 		   "dev%d port%d Implicit release disable not supported",
699 			dev_id, port_id);
700 		return -EINVAL;
701 	}
702 
703 	if (dev->data->dev_started) {
704 		RTE_EDEV_LOG_ERR(
705 		    "device %d must be stopped to allow port setup", dev_id);
706 		return -EBUSY;
707 	}
708 
709 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
710 
711 	if (port_conf == NULL) {
712 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
713 					-ENOTSUP);
714 		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
715 		port_conf = &def_conf;
716 	}
717 
718 	dev->data->ports_cfg[port_id] = *port_conf;
719 
720 	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
721 
722 	/* Unlink all the queues from this port (default state after setup) */
723 	if (!diag)
724 		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
725 
726 	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
727 	if (diag < 0)
728 		return diag;
729 
730 	return 0;
731 }
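
/*
 * Usage sketch (editorial addition): one event port per worker, starting
 * from the PMD defaults so the threshold and depth checks above hold.
 * Passing NULL instead of &pconf uses the defaults directly, as handled in
 * the code above. dev_id and cfg refer to the earlier sketches.
 *
 *	struct rte_event_port_conf pconf;
 *	uint8_t p;
 *
 *	for (p = 0; p < cfg.nb_event_ports; p++) {
 *		rte_event_port_default_conf_get(dev_id, p, &pconf);
 *		if (rte_event_port_setup(dev_id, p, &pconf) < 0)
 *			rte_exit(EXIT_FAILURE, "port %u setup failed\n", p);
 *	}
 */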
732 
733 void
734 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
735 		       rte_eventdev_port_flush_t release_cb, void *args)
736 {
737 	struct rte_eventdev *dev;
738 
739 	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
740 	dev = &rte_eventdevs[dev_id];
741 
742 	if (!is_valid_port(dev, port_id)) {
743 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
744 		return;
745 	}
746 
747 	if (dev->dev_ops->port_quiesce)
748 		(*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
749 					      release_cb, args);
750 }
751 
752 int
753 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
754 		       uint32_t *attr_value)
755 {
756 	struct rte_eventdev *dev;
757 
758 	if (!attr_value)
759 		return -EINVAL;
760 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
761 	dev = &rte_eventdevs[dev_id];
762 
763 	switch (attr_id) {
764 	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
765 		*attr_value = dev->data->nb_ports;
766 		break;
767 	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
768 		*attr_value = dev->data->nb_queues;
769 		break;
770 	case RTE_EVENT_DEV_ATTR_STARTED:
771 		*attr_value = dev->data->dev_started;
772 		break;
773 	default:
774 		return -EINVAL;
775 	}
776 
777 	return 0;
778 }
779 
780 int
781 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
782 			uint32_t *attr_value)
783 {
784 	struct rte_eventdev *dev;
785 
786 	if (!attr_value)
787 		return -EINVAL;
788 
789 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
790 	dev = &rte_eventdevs[dev_id];
791 	if (!is_valid_port(dev, port_id)) {
792 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
793 		return -EINVAL;
794 	}
795 
796 	switch (attr_id) {
797 	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
798 		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
799 		break;
800 	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
801 		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
802 		break;
803 	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
804 		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
805 		break;
806 	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
807 	{
808 		uint32_t config;
809 
810 		config = dev->data->ports_cfg[port_id].event_port_cfg;
811 		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
812 		break;
813 	}
814 	default:
815 		return -EINVAL;
816 	}
817 	return 0;
818 }
819 
820 int
821 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
822 			uint32_t *attr_value)
823 {
824 	struct rte_event_queue_conf *conf;
825 	struct rte_eventdev *dev;
826 
827 	if (!attr_value)
828 		return -EINVAL;
829 
830 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
831 	dev = &rte_eventdevs[dev_id];
832 	if (!is_valid_queue(dev, queue_id)) {
833 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
834 		return -EINVAL;
835 	}
836 
837 	conf = &dev->data->queues_cfg[queue_id];
838 
839 	switch (attr_id) {
840 	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
841 		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
842 		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
843 			*attr_value = conf->priority;
844 		break;
845 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
846 		*attr_value = conf->nb_atomic_flows;
847 		break;
848 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
849 		*attr_value = conf->nb_atomic_order_sequences;
850 		break;
851 	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
852 		*attr_value = conf->event_queue_cfg;
853 		break;
854 	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
855 		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
856 			return -EOVERFLOW;
857 
858 		*attr_value = conf->schedule_type;
859 		break;
860 	default:
861 		return -EINVAL;
862 	}
863 	return 0;
864 }
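
/*
 * Usage sketch (editorial addition): reading back a queue attribute after
 * setup, for example the effective priority on devices that report the
 * RTE_EVENT_DEV_CAP_QUEUE_QOS capability. queue_id is illustrative.
 *
 *	uint32_t prio;
 *
 *	if (rte_event_queue_attr_get(dev_id, queue_id,
 *				     RTE_EVENT_QUEUE_ATTR_PRIORITY, &prio) == 0)
 *		printf("queue %u priority %u\n", queue_id, prio);
 */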
865 
866 int
867 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
868 		    const uint8_t queues[], const uint8_t priorities[],
869 		    uint16_t nb_links)
870 {
871 	struct rte_eventdev *dev;
872 	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
873 	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
874 	uint16_t *links_map;
875 	int i, diag;
876 
877 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
878 	dev = &rte_eventdevs[dev_id];
879 
880 	if (*dev->dev_ops->port_link == NULL) {
881 		RTE_EDEV_LOG_ERR("Function not supported");
882 		rte_errno = ENOTSUP;
883 		return 0;
884 	}
885 
886 	if (!is_valid_port(dev, port_id)) {
887 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
888 		rte_errno = EINVAL;
889 		return 0;
890 	}
891 
892 	if (queues == NULL) {
893 		for (i = 0; i < dev->data->nb_queues; i++)
894 			queues_list[i] = i;
895 
896 		queues = queues_list;
897 		nb_links = dev->data->nb_queues;
898 	}
899 
900 	if (priorities == NULL) {
901 		for (i = 0; i < nb_links; i++)
902 			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
903 
904 		priorities = priorities_list;
905 	}
906 
907 	for (i = 0; i < nb_links; i++)
908 		if (queues[i] >= dev->data->nb_queues) {
909 			rte_errno = EINVAL;
910 			return 0;
911 		}
912 
913 	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
914 						queues, priorities, nb_links);
915 	if (diag < 0)
916 		return diag;
917 
918 	links_map = dev->data->links_map;
919 	/* Point links_map to this port's area */
920 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
921 	for (i = 0; i < diag; i++)
922 		links_map[queues[i]] = (uint8_t)priorities[i];
923 
924 	rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
925 	return diag;
926 }
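
/*
 * Usage sketch (editorial addition): linking one port to every configured
 * queue at normal priority. Passing NULL for both arrays, as handled above,
 * is equivalent; the explicit form is shown for clarity. dev_id, port_id
 * and cfg refer to the earlier sketches.
 *
 *	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	uint8_t prios[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	uint8_t q;
 *	int n;
 *
 *	for (q = 0; q < cfg.nb_event_queues; q++) {
 *		queues[q] = q;
 *		prios[q] = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	}
 *
 *	n = rte_event_port_link(dev_id, port_id, queues, prios,
 *				cfg.nb_event_queues);
 *	if (n != cfg.nb_event_queues)
 *		rte_exit(EXIT_FAILURE, "only %d of %d links established\n",
 *			 n, cfg.nb_event_queues);
 */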
927 
928 int
929 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
930 		      uint8_t queues[], uint16_t nb_unlinks)
931 {
932 	struct rte_eventdev *dev;
933 	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
934 	int i, diag, j;
935 	uint16_t *links_map;
936 
937 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
938 	dev = &rte_eventdevs[dev_id];
939 
940 	if (*dev->dev_ops->port_unlink == NULL) {
941 		RTE_EDEV_LOG_ERR("Function not supported");
942 		rte_errno = ENOTSUP;
943 		return 0;
944 	}
945 
946 	if (!is_valid_port(dev, port_id)) {
947 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
948 		rte_errno = EINVAL;
949 		return 0;
950 	}
951 
952 	links_map = dev->data->links_map;
953 	/* Point links_map to this port's area */
954 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
955 
956 	if (queues == NULL) {
957 		j = 0;
958 		for (i = 0; i < dev->data->nb_queues; i++) {
959 			if (links_map[i] !=
960 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
961 				all_queues[j] = i;
962 				j++;
963 			}
964 		}
965 		queues = all_queues;
966 	} else {
967 		for (j = 0; j < nb_unlinks; j++) {
968 			if (links_map[queues[j]] ==
969 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
970 				break;
971 		}
972 	}
973 
974 	nb_unlinks = j;
975 	for (i = 0; i < nb_unlinks; i++)
976 		if (queues[i] >= dev->data->nb_queues) {
977 			rte_errno = EINVAL;
978 			return 0;
979 		}
980 
981 	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
982 					queues, nb_unlinks);
983 
984 	if (diag < 0)
985 		return diag;
986 
987 	for (i = 0; i < diag; i++)
988 		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
989 
990 	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
991 	return diag;
992 }
993 
994 int
995 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
996 {
997 	struct rte_eventdev *dev;
998 
999 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1000 	dev = &rte_eventdevs[dev_id];
1001 	if (!is_valid_port(dev, port_id)) {
1002 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1003 		return -EINVAL;
1004 	}
1005 
1006 	/* Return 0 if the PMD does not implement unlinks in progress.
1007 	 * This allows PMDs which handle unlink synchronously to not implement
1008 	 * this function at all.
1009 	 */
1010 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
1011 
1012 	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1013 			dev->data->ports[port_id]);
1014 }
1015 
1016 int
1017 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1018 			 uint8_t queues[], uint8_t priorities[])
1019 {
1020 	struct rte_eventdev *dev;
1021 	uint16_t *links_map;
1022 	int i, count = 0;
1023 
1024 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1025 	dev = &rte_eventdevs[dev_id];
1026 	if (!is_valid_port(dev, port_id)) {
1027 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1028 		return -EINVAL;
1029 	}
1030 
1031 	links_map = dev->data->links_map;
1032 	/* Point links_map to this port's area */
1033 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1034 	for (i = 0; i < dev->data->nb_queues; i++) {
1035 		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1036 			queues[count] = i;
1037 			priorities[count] = (uint8_t)links_map[i];
1038 			++count;
1039 		}
1040 	}
1041 	return count;
1042 }
1043 
1044 int
1045 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1046 				 uint64_t *timeout_ticks)
1047 {
1048 	struct rte_eventdev *dev;
1049 
1050 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1051 	dev = &rte_eventdevs[dev_id];
1052 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1053 
1054 	if (timeout_ticks == NULL)
1055 		return -EINVAL;
1056 
1057 	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1058 }
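
/*
 * Usage sketch (editorial addition): converting a 100 us wait into the
 * device-specific tick value that rte_event_dequeue_burst() expects in its
 * timeout_ticks argument.
 *
 *	uint64_t ticks = 0;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
 *		ticks = 0;
 *
 * A zero tick value falls back to a non-blocking dequeue.
 */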
1059 
1060 int
1061 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1062 {
1063 	struct rte_eventdev *dev;
1064 
1065 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1066 	dev = &rte_eventdevs[dev_id];
1067 
1068 	if (service_id == NULL)
1069 		return -EINVAL;
1070 
1071 	if (dev->data->service_inited)
1072 		*service_id = dev->data->service_id;
1073 
1074 	return dev->data->service_inited ? 0 : -ESRCH;
1075 }
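
/*
 * Usage sketch (editorial addition): software PMDs that rely on a service
 * core expose a service ID here. The rte_service calls below are the usual
 * way to run it and assume lcore 1 was registered as a service lcore; check
 * the service core API of the DPDK version in use.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */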
1076 
1077 int
1078 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1079 {
1080 	struct rte_eventdev *dev;
1081 
1082 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1083 	dev = &rte_eventdevs[dev_id];
1084 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1085 	if (f == NULL)
1086 		return -EINVAL;
1087 
1088 	(*dev->dev_ops->dump)(dev, f);
1089 	return 0;
1090 
1091 }
1092 
1093 static int
1094 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1095 		uint8_t queue_port_id)
1096 {
1097 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1098 	if (dev->dev_ops->xstats_get_names != NULL)
1099 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1100 							queue_port_id,
1101 							NULL, NULL, 0);
1102 	return 0;
1103 }
1104 
1105 int
1106 rte_event_dev_xstats_names_get(uint8_t dev_id,
1107 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1108 		struct rte_event_dev_xstats_name *xstats_names,
1109 		unsigned int *ids, unsigned int size)
1110 {
1111 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1112 	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1113 							  queue_port_id);
1114 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
1115 			(int)size < cnt_expected_entries)
1116 		return cnt_expected_entries;
1117 
1118 	/* dev_id checked above */
1119 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1120 
1121 	if (dev->dev_ops->xstats_get_names != NULL)
1122 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1123 				queue_port_id, xstats_names, ids, size);
1124 
1125 	return -ENOTSUP;
1126 }
1127 
1128 /* retrieve eventdev extended statistics */
1129 int
1130 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1131 		uint8_t queue_port_id, const unsigned int ids[],
1132 		uint64_t values[], unsigned int n)
1133 {
1134 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1135 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1136 
1137 	/* implemented by the driver */
1138 	if (dev->dev_ops->xstats_get != NULL)
1139 		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1140 				ids, values, n);
1141 	return -ENOTSUP;
1142 }
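
/*
 * Usage sketch (editorial addition): the two-call pattern for extended
 * stats, first sizing the arrays with a NULL query and then fetching names
 * and values (error handling omitted). eventdev_build_telemetry_data()
 * further down uses the same pattern.
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	struct rte_event_dev_xstats_name *names = malloc(n * sizeof(*names));
 *	unsigned int *ids = malloc(n * sizeof(*ids));
 *	uint64_t *vals = malloc(n * sizeof(*vals));
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				       names, ids, n);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				 ids, vals, n);
 */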
1143 
1144 uint64_t
1145 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1146 		unsigned int *id)
1147 {
1148 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1149 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1150 	unsigned int temp = -1;
1151 
1152 	if (id != NULL)
1153 		*id = (unsigned int)-1;
1154 	else
1155 		id = &temp; /* ensure driver never gets a NULL value */
1156 
1157 	/* implemented by driver */
1158 	if (dev->dev_ops->xstats_get_by_name != NULL)
1159 		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1160 	return -ENOTSUP;
1161 }
1162 
1163 int rte_event_dev_xstats_reset(uint8_t dev_id,
1164 		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1165 		const uint32_t ids[], uint32_t nb_ids)
1166 {
1167 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1168 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1169 
1170 	if (dev->dev_ops->xstats_reset != NULL)
1171 		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1172 							ids, nb_ids);
1173 	return -ENOTSUP;
1174 }
1175 
1176 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1177 
1178 int rte_event_dev_selftest(uint8_t dev_id)
1179 {
1180 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1181 	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1182 		.name = "rte_event_pmd_selftest_seqn_dynfield",
1183 		.size = sizeof(rte_event_pmd_selftest_seqn_t),
1184 		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
1185 	};
1186 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1187 
1188 	if (dev->dev_ops->dev_selftest != NULL) {
1189 		rte_event_pmd_selftest_seqn_dynfield_offset =
1190 			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1191 		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1192 			return -ENOMEM;
1193 		return (*dev->dev_ops->dev_selftest)();
1194 	}
1195 	return -ENOTSUP;
1196 }
1197 
1198 struct rte_mempool *
1199 rte_event_vector_pool_create(const char *name, unsigned int n,
1200 			     unsigned int cache_size, uint16_t nb_elem,
1201 			     int socket_id)
1202 {
1203 	const char *mp_ops_name;
1204 	struct rte_mempool *mp;
1205 	unsigned int elt_sz;
1206 	int ret;
1207 
1208 	if (!nb_elem) {
1209 		RTE_LOG(ERR, EVENTDEV,
1210 			"Invalid number of elements=%d requested\n", nb_elem);
1211 		rte_errno = EINVAL;
1212 		return NULL;
1213 	}
1214 
1215 	elt_sz =
1216 		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1217 	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1218 				      0);
1219 	if (mp == NULL)
1220 		return NULL;
1221 
1222 	mp_ops_name = rte_mbuf_best_mempool_ops();
1223 	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1224 	if (ret != 0) {
1225 		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
1226 		goto err;
1227 	}
1228 
1229 	ret = rte_mempool_populate_default(mp);
1230 	if (ret < 0)
1231 		goto err;
1232 
1233 	return mp;
1234 err:
1235 	rte_mempool_free(mp);
1236 	rte_errno = -ret;
1237 	return NULL;
1238 }
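
/*
 * Usage sketch (editorial addition): creating a pool of event vectors, each
 * able to carry up to 32 object pointers, for adapters that support event
 * vectorization. The pool name and sizes are illustrative.
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("ev_vec_pool", 16 * 1024,
 *						64, 32, rte_socket_id());
 *	if (vec_pool == NULL)
 *		rte_exit(EXIT_FAILURE, "event vector pool creation failed\n");
 */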
1239 
1240 int
1241 rte_event_dev_start(uint8_t dev_id)
1242 {
1243 	struct rte_eventdev *dev;
1244 	int diag;
1245 
1246 	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1247 
1248 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1249 	dev = &rte_eventdevs[dev_id];
1250 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1251 
1252 	if (dev->data->dev_started != 0) {
1253 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1254 			dev_id);
1255 		return 0;
1256 	}
1257 
1258 	diag = (*dev->dev_ops->dev_start)(dev);
1259 	rte_eventdev_trace_start(dev_id, diag);
1260 	if (diag == 0)
1261 		dev->data->dev_started = 1;
1262 	else
1263 		return diag;
1264 
1265 	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
1266 
1267 	return 0;
1268 }
1269 
1270 int
1271 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1272 		eventdev_stop_flush_t callback, void *userdata)
1273 {
1274 	struct rte_eventdev *dev;
1275 
1276 	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1277 
1278 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1279 	dev = &rte_eventdevs[dev_id];
1280 
1281 	dev->dev_ops->dev_stop_flush = callback;
1282 	dev->data->dev_stop_flush_arg = userdata;
1283 
1284 	return 0;
1285 }
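
/*
 * Usage sketch (editorial addition): draining events still held by the
 * device when rte_event_dev_stop() is called. The callback prototype below
 * follows the eventdev_stop_flush_t typedef as understood at this revision
 * and assumes the flushed events carry mbufs; check rte_eventdev.h of the
 * DPDK version in use.
 *
 *	static void
 *	flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(arg);
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
 */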
1286 
1287 void
1288 rte_event_dev_stop(uint8_t dev_id)
1289 {
1290 	struct rte_eventdev *dev;
1291 
1292 	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1293 
1294 	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1295 	dev = &rte_eventdevs[dev_id];
1296 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1297 
1298 	if (dev->data->dev_started == 0) {
1299 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1300 			dev_id);
1301 		return;
1302 	}
1303 
1304 	dev->data->dev_started = 0;
1305 	(*dev->dev_ops->dev_stop)(dev);
1306 	rte_eventdev_trace_stop(dev_id);
1307 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1308 }
1309 
1310 int
1311 rte_event_dev_close(uint8_t dev_id)
1312 {
1313 	struct rte_eventdev *dev;
1314 
1315 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1316 	dev = &rte_eventdevs[dev_id];
1317 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1318 
1319 	/* Device must be stopped before it can be closed */
1320 	if (dev->data->dev_started == 1) {
1321 		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1322 				dev_id);
1323 		return -EBUSY;
1324 	}
1325 
1326 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1327 	rte_eventdev_trace_close(dev_id);
1328 	return (*dev->dev_ops->dev_close)(dev);
1329 }
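
/*
 * Usage sketch (editorial addition): the teardown order enforced above,
 * i.e. the device must be stopped before it can be closed.
 *
 *	rte_event_dev_stop(dev_id);
 *	if (rte_event_dev_close(dev_id) < 0)
 *		printf("eventdev close failed\n");
 */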
1330 
1331 static inline int
1332 eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1333 		    int socket_id)
1334 {
1335 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1336 	const struct rte_memzone *mz;
1337 	int n;
1338 
1339 	/* Generate memzone name */
1340 	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1341 	if (n >= (int)sizeof(mz_name))
1342 		return -EINVAL;
1343 
1344 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1345 		mz = rte_memzone_reserve(mz_name,
1346 				sizeof(struct rte_eventdev_data),
1347 				socket_id, 0);
1348 	} else
1349 		mz = rte_memzone_lookup(mz_name);
1350 
1351 	if (mz == NULL)
1352 		return -ENOMEM;
1353 
1354 	*data = mz->addr;
1355 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1356 		memset(*data, 0, sizeof(struct rte_eventdev_data));
1357 		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
1358 					RTE_EVENT_MAX_QUEUES_PER_DEV;
1359 		     n++)
1360 			(*data)->links_map[n] =
1361 				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1362 	}
1363 
1364 	return 0;
1365 }
1366 
1367 static inline uint8_t
1368 eventdev_find_free_device_index(void)
1369 {
1370 	uint8_t dev_id;
1371 
1372 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1373 		if (rte_eventdevs[dev_id].attached ==
1374 				RTE_EVENTDEV_DETACHED)
1375 			return dev_id;
1376 	}
1377 	return RTE_EVENT_MAX_DEVS;
1378 }
1379 
1380 struct rte_eventdev *
1381 rte_event_pmd_allocate(const char *name, int socket_id)
1382 {
1383 	struct rte_eventdev *eventdev;
1384 	uint8_t dev_id;
1385 
1386 	if (rte_event_pmd_get_named_dev(name) != NULL) {
1387 		RTE_EDEV_LOG_ERR("Event device with name %s already "
1388 				"allocated!", name);
1389 		return NULL;
1390 	}
1391 
1392 	dev_id = eventdev_find_free_device_index();
1393 	if (dev_id == RTE_EVENT_MAX_DEVS) {
1394 		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1395 		return NULL;
1396 	}
1397 
1398 	eventdev = &rte_eventdevs[dev_id];
1399 
1400 	if (eventdev->data == NULL) {
1401 		struct rte_eventdev_data *eventdev_data = NULL;
1402 
1403 		int retval =
1404 			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
1405 
1406 		if (retval < 0 || eventdev_data == NULL)
1407 			return NULL;
1408 
1409 		eventdev->data = eventdev_data;
1410 
1411 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1412 
1413 			strlcpy(eventdev->data->name, name,
1414 				RTE_EVENTDEV_NAME_MAX_LEN);
1415 
1416 			eventdev->data->dev_id = dev_id;
1417 			eventdev->data->socket_id = socket_id;
1418 			eventdev->data->dev_started = 0;
1419 		}
1420 
1421 		eventdev->attached = RTE_EVENTDEV_ATTACHED;
1422 		eventdev_globals.nb_devs++;
1423 	}
1424 
1425 	return eventdev;
1426 }
1427 
1428 int
1429 rte_event_pmd_release(struct rte_eventdev *eventdev)
1430 {
1431 	int ret;
1432 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1433 	const struct rte_memzone *mz;
1434 
1435 	if (eventdev == NULL)
1436 		return -EINVAL;
1437 
1438 	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
1439 	eventdev->attached = RTE_EVENTDEV_DETACHED;
1440 	eventdev_globals.nb_devs--;
1441 
1442 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1443 		rte_free(eventdev->data->dev_private);
1444 
1445 		/* Generate memzone name */
1446 		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1447 				eventdev->data->dev_id);
1448 		if (ret >= (int)sizeof(mz_name))
1449 			return -EINVAL;
1450 
1451 		mz = rte_memzone_lookup(mz_name);
1452 		if (mz == NULL)
1453 			return -ENOMEM;
1454 
1455 		ret = rte_memzone_free(mz);
1456 		if (ret)
1457 			return ret;
1458 	}
1459 
1460 	eventdev->data = NULL;
1461 	return 0;
1462 }
1463 
1464 void
1465 event_dev_probing_finish(struct rte_eventdev *eventdev)
1466 {
1467 	if (eventdev == NULL)
1468 		return;
1469 
1470 	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
1471 			     eventdev);
1472 }
1473 
1474 static int
1475 handle_dev_list(const char *cmd __rte_unused,
1476 		const char *params __rte_unused,
1477 		struct rte_tel_data *d)
1478 {
1479 	uint8_t dev_id;
1480 	int ndev = rte_event_dev_count();
1481 
1482 	if (ndev < 1)
1483 		return -1;
1484 
1485 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1486 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1487 		if (rte_eventdevs[dev_id].attached ==
1488 				RTE_EVENTDEV_ATTACHED)
1489 			rte_tel_data_add_array_int(d, dev_id);
1490 	}
1491 
1492 	return 0;
1493 }
1494 
1495 static int
1496 handle_port_list(const char *cmd __rte_unused,
1497 		 const char *params,
1498 		 struct rte_tel_data *d)
1499 {
1500 	int i;
1501 	uint8_t dev_id;
1502 	struct rte_eventdev *dev;
1503 	char *end_param;
1504 
1505 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1506 		return -1;
1507 
1508 	dev_id = strtoul(params, &end_param, 10);
1509 	if (*end_param != '\0')
1510 		RTE_EDEV_LOG_DEBUG(
1511 			"Extra parameters passed to eventdev telemetry command, ignoring");
1512 
1513 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1514 	dev = &rte_eventdevs[dev_id];
1515 
1516 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1517 	for (i = 0; i < dev->data->nb_ports; i++)
1518 		rte_tel_data_add_array_int(d, i);
1519 
1520 	return 0;
1521 }
1522 
1523 static int
1524 handle_queue_list(const char *cmd __rte_unused,
1525 		  const char *params,
1526 		  struct rte_tel_data *d)
1527 {
1528 	int i;
1529 	uint8_t dev_id;
1530 	struct rte_eventdev *dev;
1531 	char *end_param;
1532 
1533 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1534 		return -1;
1535 
1536 	dev_id = strtoul(params, &end_param, 10);
1537 	if (*end_param != '\0')
1538 		RTE_EDEV_LOG_DEBUG(
1539 			"Extra parameters passed to eventdev telemetry command, ignoring");
1540 
1541 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1542 	dev = &rte_eventdevs[dev_id];
1543 
1544 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1545 	for (i = 0; i < dev->data->nb_queues; i++)
1546 		rte_tel_data_add_array_int(d, i);
1547 
1548 	return 0;
1549 }
1550 
1551 static int
1552 handle_queue_links(const char *cmd __rte_unused,
1553 		   const char *params,
1554 		   struct rte_tel_data *d)
1555 {
1556 	int i, ret, port_id = 0;
1557 	char *end_param;
1558 	uint8_t dev_id;
1559 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1560 	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1561 	const char *p_param;
1562 
1563 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1564 		return -1;
1565 
1566 	/* Get dev ID from parameter string */
1567 	dev_id = strtoul(params, &end_param, 10);
1568 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1569 
1570 	p_param = strtok(end_param, ",");
1571 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1572 		return -1;
1573 
1574 	port_id = strtoul(p_param, &end_param, 10);
1575 	p_param = strtok(NULL, "\0");
1576 	if (p_param != NULL)
1577 		RTE_EDEV_LOG_DEBUG(
1578 			"Extra parameters passed to eventdev telemetry command, ignoring");
1579 
1580 	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1581 	if (ret < 0)
1582 		return -1;
1583 
1584 	rte_tel_data_start_dict(d);
1585 	for (i = 0; i < ret; i++) {
1586 		char qid_name[32];
1587 
1588 		snprintf(qid_name, 31, "qid_%u", queues[i]);
1589 		rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
1590 	}
1591 
1592 	return 0;
1593 }
1594 
1595 static int
1596 eventdev_build_telemetry_data(int dev_id,
1597 			      enum rte_event_dev_xstats_mode mode,
1598 			      int port_queue_id,
1599 			      struct rte_tel_data *d)
1600 {
1601 	struct rte_event_dev_xstats_name *xstat_names;
1602 	unsigned int *ids;
1603 	uint64_t *values;
1604 	int i, ret, num_xstats;
1605 
1606 	num_xstats = rte_event_dev_xstats_names_get(dev_id,
1607 						    mode,
1608 						    port_queue_id,
1609 						    NULL,
1610 						    NULL,
1611 						    0);
1612 
1613 	if (num_xstats < 0)
1614 		return -1;
1615 
1616 	/* use one malloc for names */
1617 	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1618 			     * num_xstats);
1619 	if (xstat_names == NULL)
1620 		return -1;
1621 
1622 	ids = malloc((sizeof(unsigned int)) * num_xstats);
1623 	if (ids == NULL) {
1624 		free(xstat_names);
1625 		return -1;
1626 	}
1627 
1628 	values = malloc((sizeof(uint64_t)) * num_xstats);
1629 	if (values == NULL) {
1630 		free(xstat_names);
1631 		free(ids);
1632 		return -1;
1633 	}
1634 
1635 	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1636 					     xstat_names, ids, num_xstats);
1637 	if (ret < 0 || ret > num_xstats) {
1638 		free(xstat_names);
1639 		free(ids);
1640 		free(values);
1641 		return -1;
1642 	}
1643 
1644 	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1645 				       ids, values, num_xstats);
1646 	if (ret < 0 || ret > num_xstats) {
1647 		free(xstat_names);
1648 		free(ids);
1649 		free(values);
1650 		return -1;
1651 	}
1652 
1653 	rte_tel_data_start_dict(d);
1654 	for (i = 0; i < num_xstats; i++)
1655 		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
1656 					  values[i]);
1657 
1658 	free(xstat_names);
1659 	free(ids);
1660 	free(values);
1661 	return 0;
1662 }
1663 
1664 static int
1665 handle_dev_xstats(const char *cmd __rte_unused,
1666 		  const char *params,
1667 		  struct rte_tel_data *d)
1668 {
1669 	int dev_id;
1670 	enum rte_event_dev_xstats_mode mode;
1671 	char *end_param;
1672 
1673 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1674 		return -1;
1675 
1676 	/* Get dev ID from parameter string */
1677 	dev_id = strtoul(params, &end_param, 10);
1678 	if (*end_param != '\0')
1679 		RTE_EDEV_LOG_DEBUG(
1680 			"Extra parameters passed to eventdev telemetry command, ignoring");
1681 
1682 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1683 
1684 	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1685 	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1686 }
1687 
1688 static int
1689 handle_port_xstats(const char *cmd __rte_unused,
1690 		   const char *params,
1691 		   struct rte_tel_data *d)
1692 {
1693 	int dev_id;
1694 	int port_queue_id = 0;
1695 	enum rte_event_dev_xstats_mode mode;
1696 	char *end_param;
1697 	const char *p_param;
1698 
1699 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1700 		return -1;
1701 
1702 	/* Get dev ID from parameter string */
1703 	dev_id = strtoul(params, &end_param, 10);
1704 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1705 
1706 	p_param = strtok(end_param, ",");
1707 	mode = RTE_EVENT_DEV_XSTATS_PORT;
1708 
1709 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1710 		return -1;
1711 
1712 	port_queue_id = strtoul(p_param, &end_param, 10);
1713 
1714 	p_param = strtok(NULL, "\0");
1715 	if (p_param != NULL)
1716 		RTE_EDEV_LOG_DEBUG(
1717 			"Extra parameters passed to eventdev telemetry command, ignoring");
1718 
1719 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1720 }
1721 
1722 static int
1723 handle_queue_xstats(const char *cmd __rte_unused,
1724 		    const char *params,
1725 		    struct rte_tel_data *d)
1726 {
1727 	int dev_id;
1728 	int port_queue_id = 0;
1729 	enum rte_event_dev_xstats_mode mode;
1730 	char *end_param;
1731 	const char *p_param;
1732 
1733 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1734 		return -1;
1735 
1736 	/* Get dev ID from parameter string */
1737 	dev_id = strtoul(params, &end_param, 10);
1738 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1739 
1740 	p_param = strtok(end_param, ",");
1741 	mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1742 
1743 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1744 		return -1;
1745 
1746 	port_queue_id = strtoul(p_param, &end_param, 10);
1747 
1748 	p_param = strtok(NULL, "\0");
1749 	if (p_param != NULL)
1750 		RTE_EDEV_LOG_DEBUG(
1751 			"Extra parameters passed to eventdev telemetry command, ignoring");
1752 
1753 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1754 }
1755 
1756 RTE_INIT(eventdev_init_telemetry)
1757 {
1758 	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
1759 			"Returns list of available eventdevs. Takes no parameters");
1760 	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
1761 			"Returns list of available ports. Parameter: DevID");
1762 	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
1763 			"Returns list of available queues. Parameter: DevID");
1764 
1765 	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
1766 			"Returns stats for an eventdev. Parameter: DevID");
1767 	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
1768 			"Returns stats for an eventdev port. Params: DevID,PortID");
1769 	rte_telemetry_register_cmd("/eventdev/queue_xstats",
1770 			handle_queue_xstats,
1771 			"Returns stats for an eventdev queue. Params: DevID,QueueID");
1772 	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
1773 			"Returns links for an eventdev port. Params: DevID,PortID");
1774 }
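
/*
 * Usage sketch (editorial addition): the endpoints registered above are
 * reachable over the DPDK telemetry socket, for example with the
 * usertools/dpdk-telemetry.py client. The exact client invocation is an
 * assumption; parameters follow the command after commas, as parsed by the
 * handlers above.
 *
 *	--> /eventdev/dev_list
 *	--> /eventdev/port_xstats,0,1
 *	--> /eventdev/queue_links,0,2
 */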
1775