xref: /dpdk/lib/eventdev/rte_eventdev.c (revision 0988482f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4 
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <sys/types.h>
14 #include <sys/queue.h>
15 
16 #include <rte_string_fns.h>
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_eal.h>
25 #include <rte_per_lcore.h>
26 #include <rte_lcore.h>
27 #include <rte_atomic.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_common.h>
30 #include <rte_malloc.h>
31 #include <rte_errno.h>
32 #include <ethdev_driver.h>
33 #include <rte_cryptodev.h>
34 #include <cryptodev_pmd.h>
35 #include <rte_telemetry.h>
36 
37 #include "rte_eventdev.h"
38 #include "eventdev_pmd.h"
39 #include "eventdev_trace.h"
40 
41 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
42 
43 struct rte_eventdev *rte_eventdevs = rte_event_devices;
44 
45 static struct rte_eventdev_global eventdev_globals = {
46 	.nb_devs		= 0
47 };
48 
49 /* Public fastpath APIs. */
50 struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
51 
52 /* Event dev northbound API implementation */
53 
54 uint8_t
55 rte_event_dev_count(void)
56 {
57 	return eventdev_globals.nb_devs;
58 }
59 
60 int
61 rte_event_dev_get_dev_id(const char *name)
62 {
63 	int i;
64 	uint8_t cmp;
65 
66 	if (!name)
67 		return -EINVAL;
68 
69 	for (i = 0; i < eventdev_globals.nb_devs; i++) {
70 		cmp = (strncmp(rte_event_devices[i].data->name, name,
71 				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
72 			(rte_event_devices[i].dev ? (strncmp(
73 				rte_event_devices[i].dev->driver->name, name,
74 					 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
75 		if (cmp && (rte_event_devices[i].attached ==
76 					RTE_EVENTDEV_ATTACHED))
77 			return i;
78 	}
79 	return -ENODEV;
80 }
81 
82 int
83 rte_event_dev_socket_id(uint8_t dev_id)
84 {
85 	struct rte_eventdev *dev;
86 
87 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
88 	dev = &rte_eventdevs[dev_id];
89 
90 	return dev->data->socket_id;
91 }
92 
93 int
94 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
95 {
96 	struct rte_eventdev *dev;
97 
98 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
99 	dev = &rte_eventdevs[dev_id];
100 
101 	if (dev_info == NULL)
102 		return -EINVAL;
103 
104 	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
105 
106 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
107 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
108 
109 	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
110 
111 	dev_info->dev = dev->dev;
112 	return 0;
113 }
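
/*
 * Usage sketch (illustrative only, not part of the library): typical
 * application-side probing built on the APIs above. The device name
 * "event_sw0" and the abbreviated error handling are assumptions made for
 * the example.
 *
 *	struct rte_event_dev_info info;
 *	int dev_id = rte_event_dev_get_dev_id("event_sw0");
 *
 *	if (dev_id < 0 || rte_event_dev_info_get(dev_id, &info) < 0)
 *		rte_panic("no usable event device\n");
 *	printf("eventdev %d on socket %d: up to %u queues and %u ports\n",
 *	       dev_id, rte_event_dev_socket_id(dev_id),
 *	       info.max_event_queues, info.max_event_ports);
 */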
114 
115 int
116 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
117 				uint32_t *caps)
118 {
119 	struct rte_eventdev *dev;
120 
121 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
122 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
123 
124 	dev = &rte_eventdevs[dev_id];
125 
126 	if (caps == NULL)
127 		return -EINVAL;
128 
129 	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
130 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
131 	else
132 		*caps = 0;
133 
134 	return dev->dev_ops->eth_rx_adapter_caps_get ?
135 				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
136 						&rte_eth_devices[eth_port_id],
137 						caps)
138 				: 0;
139 }
140 
141 int
142 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
143 {
144 	struct rte_eventdev *dev;
145 	const struct event_timer_adapter_ops *ops;
146 
147 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
148 
149 	dev = &rte_eventdevs[dev_id];
150 
151 	if (caps == NULL)
152 		return -EINVAL;
153 	*caps = 0;
154 
155 	return dev->dev_ops->timer_adapter_caps_get ?
156 				(*dev->dev_ops->timer_adapter_caps_get)(dev,
157 									0,
158 									caps,
159 									&ops)
160 				: 0;
161 }
162 
163 int
164 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
165 				  uint32_t *caps)
166 {
167 	struct rte_eventdev *dev;
168 	struct rte_cryptodev *cdev;
169 
170 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
171 	if (!rte_cryptodev_is_valid_dev(cdev_id))
172 		return -EINVAL;
173 
174 	dev = &rte_eventdevs[dev_id];
175 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
176 
177 	if (caps == NULL)
178 		return -EINVAL;
179 
180 	if (dev->dev_ops->crypto_adapter_caps_get == NULL)
181 		*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
182 	else
183 		*caps = 0;
184 
185 	return dev->dev_ops->crypto_adapter_caps_get ?
186 		(*dev->dev_ops->crypto_adapter_caps_get)
187 		(dev, cdev, caps) : 0;
188 }
189 
190 int
191 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
192 				uint32_t *caps)
193 {
194 	struct rte_eventdev *dev;
195 	struct rte_eth_dev *eth_dev;
196 
197 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
198 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
199 
200 	dev = &rte_eventdevs[dev_id];
201 	eth_dev = &rte_eth_devices[eth_port_id];
202 
203 	if (caps == NULL)
204 		return -EINVAL;
205 
206 	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
207 		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
208 	else
209 		*caps = 0;
210 
211 	return dev->dev_ops->eth_tx_adapter_caps_get ?
212 			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
213 								eth_dev,
214 								caps)
215 			: 0;
216 }
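
/*
 * Usage sketch (illustrative only): deciding, from the capability getters
 * above, whether an ethdev Rx adapter will need a software service core.
 * Ethernet port 0 is assumed to be a valid, probed ethdev.
 *
 *	uint32_t caps = 0;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, 0, &caps) == 0 &&
 *	    !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 *		printf("Rx adapter for port 0 needs a service core\n");
 */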
217 
218 static inline int
219 event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
220 {
221 	uint8_t old_nb_queues = dev->data->nb_queues;
222 	struct rte_event_queue_conf *queues_cfg;
223 	unsigned int i;
224 
225 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
226 			 dev->data->dev_id);
227 
228 	if (nb_queues != 0) {
229 		queues_cfg = dev->data->queues_cfg;
230 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
231 
232 		for (i = nb_queues; i < old_nb_queues; i++)
233 			(*dev->dev_ops->queue_release)(dev, i);
234 
235 
236 		if (nb_queues > old_nb_queues) {
237 			uint8_t new_qs = nb_queues - old_nb_queues;
238 
239 			memset(queues_cfg + old_nb_queues, 0,
240 				sizeof(queues_cfg[0]) * new_qs);
241 		}
242 	} else {
243 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
244 
245 		for (i = nb_queues; i < old_nb_queues; i++)
246 			(*dev->dev_ops->queue_release)(dev, i);
247 	}
248 
249 	dev->data->nb_queues = nb_queues;
250 	return 0;
251 }
252 
253 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
254 
255 static inline int
256 event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
257 {
258 	uint8_t old_nb_ports = dev->data->nb_ports;
259 	void **ports;
260 	uint16_t *links_map;
261 	struct rte_event_port_conf *ports_cfg;
262 	unsigned int i;
263 
264 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
265 			 dev->data->dev_id);
266 
267 	if (nb_ports != 0) { /* re-config */
268 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
269 
270 		ports = dev->data->ports;
271 		ports_cfg = dev->data->ports_cfg;
272 		links_map = dev->data->links_map;
273 
274 		for (i = nb_ports; i < old_nb_ports; i++)
275 			(*dev->dev_ops->port_release)(ports[i]);
276 
277 		if (nb_ports > old_nb_ports) {
278 			uint8_t new_ps = nb_ports - old_nb_ports;
279 			unsigned int old_links_map_end =
280 				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
281 			unsigned int links_map_end =
282 				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
283 
284 			memset(ports + old_nb_ports, 0,
285 				sizeof(ports[0]) * new_ps);
286 			memset(ports_cfg + old_nb_ports, 0,
287 				sizeof(ports_cfg[0]) * new_ps);
288 			for (i = old_links_map_end; i < links_map_end; i++)
289 				links_map[i] =
290 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
291 		}
292 	} else {
293 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
294 
295 		ports = dev->data->ports;
296 		for (i = nb_ports; i < old_nb_ports; i++) {
297 			(*dev->dev_ops->port_release)(ports[i]);
298 			ports[i] = NULL;
299 		}
300 	}
301 
302 	dev->data->nb_ports = nb_ports;
303 	return 0;
304 }
305 
306 int
307 rte_event_dev_configure(uint8_t dev_id,
308 			const struct rte_event_dev_config *dev_conf)
309 {
310 	struct rte_event_dev_info info;
311 	struct rte_eventdev *dev;
312 	int diag;
313 
314 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
315 	dev = &rte_eventdevs[dev_id];
316 
317 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
318 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
319 
320 	if (dev->data->dev_started) {
321 		RTE_EDEV_LOG_ERR(
322 		    "device %d must be stopped to allow configuration", dev_id);
323 		return -EBUSY;
324 	}
325 
326 	if (dev_conf == NULL)
327 		return -EINVAL;
328 
329 	(*dev->dev_ops->dev_infos_get)(dev, &info);
330 
331 	/* Check that dequeue_timeout_ns is within the supported range */
332 	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
333 		if (dev_conf->dequeue_timeout_ns &&
334 		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
335 			|| dev_conf->dequeue_timeout_ns >
336 				 info.max_dequeue_timeout_ns)) {
337 			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
338 			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
339 			dev_id, dev_conf->dequeue_timeout_ns,
340 			info.min_dequeue_timeout_ns,
341 			info.max_dequeue_timeout_ns);
342 			return -EINVAL;
343 		}
344 	}
345 
346 	/* Check that nb_events_limit is within the supported range */
347 	if (dev_conf->nb_events_limit > info.max_num_events) {
348 		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
349 		dev_id, dev_conf->nb_events_limit, info.max_num_events);
350 		return -EINVAL;
351 	}
352 
353 	/* Check that nb_event_queues is within the supported range */
354 	if (!dev_conf->nb_event_queues) {
355 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
356 					dev_id);
357 		return -EINVAL;
358 	}
359 	if (dev_conf->nb_event_queues > info.max_event_queues +
360 			info.max_single_link_event_port_queue_pairs) {
361 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
362 				 dev_id, dev_conf->nb_event_queues,
363 				 info.max_event_queues,
364 				 info.max_single_link_event_port_queue_pairs);
365 		return -EINVAL;
366 	}
367 	if (dev_conf->nb_event_queues -
368 			dev_conf->nb_single_link_event_port_queues >
369 			info.max_event_queues) {
370 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
371 				 dev_id, dev_conf->nb_event_queues,
372 				 dev_conf->nb_single_link_event_port_queues,
373 				 info.max_event_queues);
374 		return -EINVAL;
375 	}
376 	if (dev_conf->nb_single_link_event_port_queues >
377 			dev_conf->nb_event_queues) {
378 		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
379 				 dev_id,
380 				 dev_conf->nb_single_link_event_port_queues,
381 				 dev_conf->nb_event_queues);
382 		return -EINVAL;
383 	}
384 
385 	/* Check that nb_event_ports is within the supported range */
386 	if (!dev_conf->nb_event_ports) {
387 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
388 		return -EINVAL;
389 	}
390 	if (dev_conf->nb_event_ports > info.max_event_ports +
391 			info.max_single_link_event_port_queue_pairs) {
392 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
393 				 dev_id, dev_conf->nb_event_ports,
394 				 info.max_event_ports,
395 				 info.max_single_link_event_port_queue_pairs);
396 		return -EINVAL;
397 	}
398 	if (dev_conf->nb_event_ports -
399 			dev_conf->nb_single_link_event_port_queues
400 			> info.max_event_ports) {
401 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
402 				 dev_id, dev_conf->nb_event_ports,
403 				 dev_conf->nb_single_link_event_port_queues,
404 				 info.max_event_ports);
405 		return -EINVAL;
406 	}
407 
408 	if (dev_conf->nb_single_link_event_port_queues >
409 	    dev_conf->nb_event_ports) {
410 		RTE_EDEV_LOG_ERR(
411 				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
412 				 dev_id,
413 				 dev_conf->nb_single_link_event_port_queues,
414 				 dev_conf->nb_event_ports);
415 		return -EINVAL;
416 	}
417 
418 	/* Check that nb_event_queue_flows is within the supported range */
419 	if (!dev_conf->nb_event_queue_flows) {
420 		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
421 		return -EINVAL;
422 	}
423 	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
424 		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
425 		dev_id, dev_conf->nb_event_queue_flows,
426 		info.max_event_queue_flows);
427 		return -EINVAL;
428 	}
429 
430 	/* Check that nb_event_port_dequeue_depth is within the supported range */
431 	if (!dev_conf->nb_event_port_dequeue_depth) {
432 		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
433 					dev_id);
434 		return -EINVAL;
435 	}
436 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
437 		 (dev_conf->nb_event_port_dequeue_depth >
438 			 info.max_event_port_dequeue_depth)) {
439 		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
440 		dev_id, dev_conf->nb_event_port_dequeue_depth,
441 		info.max_event_port_dequeue_depth);
442 		return -EINVAL;
443 	}
444 
445 	/* Check that nb_event_port_enqueue_depth is within the supported range */
446 	if (!dev_conf->nb_event_port_enqueue_depth) {
447 		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
448 					dev_id);
449 		return -EINVAL;
450 	}
451 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
452 		(dev_conf->nb_event_port_enqueue_depth >
453 			 info.max_event_port_enqueue_depth)) {
454 		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
455 		dev_id, dev_conf->nb_event_port_enqueue_depth,
456 		info.max_event_port_enqueue_depth);
457 		return -EINVAL;
458 	}
459 
460 	/* Copy the dev_conf parameter into the dev structure */
461 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
462 
463 	/* Setup new number of queues and reconfigure device. */
464 	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
465 	if (diag != 0) {
466 		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
467 				 diag);
468 		return diag;
469 	}
470 
471 	/* Setup new number of ports and reconfigure device. */
472 	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
473 	if (diag != 0) {
474 		event_dev_queue_config(dev, 0);
475 		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
476 				 diag);
477 		return diag;
478 	}
479 
480 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
481 
482 	/* Configure the device */
483 	diag = (*dev->dev_ops->dev_configure)(dev);
484 	if (diag != 0) {
485 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
486 		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
487 		event_dev_queue_config(dev, 0);
488 		event_dev_port_config(dev, 0);
489 	}
490 
491 	dev->data->event_dev_cap = info.event_dev_cap;
492 	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
493 	return diag;
494 }
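
/*
 * Usage sketch (illustrative only): filling rte_event_dev_config from the
 * limits advertised by rte_event_dev_info_get() before configuring the
 * device. The single-queue/single-port sizing reuses the "info" structure
 * from the earlier sketch and is an assumption for the example.
 *
 *	struct rte_event_dev_config cfg = {
 *		.nb_event_queues = 1,
 *		.nb_event_ports = 1,
 *		.nb_events_limit = info.max_num_events,
 *		.nb_event_queue_flows = info.max_event_queue_flows,
 *		.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth,
 *		.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth,
 *		.dequeue_timeout_ns = info.min_dequeue_timeout_ns,
 *	};
 *
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_panic("eventdev configure failed\n");
 */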
495 
496 static inline int
497 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
498 {
499 	if (queue_id < dev->data->nb_queues && queue_id <
500 				RTE_EVENT_MAX_QUEUES_PER_DEV)
501 		return 1;
502 	else
503 		return 0;
504 }
505 
506 int
507 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
508 				 struct rte_event_queue_conf *queue_conf)
509 {
510 	struct rte_eventdev *dev;
511 
512 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
513 	dev = &rte_eventdevs[dev_id];
514 
515 	if (queue_conf == NULL)
516 		return -EINVAL;
517 
518 	if (!is_valid_queue(dev, queue_id)) {
519 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
520 		return -EINVAL;
521 	}
522 
523 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
524 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
525 	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
526 	return 0;
527 }
528 
529 static inline int
530 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
531 {
532 	if (queue_conf &&
533 		!(queue_conf->event_queue_cfg &
534 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
535 		((queue_conf->event_queue_cfg &
536 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
537 		(queue_conf->schedule_type
538 			== RTE_SCHED_TYPE_ATOMIC)
539 		))
540 		return 1;
541 	else
542 		return 0;
543 }
544 
545 static inline int
546 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
547 {
548 	if (queue_conf &&
549 		!(queue_conf->event_queue_cfg &
550 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
551 		((queue_conf->event_queue_cfg &
552 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
553 		(queue_conf->schedule_type
554 			== RTE_SCHED_TYPE_ORDERED)
555 		))
556 		return 1;
557 	else
558 		return 0;
559 }
560 
561 
562 int
563 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
564 		      const struct rte_event_queue_conf *queue_conf)
565 {
566 	struct rte_eventdev *dev;
567 	struct rte_event_queue_conf def_conf;
568 
569 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
570 	dev = &rte_eventdevs[dev_id];
571 
572 	if (!is_valid_queue(dev, queue_id)) {
573 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
574 		return -EINVAL;
575 	}
576 
577 	/* Check nb_atomic_flows limit */
578 	if (is_valid_atomic_queue_conf(queue_conf)) {
579 		if (queue_conf->nb_atomic_flows == 0 ||
580 		    queue_conf->nb_atomic_flows >
581 			dev->data->dev_conf.nb_event_queue_flows) {
582 			RTE_EDEV_LOG_ERR(
583 		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
584 			dev_id, queue_id, queue_conf->nb_atomic_flows,
585 			dev->data->dev_conf.nb_event_queue_flows);
586 			return -EINVAL;
587 		}
588 	}
589 
590 	/* Check nb_atomic_order_sequences limit */
591 	if (is_valid_ordered_queue_conf(queue_conf)) {
592 		if (queue_conf->nb_atomic_order_sequences == 0 ||
593 		    queue_conf->nb_atomic_order_sequences >
594 			dev->data->dev_conf.nb_event_queue_flows) {
595 			RTE_EDEV_LOG_ERR(
596 		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
597 			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
598 			dev->data->dev_conf.nb_event_queue_flows);
599 			return -EINVAL;
600 		}
601 	}
602 
603 	if (dev->data->dev_started) {
604 		RTE_EDEV_LOG_ERR(
605 		    "device %d must be stopped to allow queue setup", dev_id);
606 		return -EBUSY;
607 	}
608 
609 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
610 
611 	if (queue_conf == NULL) {
612 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
613 					-ENOTSUP);
614 		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
615 		queue_conf = &def_conf;
616 	}
617 
618 	dev->data->queues_cfg[queue_id] = *queue_conf;
619 	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
620 	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
621 }
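
/*
 * Usage sketch (illustrative only): setting up queue 0 from the PMD default
 * configuration and overriding only the schedule type. Atomic scheduling is
 * an arbitrary choice for the example.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	if (rte_event_queue_default_conf_get(dev_id, 0, &qconf) < 0)
 *		rte_panic("cannot get default queue config\n");
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	if (rte_event_queue_setup(dev_id, 0, &qconf) < 0)
 *		rte_panic("queue setup failed\n");
 */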
622 
623 static inline int
624 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
625 {
626 	if (port_id < dev->data->nb_ports)
627 		return 1;
628 	else
629 		return 0;
630 }
631 
632 int
633 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
634 				 struct rte_event_port_conf *port_conf)
635 {
636 	struct rte_eventdev *dev;
637 
638 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
639 	dev = &rte_eventdevs[dev_id];
640 
641 	if (port_conf == NULL)
642 		return -EINVAL;
643 
644 	if (!is_valid_port(dev, port_id)) {
645 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
646 		return -EINVAL;
647 	}
648 
649 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
650 	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
651 	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
652 	return 0;
653 }
654 
655 int
656 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
657 		     const struct rte_event_port_conf *port_conf)
658 {
659 	struct rte_eventdev *dev;
660 	struct rte_event_port_conf def_conf;
661 	int diag;
662 
663 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
664 	dev = &rte_eventdevs[dev_id];
665 
666 	if (!is_valid_port(dev, port_id)) {
667 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
668 		return -EINVAL;
669 	}
670 
671 	/* Check new_event_threshold limit */
672 	if ((port_conf && !port_conf->new_event_threshold) ||
673 			(port_conf && port_conf->new_event_threshold >
674 				 dev->data->dev_conf.nb_events_limit)) {
675 		RTE_EDEV_LOG_ERR(
676 		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
677 			dev_id, port_id, port_conf->new_event_threshold,
678 			dev->data->dev_conf.nb_events_limit);
679 		return -EINVAL;
680 	}
681 
682 	/* Check dequeue_depth limit */
683 	if ((port_conf && !port_conf->dequeue_depth) ||
684 			(port_conf && port_conf->dequeue_depth >
685 		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
686 		RTE_EDEV_LOG_ERR(
687 		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
688 			dev_id, port_id, port_conf->dequeue_depth,
689 			dev->data->dev_conf.nb_event_port_dequeue_depth);
690 		return -EINVAL;
691 	}
692 
693 	/* Check enqueue_depth limit */
694 	if ((port_conf && !port_conf->enqueue_depth) ||
695 			(port_conf && port_conf->enqueue_depth >
696 		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
697 		RTE_EDEV_LOG_ERR(
698 		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
699 			dev_id, port_id, port_conf->enqueue_depth,
700 			dev->data->dev_conf.nb_event_port_enqueue_depth);
701 		return -EINVAL;
702 	}
703 
704 	if (port_conf &&
705 	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
706 	    !(dev->data->event_dev_cap &
707 	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
708 		RTE_EDEV_LOG_ERR(
709 		   "dev%d port%d Implicit release disable not supported",
710 			dev_id, port_id);
711 		return -EINVAL;
712 	}
713 
714 	if (dev->data->dev_started) {
715 		RTE_EDEV_LOG_ERR(
716 		    "device %d must be stopped to allow port setup", dev_id);
717 		return -EBUSY;
718 	}
719 
720 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
721 
722 	if (port_conf == NULL) {
723 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
724 					-ENOTSUP);
725 		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
726 		port_conf = &def_conf;
727 	}
728 
729 	dev->data->ports_cfg[port_id] = *port_conf;
730 
731 	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
732 
733 	/* Unlink all the queues from this port (default state after setup) */
734 	if (!diag)
735 		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
736 
737 	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
738 	if (diag < 0)
739 		return diag;
740 
741 	return 0;
742 }
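
/*
 * Usage sketch (illustrative only): setting up port 0 with the PMD defaults
 * (a NULL port_conf selects them) and linking it to every configured queue
 * at normal priority. rte_event_port_link() returns the number of queues
 * actually linked; the check below assumes a single configured queue.
 *
 *	if (rte_event_port_setup(dev_id, 0, NULL) < 0)
 *		rte_panic("port setup failed\n");
 *	if (rte_event_port_link(dev_id, 0, NULL, NULL, 0) != 1)
 *		rte_panic("port link failed\n");
 */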
743 
744 int
745 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
746 		       uint32_t *attr_value)
747 {
748 	struct rte_eventdev *dev;
749 
750 	if (!attr_value)
751 		return -EINVAL;
752 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
753 	dev = &rte_eventdevs[dev_id];
754 
755 	switch (attr_id) {
756 	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
757 		*attr_value = dev->data->nb_ports;
758 		break;
759 	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
760 		*attr_value = dev->data->nb_queues;
761 		break;
762 	case RTE_EVENT_DEV_ATTR_STARTED:
763 		*attr_value = dev->data->dev_started;
764 		break;
765 	default:
766 		return -EINVAL;
767 	}
768 
769 	return 0;
770 }
771 
772 int
773 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
774 			uint32_t *attr_value)
775 {
776 	struct rte_eventdev *dev;
777 
778 	if (!attr_value)
779 		return -EINVAL;
780 
781 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
782 	dev = &rte_eventdevs[dev_id];
783 	if (!is_valid_port(dev, port_id)) {
784 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
785 		return -EINVAL;
786 	}
787 
788 	switch (attr_id) {
789 	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
790 		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
791 		break;
792 	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
793 		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
794 		break;
795 	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
796 		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
797 		break;
798 	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
799 	{
800 		uint32_t config;
801 
802 		config = dev->data->ports_cfg[port_id].event_port_cfg;
803 		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
804 		break;
805 	}
806 	default:
807 		return -EINVAL;
808 	}
809 	return 0;
810 }
811 
812 int
813 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
814 			uint32_t *attr_value)
815 {
816 	struct rte_event_queue_conf *conf;
817 	struct rte_eventdev *dev;
818 
819 	if (!attr_value)
820 		return -EINVAL;
821 
822 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
823 	dev = &rte_eventdevs[dev_id];
824 	if (!is_valid_queue(dev, queue_id)) {
825 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
826 		return -EINVAL;
827 	}
828 
829 	conf = &dev->data->queues_cfg[queue_id];
830 
831 	switch (attr_id) {
832 	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
833 		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
834 		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
835 			*attr_value = conf->priority;
836 		break;
837 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
838 		*attr_value = conf->nb_atomic_flows;
839 		break;
840 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
841 		*attr_value = conf->nb_atomic_order_sequences;
842 		break;
843 	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
844 		*attr_value = conf->event_queue_cfg;
845 		break;
846 	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
847 		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
848 			return -EOVERFLOW;
849 
850 		*attr_value = conf->schedule_type;
851 		break;
852 	default:
853 		return -EINVAL;
854 	}
855 	return 0;
856 }
857 
858 int
859 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
860 		    const uint8_t queues[], const uint8_t priorities[],
861 		    uint16_t nb_links)
862 {
863 	struct rte_eventdev *dev;
864 	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
865 	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
866 	uint16_t *links_map;
867 	int i, diag;
868 
869 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
870 	dev = &rte_eventdevs[dev_id];
871 
872 	if (*dev->dev_ops->port_link == NULL) {
873 		RTE_EDEV_LOG_ERR("Function not supported");
874 		rte_errno = ENOTSUP;
875 		return 0;
876 	}
877 
878 	if (!is_valid_port(dev, port_id)) {
879 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
880 		rte_errno = EINVAL;
881 		return 0;
882 	}
883 
884 	if (queues == NULL) {
885 		for (i = 0; i < dev->data->nb_queues; i++)
886 			queues_list[i] = i;
887 
888 		queues = queues_list;
889 		nb_links = dev->data->nb_queues;
890 	}
891 
892 	if (priorities == NULL) {
893 		for (i = 0; i < nb_links; i++)
894 			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
895 
896 		priorities = priorities_list;
897 	}
898 
899 	for (i = 0; i < nb_links; i++)
900 		if (queues[i] >= dev->data->nb_queues) {
901 			rte_errno = EINVAL;
902 			return 0;
903 		}
904 
905 	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
906 						queues, priorities, nb_links);
907 	if (diag < 0)
908 		return diag;
909 
910 	links_map = dev->data->links_map;
911 	/* Point links_map to this port's specific area */
912 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
913 	for (i = 0; i < diag; i++)
914 		links_map[queues[i]] = (uint8_t)priorities[i];
915 
916 	rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
917 	return diag;
918 }
919 
920 int
921 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
922 		      uint8_t queues[], uint16_t nb_unlinks)
923 {
924 	struct rte_eventdev *dev;
925 	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
926 	int i, diag, j;
927 	uint16_t *links_map;
928 
929 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
930 	dev = &rte_eventdevs[dev_id];
931 
932 	if (*dev->dev_ops->port_unlink == NULL) {
933 		RTE_EDEV_LOG_ERR("Function not supported");
934 		rte_errno = ENOTSUP;
935 		return 0;
936 	}
937 
938 	if (!is_valid_port(dev, port_id)) {
939 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
940 		rte_errno = EINVAL;
941 		return 0;
942 	}
943 
944 	links_map = dev->data->links_map;
945 	/* Point links_map to this port's specific area */
946 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
947 
948 	if (queues == NULL) {
949 		j = 0;
950 		for (i = 0; i < dev->data->nb_queues; i++) {
951 			if (links_map[i] !=
952 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
953 				all_queues[j] = i;
954 				j++;
955 			}
956 		}
957 		queues = all_queues;
958 	} else {
959 		for (j = 0; j < nb_unlinks; j++) {
960 			if (links_map[queues[j]] ==
961 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
962 				break;
963 		}
964 	}
965 
966 	nb_unlinks = j;
967 	for (i = 0; i < nb_unlinks; i++)
968 		if (queues[i] >= dev->data->nb_queues) {
969 			rte_errno = EINVAL;
970 			return 0;
971 		}
972 
973 	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
974 					queues, nb_unlinks);
975 
976 	if (diag < 0)
977 		return diag;
978 
979 	for (i = 0; i < diag; i++)
980 		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
981 
982 	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
983 	return diag;
984 }
985 
986 int
987 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
988 {
989 	struct rte_eventdev *dev;
990 
991 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
992 	dev = &rte_eventdevs[dev_id];
993 	if (!is_valid_port(dev, port_id)) {
994 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
995 		return -EINVAL;
996 	}
997 
998 	/* Return 0 if the PMD does not implement unlinks in progress.
999 	 * This allows PMDs which handle unlink synchronously to not implement
1000 	 * this function at all.
1001 	 */
1002 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
1003 
1004 	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1005 			dev->data->ports[port_id]);
1006 }
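
/*
 * Usage sketch (illustrative only): unlinking one queue at runtime and
 * waiting until the PMD reports the unlink as complete, as required before
 * the queue can safely be reconfigured. Queue 0 is assumed to be currently
 * linked to port_id.
 *
 *	uint8_t q = 0;
 *
 *	if (rte_event_port_unlink(dev_id, port_id, &q, 1) != 1)
 *		rte_panic("unlink failed\n");
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */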
1007 
1008 int
1009 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1010 			 uint8_t queues[], uint8_t priorities[])
1011 {
1012 	struct rte_eventdev *dev;
1013 	uint16_t *links_map;
1014 	int i, count = 0;
1015 
1016 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1017 	dev = &rte_eventdevs[dev_id];
1018 	if (!is_valid_port(dev, port_id)) {
1019 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1020 		return -EINVAL;
1021 	}
1022 
1023 	links_map = dev->data->links_map;
1024 	/* Point links_map to this port specific area */
1025 	/* Point links_map to this port's specific area */
1026 	for (i = 0; i < dev->data->nb_queues; i++) {
1027 		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1028 			queues[count] = i;
1029 			priorities[count] = (uint8_t)links_map[i];
1030 			++count;
1031 		}
1032 	}
1033 	return count;
1034 }
1035 
1036 int
1037 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1038 				 uint64_t *timeout_ticks)
1039 {
1040 	struct rte_eventdev *dev;
1041 
1042 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1043 	dev = &rte_eventdevs[dev_id];
1044 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1045 
1046 	if (timeout_ticks == NULL)
1047 		return -EINVAL;
1048 
1049 	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1050 }
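
/*
 * Usage sketch (illustrative only): converting a 10 microsecond timeout to
 * device ticks for rte_event_dequeue_burst(). Falling back to 0 (a
 * non-blocking dequeue) when the conversion is unsupported is an assumption
 * made for the example.
 *
 *	struct rte_event events[32];
 *	uint64_t ticks = 0;
 *	uint16_t nb;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 10 * 1000, &ticks) < 0)
 *		ticks = 0;
 *	nb = rte_event_dequeue_burst(dev_id, port_id, events,
 *				     RTE_DIM(events), ticks);
 */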
1051 
1052 int
1053 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1054 {
1055 	struct rte_eventdev *dev;
1056 
1057 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1058 	dev = &rte_eventdevs[dev_id];
1059 
1060 	if (service_id == NULL)
1061 		return -EINVAL;
1062 
1063 	if (dev->data->service_inited)
1064 		*service_id = dev->data->service_id;
1065 
1066 	return dev->data->service_inited ? 0 : -ESRCH;
1067 }
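
/*
 * Usage sketch (illustrative only): eventdevs that rely on a software
 * scheduler expose it as a service, which must run on a service lcore for
 * events to flow. lcore 1 is an arbitrary choice for the example.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_lcore_add(1);
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_runstate_set(service_id, 1);
 *		rte_service_lcore_start(1);
 *	}
 */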
1068 
1069 int
1070 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1071 {
1072 	struct rte_eventdev *dev;
1073 
1074 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1075 	dev = &rte_eventdevs[dev_id];
1076 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1077 	if (f == NULL)
1078 		return -EINVAL;
1079 
1080 	(*dev->dev_ops->dump)(dev, f);
1081 	return 0;
1082 
1083 }
1084 
1085 static int
1086 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1087 		uint8_t queue_port_id)
1088 {
1089 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1090 	if (dev->dev_ops->xstats_get_names != NULL)
1091 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1092 							queue_port_id,
1093 							NULL, NULL, 0);
1094 	return 0;
1095 }
1096 
1097 int
1098 rte_event_dev_xstats_names_get(uint8_t dev_id,
1099 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1100 		struct rte_event_dev_xstats_name *xstats_names,
1101 		unsigned int *ids, unsigned int size)
1102 {
1103 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1104 	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1105 							  queue_port_id);
1106 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
1107 			(int)size < cnt_expected_entries)
1108 		return cnt_expected_entries;
1109 
1110 	/* dev_id checked above */
1111 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1112 
1113 	if (dev->dev_ops->xstats_get_names != NULL)
1114 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1115 				queue_port_id, xstats_names, ids, size);
1116 
1117 	return -ENOTSUP;
1118 }
1119 
1120 /* retrieve eventdev extended statistics */
1121 int
1122 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1123 		uint8_t queue_port_id, const unsigned int ids[],
1124 		uint64_t values[], unsigned int n)
1125 {
1126 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1127 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1128 
1129 	/* implemented by the driver */
1130 	if (dev->dev_ops->xstats_get != NULL)
1131 		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1132 				ids, values, n);
1133 	return -ENOTSUP;
1134 }
1135 
1136 uint64_t
1137 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1138 		unsigned int *id)
1139 {
1140 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1141 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1142 	unsigned int temp = -1;
1143 
1144 	if (id != NULL)
1145 		*id = (unsigned int)-1;
1146 	else
1147 		id = &temp; /* ensure driver never gets a NULL value */
1148 
1149 	/* implemented by driver */
1150 	if (dev->dev_ops->xstats_get_by_name != NULL)
1151 		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1152 	return -ENOTSUP;
1153 }
1154 
1155 int rte_event_dev_xstats_reset(uint8_t dev_id,
1156 		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1157 		const uint32_t ids[], uint32_t nb_ids)
1158 {
1159 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1160 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1161 
1162 	if (dev->dev_ops->xstats_reset != NULL)
1163 		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1164 							ids, nb_ids);
1165 	return -ENOTSUP;
1166 }
1167 
1168 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1169 
1170 int rte_event_dev_selftest(uint8_t dev_id)
1171 {
1172 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1173 	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1174 		.name = "rte_event_pmd_selftest_seqn_dynfield",
1175 		.size = sizeof(rte_event_pmd_selftest_seqn_t),
1176 		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
1177 	};
1178 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1179 
1180 	if (dev->dev_ops->dev_selftest != NULL) {
1181 		rte_event_pmd_selftest_seqn_dynfield_offset =
1182 			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1183 		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1184 			return -ENOMEM;
1185 		return (*dev->dev_ops->dev_selftest)();
1186 	}
1187 	return -ENOTSUP;
1188 }
1189 
1190 struct rte_mempool *
1191 rte_event_vector_pool_create(const char *name, unsigned int n,
1192 			     unsigned int cache_size, uint16_t nb_elem,
1193 			     int socket_id)
1194 {
1195 	const char *mp_ops_name;
1196 	struct rte_mempool *mp;
1197 	unsigned int elt_sz;
1198 	int ret;
1199 
1200 	if (!nb_elem) {
1201 		RTE_LOG(ERR, EVENTDEV,
1202 			"Invalid number of elements=%d requested\n", nb_elem);
1203 		rte_errno = EINVAL;
1204 		return NULL;
1205 	}
1206 
1207 	elt_sz =
1208 		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1209 	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1210 				      0);
1211 	if (mp == NULL)
1212 		return NULL;
1213 
1214 	mp_ops_name = rte_mbuf_best_mempool_ops();
1215 	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1216 	if (ret != 0) {
1217 		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
1218 		goto err;
1219 	}
1220 
1221 	ret = rte_mempool_populate_default(mp);
1222 	if (ret < 0)
1223 		goto err;
1224 
1225 	return mp;
1226 err:
1227 	rte_mempool_free(mp);
1228 	rte_errno = -ret;
1229 	return NULL;
1230 }
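
/*
 * Usage sketch (illustrative only): creating a pool of event vectors able to
 * carry up to 32 pointers each. The pool size, cache size and name are
 * assumptions for the example.
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("ev_vec_pool", 16384, 128,
 *						32, rte_socket_id());
 *	if (vec_pool == NULL)
 *		rte_panic("vector pool creation failed: %d\n", rte_errno);
 */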
1231 
1232 int
1233 rte_event_dev_start(uint8_t dev_id)
1234 {
1235 	struct rte_eventdev *dev;
1236 	int diag;
1237 
1238 	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1239 
1240 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1241 	dev = &rte_eventdevs[dev_id];
1242 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1243 
1244 	if (dev->data->dev_started != 0) {
1245 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1246 			dev_id);
1247 		return 0;
1248 	}
1249 
1250 	diag = (*dev->dev_ops->dev_start)(dev);
1251 	rte_eventdev_trace_start(dev_id, diag);
1252 	if (diag == 0)
1253 		dev->data->dev_started = 1;
1254 	else
1255 		return diag;
1256 
1257 	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
1258 
1259 	return 0;
1260 }
1261 
1262 int
1263 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1264 		eventdev_stop_flush_t callback, void *userdata)
1265 {
1266 	struct rte_eventdev *dev;
1267 
1268 	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1269 
1270 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1271 	dev = &rte_eventdevs[dev_id];
1272 
1273 	dev->dev_ops->dev_stop_flush = callback;
1274 	dev->data->dev_stop_flush_arg = userdata;
1275 
1276 	return 0;
1277 }
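
/*
 * Usage sketch (illustrative only): a stop-flush callback that frees the
 * mbuf carried by each event still held inside the device when
 * rte_event_dev_stop() is called. Assumes every in-flight event carries an
 * mbuf.
 *
 *	static void
 *	flush_cb(uint8_t id, struct rte_event ev, void *arg)
 *	{
 *		RTE_SET_USED(id);
 *		RTE_SET_USED(arg);
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	...
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
 */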
1278 
1279 void
1280 rte_event_dev_stop(uint8_t dev_id)
1281 {
1282 	struct rte_eventdev *dev;
1283 
1284 	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1285 
1286 	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1287 	dev = &rte_eventdevs[dev_id];
1288 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1289 
1290 	if (dev->data->dev_started == 0) {
1291 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1292 			dev_id);
1293 		return;
1294 	}
1295 
1296 	dev->data->dev_started = 0;
1297 	(*dev->dev_ops->dev_stop)(dev);
1298 	rte_eventdev_trace_stop(dev_id);
1299 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1300 }
1301 
1302 int
1303 rte_event_dev_close(uint8_t dev_id)
1304 {
1305 	struct rte_eventdev *dev;
1306 
1307 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1308 	dev = &rte_eventdevs[dev_id];
1309 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1310 
1311 	/* Device must be stopped before it can be closed */
1312 	if (dev->data->dev_started == 1) {
1313 		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1314 				dev_id);
1315 		return -EBUSY;
1316 	}
1317 
1318 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1319 	rte_eventdev_trace_close(dev_id);
1320 	return (*dev->dev_ops->dev_close)(dev);
1321 }
1322 
1323 static inline int
1324 eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1325 		    int socket_id)
1326 {
1327 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1328 	const struct rte_memzone *mz;
1329 	int n;
1330 
1331 	/* Generate memzone name */
1332 	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1333 	if (n >= (int)sizeof(mz_name))
1334 		return -EINVAL;
1335 
1336 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1337 		mz = rte_memzone_reserve(mz_name,
1338 				sizeof(struct rte_eventdev_data),
1339 				socket_id, 0);
1340 	} else
1341 		mz = rte_memzone_lookup(mz_name);
1342 
1343 	if (mz == NULL)
1344 		return -ENOMEM;
1345 
1346 	*data = mz->addr;
1347 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1348 		memset(*data, 0, sizeof(struct rte_eventdev_data));
1349 		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
1350 					RTE_EVENT_MAX_QUEUES_PER_DEV;
1351 		     n++)
1352 			(*data)->links_map[n] =
1353 				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1354 	}
1355 
1356 	return 0;
1357 }
1358 
1359 static inline uint8_t
1360 eventdev_find_free_device_index(void)
1361 {
1362 	uint8_t dev_id;
1363 
1364 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1365 		if (rte_eventdevs[dev_id].attached ==
1366 				RTE_EVENTDEV_DETACHED)
1367 			return dev_id;
1368 	}
1369 	return RTE_EVENT_MAX_DEVS;
1370 }
1371 
1372 struct rte_eventdev *
1373 rte_event_pmd_allocate(const char *name, int socket_id)
1374 {
1375 	struct rte_eventdev *eventdev;
1376 	uint8_t dev_id;
1377 
1378 	if (rte_event_pmd_get_named_dev(name) != NULL) {
1379 		RTE_EDEV_LOG_ERR("Event device with name %s already "
1380 				"allocated!", name);
1381 		return NULL;
1382 	}
1383 
1384 	dev_id = eventdev_find_free_device_index();
1385 	if (dev_id == RTE_EVENT_MAX_DEVS) {
1386 		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1387 		return NULL;
1388 	}
1389 
1390 	eventdev = &rte_eventdevs[dev_id];
1391 
1392 	if (eventdev->data == NULL) {
1393 		struct rte_eventdev_data *eventdev_data = NULL;
1394 
1395 		int retval =
1396 			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
1397 
1398 		if (retval < 0 || eventdev_data == NULL)
1399 			return NULL;
1400 
1401 		eventdev->data = eventdev_data;
1402 
1403 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1404 
1405 			strlcpy(eventdev->data->name, name,
1406 				RTE_EVENTDEV_NAME_MAX_LEN);
1407 
1408 			eventdev->data->dev_id = dev_id;
1409 			eventdev->data->socket_id = socket_id;
1410 			eventdev->data->dev_started = 0;
1411 		}
1412 
1413 		eventdev->attached = RTE_EVENTDEV_ATTACHED;
1414 		eventdev_globals.nb_devs++;
1415 	}
1416 
1417 	return eventdev;
1418 }
1419 
1420 int
1421 rte_event_pmd_release(struct rte_eventdev *eventdev)
1422 {
1423 	int ret;
1424 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1425 	const struct rte_memzone *mz;
1426 
1427 	if (eventdev == NULL)
1428 		return -EINVAL;
1429 
1430 	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
1431 	eventdev->attached = RTE_EVENTDEV_DETACHED;
1432 	eventdev_globals.nb_devs--;
1433 
1434 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1435 		rte_free(eventdev->data->dev_private);
1436 
1437 		/* Generate memzone name */
1438 		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1439 				eventdev->data->dev_id);
1440 		if (ret >= (int)sizeof(mz_name))
1441 			return -EINVAL;
1442 
1443 		mz = rte_memzone_lookup(mz_name);
1444 		if (mz == NULL)
1445 			return -ENOMEM;
1446 
1447 		ret = rte_memzone_free(mz);
1448 		if (ret)
1449 			return ret;
1450 	}
1451 
1452 	eventdev->data = NULL;
1453 	return 0;
1454 }
1455 
1456 void
1457 event_dev_probing_finish(struct rte_eventdev *eventdev)
1458 {
1459 	if (eventdev == NULL)
1460 		return;
1461 
1462 	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
1463 			     eventdev);
1464 }
1465 
1466 static int
1467 handle_dev_list(const char *cmd __rte_unused,
1468 		const char *params __rte_unused,
1469 		struct rte_tel_data *d)
1470 {
1471 	uint8_t dev_id;
1472 	int ndev = rte_event_dev_count();
1473 
1474 	if (ndev < 1)
1475 		return -1;
1476 
1477 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1478 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1479 		if (rte_eventdevs[dev_id].attached ==
1480 				RTE_EVENTDEV_ATTACHED)
1481 			rte_tel_data_add_array_int(d, dev_id);
1482 	}
1483 
1484 	return 0;
1485 }
1486 
1487 static int
1488 handle_port_list(const char *cmd __rte_unused,
1489 		 const char *params,
1490 		 struct rte_tel_data *d)
1491 {
1492 	int i;
1493 	uint8_t dev_id;
1494 	struct rte_eventdev *dev;
1495 	char *end_param;
1496 
1497 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1498 		return -1;
1499 
1500 	dev_id = strtoul(params, &end_param, 10);
1501 	if (*end_param != '\0')
1502 		RTE_EDEV_LOG_DEBUG(
1503 			"Extra parameters passed to eventdev telemetry command, ignoring");
1504 
1505 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1506 	dev = &rte_eventdevs[dev_id];
1507 
1508 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1509 	for (i = 0; i < dev->data->nb_ports; i++)
1510 		rte_tel_data_add_array_int(d, i);
1511 
1512 	return 0;
1513 }
1514 
1515 static int
1516 handle_queue_list(const char *cmd __rte_unused,
1517 		  const char *params,
1518 		  struct rte_tel_data *d)
1519 {
1520 	int i;
1521 	uint8_t dev_id;
1522 	struct rte_eventdev *dev;
1523 	char *end_param;
1524 
1525 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1526 		return -1;
1527 
1528 	dev_id = strtoul(params, &end_param, 10);
1529 	if (*end_param != '\0')
1530 		RTE_EDEV_LOG_DEBUG(
1531 			"Extra parameters passed to eventdev telemetry command, ignoring");
1532 
1533 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1534 	dev = &rte_eventdevs[dev_id];
1535 
1536 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1537 	for (i = 0; i < dev->data->nb_queues; i++)
1538 		rte_tel_data_add_array_int(d, i);
1539 
1540 	return 0;
1541 }
1542 
1543 static int
1544 handle_queue_links(const char *cmd __rte_unused,
1545 		   const char *params,
1546 		   struct rte_tel_data *d)
1547 {
1548 	int i, ret, port_id = 0;
1549 	char *end_param;
1550 	uint8_t dev_id;
1551 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1552 	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1553 	const char *p_param;
1554 
1555 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1556 		return -1;
1557 
1558 	/* Get dev ID from parameter string */
1559 	dev_id = strtoul(params, &end_param, 10);
1560 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1561 
1562 	p_param = strtok(end_param, ",");
1563 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1564 		return -1;
1565 
1566 	port_id = strtoul(p_param, &end_param, 10);
1567 	p_param = strtok(NULL, "\0");
1568 	if (p_param != NULL)
1569 		RTE_EDEV_LOG_DEBUG(
1570 			"Extra parameters passed to eventdev telemetry command, ignoring");
1571 
1572 	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1573 	if (ret < 0)
1574 		return -1;
1575 
1576 	rte_tel_data_start_dict(d);
1577 	for (i = 0; i < ret; i++) {
1578 		char qid_name[32];
1579 
1580 		snprintf(qid_name, sizeof(qid_name), "qid_%u", queues[i]);
1581 		rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
1582 	}
1583 
1584 	return 0;
1585 }
1586 
1587 static int
1588 eventdev_build_telemetry_data(int dev_id,
1589 			      enum rte_event_dev_xstats_mode mode,
1590 			      int port_queue_id,
1591 			      struct rte_tel_data *d)
1592 {
1593 	struct rte_event_dev_xstats_name *xstat_names;
1594 	unsigned int *ids;
1595 	uint64_t *values;
1596 	int i, ret, num_xstats;
1597 
1598 	num_xstats = rte_event_dev_xstats_names_get(dev_id,
1599 						    mode,
1600 						    port_queue_id,
1601 						    NULL,
1602 						    NULL,
1603 						    0);
1604 
1605 	if (num_xstats < 0)
1606 		return -1;
1607 
1608 	/* use one malloc for names */
1609 	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1610 			     * num_xstats);
1611 	if (xstat_names == NULL)
1612 		return -1;
1613 
1614 	ids = malloc((sizeof(unsigned int)) * num_xstats);
1615 	if (ids == NULL) {
1616 		free(xstat_names);
1617 		return -1;
1618 	}
1619 
1620 	values = malloc((sizeof(uint64_t)) * num_xstats);
1621 	if (values == NULL) {
1622 		free(xstat_names);
1623 		free(ids);
1624 		return -1;
1625 	}
1626 
1627 	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1628 					     xstat_names, ids, num_xstats);
1629 	if (ret < 0 || ret > num_xstats) {
1630 		free(xstat_names);
1631 		free(ids);
1632 		free(values);
1633 		return -1;
1634 	}
1635 
1636 	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1637 				       ids, values, num_xstats);
1638 	if (ret < 0 || ret > num_xstats) {
1639 		free(xstat_names);
1640 		free(ids);
1641 		free(values);
1642 		return -1;
1643 	}
1644 
1645 	rte_tel_data_start_dict(d);
1646 	for (i = 0; i < num_xstats; i++)
1647 		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
1648 					  values[i]);
1649 
1650 	free(xstat_names);
1651 	free(ids);
1652 	free(values);
1653 	return 0;
1654 }
1655 
1656 static int
1657 handle_dev_xstats(const char *cmd __rte_unused,
1658 		  const char *params,
1659 		  struct rte_tel_data *d)
1660 {
1661 	int dev_id;
1662 	enum rte_event_dev_xstats_mode mode;
1663 	char *end_param;
1664 
1665 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1666 		return -1;
1667 
1668 	/* Get dev ID from parameter string */
1669 	dev_id = strtoul(params, &end_param, 10);
1670 	if (*end_param != '\0')
1671 		RTE_EDEV_LOG_DEBUG(
1672 			"Extra parameters passed to eventdev telemetry command, ignoring");
1673 
1674 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1675 
1676 	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1677 	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1678 }
1679 
1680 static int
1681 handle_port_xstats(const char *cmd __rte_unused,
1682 		   const char *params,
1683 		   struct rte_tel_data *d)
1684 {
1685 	int dev_id;
1686 	int port_queue_id = 0;
1687 	enum rte_event_dev_xstats_mode mode;
1688 	char *end_param;
1689 	const char *p_param;
1690 
1691 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1692 		return -1;
1693 
1694 	/* Get dev ID from parameter string */
1695 	dev_id = strtoul(params, &end_param, 10);
1696 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1697 
1698 	p_param = strtok(end_param, ",");
1699 	mode = RTE_EVENT_DEV_XSTATS_PORT;
1700 
1701 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1702 		return -1;
1703 
1704 	port_queue_id = strtoul(p_param, &end_param, 10);
1705 
1706 	p_param = strtok(NULL, "\0");
1707 	if (p_param != NULL)
1708 		RTE_EDEV_LOG_DEBUG(
1709 			"Extra parameters passed to eventdev telemetry command, ignoring");
1710 
1711 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1712 }
1713 
1714 static int
1715 handle_queue_xstats(const char *cmd __rte_unused,
1716 		    const char *params,
1717 		    struct rte_tel_data *d)
1718 {
1719 	int dev_id;
1720 	int port_queue_id = 0;
1721 	enum rte_event_dev_xstats_mode mode;
1722 	char *end_param;
1723 	const char *p_param;
1724 
1725 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1726 		return -1;
1727 
1728 	/* Get dev ID from parameter string */
1729 	dev_id = strtoul(params, &end_param, 10);
1730 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1731 
1732 	p_param = strtok(end_param, ",");
1733 	mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1734 
1735 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1736 		return -1;
1737 
1738 	port_queue_id = strtoul(p_param, &end_param, 10);
1739 
1740 	p_param = strtok(NULL, "\0");
1741 	if (p_param != NULL)
1742 		RTE_EDEV_LOG_DEBUG(
1743 			"Extra parameters passed to eventdev telemetry command, ignoring");
1744 
1745 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1746 }
1747 
1748 RTE_INIT(eventdev_init_telemetry)
1749 {
1750 	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
1751 			"Returns list of available eventdevs. Takes no parameters");
1752 	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
1753 			"Returns list of available ports. Parameter: DevID");
1754 	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
1755 			"Returns list of available queues. Parameter: DevID");
1756 
1757 	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
1758 			"Returns stats for an eventdev. Parameter: DevID");
1759 	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
1760 			"Returns stats for an eventdev port. Params: DevID,PortID");
1761 	rte_telemetry_register_cmd("/eventdev/queue_xstats",
1762 			handle_queue_xstats,
1763 			"Returns stats for an eventdev queue. Params: DevID,QueueID");
1764 	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
1765 			"Returns links for an eventdev port. Params: DevID,PortID");
1766 }
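
/*
 * Usage sketch (illustrative only): the commands registered above can be
 * exercised with the dpdk-telemetry.py client shipped in usertools/, e.g.:
 *
 *	--> /eventdev/dev_list
 *	--> /eventdev/port_list,0
 *	--> /eventdev/port_xstats,0,1
 *
 * The returned dictionaries contain PMD-specific xstat names and values.
 */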
1767