1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4 
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <sys/types.h>
14 #include <sys/queue.h>
15 
16 #include <rte_string_fns.h>
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_eal.h>
25 #include <rte_per_lcore.h>
26 #include <rte_lcore.h>
27 #include <rte_atomic.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_common.h>
30 #include <rte_malloc.h>
31 #include <rte_errno.h>
32 #include <rte_ethdev.h>
33 #include <rte_cryptodev.h>
34 #include <rte_cryptodev_pmd.h>
35 
36 #include "rte_eventdev.h"
37 #include "rte_eventdev_pmd.h"
38 
39 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
40 
41 struct rte_eventdev *rte_eventdevs = rte_event_devices;
42 
43 static struct rte_eventdev_global eventdev_globals = {
44 	.nb_devs		= 0
45 };
46 
47 /* Event dev northbound API implementation */
48 
49 uint8_t
50 rte_event_dev_count(void)
51 {
52 	return eventdev_globals.nb_devs;
53 }
54 
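/*
 * Look up an event device by name. A device matches if either its data->name
 * or, when a bus device is attached, its driver name equals the given string;
 * only devices in the ATTACHED state are returned.
 */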
55 int
56 rte_event_dev_get_dev_id(const char *name)
57 {
58 	int i;
59 	uint8_t cmp;
60 
61 	if (!name)
62 		return -EINVAL;
63 
64 	for (i = 0; i < eventdev_globals.nb_devs; i++) {
65 		cmp = (strncmp(rte_event_devices[i].data->name, name,
66 				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
67 			(rte_event_devices[i].dev ? (strncmp(
68 				rte_event_devices[i].dev->driver->name, name,
69 					 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
70 		if (cmp && (rte_event_devices[i].attached ==
71 					RTE_EVENTDEV_ATTACHED))
72 			return i;
73 	}
74 	return -ENODEV;
75 }
76 
77 int
78 rte_event_dev_socket_id(uint8_t dev_id)
79 {
80 	struct rte_eventdev *dev;
81 
82 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
83 	dev = &rte_eventdevs[dev_id];
84 
85 	return dev->data->socket_id;
86 }
87 
88 int
89 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
90 {
91 	struct rte_eventdev *dev;
92 
93 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
94 	dev = &rte_eventdevs[dev_id];
95 
96 	if (dev_info == NULL)
97 		return -EINVAL;
98 
99 	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
100 
101 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
102 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
103 
104 	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
105 
106 	dev_info->dev = dev->dev;
107 	return 0;
108 }
109 
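/*
 * Adapter capability queries (Rx, timer, crypto and Tx below) follow the same
 * pattern: validate the device IDs, zero *caps, then forward to the PMD op if
 * the driver implements it. Drivers that do not implement the op report no
 * capabilities (the crypto variant instead returns -ENOTSUP).
 */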
110 int
111 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
112 				uint32_t *caps)
113 {
114 	struct rte_eventdev *dev;
115 
116 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
117 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
118 
119 	dev = &rte_eventdevs[dev_id];
120 
121 	if (caps == NULL)
122 		return -EINVAL;
123 	*caps = 0;
124 
125 	return dev->dev_ops->eth_rx_adapter_caps_get ?
126 				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
127 						&rte_eth_devices[eth_port_id],
128 						caps)
129 				: 0;
130 }
131 
132 int
133 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
134 {
135 	struct rte_eventdev *dev;
136 	const struct rte_event_timer_adapter_ops *ops;
137 
138 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
139 
140 	dev = &rte_eventdevs[dev_id];
141 
142 	if (caps == NULL)
143 		return -EINVAL;
144 	*caps = 0;
145 
146 	return dev->dev_ops->timer_adapter_caps_get ?
147 				(*dev->dev_ops->timer_adapter_caps_get)(dev,
148 									0,
149 									caps,
150 									&ops)
151 				: 0;
152 }
153 
154 int
155 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
156 				  uint32_t *caps)
157 {
158 	struct rte_eventdev *dev;
159 	struct rte_cryptodev *cdev;
160 
161 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
162 	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
163 		return -EINVAL;
164 
165 	dev = &rte_eventdevs[dev_id];
166 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
167 
168 	if (caps == NULL)
169 		return -EINVAL;
170 	*caps = 0;
171 
172 	return dev->dev_ops->crypto_adapter_caps_get ?
173 		(*dev->dev_ops->crypto_adapter_caps_get)
174 		(dev, cdev, caps) : -ENOTSUP;
175 }
176 
177 int
178 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
179 				uint32_t *caps)
180 {
181 	struct rte_eventdev *dev;
182 	struct rte_eth_dev *eth_dev;
183 
184 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
185 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
186 
187 	dev = &rte_eventdevs[dev_id];
188 	eth_dev = &rte_eth_devices[eth_port_id];
189 
190 	if (caps == NULL)
191 		return -EINVAL;
192 
193 	*caps = 0;
194 
195 	return dev->dev_ops->eth_tx_adapter_caps_get ?
196 			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
197 								eth_dev,
198 								caps)
199 			: 0;
200 }
201 
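/*
 * (Re)size the per-queue configuration array of a device. Three cases are
 * handled: first-time allocation, re-configuration (release queues beyond the
 * new count and realloc the config array), and teardown when nb_queues is 0.
 */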
202 static inline int
203 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
204 {
205 	uint8_t old_nb_queues = dev->data->nb_queues;
206 	struct rte_event_queue_conf *queues_cfg;
207 	unsigned int i;
208 
209 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
210 			 dev->data->dev_id);
211 
212 	/* First time configuration */
213 	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
214 		/* Allocate memory to store queue configuration */
215 		dev->data->queues_cfg = rte_zmalloc_socket(
216 				"eventdev->data->queues_cfg",
217 				sizeof(dev->data->queues_cfg[0]) * nb_queues,
218 				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
219 		if (dev->data->queues_cfg == NULL) {
220 			dev->data->nb_queues = 0;
221 			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
222 					" nb_queues %u", nb_queues);
223 			return -(ENOMEM);
224 		}
225 	/* Re-configure */
226 	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
227 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
228 
229 		for (i = nb_queues; i < old_nb_queues; i++)
230 			(*dev->dev_ops->queue_release)(dev, i);
231 
232 		/* Re-allocate memory to store queue configuration */
233 		queues_cfg = dev->data->queues_cfg;
234 		queues_cfg = rte_realloc(queues_cfg,
235 				sizeof(queues_cfg[0]) * nb_queues,
236 				RTE_CACHE_LINE_SIZE);
237 		if (queues_cfg == NULL) {
238 			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
239 						" nb_queues %u", nb_queues);
240 			return -(ENOMEM);
241 		}
242 		dev->data->queues_cfg = queues_cfg;
243 
244 		if (nb_queues > old_nb_queues) {
245 			uint8_t new_qs = nb_queues - old_nb_queues;
246 
247 			memset(queues_cfg + old_nb_queues, 0,
248 				sizeof(queues_cfg[0]) * new_qs);
249 		}
250 	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
251 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
252 
253 		for (i = nb_queues; i < old_nb_queues; i++)
254 			(*dev->dev_ops->queue_release)(dev, i);
255 	}
256 
257 	dev->data->nb_queues = nb_queues;
258 	return 0;
259 }
260 
261 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
262 
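/*
 * (Re)size the per-port data structures: the port pointer array, the port
 * configuration array and the queue-to-port links map. As with queues, this
 * covers first-time allocation, re-configuration and teardown (nb_ports == 0).
 * Unused link-map entries are marked EVENT_QUEUE_SERVICE_PRIORITY_INVALID.
 */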
263 static inline int
264 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
265 {
266 	uint8_t old_nb_ports = dev->data->nb_ports;
267 	void **ports;
268 	uint16_t *links_map;
269 	struct rte_event_port_conf *ports_cfg;
270 	unsigned int i;
271 
272 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
273 			 dev->data->dev_id);
274 
275 	/* First time configuration */
276 	if (dev->data->ports == NULL && nb_ports != 0) {
277 		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
278 				sizeof(dev->data->ports[0]) * nb_ports,
279 				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
280 		if (dev->data->ports == NULL) {
281 			dev->data->nb_ports = 0;
282 			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
283 					" nb_ports %u", nb_ports);
284 			return -(ENOMEM);
285 		}
286 
287 		/* Allocate memory to store port configurations */
288 		dev->data->ports_cfg =
289 			rte_zmalloc_socket("eventdev->ports_cfg",
290 			sizeof(dev->data->ports_cfg[0]) * nb_ports,
291 			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
292 		if (dev->data->ports_cfg == NULL) {
293 			dev->data->nb_ports = 0;
294 			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
295 					" nb_ports %u", nb_ports);
296 			return -(ENOMEM);
297 		}
298 
299 		/* Allocate memory to store queue to port link connection */
300 		dev->data->links_map =
301 			rte_zmalloc_socket("eventdev->links_map",
302 			sizeof(dev->data->links_map[0]) * nb_ports *
303 			RTE_EVENT_MAX_QUEUES_PER_DEV,
304 			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
305 		if (dev->data->links_map == NULL) {
306 			dev->data->nb_ports = 0;
307 			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
308 					" nb_ports %u", nb_ports);
309 			return -(ENOMEM);
310 		}
311 		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
312 			dev->data->links_map[i] =
313 				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
314 	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
315 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
316 
317 		ports = dev->data->ports;
318 		ports_cfg = dev->data->ports_cfg;
319 		links_map = dev->data->links_map;
320 
321 		for (i = nb_ports; i < old_nb_ports; i++)
322 			(*dev->dev_ops->port_release)(ports[i]);
323 
324 		/* Realloc memory for ports */
325 		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
326 				RTE_CACHE_LINE_SIZE);
327 		if (ports == NULL) {
328 			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
329 						" nb_ports %u", nb_ports);
330 			return -(ENOMEM);
331 		}
332 
333 		/* Realloc memory for ports_cfg */
334 		ports_cfg = rte_realloc(ports_cfg,
335 			sizeof(ports_cfg[0]) * nb_ports,
336 			RTE_CACHE_LINE_SIZE);
337 		if (ports_cfg == NULL) {
338 			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
339 						" nb_ports %u", nb_ports);
340 			return -(ENOMEM);
341 		}
342 
343 		/* Realloc memory to store queue to port link connection */
344 		links_map = rte_realloc(links_map,
345 			sizeof(dev->data->links_map[0]) * nb_ports *
346 			RTE_EVENT_MAX_QUEUES_PER_DEV,
347 			RTE_CACHE_LINE_SIZE);
348 		if (links_map == NULL) {
349 			dev->data->nb_ports = 0;
350 			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
351 					" nb_ports %u", nb_ports);
352 			return -(ENOMEM);
353 		}
354 
355 		if (nb_ports > old_nb_ports) {
356 			uint8_t new_ps = nb_ports - old_nb_ports;
357 			unsigned int old_links_map_end =
358 				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
359 			unsigned int links_map_end =
360 				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
361 
362 			memset(ports + old_nb_ports, 0,
363 				sizeof(ports[0]) * new_ps);
364 			memset(ports_cfg + old_nb_ports, 0,
365 				sizeof(ports_cfg[0]) * new_ps);
366 			for (i = old_links_map_end; i < links_map_end; i++)
367 				links_map[i] =
368 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
369 		}
370 
371 		dev->data->ports = ports;
372 		dev->data->ports_cfg = ports_cfg;
373 		dev->data->links_map = links_map;
374 	} else if (dev->data->ports != NULL && nb_ports == 0) {
375 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
376 
377 		ports = dev->data->ports;
378 		for (i = nb_ports; i < old_nb_ports; i++)
379 			(*dev->dev_ops->port_release)(ports[i]);
380 	}
381 
382 	dev->data->nb_ports = nb_ports;
383 	return 0;
384 }
385 
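/*
 * A minimal configuration sketch for the API implemented below (illustrative
 * only: the values are arbitrary examples derived from the reported device
 * limits and error handling is omitted):
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config conf = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	conf.nb_event_queues = 2;
 *	conf.nb_event_ports = 2;
 *	conf.nb_events_limit = info.max_num_events;
 *	conf.nb_event_queue_flows = info.max_event_queue_flows;
 *	conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &conf);
 *
 * Queues and ports must then be set up and linked before the device is
 * started (see rte_event_queue_setup(), rte_event_port_setup(),
 * rte_event_port_link() and rte_event_dev_start() below).
 */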
386 int
387 rte_event_dev_configure(uint8_t dev_id,
388 			const struct rte_event_dev_config *dev_conf)
389 {
390 	struct rte_eventdev *dev;
391 	struct rte_event_dev_info info;
392 	int diag;
393 
394 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
395 	dev = &rte_eventdevs[dev_id];
396 
397 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
398 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
399 
400 	if (dev->data->dev_started) {
401 		RTE_EDEV_LOG_ERR(
402 		    "device %d must be stopped to allow configuration", dev_id);
403 		return -EBUSY;
404 	}
405 
406 	if (dev_conf == NULL)
407 		return -EINVAL;
408 
409 	(*dev->dev_ops->dev_infos_get)(dev, &info);
410 
411 	/* Check that dequeue_timeout_ns is within the supported range */
412 	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
413 		if (dev_conf->dequeue_timeout_ns &&
414 		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
415 			|| dev_conf->dequeue_timeout_ns >
416 				 info.max_dequeue_timeout_ns)) {
417 			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
418 			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
419 			dev_id, dev_conf->dequeue_timeout_ns,
420 			info.min_dequeue_timeout_ns,
421 			info.max_dequeue_timeout_ns);
422 			return -EINVAL;
423 		}
424 	}
425 
426 	/* Check that nb_events_limit does not exceed the device maximum */
427 	if (dev_conf->nb_events_limit > info.max_num_events) {
428 		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
429 		dev_id, dev_conf->nb_events_limit, info.max_num_events);
430 		return -EINVAL;
431 	}
432 
433 	/* Check that nb_event_queues is non-zero and within the device limit */
434 	if (!dev_conf->nb_event_queues) {
435 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
436 					dev_id);
437 		return -EINVAL;
438 	}
439 	if (dev_conf->nb_event_queues > info.max_event_queues) {
440 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
441 		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
442 		return -EINVAL;
443 	}
444 
445 	/* Check that nb_event_ports is non-zero and within the device limit */
446 	if (!dev_conf->nb_event_ports) {
447 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
448 		return -EINVAL;
449 	}
450 	if (dev_conf->nb_event_ports > info.max_event_ports) {
451 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
452 		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
453 		return -EINVAL;
454 	}
455 
456 	/* Check that nb_event_queue_flows is non-zero and within the device limit */
457 	if (!dev_conf->nb_event_queue_flows) {
458 		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
459 		return -EINVAL;
460 	}
461 	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
462 		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
463 		dev_id, dev_conf->nb_event_queue_flows,
464 		info.max_event_queue_flows);
465 		return -EINVAL;
466 	}
467 
468 	/* Check that nb_event_port_dequeue_depth is non-zero and within the device limit */
469 	if (!dev_conf->nb_event_port_dequeue_depth) {
470 		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
471 					dev_id);
472 		return -EINVAL;
473 	}
474 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
475 		 (dev_conf->nb_event_port_dequeue_depth >
476 			 info.max_event_port_dequeue_depth)) {
477 		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
478 		dev_id, dev_conf->nb_event_port_dequeue_depth,
479 		info.max_event_port_dequeue_depth);
480 		return -EINVAL;
481 	}
482 
483 	/* Check that nb_event_port_enqueue_depth is non-zero and within the device limit */
484 	if (!dev_conf->nb_event_port_enqueue_depth) {
485 		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
486 					dev_id);
487 		return -EINVAL;
488 	}
489 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
490 		(dev_conf->nb_event_port_enqueue_depth >
491 			 info.max_event_port_enqueue_depth)) {
492 		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
493 		dev_id, dev_conf->nb_event_port_enqueue_depth,
494 		info.max_event_port_enqueue_depth);
495 		return -EINVAL;
496 	}
497 
498 	/* Copy the dev_conf parameter into the dev structure */
499 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
500 
501 	/* Setup new number of queues and reconfigure device. */
502 	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
503 	if (diag != 0) {
504 		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
505 				dev_id, diag);
506 		return diag;
507 	}
508 
509 	/* Setup new number of ports and reconfigure device. */
510 	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
511 	if (diag != 0) {
512 		rte_event_dev_queue_config(dev, 0);
513 		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
514 				dev_id, diag);
515 		return diag;
516 	}
517 
518 	/* Configure the device */
519 	diag = (*dev->dev_ops->dev_configure)(dev);
520 	if (diag != 0) {
521 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
522 		rte_event_dev_queue_config(dev, 0);
523 		rte_event_dev_port_config(dev, 0);
524 	}
525 
526 	dev->data->event_dev_cap = info.event_dev_cap;
527 	return diag;
528 }
529 
530 static inline int
531 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
532 {
533 	if (queue_id < dev->data->nb_queues && queue_id <
534 				RTE_EVENT_MAX_QUEUES_PER_DEV)
535 		return 1;
536 	else
537 		return 0;
538 }
539 
540 int
541 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
542 				 struct rte_event_queue_conf *queue_conf)
543 {
544 	struct rte_eventdev *dev;
545 
546 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
547 	dev = &rte_eventdevs[dev_id];
548 
549 	if (queue_conf == NULL)
550 		return -EINVAL;
551 
552 	if (!is_valid_queue(dev, queue_id)) {
553 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
554 		return -EINVAL;
555 	}
556 
557 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
558 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
559 	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
560 	return 0;
561 }
562 
563 static inline int
564 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
565 {
566 	if (queue_conf &&
567 		!(queue_conf->event_queue_cfg &
568 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
569 		((queue_conf->event_queue_cfg &
570 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
571 		(queue_conf->schedule_type
572 			== RTE_SCHED_TYPE_ATOMIC)
573 		))
574 		return 1;
575 	else
576 		return 0;
577 }
578 
579 static inline int
580 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
581 {
582 	if (queue_conf &&
583 		!(queue_conf->event_queue_cfg &
584 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
585 		((queue_conf->event_queue_cfg &
586 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
587 		(queue_conf->schedule_type
588 			== RTE_SCHED_TYPE_ORDERED)
589 		))
590 		return 1;
591 	else
592 		return 0;
593 }
594 
595 
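/*
 * Illustrative queue setup sketch (values are arbitrary; a NULL queue_conf
 * may also be passed to use the driver's default configuration):
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.nb_atomic_flows = 1024;
 *	rte_event_queue_setup(dev_id, queue_id, &qconf);
 */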
596 int
597 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
598 		      const struct rte_event_queue_conf *queue_conf)
599 {
600 	struct rte_eventdev *dev;
601 	struct rte_event_queue_conf def_conf;
602 
603 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
604 	dev = &rte_eventdevs[dev_id];
605 
606 	if (!is_valid_queue(dev, queue_id)) {
607 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
608 		return -EINVAL;
609 	}
610 
611 	/* Check nb_atomic_flows limit */
612 	if (is_valid_atomic_queue_conf(queue_conf)) {
613 		if (queue_conf->nb_atomic_flows == 0 ||
614 		    queue_conf->nb_atomic_flows >
615 			dev->data->dev_conf.nb_event_queue_flows) {
616 			RTE_EDEV_LOG_ERR(
617 		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
618 			dev_id, queue_id, queue_conf->nb_atomic_flows,
619 			dev->data->dev_conf.nb_event_queue_flows);
620 			return -EINVAL;
621 		}
622 	}
623 
624 	/* Check nb_atomic_order_sequences limit */
625 	if (is_valid_ordered_queue_conf(queue_conf)) {
626 		if (queue_conf->nb_atomic_order_sequences == 0 ||
627 		    queue_conf->nb_atomic_order_sequences >
628 			dev->data->dev_conf.nb_event_queue_flows) {
629 			RTE_EDEV_LOG_ERR(
630 		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
631 			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
632 			dev->data->dev_conf.nb_event_queue_flows);
633 			return -EINVAL;
634 		}
635 	}
636 
637 	if (dev->data->dev_started) {
638 		RTE_EDEV_LOG_ERR(
639 		    "device %d must be stopped to allow queue setup", dev_id);
640 		return -EBUSY;
641 	}
642 
643 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
644 
645 	if (queue_conf == NULL) {
646 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
647 					-ENOTSUP);
648 		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
649 		queue_conf = &def_conf;
650 	}
651 
652 	dev->data->queues_cfg[queue_id] = *queue_conf;
653 	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
654 }
655 
656 static inline int
657 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
658 {
659 	if (port_id < dev->data->nb_ports)
660 		return 1;
661 	else
662 		return 0;
663 }
664 
665 int
666 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
667 				 struct rte_event_port_conf *port_conf)
668 {
669 	struct rte_eventdev *dev;
670 
671 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
672 	dev = &rte_eventdevs[dev_id];
673 
674 	if (port_conf == NULL)
675 		return -EINVAL;
676 
677 	if (!is_valid_port(dev, port_id)) {
678 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
679 		return -EINVAL;
680 	}
681 
682 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
683 	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
684 	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
685 	return 0;
686 }
687 
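/*
 * Illustrative port setup sketch (a NULL port_conf selects the driver
 * defaults; any explicit values must respect the limits supplied at
 * rte_event_dev_configure() time):
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &pconf);
 *	rte_event_port_setup(dev_id, port_id, &pconf);
 */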
688 int
689 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
690 		     const struct rte_event_port_conf *port_conf)
691 {
692 	struct rte_eventdev *dev;
693 	struct rte_event_port_conf def_conf;
694 	int diag;
695 
696 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
697 	dev = &rte_eventdevs[dev_id];
698 
699 	if (!is_valid_port(dev, port_id)) {
700 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
701 		return -EINVAL;
702 	}
703 
704 	/* Check new_event_threshold limit */
705 	if ((port_conf && !port_conf->new_event_threshold) ||
706 			(port_conf && port_conf->new_event_threshold >
707 				 dev->data->dev_conf.nb_events_limit)) {
708 		RTE_EDEV_LOG_ERR(
709 		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
710 			dev_id, port_id, port_conf->new_event_threshold,
711 			dev->data->dev_conf.nb_events_limit);
712 		return -EINVAL;
713 	}
714 
715 	/* Check dequeue_depth limit */
716 	if ((port_conf && !port_conf->dequeue_depth) ||
717 			(port_conf && port_conf->dequeue_depth >
718 		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
719 		RTE_EDEV_LOG_ERR(
720 		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
721 			dev_id, port_id, port_conf->dequeue_depth,
722 			dev->data->dev_conf.nb_event_port_dequeue_depth);
723 		return -EINVAL;
724 	}
725 
726 	/* Check enqueue_depth limit */
727 	if ((port_conf && !port_conf->enqueue_depth) ||
728 			(port_conf && port_conf->enqueue_depth >
729 		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
730 		RTE_EDEV_LOG_ERR(
731 		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
732 			dev_id, port_id, port_conf->enqueue_depth,
733 			dev->data->dev_conf.nb_event_port_enqueue_depth);
734 		return -EINVAL;
735 	}
736 
737 	if (port_conf && port_conf->disable_implicit_release &&
738 	    !(dev->data->event_dev_cap &
739 	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
740 		RTE_EDEV_LOG_ERR(
741 		   "dev%d port%d Implicit release disable not supported",
742 			dev_id, port_id);
743 		return -EINVAL;
744 	}
745 
746 	if (dev->data->dev_started) {
747 		RTE_EDEV_LOG_ERR(
748 		    "device %d must be stopped to allow port setup", dev_id);
749 		return -EBUSY;
750 	}
751 
752 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
753 
754 	if (port_conf == NULL) {
755 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
756 					-ENOTSUP);
757 		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
758 		port_conf = &def_conf;
759 	}
760 
761 	dev->data->ports_cfg[port_id] = *port_conf;
762 
763 	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
764 
765 	/* Unlink all the queues from this port (default state after setup) */
766 	if (!diag)
767 		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
768 
769 	if (diag < 0)
770 		return diag;
771 
772 	return 0;
773 }
774 
775 int
776 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
777 		       uint32_t *attr_value)
778 {
779 	struct rte_eventdev *dev;
780 
781 	if (!attr_value)
782 		return -EINVAL;
783 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
784 	dev = &rte_eventdevs[dev_id];
785 
786 	switch (attr_id) {
787 	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
788 		*attr_value = dev->data->nb_ports;
789 		break;
790 	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
791 		*attr_value = dev->data->nb_queues;
792 		break;
793 	case RTE_EVENT_DEV_ATTR_STARTED:
794 		*attr_value = dev->data->dev_started;
795 		break;
796 	default:
797 		return -EINVAL;
798 	}
799 
800 	return 0;
801 }
802 
803 int
804 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
805 			uint32_t *attr_value)
806 {
807 	struct rte_eventdev *dev;
808 
809 	if (!attr_value)
810 		return -EINVAL;
811 
812 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
813 	dev = &rte_eventdevs[dev_id];
814 	if (!is_valid_port(dev, port_id)) {
815 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
816 		return -EINVAL;
817 	}
818 
819 	switch (attr_id) {
820 	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
821 		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
822 		break;
823 	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
824 		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
825 		break;
826 	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
827 		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
828 		break;
829 	default:
830 		return -EINVAL;
831 	}
832 	return 0;
833 }
834 
835 int
836 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
837 			uint32_t *attr_value)
838 {
839 	struct rte_event_queue_conf *conf;
840 	struct rte_eventdev *dev;
841 
842 	if (!attr_value)
843 		return -EINVAL;
844 
845 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
846 	dev = &rte_eventdevs[dev_id];
847 	if (!is_valid_queue(dev, queue_id)) {
848 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
849 		return -EINVAL;
850 	}
851 
852 	conf = &dev->data->queues_cfg[queue_id];
853 
854 	switch (attr_id) {
855 	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
856 		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
857 		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
858 			*attr_value = conf->priority;
859 		break;
860 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
861 		*attr_value = conf->nb_atomic_flows;
862 		break;
863 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
864 		*attr_value = conf->nb_atomic_order_sequences;
865 		break;
866 	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
867 		*attr_value = conf->event_queue_cfg;
868 		break;
869 	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
870 		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
871 			return -EOVERFLOW;
872 
873 		*attr_value = conf->schedule_type;
874 		break;
875 	default:
876 		return -EINVAL;
877 	}
878 	return 0;
879 }
880 
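/*
 * Illustrative link sketch: passing NULL for both queues and priorities links
 * the port to every configured queue at normal priority.
 *
 *	(void)rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
 *
 * Parameter errors are reported by returning 0 with rte_errno set; otherwise
 * the number of queues actually linked is returned.
 */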
881 int
882 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
883 		    const uint8_t queues[], const uint8_t priorities[],
884 		    uint16_t nb_links)
885 {
886 	struct rte_eventdev *dev;
887 	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
888 	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
889 	uint16_t *links_map;
890 	int i, diag;
891 
892 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
893 	dev = &rte_eventdevs[dev_id];
894 
895 	if (*dev->dev_ops->port_link == NULL) {
896 		RTE_EDEV_LOG_ERR("Function not supported");
897 		rte_errno = ENOTSUP;
898 		return 0;
899 	}
900 
901 	if (!is_valid_port(dev, port_id)) {
902 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
903 		rte_errno = EINVAL;
904 		return 0;
905 	}
906 
907 	if (queues == NULL) {
908 		for (i = 0; i < dev->data->nb_queues; i++)
909 			queues_list[i] = i;
910 
911 		queues = queues_list;
912 		nb_links = dev->data->nb_queues;
913 	}
914 
915 	if (priorities == NULL) {
916 		for (i = 0; i < nb_links; i++)
917 			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
918 
919 		priorities = priorities_list;
920 	}
921 
922 	for (i = 0; i < nb_links; i++)
923 		if (queues[i] >= dev->data->nb_queues) {
924 			rte_errno = EINVAL;
925 			return 0;
926 		}
927 
928 	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
929 						queues, priorities, nb_links);
930 	if (diag < 0)
931 		return diag;
932 
933 	links_map = dev->data->links_map;
934 	/* Point links_map to this port-specific area */
935 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
936 	for (i = 0; i < diag; i++)
937 		links_map[queues[i]] = (uint8_t)priorities[i];
938 
939 	return diag;
940 }
941 
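/*
 * Unlink queues from a port. A NULL queues array means "unlink everything
 * currently linked to this port"; the links map is consulted to build that
 * list, and successfully unlinked entries are reset to
 * EVENT_QUEUE_SERVICE_PRIORITY_INVALID afterwards.
 */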
942 int
943 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
944 		      uint8_t queues[], uint16_t nb_unlinks)
945 {
946 	struct rte_eventdev *dev;
947 	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
948 	int i, diag, j;
949 	uint16_t *links_map;
950 
951 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
952 	dev = &rte_eventdevs[dev_id];
953 
954 	if (*dev->dev_ops->port_unlink == NULL) {
955 		RTE_EDEV_LOG_ERR("Function not supported");
956 		rte_errno = ENOTSUP;
957 		return 0;
958 	}
959 
960 	if (!is_valid_port(dev, port_id)) {
961 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
962 		rte_errno = EINVAL;
963 		return 0;
964 	}
965 
966 	links_map = dev->data->links_map;
967 	/* Point links_map to this port-specific area */
968 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
969 
970 	if (queues == NULL) {
971 		j = 0;
972 		for (i = 0; i < dev->data->nb_queues; i++) {
973 			if (links_map[i] !=
974 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
975 				all_queues[j] = i;
976 				j++;
977 			}
978 		}
979 		queues = all_queues;
980 	} else {
981 		for (j = 0; j < nb_unlinks; j++) {
982 			if (links_map[queues[j]] ==
983 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
984 				break;
985 		}
986 	}
987 
988 	nb_unlinks = j;
989 	for (i = 0; i < nb_unlinks; i++)
990 		if (queues[i] >= dev->data->nb_queues) {
991 			rte_errno = EINVAL;
992 			return 0;
993 		}
994 
995 	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
996 					queues, nb_unlinks);
997 
998 	if (diag < 0)
999 		return diag;
1000 
1001 	for (i = 0; i < diag; i++)
1002 		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1003 
1004 	return diag;
1005 }
1006 
1007 int
1008 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
1009 {
1010 	struct rte_eventdev *dev;
1011 
1012 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1013 	dev = &rte_eventdevs[dev_id];
1014 	if (!is_valid_port(dev, port_id)) {
1015 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1016 		return -EINVAL;
1017 	}
1018 
1019 	/* Return 0 if the PMD does not implement unlinks in progress.
1020 	 * This allows PMDs which handle unlink synchronously to not implement
1021 	 * this function at all.
1022 	 */
1023 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
1024 
1025 	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1026 			dev->data->ports[port_id]);
1027 }
1028 
1029 int
1030 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1031 			 uint8_t queues[], uint8_t priorities[])
1032 {
1033 	struct rte_eventdev *dev;
1034 	uint16_t *links_map;
1035 	int i, count = 0;
1036 
1037 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1038 	dev = &rte_eventdevs[dev_id];
1039 	if (!is_valid_port(dev, port_id)) {
1040 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1041 		return -EINVAL;
1042 	}
1043 
1044 	links_map = dev->data->links_map;
1045 	/* Point links_map to this port-specific area */
1046 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1047 	for (i = 0; i < dev->data->nb_queues; i++) {
1048 		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1049 			queues[count] = i;
1050 			priorities[count] = (uint8_t)links_map[i];
1051 			++count;
1052 		}
1053 	}
1054 	return count;
1055 }
1056 
1057 int
1058 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1059 				 uint64_t *timeout_ticks)
1060 {
1061 	struct rte_eventdev *dev;
1062 
1063 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1064 	dev = &rte_eventdevs[dev_id];
1065 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1066 
1067 	if (timeout_ticks == NULL)
1068 		return -EINVAL;
1069 
1070 	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1071 }
1072 
1073 int
1074 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1075 {
1076 	struct rte_eventdev *dev;
1077 
1078 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1079 	dev = &rte_eventdevs[dev_id];
1080 
1081 	if (service_id == NULL)
1082 		return -EINVAL;
1083 
1084 	if (dev->data->service_inited)
1085 		*service_id = dev->data->service_id;
1086 
1087 	return dev->data->service_inited ? 0 : -ESRCH;
1088 }
1089 
1090 int
1091 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1092 {
1093 	struct rte_eventdev *dev;
1094 
1095 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1096 	dev = &rte_eventdevs[dev_id];
1097 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1098 
1099 	(*dev->dev_ops->dump)(dev, f);
1100 	return 0;
1102 }
1103 
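/*
 * Helper: ask the driver how many xstats entries exist for the given mode by
 * calling xstats_get_names() with a zero-sized buffer. Returns 0 when the
 * driver does not implement the op.
 */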
1104 static int
1105 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1106 		uint8_t queue_port_id)
1107 {
1108 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1109 	if (dev->dev_ops->xstats_get_names != NULL)
1110 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1111 							queue_port_id,
1112 							NULL, NULL, 0);
1113 	return 0;
1114 }
1115 
1116 int
1117 rte_event_dev_xstats_names_get(uint8_t dev_id,
1118 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1119 		struct rte_event_dev_xstats_name *xstats_names,
1120 		unsigned int *ids, unsigned int size)
1121 {
1122 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1123 	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1124 							  queue_port_id);
1125 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
1126 			(int)size < cnt_expected_entries)
1127 		return cnt_expected_entries;
1128 
1129 	/* dev_id checked above */
1130 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1131 
1132 	if (dev->dev_ops->xstats_get_names != NULL)
1133 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1134 				queue_port_id, xstats_names, ids, size);
1135 
1136 	return -ENOTSUP;
1137 }
1138 
1139 /* retrieve eventdev extended statistics */
1140 int
1141 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1142 		uint8_t queue_port_id, const unsigned int ids[],
1143 		uint64_t values[], unsigned int n)
1144 {
1145 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1146 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1147 
1148 	/* implemented by the driver */
1149 	if (dev->dev_ops->xstats_get != NULL)
1150 		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1151 				ids, values, n);
1152 	return -ENOTSUP;
1153 }
1154 
1155 uint64_t
1156 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1157 		unsigned int *id)
1158 {
1159 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1160 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1161 	unsigned int temp = -1;
1162 
1163 	if (id != NULL)
1164 		*id = (unsigned int)-1;
1165 	else
1166 		id = &temp; /* ensure driver never gets a NULL value */
1167 
1168 	/* implemented by driver */
1169 	if (dev->dev_ops->xstats_get_by_name != NULL)
1170 		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1171 	return -ENOTSUP;
1172 }
1173 
1174 int rte_event_dev_xstats_reset(uint8_t dev_id,
1175 		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1176 		const uint32_t ids[], uint32_t nb_ids)
1177 {
1178 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1179 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1180 
1181 	if (dev->dev_ops->xstats_reset != NULL)
1182 		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1183 							ids, nb_ids);
1184 	return -ENOTSUP;
1185 }
1186 
1187 int rte_event_dev_selftest(uint8_t dev_id)
1188 {
1189 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1190 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1191 
1192 	if (dev->dev_ops->dev_selftest != NULL)
1193 		return (*dev->dev_ops->dev_selftest)();
1194 	return -ENOTSUP;
1195 }
1196 
1197 int
1198 rte_event_dev_start(uint8_t dev_id)
1199 {
1200 	struct rte_eventdev *dev;
1201 	int diag;
1202 
1203 	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1204 
1205 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1206 	dev = &rte_eventdevs[dev_id];
1207 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1208 
1209 	if (dev->data->dev_started != 0) {
1210 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1211 			dev_id);
1212 		return 0;
1213 	}
1214 
1215 	diag = (*dev->dev_ops->dev_start)(dev);
1216 	if (diag == 0)
1217 		dev->data->dev_started = 1;
1218 	else
1219 		return diag;
1220 
1221 	return 0;
1222 }
1223 
1224 int
1225 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1226 		eventdev_stop_flush_t callback, void *userdata)
1227 {
1228 	struct rte_eventdev *dev;
1229 
1230 	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1231 
1232 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1233 	dev = &rte_eventdevs[dev_id];
1234 
1235 	dev->dev_ops->dev_stop_flush = callback;
1236 	dev->data->dev_stop_flush_arg = userdata;
1237 
1238 	return 0;
1239 }
1240 
1241 void
1242 rte_event_dev_stop(uint8_t dev_id)
1243 {
1244 	struct rte_eventdev *dev;
1245 
1246 	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1247 
1248 	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1249 	dev = &rte_eventdevs[dev_id];
1250 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1251 
1252 	if (dev->data->dev_started == 0) {
1253 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1254 			dev_id);
1255 		return;
1256 	}
1257 
1258 	dev->data->dev_started = 0;
1259 	(*dev->dev_ops->dev_stop)(dev);
1260 }
1261 
1262 int
1263 rte_event_dev_close(uint8_t dev_id)
1264 {
1265 	struct rte_eventdev *dev;
1266 
1267 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1268 	dev = &rte_eventdevs[dev_id];
1269 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1270 
1271 	/* Device must be stopped before it can be closed */
1272 	if (dev->data->dev_started == 1) {
1273 		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1274 				dev_id);
1275 		return -EBUSY;
1276 	}
1277 
1278 	return (*dev->dev_ops->dev_close)(dev);
1279 }
1280 
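/*
 * Allocate (primary process) or look up (secondary process) the shared
 * rte_eventdev_data memzone for a device. The primary process zeroes the
 * freshly reserved area.
 */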
1281 static inline int
1282 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1283 		int socket_id)
1284 {
1285 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1286 	const struct rte_memzone *mz;
1287 	int n;
1288 
1289 	/* Generate memzone name */
1290 	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1291 	if (n < 0 || n >= (int)sizeof(mz_name))
1292 		return -EINVAL;
1293 
1294 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1295 		mz = rte_memzone_reserve(mz_name,
1296 				sizeof(struct rte_eventdev_data),
1297 				socket_id, 0);
1298 	} else
1299 		mz = rte_memzone_lookup(mz_name);
1300 
1301 	if (mz == NULL)
1302 		return -ENOMEM;
1303 
1304 	*data = mz->addr;
1305 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1306 		memset(*data, 0, sizeof(struct rte_eventdev_data));
1307 
1308 	return 0;
1309 }
1310 
1311 static inline uint8_t
1312 rte_eventdev_find_free_device_index(void)
1313 {
1314 	uint8_t dev_id;
1315 
1316 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1317 		if (rte_eventdevs[dev_id].attached ==
1318 				RTE_EVENTDEV_DETACHED)
1319 			return dev_id;
1320 	}
1321 	return RTE_EVENT_MAX_DEVS;
1322 }
1323 
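/*
 * Placeholder Tx adapter enqueue function installed at allocation time; it
 * only sets rte_errno to ENOTSUP until a driver provides a real
 * implementation.
 */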
1324 static uint16_t
1325 rte_event_tx_adapter_enqueue(__rte_unused void *port,
1326 			__rte_unused struct rte_event ev[],
1327 			__rte_unused uint16_t nb_events)
1328 {
1329 	rte_errno = ENOTSUP;
1330 	return 0;
1331 }
1332 
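/*
 * Allocate a new eventdev slot for a PMD: find a free index, install the
 * placeholder Tx adapter enqueue callbacks, set up (or look up) the shared
 * data area and mark the device as attached.
 */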
1333 struct rte_eventdev *
1334 rte_event_pmd_allocate(const char *name, int socket_id)
1335 {
1336 	struct rte_eventdev *eventdev;
1337 	uint8_t dev_id;
1338 
1339 	if (rte_event_pmd_get_named_dev(name) != NULL) {
1340 		RTE_EDEV_LOG_ERR("Event device with name %s already "
1341 				"allocated!", name);
1342 		return NULL;
1343 	}
1344 
1345 	dev_id = rte_eventdev_find_free_device_index();
1346 	if (dev_id == RTE_EVENT_MAX_DEVS) {
1347 		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1348 		return NULL;
1349 	}
1350 
1351 	eventdev = &rte_eventdevs[dev_id];
1352 
1353 	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
1354 	eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
1355 
1356 	if (eventdev->data == NULL) {
1357 		struct rte_eventdev_data *eventdev_data = NULL;
1358 
1359 		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
1360 				socket_id);
1361 
1362 		if (retval < 0 || eventdev_data == NULL)
1363 			return NULL;
1364 
1365 		eventdev->data = eventdev_data;
1366 
1367 		strlcpy(eventdev->data->name, name, RTE_EVENTDEV_NAME_MAX_LEN);
1368 
1369 		eventdev->data->dev_id = dev_id;
1370 		eventdev->data->socket_id = socket_id;
1371 		eventdev->data->dev_started = 0;
1372 
1373 		eventdev->attached = RTE_EVENTDEV_ATTACHED;
1374 
1375 		eventdev_globals.nb_devs++;
1376 	}
1377 
1378 	return eventdev;
1379 }
1380 
1381 int
1382 rte_event_pmd_release(struct rte_eventdev *eventdev)
1383 {
1384 	int ret;
1385 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1386 	const struct rte_memzone *mz;
1387 
1388 	if (eventdev == NULL)
1389 		return -EINVAL;
1390 
1391 	eventdev->attached = RTE_EVENTDEV_DETACHED;
1392 	eventdev_globals.nb_devs--;
1393 
1394 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1395 		rte_free(eventdev->data->dev_private);
1396 
1397 		/* Generate memzone name */
1398 		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1399 				eventdev->data->dev_id);
1400 		if (ret < 0 || ret >= (int)sizeof(mz_name))
1401 			return -EINVAL;
1402 
1403 		mz = rte_memzone_lookup(mz_name);
1404 		if (mz == NULL)
1405 			return -ENOMEM;
1406 
1407 		ret = rte_memzone_free(mz);
1408 		if (ret)
1409 			return ret;
1410 	}
1411 
1412 	eventdev->data = NULL;
1413 	return 0;
1414 }
1415