/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"


/* BBDev library logging ID */
static int bbdev_logtype;

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event; /* Interrupt event type */
	uint32_t active; /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];
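
/*
 * Usage sketch (illustrative, not part of this file): the fast-path
 * inline helpers declared in rte_bbdev.h index rte_bbdev_devices[]
 * directly; dev_id, queue_id and the ops array below are assumed to be
 * set up by the application:
 *
 *	uint16_t n_enq = rte_bbdev_enqueue_dec_ops(dev_id, queue_id,
 *			ops, num_ops);
 *	uint16_t n_deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
 *			ops, num_ops);
 */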

/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
	if (rte_bbdev_is_valid(dev_id))
		return &rte_bbdev_devices[dev_id];
	return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
	if (mz == NULL) {
		rte_bbdev_log(CRIT,
				"Cannot allocate memzone for bbdev port data");
		return -ENOMEM;
	}

	rte_bbdev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_bbdev_data, 0,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
	return 0;
}

/*
 * Find data allocated for the device or, if not found, return the first
 * unused bbdev data. If all structures are in use and none is used by
 * the device, return NULL.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
	uint16_t data_id;

	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
		if (strlen(rte_bbdev_data[data_id].name) == 0) {
			memset(&rte_bbdev_data[data_id], 0,
					sizeof(struct rte_bbdev_data));
			return &rte_bbdev_data[data_id];
		} else if (strncmp(rte_bbdev_data[data_id].name, name,
				RTE_BBDEV_NAME_MAX_LEN) == 0)
			return &rte_bbdev_data[data_id];
	}

	return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
	uint16_t i;
	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
			return i;
	}
	return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev *
rte_bbdev_allocate(const char *name)
{
	int ret;
	struct rte_bbdev *bbdev;
	uint16_t dev_id;

	if (name == NULL) {
		rte_bbdev_log(ERR, "Invalid null device name");
		return NULL;
	}

	if (rte_bbdev_get_named_dev(name) != NULL) {
		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
		return NULL;
	}

	dev_id = find_free_dev_id();
	if (dev_id == RTE_BBDEV_MAX_DEVS) {
		rte_bbdev_log(ERR, "Reached maximum number of devices");
		return NULL;
	}

	bbdev = &rte_bbdev_devices[dev_id];

	if (rte_bbdev_data == NULL) {
		ret = rte_bbdev_data_alloc();
		if (ret != 0)
			return NULL;
	}

	bbdev->data = find_bbdev_data(name);
	if (bbdev->data == NULL) {
		rte_bbdev_log(ERR,
				"Max BBDevs already allocated in multi-process environment!");
		return NULL;
	}

	rte_atomic16_inc(&bbdev->data->process_cnt);
	bbdev->data->dev_id = dev_id;
	bbdev->state = RTE_BBDEV_INITIALIZED;

	ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
		return NULL;
	}

	/* init user callbacks */
	TAILQ_INIT(&(bbdev->list_cbs));

	num_devs++;

	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
			name, dev_id, num_devs);

	return bbdev;
}
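
/*
 * Usage sketch (illustrative only): a PMD's probe routine would
 * typically pair rte_bbdev_allocate() with its own ops table. The
 * "dummy" driver and its ops structure below are hypothetical:
 *
 *	static int
 *	dummy_bbdev_probe(struct rte_vdev_device *vdev)
 *	{
 *		struct rte_bbdev *bbdev;
 *
 *		bbdev = rte_bbdev_allocate(rte_vdev_device_name(vdev));
 *		if (bbdev == NULL)
 *			return -ENODEV;
 *		bbdev->dev_ops = &dummy_bbdev_ops;
 *		bbdev->device = &vdev->device;
 *		return 0;
 *	}
 */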

int
rte_bbdev_release(struct rte_bbdev *bbdev)
{
	uint16_t dev_id;
	struct rte_bbdev_callback *cb, *next;

	if (bbdev == NULL) {
		rte_bbdev_log(ERR, "NULL bbdev");
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free all callbacks from the device's list */
	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);
		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
		rte_free(cb);
	}

	/* clear shared BBDev Data if no process is using the device anymore */
	if (rte_atomic16_dec_and_test(&bbdev->data->process_cnt))
		memset(bbdev->data, 0, sizeof(*bbdev->data));

	memset(bbdev, 0, sizeof(*bbdev));
	num_devs--;
	bbdev->state = RTE_BBDEV_UNUSED;

	rte_bbdev_log_debug(
			"Un-initialised device id = %u. Num devices = %u",
			dev_id, num_devs);
	return 0;
}

struct rte_bbdev *
rte_bbdev_get_named_dev(const char *name)
{
	unsigned int i;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL driver name");
		return NULL;
	}

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		struct rte_bbdev *dev = get_dev(i);
		if (dev && (strncmp(dev->data->name,
				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
			return dev;
	}

	return NULL;
}

uint16_t
rte_bbdev_count(void)
{
	return num_devs;
}

bool
rte_bbdev_is_valid(uint16_t dev_id)
{
	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
		return true;
	return false;
}

uint16_t
rte_bbdev_find_next(uint16_t dev_id)
{
	dev_id++;
	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
		if (rte_bbdev_is_valid(dev_id))
			break;
	return dev_id;
}

int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
	unsigned int i;
	int ret;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	/* Get device driver information to get max number of queues */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
		rte_bbdev_log(ERR,
				"Device %u supports 0 < N <= %u queues, not %u",
				dev_id, dev_info.max_num_queues, num_queues);
		return -EINVAL;
	}

	/* If re-configuration, get driver to free existing internal memory */
	if (dev->data->queues != NULL) {
		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
		for (i = 0; i < dev->data->num_queues; i++) {
			int ret = dev->dev_ops->queue_release(dev, i);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u queue %u release failed",
						dev_id, i);
				return ret;
			}
		}
		/* Call optional device close */
		if (dev->dev_ops->close) {
			ret = dev->dev_ops->close(dev);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u couldn't be closed",
						dev_id);
				return ret;
			}
		}
		rte_free(dev->data->queues);
	}

	/* Allocate queue pointers */
	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
				dev->data->socket_id);
	if (dev->data->queues == NULL) {
		rte_bbdev_log(ERR,
				"calloc of %u queues for device %u on socket %i failed",
				num_queues, dev_id, dev->data->socket_id);
		return -ENOMEM;
	}

	dev->data->num_queues = num_queues;

	/* Call optional device configuration */
	if (dev->dev_ops->setup_queues) {
		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u memory configuration failed",
					dev_id);
			goto error;
		}
	}

	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
			num_queues);
	return 0;

error:
	dev->data->num_queues = 0;
	rte_free(dev->data->queues);
	dev->data->queues = NULL;
	return ret;
}
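
/*
 * Usage sketch (illustrative only): queues can only be set up while the
 * device is stopped, so the expected order of calls from an application
 * is rte_bbdev_setup_queues(), rte_bbdev_queue_configure() per queue,
 * then rte_bbdev_start(). Assuming dev_id refers to a valid device:
 *
 *	if (rte_bbdev_setup_queues(dev_id, 4, rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "Cannot set up bbdev queues\n");
 */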

int
rte_bbdev_intr_enable(uint16_t dev_id)
{
	int ret;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	if (dev->dev_ops->intr_enable) {
		ret = dev->dev_ops->intr_enable(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u interrupts configuration failed",
					dev_id);
			return ret;
		}
		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
		return 0;
	}

	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
	return -ENOTSUP;
}

int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be: <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
			conf->priority > dev_info.max_ul_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_ul_queue_priority);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
			conf->priority > dev_info.max_dl_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_dl_queue_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return ret;
		}
	}

	/* Get driver to setup the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		rte_bbdev_log(ERR,
				"Device %u queue %u setup failed", dev_id,
				queue_id);
		return ret;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}
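
/*
 * Usage sketch (illustrative only): configuring queue 0 of an assumed
 * valid device for Turbo decode. Passing conf == NULL instead would
 * apply the driver's default_queue_conf, as seen above:
 *
 *	struct rte_bbdev_queue_conf qconf = {
 *		.socket = rte_socket_id(),
 *		.queue_size = 128,
 *		.priority = 0,
 *		.deferred_start = false,
 *		.op_type = RTE_BBDEV_OP_TURBO_DEC,
 *	};
 *
 *	if (rte_bbdev_queue_configure(dev_id, 0, &qconf) < 0)
 *		rte_exit(EXIT_FAILURE, "Cannot configure queue\n");
 */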

int
rte_bbdev_start(uint16_t dev_id)
{
	int i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log_debug("Device %u is already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->start) {
		int ret = dev->dev_ops->start(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
			return ret;
		}
	}

	/* Store new state */
	for (i = 0; i < dev->data->num_queues; i++)
		if (!dev->data->queues[i].conf.deferred_start)
			dev->data->queues[i].started = true;
	dev->data->started = true;

	rte_bbdev_log_debug("Started device %u", dev_id);
	return 0;
}

int
rte_bbdev_stop(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (!dev->data->started) {
		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->stop)
		dev->dev_ops->stop(dev);
	dev->data->started = false;

	rte_bbdev_log_debug("Stopped device %u", dev_id);
	return 0;
}
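
/*
 * Note (illustrative sketch): a queue configured with deferred_start set
 * is skipped by rte_bbdev_start() above and has to be started on its
 * own, e.g.:
 *
 *	qconf.deferred_start = true;
 *	rte_bbdev_queue_configure(dev_id, 1, &qconf);
 *	rte_bbdev_start(dev_id);
 *	rte_bbdev_queue_start(dev_id, 1);
 */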

int
rte_bbdev_close(uint16_t dev_id)
{
	int ret;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		ret = rte_bbdev_stop(dev_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
			return ret;
		}
	}

	/* Free memory used by queues */
	for (i = 0; i < dev->data->num_queues; i++) {
		ret = dev->dev_ops->queue_release(dev, i);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, i);
			return ret;
		}
	}
	rte_free(dev->data->queues);

	if (dev->dev_ops->close) {
		ret = dev->dev_ops->close(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
			return ret;
		}
	}

	/* Clear configuration */
	dev->data->queues = NULL;
	dev->data->num_queues = 0;

	rte_bbdev_log_debug("Closed device %u", dev_id);
	return 0;
}

int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already started",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_start) {
		int ret = dev->dev_ops->queue_start(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u start failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = true;

	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
	return 0;
}

int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (!dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already stopped",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_stop) {
		int ret = dev->dev_ops->queue_stop(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = false;

	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
	return 0;
}

/* Get device statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
	}
	rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
}

static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		memset(q_stats, 0, sizeof(*q_stats));
	}
	rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}

int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (stats == NULL) {
		rte_bbdev_log(ERR, "NULL stats structure");
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));
	if (dev->dev_ops->stats_get != NULL)
		dev->dev_ops->stats_get(dev, stats);
	else
		get_stats_from_queues(dev, stats);

	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
	return 0;
}
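
/*
 * Usage sketch (illustrative only): reading the accumulated counters
 * for an assumed valid dev_id (PRIu64 comes from <inttypes.h>):
 *
 *	struct rte_bbdev_stats stats;
 *
 *	if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 *		printf("enq=%" PRIu64 " deq=%" PRIu64 "\n",
 *				stats.enqueued_count, stats.dequeued_count);
 */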

int
rte_bbdev_stats_reset(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->dev_ops->stats_reset != NULL)
		dev->dev_ops->stats_reset(dev);
	else
		reset_stats_in_queues(dev);

	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
	return 0;
}

int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

	if (dev_info == NULL) {
		rte_bbdev_log(ERR, "NULL dev info structure");
		return -EINVAL;
	}

	/* Copy data maintained by device interface layer */
	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->dev_name = dev->data->name;
	dev_info->num_queues = dev->data->num_queues;
	dev_info->device = dev->device;
	dev_info->socket_id = dev->data->socket_id;
	dev_info->started = dev->data->started;

	/* Copy data maintained by device driver layer */
	dev->dev_ops->info_get(dev, &dev_info->drv);

	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
	return 0;
}
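
/*
 * Usage sketch (illustrative only): querying device limits before
 * configuration, for an assumed valid dev_id:
 *
 *	struct rte_bbdev_info info;
 *
 *	if (rte_bbdev_info_get(dev_id, &info) == 0)
 *		printf("%s: max %u queues, queue size limit %u\n",
 *				info.dev_name, info.drv.max_num_queues,
 *				info.drv.queue_size_lim);
 */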

int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (queue_info == NULL) {
		rte_bbdev_log(ERR, "NULL queue info structure");
		return -EINVAL;
	}

	/* Copy data to output */
	memset(queue_info, 0, sizeof(*queue_info));
	queue_info->conf = dev->data->queues[queue_id].conf;
	queue_info->started = dev->data->queues[queue_id].started;

	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
			queue_id, dev_id);
	return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
	unsigned int result = 0;
	switch (type) {
	case RTE_BBDEV_OP_NONE:
		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
				sizeof(struct rte_bbdev_enc_op));
		break;
	case RTE_BBDEV_OP_TURBO_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	case RTE_BBDEV_OP_LDPC_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_LDPC_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	default:
		break;
	}

	return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
		__rte_unused unsigned int n)
{
	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

	if (type == RTE_BBDEV_OP_TURBO_DEC || type == RTE_BBDEV_OP_LDPC_DEC) {
		struct rte_bbdev_dec_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_TURBO_ENC ||
			type == RTE_BBDEV_OP_LDPC_ENC) {
		struct rte_bbdev_enc_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	}
}

struct rte_mempool *
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
		unsigned int num_elements, unsigned int cache_size,
		int socket_id)
{
	struct rte_bbdev_op_pool_private *priv;
	struct rte_mempool *mp;
	const char *op_type_str;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL name for op pool");
		return NULL;
	}

	if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
		rte_bbdev_log(ERR,
				"Invalid op type (%u), should be less than %u",
				type, RTE_BBDEV_OP_TYPE_COUNT);
		return NULL;
	}

	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
			cache_size, sizeof(struct rte_bbdev_op_pool_private),
			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
	if (mp == NULL) {
		rte_bbdev_log(ERR,
				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
				name, num_elements, get_bbdev_op_size(type),
				rte_strerror(rte_errno));
		return NULL;
	}

	op_type_str = rte_bbdev_op_type_str(type);
	if (op_type_str == NULL)
		return NULL;

	rte_bbdev_log_debug(
			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
			name, num_elements, op_type_str, cache_size, socket_id,
			get_bbdev_op_size(type));

	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
	priv->type = type;

	return mp;
}
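
/*
 * Usage sketch (illustrative only): creating a pool of decode operations
 * and drawing a burst from it with the helper from rte_bbdev_op.h. The
 * pool name and sizes are arbitrary:
 *
 *	struct rte_mempool *pool;
 *	struct rte_bbdev_dec_op *ops[16];
 *
 *	pool = rte_bbdev_op_pool_create("dec_op_pool",
 *			RTE_BBDEV_OP_TURBO_DEC, 2048, 128, rte_socket_id());
 *	if (pool == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create op pool\n");
 *	if (rte_bbdev_dec_op_alloc_bulk(pool, ops, 16) != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot allocate ops\n");
 */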

int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_bbdev_callback *user_cb;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR, "NULL callback function");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event)
			break;
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_bbdev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_bbdev_callback *cb, *next;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR,
				"NULL callback function cannot be unregistered");
		return -EINVAL;
	}

	dev = &rte_bbdev_devices[dev_id];
	rte_spinlock_lock(&rte_bbdev_cb_lock);

	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
			continue;

		/* If this callback is not executing right now, remove it. */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
			rte_free(cb);
		} else
			ret = -EAGAIN;
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return ret;
}
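
/*
 * Usage sketch (illustrative only): registering a handler for device
 * error events; the handler name is hypothetical. Note that passing
 * cb_arg == (void *)-1 to rte_bbdev_callback_unregister() above matches
 * callbacks registered with any cb_arg value:
 *
 *	static void
 *	app_event_handler(uint16_t dev_id, enum rte_bbdev_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		printf("event %d on dev %u\n", event, dev_id);
 *	}
 *
 *	rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *			app_event_handler, NULL);
 */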

void
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
	enum rte_bbdev_event_type event, void *ret_param)
{
	struct rte_bbdev_callback *cb_lst;
	struct rte_bbdev_callback dev_cb;

	if (dev == NULL) {
		rte_bbdev_log(ERR, "NULL device");
		return;
	}

	if (dev->data == NULL) {
		rte_bbdev_log(ERR, "NULL data structure");
		return;
	}

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_bbdev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_bbdev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_bbdev_cb_lock);
}

int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
	return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
	return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data)
{
	uint32_t vec;
	struct rte_bbdev *dev = get_dev(dev_id);
	struct rte_intr_handle *intr_handle;
	int ret;

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	intr_handle = dev->intr_handle;
	if (!intr_handle || !intr_handle->intr_vec) {
		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
		return -ENOTSUP;
	}

	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
				dev_id, queue_id);
		return -ENOTSUP;
	}

	vec = intr_handle->intr_vec[queue_id];
	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (ret && (ret != -EEXIST)) {
		rte_bbdev_log(ERR,
				"dev %u q %u int ctl error op %d epfd %d vec %u",
				dev_id, queue_id, op, epfd, vec);
		return ret;
	}

	return 0;
}
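
/*
 * Usage sketch (illustrative only): adding a queue's interrupt to the
 * calling thread's default epoll fd and sleeping until it fires, using
 * the EAL epoll helpers from rte_interrupts.h:
 *
 *	struct rte_epoll_event ev;
 *
 *	if (rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL) == 0)
 *		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 */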


const char *
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
	static const char * const op_types[] = {
		"RTE_BBDEV_OP_NONE",
		"RTE_BBDEV_OP_TURBO_DEC",
		"RTE_BBDEV_OP_TURBO_ENC",
		"RTE_BBDEV_OP_LDPC_DEC",
		"RTE_BBDEV_OP_LDPC_ENC",
	};

	if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
		return op_types[op_type];

	rte_bbdev_log(ERR, "Invalid operation type");
	return NULL;
}

RTE_INIT(rte_bbdev_init_log)
{
	bbdev_logtype = rte_log_register("lib.bbdev");
	if (bbdev_logtype >= 0)
		rte_log_set_level(bbdev_logtype, RTE_LOG_NOTICE);
}