xref: /f-stack/dpdk/lib/librte_bbdev/rte_bbdev.c (revision d30ea906)
1*d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2*d30ea906Sjfb8856606  * Copyright(c) 2017 Intel Corporation
3*d30ea906Sjfb8856606  */
4*d30ea906Sjfb8856606 
5*d30ea906Sjfb8856606 #include <stdint.h>
6*d30ea906Sjfb8856606 #include <string.h>
7*d30ea906Sjfb8856606 #include <stdbool.h>
8*d30ea906Sjfb8856606 
9*d30ea906Sjfb8856606 #include <rte_compat.h>
10*d30ea906Sjfb8856606 #include <rte_common.h>
11*d30ea906Sjfb8856606 #include <rte_errno.h>
12*d30ea906Sjfb8856606 #include <rte_log.h>
13*d30ea906Sjfb8856606 #include <rte_debug.h>
14*d30ea906Sjfb8856606 #include <rte_eal.h>
15*d30ea906Sjfb8856606 #include <rte_malloc.h>
16*d30ea906Sjfb8856606 #include <rte_mempool.h>
17*d30ea906Sjfb8856606 #include <rte_memzone.h>
18*d30ea906Sjfb8856606 #include <rte_lcore.h>
19*d30ea906Sjfb8856606 #include <rte_dev.h>
20*d30ea906Sjfb8856606 #include <rte_spinlock.h>
21*d30ea906Sjfb8856606 #include <rte_tailq.h>
22*d30ea906Sjfb8856606 #include <rte_interrupts.h>
23*d30ea906Sjfb8856606 
24*d30ea906Sjfb8856606 #include "rte_bbdev_op.h"
25*d30ea906Sjfb8856606 #include "rte_bbdev.h"
26*d30ea906Sjfb8856606 #include "rte_bbdev_pmd.h"
27*d30ea906Sjfb8856606 
28*d30ea906Sjfb8856606 #define DEV_NAME "BBDEV"
29*d30ea906Sjfb8856606 
30*d30ea906Sjfb8856606 
31*d30ea906Sjfb8856606 /* BBDev library logging ID */
32*d30ea906Sjfb8856606 static int bbdev_logtype;
33*d30ea906Sjfb8856606 
/* Helper macro for logging: appends a newline to every message */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

/* Debug-level logging helper: prefixes each message with the source line
 * number and the calling function name for easier tracing.
 */
#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid: the caller looks up the device
 * pointer first; a NULL pointer means the id is out of range or unused.
 */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid (driver registered its ops table) */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer;
 * returns -ENOTSUP (operation not supported by this driver) otherwise.
 */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid: queue_id must be strictly
 * below the number of queues configured on the device.
 */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)
76*d30ea906Sjfb8856606 
/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event; /* Interrupt event type */
	uint32_t active; /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];

/* Global array with rte_bbdev_data structures; lives in a shared memzone so
 * that secondary processes attach to the same device data (see
 * rte_bbdev_data_alloc()).
 */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices (per process, not shared) */
static uint16_t num_devs;
104*d30ea906Sjfb8856606 
105*d30ea906Sjfb8856606 /* Return pointer to device structure, with validity check */
106*d30ea906Sjfb8856606 static struct rte_bbdev *
107*d30ea906Sjfb8856606 get_dev(uint16_t dev_id)
108*d30ea906Sjfb8856606 {
109*d30ea906Sjfb8856606 	if (rte_bbdev_is_valid(dev_id))
110*d30ea906Sjfb8856606 		return &rte_bbdev_devices[dev_id];
111*d30ea906Sjfb8856606 	return NULL;
112*d30ea906Sjfb8856606 }
113*d30ea906Sjfb8856606 
114*d30ea906Sjfb8856606 /* Allocate global data array */
115*d30ea906Sjfb8856606 static int
116*d30ea906Sjfb8856606 rte_bbdev_data_alloc(void)
117*d30ea906Sjfb8856606 {
118*d30ea906Sjfb8856606 	const unsigned int flags = 0;
119*d30ea906Sjfb8856606 	const struct rte_memzone *mz;
120*d30ea906Sjfb8856606 
121*d30ea906Sjfb8856606 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
122*d30ea906Sjfb8856606 		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
123*d30ea906Sjfb8856606 				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
124*d30ea906Sjfb8856606 				rte_socket_id(), flags);
125*d30ea906Sjfb8856606 	} else
126*d30ea906Sjfb8856606 		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
127*d30ea906Sjfb8856606 	if (mz == NULL) {
128*d30ea906Sjfb8856606 		rte_bbdev_log(CRIT,
129*d30ea906Sjfb8856606 				"Cannot allocate memzone for bbdev port data");
130*d30ea906Sjfb8856606 		return -ENOMEM;
131*d30ea906Sjfb8856606 	}
132*d30ea906Sjfb8856606 
133*d30ea906Sjfb8856606 	rte_bbdev_data = mz->addr;
134*d30ea906Sjfb8856606 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
135*d30ea906Sjfb8856606 		memset(rte_bbdev_data, 0,
136*d30ea906Sjfb8856606 				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
137*d30ea906Sjfb8856606 	return 0;
138*d30ea906Sjfb8856606 }
139*d30ea906Sjfb8856606 
140*d30ea906Sjfb8856606 /*
141*d30ea906Sjfb8856606  * Find data alocated for the device or if not found return first unused bbdev
142*d30ea906Sjfb8856606  * data. If all structures are in use and none is used by the device return
143*d30ea906Sjfb8856606  * NULL.
144*d30ea906Sjfb8856606  */
145*d30ea906Sjfb8856606 static struct rte_bbdev_data *
146*d30ea906Sjfb8856606 find_bbdev_data(const char *name)
147*d30ea906Sjfb8856606 {
148*d30ea906Sjfb8856606 	uint16_t data_id;
149*d30ea906Sjfb8856606 
150*d30ea906Sjfb8856606 	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
151*d30ea906Sjfb8856606 		if (strlen(rte_bbdev_data[data_id].name) == 0) {
152*d30ea906Sjfb8856606 			memset(&rte_bbdev_data[data_id], 0,
153*d30ea906Sjfb8856606 					sizeof(struct rte_bbdev_data));
154*d30ea906Sjfb8856606 			return &rte_bbdev_data[data_id];
155*d30ea906Sjfb8856606 		} else if (strncmp(rte_bbdev_data[data_id].name, name,
156*d30ea906Sjfb8856606 				RTE_BBDEV_NAME_MAX_LEN) == 0)
157*d30ea906Sjfb8856606 			return &rte_bbdev_data[data_id];
158*d30ea906Sjfb8856606 	}
159*d30ea906Sjfb8856606 
160*d30ea906Sjfb8856606 	return NULL;
161*d30ea906Sjfb8856606 }
162*d30ea906Sjfb8856606 
163*d30ea906Sjfb8856606 /* Find lowest device id with no attached device */
164*d30ea906Sjfb8856606 static uint16_t
165*d30ea906Sjfb8856606 find_free_dev_id(void)
166*d30ea906Sjfb8856606 {
167*d30ea906Sjfb8856606 	uint16_t i;
168*d30ea906Sjfb8856606 	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
169*d30ea906Sjfb8856606 		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
170*d30ea906Sjfb8856606 			return i;
171*d30ea906Sjfb8856606 	}
172*d30ea906Sjfb8856606 	return RTE_BBDEV_MAX_DEVS;
173*d30ea906Sjfb8856606 }
174*d30ea906Sjfb8856606 
175*d30ea906Sjfb8856606 struct rte_bbdev * __rte_experimental
176*d30ea906Sjfb8856606 rte_bbdev_allocate(const char *name)
177*d30ea906Sjfb8856606 {
178*d30ea906Sjfb8856606 	int ret;
179*d30ea906Sjfb8856606 	struct rte_bbdev *bbdev;
180*d30ea906Sjfb8856606 	uint16_t dev_id;
181*d30ea906Sjfb8856606 
182*d30ea906Sjfb8856606 	if (name == NULL) {
183*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "Invalid null device name");
184*d30ea906Sjfb8856606 		return NULL;
185*d30ea906Sjfb8856606 	}
186*d30ea906Sjfb8856606 
187*d30ea906Sjfb8856606 	if (rte_bbdev_get_named_dev(name) != NULL) {
188*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
189*d30ea906Sjfb8856606 		return NULL;
190*d30ea906Sjfb8856606 	}
191*d30ea906Sjfb8856606 
192*d30ea906Sjfb8856606 	dev_id = find_free_dev_id();
193*d30ea906Sjfb8856606 	if (dev_id == RTE_BBDEV_MAX_DEVS) {
194*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "Reached maximum number of devices");
195*d30ea906Sjfb8856606 		return NULL;
196*d30ea906Sjfb8856606 	}
197*d30ea906Sjfb8856606 
198*d30ea906Sjfb8856606 	bbdev = &rte_bbdev_devices[dev_id];
199*d30ea906Sjfb8856606 
200*d30ea906Sjfb8856606 	if (rte_bbdev_data == NULL) {
201*d30ea906Sjfb8856606 		ret = rte_bbdev_data_alloc();
202*d30ea906Sjfb8856606 		if (ret != 0)
203*d30ea906Sjfb8856606 			return NULL;
204*d30ea906Sjfb8856606 	}
205*d30ea906Sjfb8856606 
206*d30ea906Sjfb8856606 	bbdev->data = find_bbdev_data(name);
207*d30ea906Sjfb8856606 	if (bbdev->data == NULL) {
208*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
209*d30ea906Sjfb8856606 				"Max BBDevs already allocated in multi-process environment!");
210*d30ea906Sjfb8856606 		return NULL;
211*d30ea906Sjfb8856606 	}
212*d30ea906Sjfb8856606 
213*d30ea906Sjfb8856606 	rte_atomic16_inc(&bbdev->data->process_cnt);
214*d30ea906Sjfb8856606 	bbdev->data->dev_id = dev_id;
215*d30ea906Sjfb8856606 	bbdev->state = RTE_BBDEV_INITIALIZED;
216*d30ea906Sjfb8856606 
217*d30ea906Sjfb8856606 	ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
218*d30ea906Sjfb8856606 	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
219*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
220*d30ea906Sjfb8856606 		return NULL;
221*d30ea906Sjfb8856606 	}
222*d30ea906Sjfb8856606 
223*d30ea906Sjfb8856606 	/* init user callbacks */
224*d30ea906Sjfb8856606 	TAILQ_INIT(&(bbdev->list_cbs));
225*d30ea906Sjfb8856606 
226*d30ea906Sjfb8856606 	num_devs++;
227*d30ea906Sjfb8856606 
228*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
229*d30ea906Sjfb8856606 			name, dev_id, num_devs);
230*d30ea906Sjfb8856606 
231*d30ea906Sjfb8856606 	return bbdev;
232*d30ea906Sjfb8856606 }
233*d30ea906Sjfb8856606 
234*d30ea906Sjfb8856606 int __rte_experimental
235*d30ea906Sjfb8856606 rte_bbdev_release(struct rte_bbdev *bbdev)
236*d30ea906Sjfb8856606 {
237*d30ea906Sjfb8856606 	uint16_t dev_id;
238*d30ea906Sjfb8856606 	struct rte_bbdev_callback *cb, *next;
239*d30ea906Sjfb8856606 
240*d30ea906Sjfb8856606 	if (bbdev == NULL) {
241*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "NULL bbdev");
242*d30ea906Sjfb8856606 		return -ENODEV;
243*d30ea906Sjfb8856606 	}
244*d30ea906Sjfb8856606 	dev_id = bbdev->data->dev_id;
245*d30ea906Sjfb8856606 
246*d30ea906Sjfb8856606 	/* free all callbacks from the device's list */
247*d30ea906Sjfb8856606 	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {
248*d30ea906Sjfb8856606 
249*d30ea906Sjfb8856606 		next = TAILQ_NEXT(cb, next);
250*d30ea906Sjfb8856606 		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
251*d30ea906Sjfb8856606 		rte_free(cb);
252*d30ea906Sjfb8856606 	}
253*d30ea906Sjfb8856606 
254*d30ea906Sjfb8856606 	/* clear shared BBDev Data if no process is using the device anymore */
255*d30ea906Sjfb8856606 	if (rte_atomic16_dec_and_test(&bbdev->data->process_cnt))
256*d30ea906Sjfb8856606 		memset(bbdev->data, 0, sizeof(*bbdev->data));
257*d30ea906Sjfb8856606 
258*d30ea906Sjfb8856606 	memset(bbdev, 0, sizeof(*bbdev));
259*d30ea906Sjfb8856606 	num_devs--;
260*d30ea906Sjfb8856606 	bbdev->state = RTE_BBDEV_UNUSED;
261*d30ea906Sjfb8856606 
262*d30ea906Sjfb8856606 	rte_bbdev_log_debug(
263*d30ea906Sjfb8856606 			"Un-initialised device id = %u. Num devices = %u",
264*d30ea906Sjfb8856606 			dev_id, num_devs);
265*d30ea906Sjfb8856606 	return 0;
266*d30ea906Sjfb8856606 }
267*d30ea906Sjfb8856606 
268*d30ea906Sjfb8856606 struct rte_bbdev * __rte_experimental
269*d30ea906Sjfb8856606 rte_bbdev_get_named_dev(const char *name)
270*d30ea906Sjfb8856606 {
271*d30ea906Sjfb8856606 	unsigned int i;
272*d30ea906Sjfb8856606 
273*d30ea906Sjfb8856606 	if (name == NULL) {
274*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "NULL driver name");
275*d30ea906Sjfb8856606 		return NULL;
276*d30ea906Sjfb8856606 	}
277*d30ea906Sjfb8856606 
278*d30ea906Sjfb8856606 	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
279*d30ea906Sjfb8856606 		struct rte_bbdev *dev = get_dev(i);
280*d30ea906Sjfb8856606 		if (dev && (strncmp(dev->data->name,
281*d30ea906Sjfb8856606 				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
282*d30ea906Sjfb8856606 			return dev;
283*d30ea906Sjfb8856606 	}
284*d30ea906Sjfb8856606 
285*d30ea906Sjfb8856606 	return NULL;
286*d30ea906Sjfb8856606 }
287*d30ea906Sjfb8856606 
/* Return the number of bbdev devices currently attached in this process */
uint16_t __rte_experimental
rte_bbdev_count(void)
{
	return num_devs;
}
293*d30ea906Sjfb8856606 
294*d30ea906Sjfb8856606 bool __rte_experimental
295*d30ea906Sjfb8856606 rte_bbdev_is_valid(uint16_t dev_id)
296*d30ea906Sjfb8856606 {
297*d30ea906Sjfb8856606 	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
298*d30ea906Sjfb8856606 		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
299*d30ea906Sjfb8856606 		return true;
300*d30ea906Sjfb8856606 	return false;
301*d30ea906Sjfb8856606 }
302*d30ea906Sjfb8856606 
303*d30ea906Sjfb8856606 uint16_t __rte_experimental
304*d30ea906Sjfb8856606 rte_bbdev_find_next(uint16_t dev_id)
305*d30ea906Sjfb8856606 {
306*d30ea906Sjfb8856606 	dev_id++;
307*d30ea906Sjfb8856606 	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
308*d30ea906Sjfb8856606 		if (rte_bbdev_is_valid(dev_id))
309*d30ea906Sjfb8856606 			break;
310*d30ea906Sjfb8856606 	return dev_id;
311*d30ea906Sjfb8856606 }
312*d30ea906Sjfb8856606 
313*d30ea906Sjfb8856606 int __rte_experimental
314*d30ea906Sjfb8856606 rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
315*d30ea906Sjfb8856606 {
316*d30ea906Sjfb8856606 	unsigned int i;
317*d30ea906Sjfb8856606 	int ret;
318*d30ea906Sjfb8856606 	struct rte_bbdev_driver_info dev_info;
319*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
320*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
321*d30ea906Sjfb8856606 
322*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
323*d30ea906Sjfb8856606 
324*d30ea906Sjfb8856606 	if (dev->data->started) {
325*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
326*d30ea906Sjfb8856606 				"Device %u cannot be configured when started",
327*d30ea906Sjfb8856606 				dev_id);
328*d30ea906Sjfb8856606 		return -EBUSY;
329*d30ea906Sjfb8856606 	}
330*d30ea906Sjfb8856606 
331*d30ea906Sjfb8856606 	/* Get device driver information to get max number of queues */
332*d30ea906Sjfb8856606 	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
333*d30ea906Sjfb8856606 	memset(&dev_info, 0, sizeof(dev_info));
334*d30ea906Sjfb8856606 	dev->dev_ops->info_get(dev, &dev_info);
335*d30ea906Sjfb8856606 
336*d30ea906Sjfb8856606 	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
337*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
338*d30ea906Sjfb8856606 				"Device %u supports 0 < N <= %u queues, not %u",
339*d30ea906Sjfb8856606 				dev_id, dev_info.max_num_queues, num_queues);
340*d30ea906Sjfb8856606 		return -EINVAL;
341*d30ea906Sjfb8856606 	}
342*d30ea906Sjfb8856606 
343*d30ea906Sjfb8856606 	/* If re-configuration, get driver to free existing internal memory */
344*d30ea906Sjfb8856606 	if (dev->data->queues != NULL) {
345*d30ea906Sjfb8856606 		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
346*d30ea906Sjfb8856606 		for (i = 0; i < dev->data->num_queues; i++) {
347*d30ea906Sjfb8856606 			int ret = dev->dev_ops->queue_release(dev, i);
348*d30ea906Sjfb8856606 			if (ret < 0) {
349*d30ea906Sjfb8856606 				rte_bbdev_log(ERR,
350*d30ea906Sjfb8856606 						"Device %u queue %u release failed",
351*d30ea906Sjfb8856606 						dev_id, i);
352*d30ea906Sjfb8856606 				return ret;
353*d30ea906Sjfb8856606 			}
354*d30ea906Sjfb8856606 		}
355*d30ea906Sjfb8856606 		/* Call optional device close */
356*d30ea906Sjfb8856606 		if (dev->dev_ops->close) {
357*d30ea906Sjfb8856606 			ret = dev->dev_ops->close(dev);
358*d30ea906Sjfb8856606 			if (ret < 0) {
359*d30ea906Sjfb8856606 				rte_bbdev_log(ERR,
360*d30ea906Sjfb8856606 						"Device %u couldn't be closed",
361*d30ea906Sjfb8856606 						dev_id);
362*d30ea906Sjfb8856606 				return ret;
363*d30ea906Sjfb8856606 			}
364*d30ea906Sjfb8856606 		}
365*d30ea906Sjfb8856606 		rte_free(dev->data->queues);
366*d30ea906Sjfb8856606 	}
367*d30ea906Sjfb8856606 
368*d30ea906Sjfb8856606 	/* Allocate queue pointers */
369*d30ea906Sjfb8856606 	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
370*d30ea906Sjfb8856606 			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
371*d30ea906Sjfb8856606 				dev->data->socket_id);
372*d30ea906Sjfb8856606 	if (dev->data->queues == NULL) {
373*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
374*d30ea906Sjfb8856606 				"calloc of %u queues for device %u on socket %i failed",
375*d30ea906Sjfb8856606 				num_queues, dev_id, dev->data->socket_id);
376*d30ea906Sjfb8856606 		return -ENOMEM;
377*d30ea906Sjfb8856606 	}
378*d30ea906Sjfb8856606 
379*d30ea906Sjfb8856606 	dev->data->num_queues = num_queues;
380*d30ea906Sjfb8856606 
381*d30ea906Sjfb8856606 	/* Call optional device configuration */
382*d30ea906Sjfb8856606 	if (dev->dev_ops->setup_queues) {
383*d30ea906Sjfb8856606 		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
384*d30ea906Sjfb8856606 		if (ret < 0) {
385*d30ea906Sjfb8856606 			rte_bbdev_log(ERR,
386*d30ea906Sjfb8856606 					"Device %u memory configuration failed",
387*d30ea906Sjfb8856606 					dev_id);
388*d30ea906Sjfb8856606 			goto error;
389*d30ea906Sjfb8856606 		}
390*d30ea906Sjfb8856606 	}
391*d30ea906Sjfb8856606 
392*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
393*d30ea906Sjfb8856606 			num_queues);
394*d30ea906Sjfb8856606 	return 0;
395*d30ea906Sjfb8856606 
396*d30ea906Sjfb8856606 error:
397*d30ea906Sjfb8856606 	dev->data->num_queues = 0;
398*d30ea906Sjfb8856606 	rte_free(dev->data->queues);
399*d30ea906Sjfb8856606 	dev->data->queues = NULL;
400*d30ea906Sjfb8856606 	return ret;
401*d30ea906Sjfb8856606 }
402*d30ea906Sjfb8856606 
403*d30ea906Sjfb8856606 int __rte_experimental
404*d30ea906Sjfb8856606 rte_bbdev_intr_enable(uint16_t dev_id)
405*d30ea906Sjfb8856606 {
406*d30ea906Sjfb8856606 	int ret;
407*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
408*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
409*d30ea906Sjfb8856606 
410*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
411*d30ea906Sjfb8856606 
412*d30ea906Sjfb8856606 	if (dev->data->started) {
413*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
414*d30ea906Sjfb8856606 				"Device %u cannot be configured when started",
415*d30ea906Sjfb8856606 				dev_id);
416*d30ea906Sjfb8856606 		return -EBUSY;
417*d30ea906Sjfb8856606 	}
418*d30ea906Sjfb8856606 
419*d30ea906Sjfb8856606 	if (dev->dev_ops->intr_enable) {
420*d30ea906Sjfb8856606 		ret = dev->dev_ops->intr_enable(dev);
421*d30ea906Sjfb8856606 		if (ret < 0) {
422*d30ea906Sjfb8856606 			rte_bbdev_log(ERR,
423*d30ea906Sjfb8856606 					"Device %u interrupts configuration failed",
424*d30ea906Sjfb8856606 					dev_id);
425*d30ea906Sjfb8856606 			return ret;
426*d30ea906Sjfb8856606 		}
427*d30ea906Sjfb8856606 		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
428*d30ea906Sjfb8856606 		return 0;
429*d30ea906Sjfb8856606 	}
430*d30ea906Sjfb8856606 
431*d30ea906Sjfb8856606 	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
432*d30ea906Sjfb8856606 	return -ENOTSUP;
433*d30ea906Sjfb8856606 }
434*d30ea906Sjfb8856606 
/* Configure a single queue of a device: validate the requested configuration
 * against the driver's capabilities, release any previous queue state, then
 * ask the driver to set the queue up and store the applied configuration.
 * A NULL conf means "use the driver's default queue configuration".
 * Returns 0 on success or a negative errno-style code.
 */
int __rte_experimental
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	/* Reconfiguration is only permitted while both queue and device are
	 * stopped.
	 */
	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		/* ret is used as a "capability matched" flag here: 1 when the
		 * requested op_type is supported by the driver.
		 */
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			/* Scan the RTE_BBDEV_OP_NONE-terminated capability
			 * list for the requested operation type.
			 */
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be: <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		/* Decode (uplink) queues are bounded by the UL priority
		 * limit, encode (downlink) queues by the DL limit.
		 */
		if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
			conf->priority > dev_info.max_ul_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_ul_queue_priority);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
			conf->priority > dev_info.max_dl_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_dl_queue_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return ret;
		}
	}

	/* Get driver to setup the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		rte_bbdev_log(ERR,
				"Device %u queue %u setup failed", dev_id,
				queue_id);
		return ret;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}
552*d30ea906Sjfb8856606 
553*d30ea906Sjfb8856606 int __rte_experimental
554*d30ea906Sjfb8856606 rte_bbdev_start(uint16_t dev_id)
555*d30ea906Sjfb8856606 {
556*d30ea906Sjfb8856606 	int i;
557*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
558*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
559*d30ea906Sjfb8856606 
560*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
561*d30ea906Sjfb8856606 
562*d30ea906Sjfb8856606 	if (dev->data->started) {
563*d30ea906Sjfb8856606 		rte_bbdev_log_debug("Device %u is already started", dev_id);
564*d30ea906Sjfb8856606 		return 0;
565*d30ea906Sjfb8856606 	}
566*d30ea906Sjfb8856606 
567*d30ea906Sjfb8856606 	if (dev->dev_ops->start) {
568*d30ea906Sjfb8856606 		int ret = dev->dev_ops->start(dev);
569*d30ea906Sjfb8856606 		if (ret < 0) {
570*d30ea906Sjfb8856606 			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
571*d30ea906Sjfb8856606 			return ret;
572*d30ea906Sjfb8856606 		}
573*d30ea906Sjfb8856606 	}
574*d30ea906Sjfb8856606 
575*d30ea906Sjfb8856606 	/* Store new state */
576*d30ea906Sjfb8856606 	for (i = 0; i < dev->data->num_queues; i++)
577*d30ea906Sjfb8856606 		if (!dev->data->queues[i].conf.deferred_start)
578*d30ea906Sjfb8856606 			dev->data->queues[i].started = true;
579*d30ea906Sjfb8856606 	dev->data->started = true;
580*d30ea906Sjfb8856606 
581*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Started device %u", dev_id);
582*d30ea906Sjfb8856606 	return 0;
583*d30ea906Sjfb8856606 }
584*d30ea906Sjfb8856606 
585*d30ea906Sjfb8856606 int __rte_experimental
586*d30ea906Sjfb8856606 rte_bbdev_stop(uint16_t dev_id)
587*d30ea906Sjfb8856606 {
588*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
589*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
590*d30ea906Sjfb8856606 
591*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
592*d30ea906Sjfb8856606 
593*d30ea906Sjfb8856606 	if (!dev->data->started) {
594*d30ea906Sjfb8856606 		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
595*d30ea906Sjfb8856606 		return 0;
596*d30ea906Sjfb8856606 	}
597*d30ea906Sjfb8856606 
598*d30ea906Sjfb8856606 	if (dev->dev_ops->stop)
599*d30ea906Sjfb8856606 		dev->dev_ops->stop(dev);
600*d30ea906Sjfb8856606 	dev->data->started = false;
601*d30ea906Sjfb8856606 
602*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Stopped device %u", dev_id);
603*d30ea906Sjfb8856606 	return 0;
604*d30ea906Sjfb8856606 }
605*d30ea906Sjfb8856606 
606*d30ea906Sjfb8856606 int __rte_experimental
607*d30ea906Sjfb8856606 rte_bbdev_close(uint16_t dev_id)
608*d30ea906Sjfb8856606 {
609*d30ea906Sjfb8856606 	int ret;
610*d30ea906Sjfb8856606 	uint16_t i;
611*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
612*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
613*d30ea906Sjfb8856606 
614*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
615*d30ea906Sjfb8856606 
616*d30ea906Sjfb8856606 	if (dev->data->started) {
617*d30ea906Sjfb8856606 		ret = rte_bbdev_stop(dev_id);
618*d30ea906Sjfb8856606 		if (ret < 0) {
619*d30ea906Sjfb8856606 			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
620*d30ea906Sjfb8856606 			return ret;
621*d30ea906Sjfb8856606 		}
622*d30ea906Sjfb8856606 	}
623*d30ea906Sjfb8856606 
624*d30ea906Sjfb8856606 	/* Free memory used by queues */
625*d30ea906Sjfb8856606 	for (i = 0; i < dev->data->num_queues; i++) {
626*d30ea906Sjfb8856606 		ret = dev->dev_ops->queue_release(dev, i);
627*d30ea906Sjfb8856606 		if (ret < 0) {
628*d30ea906Sjfb8856606 			rte_bbdev_log(ERR, "Device %u queue %u release failed",
629*d30ea906Sjfb8856606 					dev_id, i);
630*d30ea906Sjfb8856606 			return ret;
631*d30ea906Sjfb8856606 		}
632*d30ea906Sjfb8856606 	}
633*d30ea906Sjfb8856606 	rte_free(dev->data->queues);
634*d30ea906Sjfb8856606 
635*d30ea906Sjfb8856606 	if (dev->dev_ops->close) {
636*d30ea906Sjfb8856606 		ret = dev->dev_ops->close(dev);
637*d30ea906Sjfb8856606 		if (ret < 0) {
638*d30ea906Sjfb8856606 			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
639*d30ea906Sjfb8856606 			return ret;
640*d30ea906Sjfb8856606 		}
641*d30ea906Sjfb8856606 	}
642*d30ea906Sjfb8856606 
643*d30ea906Sjfb8856606 	/* Clear configuration */
644*d30ea906Sjfb8856606 	dev->data->queues = NULL;
645*d30ea906Sjfb8856606 	dev->data->num_queues = 0;
646*d30ea906Sjfb8856606 
647*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Closed device %u", dev_id);
648*d30ea906Sjfb8856606 	return 0;
649*d30ea906Sjfb8856606 }
650*d30ea906Sjfb8856606 
651*d30ea906Sjfb8856606 int __rte_experimental
652*d30ea906Sjfb8856606 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
653*d30ea906Sjfb8856606 {
654*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
655*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
656*d30ea906Sjfb8856606 
657*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
658*d30ea906Sjfb8856606 
659*d30ea906Sjfb8856606 	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
660*d30ea906Sjfb8856606 
661*d30ea906Sjfb8856606 	if (dev->data->queues[queue_id].started) {
662*d30ea906Sjfb8856606 		rte_bbdev_log_debug("Queue %u of device %u already started",
663*d30ea906Sjfb8856606 				queue_id, dev_id);
664*d30ea906Sjfb8856606 		return 0;
665*d30ea906Sjfb8856606 	}
666*d30ea906Sjfb8856606 
667*d30ea906Sjfb8856606 	if (dev->dev_ops->queue_start) {
668*d30ea906Sjfb8856606 		int ret = dev->dev_ops->queue_start(dev, queue_id);
669*d30ea906Sjfb8856606 		if (ret < 0) {
670*d30ea906Sjfb8856606 			rte_bbdev_log(ERR, "Device %u queue %u start failed",
671*d30ea906Sjfb8856606 					dev_id, queue_id);
672*d30ea906Sjfb8856606 			return ret;
673*d30ea906Sjfb8856606 		}
674*d30ea906Sjfb8856606 	}
675*d30ea906Sjfb8856606 	dev->data->queues[queue_id].started = true;
676*d30ea906Sjfb8856606 
677*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
678*d30ea906Sjfb8856606 	return 0;
679*d30ea906Sjfb8856606 }
680*d30ea906Sjfb8856606 
681*d30ea906Sjfb8856606 int __rte_experimental
682*d30ea906Sjfb8856606 rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
683*d30ea906Sjfb8856606 {
684*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
685*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
686*d30ea906Sjfb8856606 
687*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
688*d30ea906Sjfb8856606 
689*d30ea906Sjfb8856606 	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
690*d30ea906Sjfb8856606 
691*d30ea906Sjfb8856606 	if (!dev->data->queues[queue_id].started) {
692*d30ea906Sjfb8856606 		rte_bbdev_log_debug("Queue %u of device %u already stopped",
693*d30ea906Sjfb8856606 				queue_id, dev_id);
694*d30ea906Sjfb8856606 		return 0;
695*d30ea906Sjfb8856606 	}
696*d30ea906Sjfb8856606 
697*d30ea906Sjfb8856606 	if (dev->dev_ops->queue_stop) {
698*d30ea906Sjfb8856606 		int ret = dev->dev_ops->queue_stop(dev, queue_id);
699*d30ea906Sjfb8856606 		if (ret < 0) {
700*d30ea906Sjfb8856606 			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
701*d30ea906Sjfb8856606 					dev_id, queue_id);
702*d30ea906Sjfb8856606 			return ret;
703*d30ea906Sjfb8856606 		}
704*d30ea906Sjfb8856606 	}
705*d30ea906Sjfb8856606 	dev->data->queues[queue_id].started = false;
706*d30ea906Sjfb8856606 
707*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
708*d30ea906Sjfb8856606 	return 0;
709*d30ea906Sjfb8856606 }
710*d30ea906Sjfb8856606 
711*d30ea906Sjfb8856606 /* Get device statistics */
712*d30ea906Sjfb8856606 static void
713*d30ea906Sjfb8856606 get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
714*d30ea906Sjfb8856606 {
715*d30ea906Sjfb8856606 	unsigned int q_id;
716*d30ea906Sjfb8856606 	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
717*d30ea906Sjfb8856606 		struct rte_bbdev_stats *q_stats =
718*d30ea906Sjfb8856606 				&dev->data->queues[q_id].queue_stats;
719*d30ea906Sjfb8856606 
720*d30ea906Sjfb8856606 		stats->enqueued_count += q_stats->enqueued_count;
721*d30ea906Sjfb8856606 		stats->dequeued_count += q_stats->dequeued_count;
722*d30ea906Sjfb8856606 		stats->enqueue_err_count += q_stats->enqueue_err_count;
723*d30ea906Sjfb8856606 		stats->dequeue_err_count += q_stats->dequeue_err_count;
724*d30ea906Sjfb8856606 	}
725*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
726*d30ea906Sjfb8856606 }
727*d30ea906Sjfb8856606 
728*d30ea906Sjfb8856606 static void
729*d30ea906Sjfb8856606 reset_stats_in_queues(struct rte_bbdev *dev)
730*d30ea906Sjfb8856606 {
731*d30ea906Sjfb8856606 	unsigned int q_id;
732*d30ea906Sjfb8856606 	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
733*d30ea906Sjfb8856606 		struct rte_bbdev_stats *q_stats =
734*d30ea906Sjfb8856606 				&dev->data->queues[q_id].queue_stats;
735*d30ea906Sjfb8856606 
736*d30ea906Sjfb8856606 		memset(q_stats, 0, sizeof(*q_stats));
737*d30ea906Sjfb8856606 	}
738*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
739*d30ea906Sjfb8856606 }
740*d30ea906Sjfb8856606 
741*d30ea906Sjfb8856606 int __rte_experimental
742*d30ea906Sjfb8856606 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
743*d30ea906Sjfb8856606 {
744*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
745*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
746*d30ea906Sjfb8856606 
747*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
748*d30ea906Sjfb8856606 
749*d30ea906Sjfb8856606 	if (stats == NULL) {
750*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "NULL stats structure");
751*d30ea906Sjfb8856606 		return -EINVAL;
752*d30ea906Sjfb8856606 	}
753*d30ea906Sjfb8856606 
754*d30ea906Sjfb8856606 	memset(stats, 0, sizeof(*stats));
755*d30ea906Sjfb8856606 	if (dev->dev_ops->stats_get != NULL)
756*d30ea906Sjfb8856606 		dev->dev_ops->stats_get(dev, stats);
757*d30ea906Sjfb8856606 	else
758*d30ea906Sjfb8856606 		get_stats_from_queues(dev, stats);
759*d30ea906Sjfb8856606 
760*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
761*d30ea906Sjfb8856606 	return 0;
762*d30ea906Sjfb8856606 }
763*d30ea906Sjfb8856606 
764*d30ea906Sjfb8856606 int __rte_experimental
765*d30ea906Sjfb8856606 rte_bbdev_stats_reset(uint16_t dev_id)
766*d30ea906Sjfb8856606 {
767*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
768*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
769*d30ea906Sjfb8856606 
770*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
771*d30ea906Sjfb8856606 
772*d30ea906Sjfb8856606 	if (dev->dev_ops->stats_reset != NULL)
773*d30ea906Sjfb8856606 		dev->dev_ops->stats_reset(dev);
774*d30ea906Sjfb8856606 	else
775*d30ea906Sjfb8856606 		reset_stats_in_queues(dev);
776*d30ea906Sjfb8856606 
777*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
778*d30ea906Sjfb8856606 	return 0;
779*d30ea906Sjfb8856606 }
780*d30ea906Sjfb8856606 
781*d30ea906Sjfb8856606 int __rte_experimental
782*d30ea906Sjfb8856606 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
783*d30ea906Sjfb8856606 {
784*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
785*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
786*d30ea906Sjfb8856606 
787*d30ea906Sjfb8856606 	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
788*d30ea906Sjfb8856606 
789*d30ea906Sjfb8856606 	if (dev_info == NULL) {
790*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "NULL dev info structure");
791*d30ea906Sjfb8856606 		return -EINVAL;
792*d30ea906Sjfb8856606 	}
793*d30ea906Sjfb8856606 
794*d30ea906Sjfb8856606 	/* Copy data maintained by device interface layer */
795*d30ea906Sjfb8856606 	memset(dev_info, 0, sizeof(*dev_info));
796*d30ea906Sjfb8856606 	dev_info->dev_name = dev->data->name;
797*d30ea906Sjfb8856606 	dev_info->num_queues = dev->data->num_queues;
798*d30ea906Sjfb8856606 	dev_info->bus = rte_bus_find_by_device(dev->device);
799*d30ea906Sjfb8856606 	dev_info->socket_id = dev->data->socket_id;
800*d30ea906Sjfb8856606 	dev_info->started = dev->data->started;
801*d30ea906Sjfb8856606 
802*d30ea906Sjfb8856606 	/* Copy data maintained by device driver layer */
803*d30ea906Sjfb8856606 	dev->dev_ops->info_get(dev, &dev_info->drv);
804*d30ea906Sjfb8856606 
805*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
806*d30ea906Sjfb8856606 	return 0;
807*d30ea906Sjfb8856606 }
808*d30ea906Sjfb8856606 
809*d30ea906Sjfb8856606 int __rte_experimental
810*d30ea906Sjfb8856606 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
811*d30ea906Sjfb8856606 		struct rte_bbdev_queue_info *queue_info)
812*d30ea906Sjfb8856606 {
813*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
814*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
815*d30ea906Sjfb8856606 
816*d30ea906Sjfb8856606 	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
817*d30ea906Sjfb8856606 
818*d30ea906Sjfb8856606 	if (queue_info == NULL) {
819*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "NULL queue info structure");
820*d30ea906Sjfb8856606 		return -EINVAL;
821*d30ea906Sjfb8856606 	}
822*d30ea906Sjfb8856606 
823*d30ea906Sjfb8856606 	/* Copy data to output */
824*d30ea906Sjfb8856606 	memset(queue_info, 0, sizeof(*queue_info));
825*d30ea906Sjfb8856606 	queue_info->conf = dev->data->queues[queue_id].conf;
826*d30ea906Sjfb8856606 	queue_info->started = dev->data->queues[queue_id].started;
827*d30ea906Sjfb8856606 
828*d30ea906Sjfb8856606 	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
829*d30ea906Sjfb8856606 			queue_id, dev_id);
830*d30ea906Sjfb8856606 	return 0;
831*d30ea906Sjfb8856606 }
832*d30ea906Sjfb8856606 
833*d30ea906Sjfb8856606 /* Calculate size needed to store bbdev_op, depending on type */
834*d30ea906Sjfb8856606 static unsigned int
835*d30ea906Sjfb8856606 get_bbdev_op_size(enum rte_bbdev_op_type type)
836*d30ea906Sjfb8856606 {
837*d30ea906Sjfb8856606 	unsigned int result = 0;
838*d30ea906Sjfb8856606 	switch (type) {
839*d30ea906Sjfb8856606 	case RTE_BBDEV_OP_NONE:
840*d30ea906Sjfb8856606 		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
841*d30ea906Sjfb8856606 				sizeof(struct rte_bbdev_enc_op));
842*d30ea906Sjfb8856606 		break;
843*d30ea906Sjfb8856606 	case RTE_BBDEV_OP_TURBO_DEC:
844*d30ea906Sjfb8856606 		result = sizeof(struct rte_bbdev_dec_op);
845*d30ea906Sjfb8856606 		break;
846*d30ea906Sjfb8856606 	case RTE_BBDEV_OP_TURBO_ENC:
847*d30ea906Sjfb8856606 		result = sizeof(struct rte_bbdev_enc_op);
848*d30ea906Sjfb8856606 		break;
849*d30ea906Sjfb8856606 	default:
850*d30ea906Sjfb8856606 		break;
851*d30ea906Sjfb8856606 	}
852*d30ea906Sjfb8856606 
853*d30ea906Sjfb8856606 	return result;
854*d30ea906Sjfb8856606 }
855*d30ea906Sjfb8856606 
856*d30ea906Sjfb8856606 /* Initialise a bbdev_op structure */
857*d30ea906Sjfb8856606 static void
858*d30ea906Sjfb8856606 bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
859*d30ea906Sjfb8856606 		__rte_unused unsigned int n)
860*d30ea906Sjfb8856606 {
861*d30ea906Sjfb8856606 	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;
862*d30ea906Sjfb8856606 
863*d30ea906Sjfb8856606 	if (type == RTE_BBDEV_OP_TURBO_DEC) {
864*d30ea906Sjfb8856606 		struct rte_bbdev_dec_op *op = element;
865*d30ea906Sjfb8856606 		memset(op, 0, mempool->elt_size);
866*d30ea906Sjfb8856606 		op->mempool = mempool;
867*d30ea906Sjfb8856606 	} else if (type == RTE_BBDEV_OP_TURBO_ENC) {
868*d30ea906Sjfb8856606 		struct rte_bbdev_enc_op *op = element;
869*d30ea906Sjfb8856606 		memset(op, 0, mempool->elt_size);
870*d30ea906Sjfb8856606 		op->mempool = mempool;
871*d30ea906Sjfb8856606 	}
872*d30ea906Sjfb8856606 }
873*d30ea906Sjfb8856606 
874*d30ea906Sjfb8856606 struct rte_mempool * __rte_experimental
875*d30ea906Sjfb8856606 rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
876*d30ea906Sjfb8856606 		unsigned int num_elements, unsigned int cache_size,
877*d30ea906Sjfb8856606 		int socket_id)
878*d30ea906Sjfb8856606 {
879*d30ea906Sjfb8856606 	struct rte_bbdev_op_pool_private *priv;
880*d30ea906Sjfb8856606 	struct rte_mempool *mp;
881*d30ea906Sjfb8856606 	const char *op_type_str;
882*d30ea906Sjfb8856606 
883*d30ea906Sjfb8856606 	if (name == NULL) {
884*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "NULL name for op pool");
885*d30ea906Sjfb8856606 		return NULL;
886*d30ea906Sjfb8856606 	}
887*d30ea906Sjfb8856606 
888*d30ea906Sjfb8856606 	if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
889*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
890*d30ea906Sjfb8856606 				"Invalid op type (%u), should be less than %u",
891*d30ea906Sjfb8856606 				type, RTE_BBDEV_OP_TYPE_COUNT);
892*d30ea906Sjfb8856606 		return NULL;
893*d30ea906Sjfb8856606 	}
894*d30ea906Sjfb8856606 
895*d30ea906Sjfb8856606 	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
896*d30ea906Sjfb8856606 			cache_size, sizeof(struct rte_bbdev_op_pool_private),
897*d30ea906Sjfb8856606 			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
898*d30ea906Sjfb8856606 	if (mp == NULL) {
899*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
900*d30ea906Sjfb8856606 				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
901*d30ea906Sjfb8856606 				name, num_elements, get_bbdev_op_size(type),
902*d30ea906Sjfb8856606 				rte_strerror(rte_errno));
903*d30ea906Sjfb8856606 		return NULL;
904*d30ea906Sjfb8856606 	}
905*d30ea906Sjfb8856606 
906*d30ea906Sjfb8856606 	op_type_str = rte_bbdev_op_type_str(type);
907*d30ea906Sjfb8856606 	if (op_type_str == NULL)
908*d30ea906Sjfb8856606 		return NULL;
909*d30ea906Sjfb8856606 
910*d30ea906Sjfb8856606 	rte_bbdev_log_debug(
911*d30ea906Sjfb8856606 			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
912*d30ea906Sjfb8856606 			name, num_elements, op_type_str, cache_size, socket_id,
913*d30ea906Sjfb8856606 			get_bbdev_op_size(type));
914*d30ea906Sjfb8856606 
915*d30ea906Sjfb8856606 	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
916*d30ea906Sjfb8856606 	priv->type = type;
917*d30ea906Sjfb8856606 
918*d30ea906Sjfb8856606 	return mp;
919*d30ea906Sjfb8856606 }
920*d30ea906Sjfb8856606 
921*d30ea906Sjfb8856606 int __rte_experimental
922*d30ea906Sjfb8856606 rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
923*d30ea906Sjfb8856606 		rte_bbdev_cb_fn cb_fn, void *cb_arg)
924*d30ea906Sjfb8856606 {
925*d30ea906Sjfb8856606 	struct rte_bbdev_callback *user_cb;
926*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
927*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
928*d30ea906Sjfb8856606 
929*d30ea906Sjfb8856606 	if (event >= RTE_BBDEV_EVENT_MAX) {
930*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
931*d30ea906Sjfb8856606 				"Invalid event type (%u), should be less than %u",
932*d30ea906Sjfb8856606 				event, RTE_BBDEV_EVENT_MAX);
933*d30ea906Sjfb8856606 		return -EINVAL;
934*d30ea906Sjfb8856606 	}
935*d30ea906Sjfb8856606 
936*d30ea906Sjfb8856606 	if (cb_fn == NULL) {
937*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "NULL callback function");
938*d30ea906Sjfb8856606 		return -EINVAL;
939*d30ea906Sjfb8856606 	}
940*d30ea906Sjfb8856606 
941*d30ea906Sjfb8856606 	rte_spinlock_lock(&rte_bbdev_cb_lock);
942*d30ea906Sjfb8856606 
943*d30ea906Sjfb8856606 	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
944*d30ea906Sjfb8856606 		if (user_cb->cb_fn == cb_fn &&
945*d30ea906Sjfb8856606 				user_cb->cb_arg == cb_arg &&
946*d30ea906Sjfb8856606 				user_cb->event == event)
947*d30ea906Sjfb8856606 			break;
948*d30ea906Sjfb8856606 	}
949*d30ea906Sjfb8856606 
950*d30ea906Sjfb8856606 	/* create a new callback. */
951*d30ea906Sjfb8856606 	if (user_cb == NULL) {
952*d30ea906Sjfb8856606 		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
953*d30ea906Sjfb8856606 				sizeof(struct rte_bbdev_callback), 0);
954*d30ea906Sjfb8856606 		if (user_cb != NULL) {
955*d30ea906Sjfb8856606 			user_cb->cb_fn = cb_fn;
956*d30ea906Sjfb8856606 			user_cb->cb_arg = cb_arg;
957*d30ea906Sjfb8856606 			user_cb->event = event;
958*d30ea906Sjfb8856606 			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
959*d30ea906Sjfb8856606 		}
960*d30ea906Sjfb8856606 	}
961*d30ea906Sjfb8856606 
962*d30ea906Sjfb8856606 	rte_spinlock_unlock(&rte_bbdev_cb_lock);
963*d30ea906Sjfb8856606 	return (user_cb == NULL) ? -ENOMEM : 0;
964*d30ea906Sjfb8856606 }
965*d30ea906Sjfb8856606 
966*d30ea906Sjfb8856606 int __rte_experimental
967*d30ea906Sjfb8856606 rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
968*d30ea906Sjfb8856606 		rte_bbdev_cb_fn cb_fn, void *cb_arg)
969*d30ea906Sjfb8856606 {
970*d30ea906Sjfb8856606 	int ret = 0;
971*d30ea906Sjfb8856606 	struct rte_bbdev_callback *cb, *next;
972*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
973*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
974*d30ea906Sjfb8856606 
975*d30ea906Sjfb8856606 	if (event >= RTE_BBDEV_EVENT_MAX) {
976*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
977*d30ea906Sjfb8856606 				"Invalid event type (%u), should be less than %u",
978*d30ea906Sjfb8856606 				event, RTE_BBDEV_EVENT_MAX);
979*d30ea906Sjfb8856606 		return -EINVAL;
980*d30ea906Sjfb8856606 	}
981*d30ea906Sjfb8856606 
982*d30ea906Sjfb8856606 	if (cb_fn == NULL) {
983*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
984*d30ea906Sjfb8856606 				"NULL callback function cannot be unregistered");
985*d30ea906Sjfb8856606 		return -EINVAL;
986*d30ea906Sjfb8856606 	}
987*d30ea906Sjfb8856606 
988*d30ea906Sjfb8856606 	dev = &rte_bbdev_devices[dev_id];
989*d30ea906Sjfb8856606 	rte_spinlock_lock(&rte_bbdev_cb_lock);
990*d30ea906Sjfb8856606 
991*d30ea906Sjfb8856606 	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {
992*d30ea906Sjfb8856606 
993*d30ea906Sjfb8856606 		next = TAILQ_NEXT(cb, next);
994*d30ea906Sjfb8856606 
995*d30ea906Sjfb8856606 		if (cb->cb_fn != cb_fn || cb->event != event ||
996*d30ea906Sjfb8856606 				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
997*d30ea906Sjfb8856606 			continue;
998*d30ea906Sjfb8856606 
999*d30ea906Sjfb8856606 		/* If this callback is not executing right now, remove it. */
1000*d30ea906Sjfb8856606 		if (cb->active == 0) {
1001*d30ea906Sjfb8856606 			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
1002*d30ea906Sjfb8856606 			rte_free(cb);
1003*d30ea906Sjfb8856606 		} else
1004*d30ea906Sjfb8856606 			ret = -EAGAIN;
1005*d30ea906Sjfb8856606 	}
1006*d30ea906Sjfb8856606 
1007*d30ea906Sjfb8856606 	rte_spinlock_unlock(&rte_bbdev_cb_lock);
1008*d30ea906Sjfb8856606 	return ret;
1009*d30ea906Sjfb8856606 }
1010*d30ea906Sjfb8856606 
1011*d30ea906Sjfb8856606 void __rte_experimental
1012*d30ea906Sjfb8856606 rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
1013*d30ea906Sjfb8856606 	enum rte_bbdev_event_type event, void *ret_param)
1014*d30ea906Sjfb8856606 {
1015*d30ea906Sjfb8856606 	struct rte_bbdev_callback *cb_lst;
1016*d30ea906Sjfb8856606 	struct rte_bbdev_callback dev_cb;
1017*d30ea906Sjfb8856606 
1018*d30ea906Sjfb8856606 	if (dev == NULL) {
1019*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "NULL device");
1020*d30ea906Sjfb8856606 		return;
1021*d30ea906Sjfb8856606 	}
1022*d30ea906Sjfb8856606 
1023*d30ea906Sjfb8856606 	if (dev->data == NULL) {
1024*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "NULL data structure");
1025*d30ea906Sjfb8856606 		return;
1026*d30ea906Sjfb8856606 	}
1027*d30ea906Sjfb8856606 
1028*d30ea906Sjfb8856606 	if (event >= RTE_BBDEV_EVENT_MAX) {
1029*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
1030*d30ea906Sjfb8856606 				"Invalid event type (%u), should be less than %u",
1031*d30ea906Sjfb8856606 				event, RTE_BBDEV_EVENT_MAX);
1032*d30ea906Sjfb8856606 		return;
1033*d30ea906Sjfb8856606 	}
1034*d30ea906Sjfb8856606 
1035*d30ea906Sjfb8856606 	rte_spinlock_lock(&rte_bbdev_cb_lock);
1036*d30ea906Sjfb8856606 	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
1037*d30ea906Sjfb8856606 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1038*d30ea906Sjfb8856606 			continue;
1039*d30ea906Sjfb8856606 		dev_cb = *cb_lst;
1040*d30ea906Sjfb8856606 		cb_lst->active = 1;
1041*d30ea906Sjfb8856606 		if (ret_param != NULL)
1042*d30ea906Sjfb8856606 			dev_cb.ret_param = ret_param;
1043*d30ea906Sjfb8856606 
1044*d30ea906Sjfb8856606 		rte_spinlock_unlock(&rte_bbdev_cb_lock);
1045*d30ea906Sjfb8856606 		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1046*d30ea906Sjfb8856606 				dev_cb.cb_arg, dev_cb.ret_param);
1047*d30ea906Sjfb8856606 		rte_spinlock_lock(&rte_bbdev_cb_lock);
1048*d30ea906Sjfb8856606 		cb_lst->active = 0;
1049*d30ea906Sjfb8856606 	}
1050*d30ea906Sjfb8856606 	rte_spinlock_unlock(&rte_bbdev_cb_lock);
1051*d30ea906Sjfb8856606 }
1052*d30ea906Sjfb8856606 
1053*d30ea906Sjfb8856606 int __rte_experimental
1054*d30ea906Sjfb8856606 rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
1055*d30ea906Sjfb8856606 {
1056*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
1057*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
1058*d30ea906Sjfb8856606 	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
1059*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
1060*d30ea906Sjfb8856606 	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
1061*d30ea906Sjfb8856606 	return dev->dev_ops->queue_intr_enable(dev, queue_id);
1062*d30ea906Sjfb8856606 }
1063*d30ea906Sjfb8856606 
1064*d30ea906Sjfb8856606 int __rte_experimental
1065*d30ea906Sjfb8856606 rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
1066*d30ea906Sjfb8856606 {
1067*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
1068*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
1069*d30ea906Sjfb8856606 	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
1070*d30ea906Sjfb8856606 	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
1071*d30ea906Sjfb8856606 	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
1072*d30ea906Sjfb8856606 	return dev->dev_ops->queue_intr_disable(dev, queue_id);
1073*d30ea906Sjfb8856606 }
1074*d30ea906Sjfb8856606 
1075*d30ea906Sjfb8856606 int __rte_experimental
1076*d30ea906Sjfb8856606 rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
1077*d30ea906Sjfb8856606 		void *data)
1078*d30ea906Sjfb8856606 {
1079*d30ea906Sjfb8856606 	uint32_t vec;
1080*d30ea906Sjfb8856606 	struct rte_bbdev *dev = get_dev(dev_id);
1081*d30ea906Sjfb8856606 	struct rte_intr_handle *intr_handle;
1082*d30ea906Sjfb8856606 	int ret;
1083*d30ea906Sjfb8856606 
1084*d30ea906Sjfb8856606 	VALID_DEV_OR_RET_ERR(dev, dev_id);
1085*d30ea906Sjfb8856606 	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
1086*d30ea906Sjfb8856606 
1087*d30ea906Sjfb8856606 	intr_handle = dev->intr_handle;
1088*d30ea906Sjfb8856606 	if (!intr_handle || !intr_handle->intr_vec) {
1089*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "Device %u intr handle unset\n", dev_id);
1090*d30ea906Sjfb8856606 		return -ENOTSUP;
1091*d30ea906Sjfb8856606 	}
1092*d30ea906Sjfb8856606 
1093*d30ea906Sjfb8856606 	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
1094*d30ea906Sjfb8856606 		rte_bbdev_log(ERR, "Device %u queue_id %u is too big\n",
1095*d30ea906Sjfb8856606 				dev_id, queue_id);
1096*d30ea906Sjfb8856606 		return -ENOTSUP;
1097*d30ea906Sjfb8856606 	}
1098*d30ea906Sjfb8856606 
1099*d30ea906Sjfb8856606 	vec = intr_handle->intr_vec[queue_id];
1100*d30ea906Sjfb8856606 	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
1101*d30ea906Sjfb8856606 	if (ret && (ret != -EEXIST)) {
1102*d30ea906Sjfb8856606 		rte_bbdev_log(ERR,
1103*d30ea906Sjfb8856606 				"dev %u q %u int ctl error op %d epfd %d vec %u\n",
1104*d30ea906Sjfb8856606 				dev_id, queue_id, op, epfd, vec);
1105*d30ea906Sjfb8856606 		return ret;
1106*d30ea906Sjfb8856606 	}
1107*d30ea906Sjfb8856606 
1108*d30ea906Sjfb8856606 	return 0;
1109*d30ea906Sjfb8856606 }
1110*d30ea906Sjfb8856606 
1111*d30ea906Sjfb8856606 
1112*d30ea906Sjfb8856606 const char * __rte_experimental
1113*d30ea906Sjfb8856606 rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
1114*d30ea906Sjfb8856606 {
1115*d30ea906Sjfb8856606 	static const char * const op_types[] = {
1116*d30ea906Sjfb8856606 		"RTE_BBDEV_OP_NONE",
1117*d30ea906Sjfb8856606 		"RTE_BBDEV_OP_TURBO_DEC",
1118*d30ea906Sjfb8856606 		"RTE_BBDEV_OP_TURBO_ENC",
1119*d30ea906Sjfb8856606 	};
1120*d30ea906Sjfb8856606 
1121*d30ea906Sjfb8856606 	if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
1122*d30ea906Sjfb8856606 		return op_types[op_type];
1123*d30ea906Sjfb8856606 
1124*d30ea906Sjfb8856606 	rte_bbdev_log(ERR, "Invalid operation type");
1125*d30ea906Sjfb8856606 	return NULL;
1126*d30ea906Sjfb8856606 }
1127*d30ea906Sjfb8856606 
1128*d30ea906Sjfb8856606 RTE_INIT(rte_bbdev_init_log)
1129*d30ea906Sjfb8856606 {
1130*d30ea906Sjfb8856606 	bbdev_logtype = rte_log_register("lib.bbdev");
1131*d30ea906Sjfb8856606 	if (bbdev_logtype >= 0)
1132*d30ea906Sjfb8856606 		rte_log_set_level(bbdev_logtype, RTE_LOG_NOTICE);
1133*d30ea906Sjfb8856606 }
1134