xref: /f-stack/dpdk/lib/librte_bbdev/rte_bbdev.h (revision d30ea906)
1*d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2*d30ea906Sjfb8856606  * Copyright(c) 2017 Intel Corporation
3*d30ea906Sjfb8856606  */
4*d30ea906Sjfb8856606 
5*d30ea906Sjfb8856606 #ifndef _RTE_BBDEV_H_
6*d30ea906Sjfb8856606 #define _RTE_BBDEV_H_
7*d30ea906Sjfb8856606 
8*d30ea906Sjfb8856606 /**
9*d30ea906Sjfb8856606  * @file rte_bbdev.h
10*d30ea906Sjfb8856606  *
11*d30ea906Sjfb8856606  * Wireless base band device abstraction APIs.
12*d30ea906Sjfb8856606  *
13*d30ea906Sjfb8856606  * @warning
14*d30ea906Sjfb8856606  * @b EXPERIMENTAL: this API may change without prior notice
15*d30ea906Sjfb8856606  *
16*d30ea906Sjfb8856606  * This API allows an application to discover, configure and use a device to
17*d30ea906Sjfb8856606  * process operations. An asynchronous API (enqueue, followed by later dequeue)
18*d30ea906Sjfb8856606  * is used for processing operations.
19*d30ea906Sjfb8856606  *
20*d30ea906Sjfb8856606  * The functions in this API are not thread-safe when called on the same
21*d30ea906Sjfb8856606  * target object (a device, or a queue on a device), with the exception that
22*d30ea906Sjfb8856606  * one thread can enqueue operations to a queue while another thread dequeues
23*d30ea906Sjfb8856606  * from the same queue.
24*d30ea906Sjfb8856606  */
25*d30ea906Sjfb8856606 
26*d30ea906Sjfb8856606 #ifdef __cplusplus
27*d30ea906Sjfb8856606 extern "C" {
28*d30ea906Sjfb8856606 #endif
29*d30ea906Sjfb8856606 
30*d30ea906Sjfb8856606 #include <stdint.h>
31*d30ea906Sjfb8856606 #include <stdbool.h>
32*d30ea906Sjfb8856606 #include <string.h>
33*d30ea906Sjfb8856606 
34*d30ea906Sjfb8856606 #include <rte_compat.h>
35*d30ea906Sjfb8856606 #include <rte_atomic.h>
36*d30ea906Sjfb8856606 #include <rte_bus.h>
37*d30ea906Sjfb8856606 #include <rte_cpuflags.h>
38*d30ea906Sjfb8856606 #include <rte_memory.h>
39*d30ea906Sjfb8856606 
40*d30ea906Sjfb8856606 #include "rte_bbdev_op.h"
41*d30ea906Sjfb8856606 
42*d30ea906Sjfb8856606 #ifndef RTE_BBDEV_MAX_DEVS
43*d30ea906Sjfb8856606 #define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
44*d30ea906Sjfb8856606 #endif
45*d30ea906Sjfb8856606 
/** Flags indicating the current state of a BBDEV device */
enum rte_bbdev_state {
	RTE_BBDEV_UNUSED,      /**< Device slot is free / not yet initialised */
	RTE_BBDEV_INITIALIZED  /**< Device has been successfully initialised */
};
51*d30ea906Sjfb8856606 
52*d30ea906Sjfb8856606 /**
53*d30ea906Sjfb8856606  * Get the total number of devices that have been successfully initialised.
54*d30ea906Sjfb8856606  *
55*d30ea906Sjfb8856606  * @return
56*d30ea906Sjfb8856606  *   The total number of usable devices.
57*d30ea906Sjfb8856606  */
58*d30ea906Sjfb8856606 uint16_t __rte_experimental
59*d30ea906Sjfb8856606 rte_bbdev_count(void);
60*d30ea906Sjfb8856606 
61*d30ea906Sjfb8856606 /**
62*d30ea906Sjfb8856606  * Check if a device is valid.
63*d30ea906Sjfb8856606  *
64*d30ea906Sjfb8856606  * @param dev_id
65*d30ea906Sjfb8856606  *   The identifier of the device.
66*d30ea906Sjfb8856606  *
67*d30ea906Sjfb8856606  * @return
68*d30ea906Sjfb8856606  *   true if device ID is valid and device is attached, false otherwise.
69*d30ea906Sjfb8856606  */
70*d30ea906Sjfb8856606 bool __rte_experimental
71*d30ea906Sjfb8856606 rte_bbdev_is_valid(uint16_t dev_id);
72*d30ea906Sjfb8856606 
73*d30ea906Sjfb8856606 /**
74*d30ea906Sjfb8856606  * Get the next enabled device.
75*d30ea906Sjfb8856606  *
76*d30ea906Sjfb8856606  * @param dev_id
77*d30ea906Sjfb8856606  *   The current device
78*d30ea906Sjfb8856606  *
79*d30ea906Sjfb8856606  * @return
80*d30ea906Sjfb8856606  *   - The next device, or
81*d30ea906Sjfb8856606  *   - RTE_BBDEV_MAX_DEVS if none found
82*d30ea906Sjfb8856606  */
83*d30ea906Sjfb8856606 uint16_t __rte_experimental
84*d30ea906Sjfb8856606 rte_bbdev_find_next(uint16_t dev_id);
85*d30ea906Sjfb8856606 
/**
 * Iterate through all enabled devices.
 * The initial call passes -1, which wraps to UINT16_MAX in the uint16_t
 * parameter of rte_bbdev_find_next(); presumably the implementation treats
 * this as "start the search from device 0" -- TODO confirm against
 * rte_bbdev_find_next()'s definition. Iteration ends when the returned id
 * reaches RTE_BBDEV_MAX_DEVS (documented "none found" sentinel above).
 */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))
90*d30ea906Sjfb8856606 
91*d30ea906Sjfb8856606 /**
 * Set up device queues.
93*d30ea906Sjfb8856606  * This function must be called on a device before setting up the queues and
94*d30ea906Sjfb8856606  * starting the device. It can also be called when a device is in the stopped
95*d30ea906Sjfb8856606  * state. If any device queues have been configured their configuration will be
96*d30ea906Sjfb8856606  * cleared by a call to this function.
97*d30ea906Sjfb8856606  *
98*d30ea906Sjfb8856606  * @param dev_id
99*d30ea906Sjfb8856606  *   The identifier of the device.
100*d30ea906Sjfb8856606  * @param num_queues
101*d30ea906Sjfb8856606  *   Number of queues to configure on device.
102*d30ea906Sjfb8856606  * @param socket_id
103*d30ea906Sjfb8856606  *   ID of a socket which will be used to allocate memory.
104*d30ea906Sjfb8856606  *
105*d30ea906Sjfb8856606  * @return
106*d30ea906Sjfb8856606  *   - 0 on success
107*d30ea906Sjfb8856606  *   - -ENODEV if dev_id is invalid or the device is corrupted
108*d30ea906Sjfb8856606  *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
109*d30ea906Sjfb8856606  *   - -EBUSY if the identified device has already started
110*d30ea906Sjfb8856606  *   - -ENOMEM if unable to allocate memory
111*d30ea906Sjfb8856606  */
112*d30ea906Sjfb8856606 int __rte_experimental
113*d30ea906Sjfb8856606 rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
114*d30ea906Sjfb8856606 
115*d30ea906Sjfb8856606 /**
116*d30ea906Sjfb8856606  * Enable interrupts.
117*d30ea906Sjfb8856606  * This function may be called before starting the device to enable the
118*d30ea906Sjfb8856606  * interrupts if they are available.
119*d30ea906Sjfb8856606  *
120*d30ea906Sjfb8856606  * @param dev_id
121*d30ea906Sjfb8856606  *   The identifier of the device.
122*d30ea906Sjfb8856606  *
123*d30ea906Sjfb8856606  * @return
124*d30ea906Sjfb8856606  *   - 0 on success
125*d30ea906Sjfb8856606  *   - -ENODEV if dev_id is invalid or the device is corrupted
126*d30ea906Sjfb8856606  *   - -EBUSY if the identified device has already started
127*d30ea906Sjfb8856606  *   - -ENOTSUP if the interrupts are not supported by the device
128*d30ea906Sjfb8856606  */
129*d30ea906Sjfb8856606 int __rte_experimental
130*d30ea906Sjfb8856606 rte_bbdev_intr_enable(uint16_t dev_id);
131*d30ea906Sjfb8856606 
/** Device queue configuration structure */
struct rte_bbdev_queue_conf {
	int socket;  /**< NUMA socket used for memory allocation */
	/** Size of queue; must be a power of 2 and no larger than the
	 * driver's rte_bbdev_driver_info.queue_size_lim
	 */
	uint32_t queue_size;
	uint8_t priority;  /**< Queue priority */
	bool deferred_start; /**< Do not start queue when device is started. */
	enum rte_bbdev_op_type op_type; /**< Operation type */
};
140*d30ea906Sjfb8856606 
141*d30ea906Sjfb8856606 /**
142*d30ea906Sjfb8856606  * Configure a queue on a device.
143*d30ea906Sjfb8856606  * This function can be called after device configuration, and before starting.
144*d30ea906Sjfb8856606  * It can also be called when the device or the queue is in the stopped state.
145*d30ea906Sjfb8856606  *
146*d30ea906Sjfb8856606  * @param dev_id
147*d30ea906Sjfb8856606  *   The identifier of the device.
148*d30ea906Sjfb8856606  * @param queue_id
149*d30ea906Sjfb8856606  *   The index of the queue.
150*d30ea906Sjfb8856606  * @param conf
151*d30ea906Sjfb8856606  *   The queue configuration. If NULL, a default configuration will be used.
152*d30ea906Sjfb8856606  *
153*d30ea906Sjfb8856606  * @return
154*d30ea906Sjfb8856606  *   - 0 on success
155*d30ea906Sjfb8856606  *   - EINVAL if the identified queue size or priority are invalid
156*d30ea906Sjfb8856606  *   - EBUSY if the identified queue or its device have already started
157*d30ea906Sjfb8856606  */
158*d30ea906Sjfb8856606 int __rte_experimental
159*d30ea906Sjfb8856606 rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
160*d30ea906Sjfb8856606 		const struct rte_bbdev_queue_conf *conf);
161*d30ea906Sjfb8856606 
162*d30ea906Sjfb8856606 /**
163*d30ea906Sjfb8856606  * Start a device.
164*d30ea906Sjfb8856606  * This is the last step needed before enqueueing operations is possible.
165*d30ea906Sjfb8856606  *
166*d30ea906Sjfb8856606  * @param dev_id
167*d30ea906Sjfb8856606  *   The identifier of the device.
168*d30ea906Sjfb8856606  *
169*d30ea906Sjfb8856606  * @return
170*d30ea906Sjfb8856606  *   - 0 on success
171*d30ea906Sjfb8856606  *   - negative value on failure - as returned from PMD driver
172*d30ea906Sjfb8856606  */
173*d30ea906Sjfb8856606 int __rte_experimental
174*d30ea906Sjfb8856606 rte_bbdev_start(uint16_t dev_id);
175*d30ea906Sjfb8856606 
176*d30ea906Sjfb8856606 /**
177*d30ea906Sjfb8856606  * Stop a device.
178*d30ea906Sjfb8856606  * The device can be reconfigured, and restarted after being stopped.
179*d30ea906Sjfb8856606  *
180*d30ea906Sjfb8856606  * @param dev_id
181*d30ea906Sjfb8856606  *   The identifier of the device.
182*d30ea906Sjfb8856606  *
183*d30ea906Sjfb8856606  * @return
184*d30ea906Sjfb8856606  *   - 0 on success
185*d30ea906Sjfb8856606  */
186*d30ea906Sjfb8856606 int __rte_experimental
187*d30ea906Sjfb8856606 rte_bbdev_stop(uint16_t dev_id);
188*d30ea906Sjfb8856606 
189*d30ea906Sjfb8856606 /**
190*d30ea906Sjfb8856606  * Close a device.
191*d30ea906Sjfb8856606  * The device cannot be restarted without reconfiguration!
192*d30ea906Sjfb8856606  *
193*d30ea906Sjfb8856606  * @param dev_id
194*d30ea906Sjfb8856606  *   The identifier of the device.
195*d30ea906Sjfb8856606  *
196*d30ea906Sjfb8856606  * @return
197*d30ea906Sjfb8856606  *   - 0 on success
198*d30ea906Sjfb8856606  */
199*d30ea906Sjfb8856606 int __rte_experimental
200*d30ea906Sjfb8856606 rte_bbdev_close(uint16_t dev_id);
201*d30ea906Sjfb8856606 
202*d30ea906Sjfb8856606 /**
203*d30ea906Sjfb8856606  * Start a specified queue on a device.
204*d30ea906Sjfb8856606  * This is only needed if the queue has been stopped, or if the deferred_start
205*d30ea906Sjfb8856606  * flag has been set when configuring the queue.
206*d30ea906Sjfb8856606  *
207*d30ea906Sjfb8856606  * @param dev_id
208*d30ea906Sjfb8856606  *   The identifier of the device.
209*d30ea906Sjfb8856606  * @param queue_id
210*d30ea906Sjfb8856606  *   The index of the queue.
211*d30ea906Sjfb8856606  *
212*d30ea906Sjfb8856606  * @return
213*d30ea906Sjfb8856606  *   - 0 on success
214*d30ea906Sjfb8856606  *   - negative value on failure - as returned from PMD driver
215*d30ea906Sjfb8856606  */
216*d30ea906Sjfb8856606 int __rte_experimental
217*d30ea906Sjfb8856606 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
218*d30ea906Sjfb8856606 
219*d30ea906Sjfb8856606 /**
 * Stop a specified queue on a device, to allow reconfiguration.
221*d30ea906Sjfb8856606  *
222*d30ea906Sjfb8856606  * @param dev_id
223*d30ea906Sjfb8856606  *   The identifier of the device.
224*d30ea906Sjfb8856606  * @param queue_id
225*d30ea906Sjfb8856606  *   The index of the queue.
226*d30ea906Sjfb8856606  *
227*d30ea906Sjfb8856606  * @return
228*d30ea906Sjfb8856606  *   - 0 on success
229*d30ea906Sjfb8856606  *   - negative value on failure - as returned from PMD driver
230*d30ea906Sjfb8856606  */
231*d30ea906Sjfb8856606 int __rte_experimental
232*d30ea906Sjfb8856606 rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
233*d30ea906Sjfb8856606 
/** Device statistics. */
struct rte_bbdev_stats {
	uint64_t enqueued_count;  /**< Count of all operations enqueued */
	uint64_t dequeued_count;  /**< Count of all operations dequeued */
	/** Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/** Total error count on operations dequeued */
	uint64_t dequeue_err_count;
	/** Offload time (units are driver-defined -- confirm with the PMD) */
	uint64_t offload_time;
};
245*d30ea906Sjfb8856606 
246*d30ea906Sjfb8856606 /**
247*d30ea906Sjfb8856606  * Retrieve the general I/O statistics of a device.
248*d30ea906Sjfb8856606  *
249*d30ea906Sjfb8856606  * @param dev_id
250*d30ea906Sjfb8856606  *   The identifier of the device.
251*d30ea906Sjfb8856606  * @param stats
252*d30ea906Sjfb8856606  *   Pointer to structure to where statistics will be copied. On error, this
253*d30ea906Sjfb8856606  *   location may or may not have been modified.
254*d30ea906Sjfb8856606  *
255*d30ea906Sjfb8856606  * @return
256*d30ea906Sjfb8856606  *   - 0 on success
257*d30ea906Sjfb8856606  *   - EINVAL if invalid parameter pointer is provided
258*d30ea906Sjfb8856606  */
259*d30ea906Sjfb8856606 int __rte_experimental
260*d30ea906Sjfb8856606 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
261*d30ea906Sjfb8856606 
262*d30ea906Sjfb8856606 /**
263*d30ea906Sjfb8856606  * Reset the statistics of a device.
264*d30ea906Sjfb8856606  *
265*d30ea906Sjfb8856606  * @param dev_id
266*d30ea906Sjfb8856606  *   The identifier of the device.
267*d30ea906Sjfb8856606  * @return
268*d30ea906Sjfb8856606  *   - 0 on success
269*d30ea906Sjfb8856606  */
270*d30ea906Sjfb8856606 int __rte_experimental
271*d30ea906Sjfb8856606 rte_bbdev_stats_reset(uint16_t dev_id);
272*d30ea906Sjfb8856606 
/** Device information supplied by the device's driver */
struct rte_bbdev_driver_info {
	/** Driver name */
	const char *driver_name;

	/** Maximum number of queues supported by the device */
	unsigned int max_num_queues;
	/** Queue size limit (queue size must also be power of 2) */
	uint32_t queue_size_lim;
	/** Set if device off-loads operation to hardware */
	bool hardware_accelerated;
	/** Max value supported by queue priority for DL */
	uint8_t max_dl_queue_priority;
	/** Max value supported by queue priority for UL */
	uint8_t max_ul_queue_priority;
	/** Set if device supports per-queue interrupts */
	bool queue_intr_supported;
	/** Minimum alignment of buffers, in bytes */
	uint16_t min_alignment;
	/** Default queue configuration used if none is supplied */
	struct rte_bbdev_queue_conf default_queue_conf;
	/** Device operation capabilities; presumably terminated by an
	 * RTE_BBDEV_OP_NONE entry (see RTE_BBDEV_END_OF_CAPABILITIES_LIST)
	 */
	const struct rte_bbdev_op_cap *capabilities;
	/** Device cpu_flag requirements */
	const enum rte_cpu_flag_t *cpu_flag_reqs;
};
299*d30ea906Sjfb8856606 
/**
 * Macro used at end of bbdev PMD list.
 * Expands to a sentinel capability entry whose op type is RTE_BBDEV_OP_NONE.
 */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }
303*d30ea906Sjfb8856606 
/**
 * Device information structure used by an application to discover a device's
 * capabilities and current configuration
 */
struct rte_bbdev_info {
	int socket_id;  /**< NUMA socket that device is on */
	const char *dev_name;  /**< Unique device name */
	const struct rte_bus *bus;  /**< Bus information */
	uint16_t num_queues;  /**< Number of queues currently configured */
	bool started;  /**< Set if device is currently started */
	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
};
316*d30ea906Sjfb8856606 
317*d30ea906Sjfb8856606 /**
318*d30ea906Sjfb8856606  * Retrieve information about a device.
319*d30ea906Sjfb8856606  *
320*d30ea906Sjfb8856606  * @param dev_id
321*d30ea906Sjfb8856606  *   The identifier of the device.
322*d30ea906Sjfb8856606  * @param dev_info
323*d30ea906Sjfb8856606  *   Pointer to structure to where information will be copied. On error, this
324*d30ea906Sjfb8856606  *   location may or may not have been modified.
325*d30ea906Sjfb8856606  *
326*d30ea906Sjfb8856606  * @return
327*d30ea906Sjfb8856606  *   - 0 on success
328*d30ea906Sjfb8856606  *   - EINVAL if invalid parameter pointer is provided
329*d30ea906Sjfb8856606  */
330*d30ea906Sjfb8856606 int __rte_experimental
331*d30ea906Sjfb8856606 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
332*d30ea906Sjfb8856606 
/** Queue information */
struct rte_bbdev_queue_info {
	/** Current queue configuration */
	struct rte_bbdev_queue_conf conf;
	/** Set if queue is currently started */
	bool started;
};
340*d30ea906Sjfb8856606 
341*d30ea906Sjfb8856606 /**
342*d30ea906Sjfb8856606  * Retrieve information about a specific queue on a device.
343*d30ea906Sjfb8856606  *
344*d30ea906Sjfb8856606  * @param dev_id
345*d30ea906Sjfb8856606  *   The identifier of the device.
346*d30ea906Sjfb8856606  * @param queue_id
347*d30ea906Sjfb8856606  *   The index of the queue.
348*d30ea906Sjfb8856606  * @param queue_info
349*d30ea906Sjfb8856606  *   Pointer to structure to where information will be copied. On error, this
350*d30ea906Sjfb8856606  *   location may or may not have been modified.
351*d30ea906Sjfb8856606  *
352*d30ea906Sjfb8856606  * @return
353*d30ea906Sjfb8856606  *   - 0 on success
354*d30ea906Sjfb8856606  *   - EINVAL if invalid parameter pointer is provided
355*d30ea906Sjfb8856606  */
356*d30ea906Sjfb8856606 int __rte_experimental
357*d30ea906Sjfb8856606 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
358*d30ea906Sjfb8856606 		struct rte_bbdev_queue_info *queue_info);
359*d30ea906Sjfb8856606 
/** @internal The data structure associated with each queue of a device. */
struct rte_bbdev_queue_data {
	void *queue_private;  /**< Driver-specific per-queue data */
	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
	bool started;  /**< Queue state: set if the queue is started */
};
367*d30ea906Sjfb8856606 
/**
 * @internal Enqueue encode operations for processing on queue of a device.
 * Returns the number of operations actually enqueued from @p ops.
 */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num);

/**
 * @internal Enqueue decode operations for processing on queue of a device.
 * Returns the number of operations actually enqueued from @p ops.
 */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num);

/**
 * @internal Dequeue encode operations from a queue of a device.
 * Returns the number of operations actually written into @p ops.
 */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num);

/**
 * @internal Dequeue decode operations from a queue of a device.
 * Returns the number of operations actually written into @p ops.
 */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num);
389*d30ea906Sjfb8856606 
390*d30ea906Sjfb8856606 #define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */
391*d30ea906Sjfb8856606 
/**
 * @internal The data associated with a device, with no function pointers.
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration. Drivers can access
 * these fields, but should never write to them!
 */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
	void *dev_private;  /**< Driver-specific private data */
	uint16_t num_queues;  /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
	uint16_t dev_id;  /**< Device ID */
	int socket_id;  /**< NUMA socket that device is on */
	bool started;  /**< Device run-time state */
	/** Counter of processes using the device (multi-process attach
	 * reference count -- see librte_bbdev implementation)
	 */
	rte_atomic16_t process_cnt;
};
409*d30ea906Sjfb8856606 
410*d30ea906Sjfb8856606 /* Forward declarations */
411*d30ea906Sjfb8856606 struct rte_bbdev_ops;
412*d30ea906Sjfb8856606 struct rte_bbdev_callback;
413*d30ea906Sjfb8856606 struct rte_intr_handle;
414*d30ea906Sjfb8856606 
415*d30ea906Sjfb8856606 /** Structure to keep track of registered callbacks */
416*d30ea906Sjfb8856606 TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
417*d30ea906Sjfb8856606 
/**
 * @internal The data structure associated with a device. Drivers can access
 * these fields, but should only write to the *_ops fields.
 */
struct __rte_cache_aligned rte_bbdev {
	/** Enqueue encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	/** Enqueue decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	/** Dequeue encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	/** Dequeue decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
	struct rte_bbdev_data *data;  /**< Pointer to device data */
	enum rte_bbdev_state state;  /**< If device is currently used or not */
	struct rte_device *device; /**< Backing device */
	/** User application callback for interrupts if present */
	struct rte_bbdev_cb_list list_cbs;
	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
};
439*d30ea906Sjfb8856606 
440*d30ea906Sjfb8856606 /** @internal array of all devices */
441*d30ea906Sjfb8856606 extern struct rte_bbdev rte_bbdev_devices[];
442*d30ea906Sjfb8856606 
443*d30ea906Sjfb8856606 /**
444*d30ea906Sjfb8856606  * Enqueue a burst of processed encode operations to a queue of the device.
445*d30ea906Sjfb8856606  * This functions only enqueues as many operations as currently possible and
446*d30ea906Sjfb8856606  * does not block until @p num_ops entries in the queue are available.
447*d30ea906Sjfb8856606  * This function does not provide any error notification to avoid the
448*d30ea906Sjfb8856606  * corresponding overhead.
449*d30ea906Sjfb8856606  *
450*d30ea906Sjfb8856606  * @param dev_id
451*d30ea906Sjfb8856606  *   The identifier of the device.
452*d30ea906Sjfb8856606  * @param queue_id
453*d30ea906Sjfb8856606  *   The index of the queue.
454*d30ea906Sjfb8856606  * @param ops
455*d30ea906Sjfb8856606  *   Pointer array containing operations to be enqueued Must have at least
456*d30ea906Sjfb8856606  *   @p num_ops entries
457*d30ea906Sjfb8856606  * @param num_ops
458*d30ea906Sjfb8856606  *   The maximum number of operations to enqueue.
459*d30ea906Sjfb8856606  *
460*d30ea906Sjfb8856606  * @return
461*d30ea906Sjfb8856606  *   The number of operations actually enqueued (this is the number of processed
462*d30ea906Sjfb8856606  *   entries in the @p ops array).
463*d30ea906Sjfb8856606  */
464*d30ea906Sjfb8856606 static inline uint16_t
465*d30ea906Sjfb8856606 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
466*d30ea906Sjfb8856606 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
467*d30ea906Sjfb8856606 {
468*d30ea906Sjfb8856606 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
469*d30ea906Sjfb8856606 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
470*d30ea906Sjfb8856606 	return dev->enqueue_enc_ops(q_data, ops, num_ops);
471*d30ea906Sjfb8856606 }
472*d30ea906Sjfb8856606 
473*d30ea906Sjfb8856606 /**
474*d30ea906Sjfb8856606  * Enqueue a burst of processed decode operations to a queue of the device.
475*d30ea906Sjfb8856606  * This functions only enqueues as many operations as currently possible and
476*d30ea906Sjfb8856606  * does not block until @p num_ops entries in the queue are available.
477*d30ea906Sjfb8856606  * This function does not provide any error notification to avoid the
478*d30ea906Sjfb8856606  * corresponding overhead.
479*d30ea906Sjfb8856606  *
480*d30ea906Sjfb8856606  * @param dev_id
481*d30ea906Sjfb8856606  *   The identifier of the device.
482*d30ea906Sjfb8856606  * @param queue_id
483*d30ea906Sjfb8856606  *   The index of the queue.
484*d30ea906Sjfb8856606  * @param ops
485*d30ea906Sjfb8856606  *   Pointer array containing operations to be enqueued Must have at least
486*d30ea906Sjfb8856606  *   @p num_ops entries
487*d30ea906Sjfb8856606  * @param num_ops
488*d30ea906Sjfb8856606  *   The maximum number of operations to enqueue.
489*d30ea906Sjfb8856606  *
490*d30ea906Sjfb8856606  * @return
491*d30ea906Sjfb8856606  *   The number of operations actually enqueued (this is the number of processed
492*d30ea906Sjfb8856606  *   entries in the @p ops array).
493*d30ea906Sjfb8856606  */
494*d30ea906Sjfb8856606 static inline uint16_t
495*d30ea906Sjfb8856606 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
496*d30ea906Sjfb8856606 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
497*d30ea906Sjfb8856606 {
498*d30ea906Sjfb8856606 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
499*d30ea906Sjfb8856606 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
500*d30ea906Sjfb8856606 	return dev->enqueue_dec_ops(q_data, ops, num_ops);
501*d30ea906Sjfb8856606 }
502*d30ea906Sjfb8856606 
503*d30ea906Sjfb8856606 /**
504*d30ea906Sjfb8856606  * Dequeue a burst of processed encode operations from a queue of the device.
505*d30ea906Sjfb8856606  * This functions returns only the current contents of the queue, and does not
506*d30ea906Sjfb8856606  * block until @ num_ops is available.
507*d30ea906Sjfb8856606  * This function does not provide any error notification to avoid the
508*d30ea906Sjfb8856606  * corresponding overhead.
509*d30ea906Sjfb8856606  *
510*d30ea906Sjfb8856606  * @param dev_id
511*d30ea906Sjfb8856606  *   The identifier of the device.
512*d30ea906Sjfb8856606  * @param queue_id
513*d30ea906Sjfb8856606  *   The index of the queue.
514*d30ea906Sjfb8856606  * @param ops
515*d30ea906Sjfb8856606  *   Pointer array where operations will be dequeued to. Must have at least
516*d30ea906Sjfb8856606  *   @p num_ops entries
517*d30ea906Sjfb8856606  * @param num_ops
518*d30ea906Sjfb8856606  *   The maximum number of operations to dequeue.
519*d30ea906Sjfb8856606  *
520*d30ea906Sjfb8856606  * @return
521*d30ea906Sjfb8856606  *   The number of operations actually dequeued (this is the number of entries
522*d30ea906Sjfb8856606  *   copied into the @p ops array).
523*d30ea906Sjfb8856606  */
524*d30ea906Sjfb8856606 static inline uint16_t
525*d30ea906Sjfb8856606 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
526*d30ea906Sjfb8856606 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
527*d30ea906Sjfb8856606 {
528*d30ea906Sjfb8856606 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
529*d30ea906Sjfb8856606 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
530*d30ea906Sjfb8856606 	return dev->dequeue_enc_ops(q_data, ops, num_ops);
531*d30ea906Sjfb8856606 }
532*d30ea906Sjfb8856606 
533*d30ea906Sjfb8856606 /**
534*d30ea906Sjfb8856606  * Dequeue a burst of processed decode operations from a queue of the device.
535*d30ea906Sjfb8856606  * This functions returns only the current contents of the queue, and does not
536*d30ea906Sjfb8856606  * block until @ num_ops is available.
537*d30ea906Sjfb8856606  * This function does not provide any error notification to avoid the
538*d30ea906Sjfb8856606  * corresponding overhead.
539*d30ea906Sjfb8856606  *
540*d30ea906Sjfb8856606  * @param dev_id
541*d30ea906Sjfb8856606  *   The identifier of the device.
542*d30ea906Sjfb8856606  * @param queue_id
543*d30ea906Sjfb8856606  *   The index of the queue.
544*d30ea906Sjfb8856606  * @param ops
545*d30ea906Sjfb8856606  *   Pointer array where operations will be dequeued to. Must have at least
546*d30ea906Sjfb8856606  *   @p num_ops entries
547*d30ea906Sjfb8856606  * @param num_ops
548*d30ea906Sjfb8856606  *   The maximum number of operations to dequeue.
549*d30ea906Sjfb8856606  *
550*d30ea906Sjfb8856606  * @return
551*d30ea906Sjfb8856606  *   The number of operations actually dequeued (this is the number of entries
552*d30ea906Sjfb8856606  *   copied into the @p ops array).
553*d30ea906Sjfb8856606  */
554*d30ea906Sjfb8856606 
555*d30ea906Sjfb8856606 static inline uint16_t
556*d30ea906Sjfb8856606 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
557*d30ea906Sjfb8856606 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
558*d30ea906Sjfb8856606 {
559*d30ea906Sjfb8856606 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
560*d30ea906Sjfb8856606 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
561*d30ea906Sjfb8856606 	return dev->dequeue_dec_ops(q_data, ops, num_ops);
562*d30ea906Sjfb8856606 }
563*d30ea906Sjfb8856606 
/** Definitions of device event types */
enum rte_bbdev_event_type {
	RTE_BBDEV_EVENT_UNKNOWN,  /**< unknown event type */
	RTE_BBDEV_EVENT_ERROR,  /**< error interrupt event */
	RTE_BBDEV_EVENT_DEQUEUE,  /**< dequeue event */
	RTE_BBDEV_EVENT_MAX  /**< max value of this enum (not a real event) */
};
571*d30ea906Sjfb8856606 
/**
 * Typedef for application callback function registered by application
 * software for notification of device events
 *
 * @param dev_id
 *   Device identifier
 * @param event
 *   The device event being notified (the event type the callback was
 *   registered for).
 * @param cb_arg
 *   User specified parameter to be passed to user's callback function.
 * @param ret_param
 *   To pass data back to user application.
 */
typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param);
588*d30ea906Sjfb8856606 
/**
 * Register a callback function for a specific device id. Multiple callbacks
 * can be added and will be called in the order they are added when an event
 * is triggered. Callbacks are called in a separate thread created by the
 * DPDK EAL.
 *
 * @param dev_id
 *   Device id.
 * @param event
 *   The event that the callback will be registered for.
 * @param cb_fn
 *   User supplied callback function to be called.
 * @param cb_arg
 *   Pointer to a parameter that will be passed to the callback whenever it
 *   is invoked.
 *
 * @return
 *   Zero on success, negative value on failure.
 */
int __rte_experimental
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);
609*d30ea906Sjfb8856606 
/**
 * Unregister a callback function for a specific device id.
 *
 * @param dev_id
 *   The device identifier.
 * @param event
 *   The event that the callback will be unregistered for.
 * @param cb_fn
 *   User supplied callback function to be unregistered.
 * @param cb_arg
 *   Pointer to the parameter supplied when registering the callback.
 *   The special value (void *)-1 removes all callbacks registered with the
 *   specified function address, regardless of the argument they were
 *   registered with.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 *   - EAGAIN if the provided callback pointer does not exist
 */
int __rte_experimental
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);
632*d30ea906Sjfb8856606 
/**
 * Enable a one-shot interrupt on the next operation enqueued to a particular
 * queue. The interrupt will be triggered when the operation is ready to be
 * dequeued. To handle the interrupt, an epoll file descriptor must first be
 * registered using rte_bbdev_queue_intr_ctl(), and then an application
 * thread/lcore can wait for the interrupt using rte_epoll_wait().
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
int __rte_experimental
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
651*d30ea906Sjfb8856606 
/**
 * Disable the one-shot interrupt on the next operation enqueued to a
 * particular queue (if it has previously been enabled with
 * rte_bbdev_queue_intr_enable()).
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
int __rte_experimental
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
667*d30ea906Sjfb8856606 
/**
 * Control interface for per-queue interrupts: associate an epoll file
 * descriptor with, or dissociate it from, a queue's interrupt source.
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 * @param epfd
 *   Epoll file descriptor that will be associated with the interrupt source.
 *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
 *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
 *   be used when calling rte_epoll_wait()).
 * @param op
 *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
 *   RTE_INTR_EVENT_DEL.
 * @param data
 *   User context that will be returned in the epdata.data field of the
 *   rte_epoll_event structure filled in by rte_epoll_wait().
 *
 * @return
 *   - 0 on success
 *   - ENOTSUP if interrupts are not supported by the identified device
 *   - negative value on failure - as returned from PMD driver
 */
int __rte_experimental
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data);
695*d30ea906Sjfb8856606 
696*d30ea906Sjfb8856606 #ifdef __cplusplus
697*d30ea906Sjfb8856606 }
698*d30ea906Sjfb8856606 #endif
699*d30ea906Sjfb8856606 
700*d30ea906Sjfb8856606 #endif /* _RTE_BBDEV_H_ */
701