xref: /f-stack/dpdk/lib/librte_bbdev/rte_bbdev.h (revision 0c6bd470)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_BBDEV_H_
6 #define _RTE_BBDEV_H_
7 
8 /**
9  * @file rte_bbdev.h
10  *
11  * Wireless base band device abstraction APIs.
12  *
13  * @warning
14  * @b EXPERIMENTAL:
15  * All functions in this file may be changed or removed without prior notice.
16  *
17  * This API allows an application to discover, configure and use a device to
18  * process operations. An asynchronous API (enqueue, followed by later dequeue)
19  * is used for processing operations.
20  *
21  * The functions in this API are not thread-safe when called on the same
22  * target object (a device, or a queue on a device), with the exception that
23  * one thread can enqueue operations to a queue while another thread dequeues
24  * from the same queue.
25  */
26 
27 #ifdef __cplusplus
28 extern "C" {
29 #endif
30 
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <string.h>
34 
35 #include <rte_compat.h>
36 #include <rte_atomic.h>
37 #include <rte_bus.h>
38 #include <rte_cpuflags.h>
39 #include <rte_memory.h>
40 
41 #include "rte_bbdev_op.h"
42 
43 #ifndef RTE_BBDEV_MAX_DEVS
44 #define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
45 #endif
46 
47 /** Flags indicate current state of BBDEV device */
48 enum rte_bbdev_state {
49 	RTE_BBDEV_UNUSED, /**< Device slot is unused / not set up */
50 	RTE_BBDEV_INITIALIZED /**< Device has been initialised */
51 };
52 
53 /**
54  * Get the total number of devices that have been successfully initialised.
55  *
56  * @return
57  *   The total number of usable devices.
58  */
59 __rte_experimental
60 uint16_t
61 rte_bbdev_count(void);
62 
63 /**
64  * Check if a device is valid.
65  *
66  * @param dev_id
67  *   The identifier of the device.
68  *
69  * @return
70  *   true if device ID is valid and device is attached, false otherwise.
71  */
72 __rte_experimental
73 bool
74 rte_bbdev_is_valid(uint16_t dev_id);
75 
76 /**
77  * Get the next enabled device.
78  *
79  * @param dev_id
80  *   The current device
81  *
82  * @return
83  *   - The next device, or
84  *   - RTE_BBDEV_MAX_DEVS if none found
85  */
86 __rte_experimental
87 uint16_t
88 rte_bbdev_find_next(uint16_t dev_id);
89 
90 /** Iterate through all enabled devices */
91 #define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
92 		i < RTE_BBDEV_MAX_DEVS; \
93 		i = rte_bbdev_find_next(i))
94 
95 /**
96  * Set up device queues.
97  * This function must be called on a device before setting up the queues and
98  * starting the device. It can also be called when a device is in the stopped
99  * state. If any device queues have been configured their configuration will be
100  * cleared by a call to this function.
101  *
102  * @param dev_id
103  *   The identifier of the device.
104  * @param num_queues
105  *   Number of queues to configure on device.
106  * @param socket_id
107  *   ID of a socket which will be used to allocate memory.
108  *
109  * @return
110  *   - 0 on success
111  *   - -ENODEV if dev_id is invalid or the device is corrupted
112  *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
113  *   - -EBUSY if the identified device has already started
114  *   - -ENOMEM if unable to allocate memory
115  */
116 __rte_experimental
117 int
118 rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
119 
120 /**
121  * Enable interrupts.
122  * This function may be called before starting the device to enable the
123  * interrupts if they are available.
124  *
125  * @param dev_id
126  *   The identifier of the device.
127  *
128  * @return
129  *   - 0 on success
130  *   - -ENODEV if dev_id is invalid or the device is corrupted
131  *   - -EBUSY if the identified device has already started
132  *   - -ENOTSUP if the interrupts are not supported by the device
133  */
134 __rte_experimental
135 int
136 rte_bbdev_intr_enable(uint16_t dev_id);
137 
138 /** Device queue configuration structure */
139 struct rte_bbdev_queue_conf {
140 	int socket;  /**< NUMA socket used for memory allocation */
141 	uint32_t queue_size;  /**< Size of queue (must be a power of 2) */
142 	uint8_t priority;  /**< Queue priority (bounds advertised in rte_bbdev_driver_info) */
143 	bool deferred_start; /**< Do not start queue when device is started. */
144 	enum rte_bbdev_op_type op_type; /**< Operation type processed by this queue */
145 };
146 
147 /**
148  * Configure a queue on a device.
149  * This function can be called after device configuration, and before starting.
150  * It can also be called when the device or the queue is in the stopped state.
151  *
152  * @param dev_id
153  *   The identifier of the device.
154  * @param queue_id
155  *   The index of the queue.
156  * @param conf
157  *   The queue configuration. If NULL, a default configuration will be used.
158  *
159  * @return
160  *   - 0 on success
161  *   - EINVAL if the identified queue size or priority are invalid
162  *   - EBUSY if the identified queue or its device have already started
163  */
164 __rte_experimental
165 int
166 rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
167 		const struct rte_bbdev_queue_conf *conf);
168 
169 /**
170  * Start a device.
171  * This is the last step needed before enqueueing operations is possible.
172  *
173  * @param dev_id
174  *   The identifier of the device.
175  *
176  * @return
177  *   - 0 on success
178  *   - negative value on failure - as returned from PMD driver
179  */
180 __rte_experimental
181 int
182 rte_bbdev_start(uint16_t dev_id);
183 
184 /**
185  * Stop a device.
186  * The device can be reconfigured, and restarted after being stopped.
187  *
188  * @param dev_id
189  *   The identifier of the device.
190  *
191  * @return
192  *   - 0 on success
193  */
194 __rte_experimental
195 int
196 rte_bbdev_stop(uint16_t dev_id);
197 
198 /**
199  * Close a device.
200  * The device cannot be restarted without reconfiguration!
201  *
202  * @param dev_id
203  *   The identifier of the device.
204  *
205  * @return
206  *   - 0 on success
207  */
208 __rte_experimental
209 int
210 rte_bbdev_close(uint16_t dev_id);
211 
212 /**
213  * Start a specified queue on a device.
214  * This is only needed if the queue has been stopped, or if the deferred_start
215  * flag has been set when configuring the queue.
216  *
217  * @param dev_id
218  *   The identifier of the device.
219  * @param queue_id
220  *   The index of the queue.
221  *
222  * @return
223  *   - 0 on success
224  *   - negative value on failure - as returned from PMD driver
225  */
226 __rte_experimental
227 int
228 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
229 
230 /**
231  * Stop a specified queue on a device, to allow reconfiguration.
232  *
233  * @param dev_id
234  *   The identifier of the device.
235  * @param queue_id
236  *   The index of the queue.
237  *
238  * @return
239  *   - 0 on success
240  *   - negative value on failure - as returned from PMD driver
241  */
242 __rte_experimental
243 int
244 rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
245 
246 /** Device statistics. Kept per queue (see rte_bbdev_queue_data) and retrievable per device via rte_bbdev_stats_get(). */
247 struct rte_bbdev_stats {
248 	uint64_t enqueued_count;  /**< Count of all operations enqueued */
249 	uint64_t dequeued_count;  /**< Count of all operations dequeued */
250 	/** Total error count on operations enqueued */
251 	uint64_t enqueue_err_count;
252 	/** Total error count on operations dequeued */
253 	uint64_t dequeue_err_count;
254 	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
255 	 *  the enqueue request to its internal queues.
256 	 *  - For a HW device this is the cycles consumed in MMIO write
257 	 *  - For a SW (vdev) device, this is the processing time of the
258 	 *     bbdev operation
259 	 */
260 	uint64_t acc_offload_cycles;
261 };
262 
263 /**
264  * Retrieve the general I/O statistics of a device.
265  *
266  * @param dev_id
267  *   The identifier of the device.
268  * @param stats
269  *   Pointer to structure to where statistics will be copied. On error, this
270  *   location may or may not have been modified.
271  *
272  * @return
273  *   - 0 on success
274  *   - EINVAL if invalid parameter pointer is provided
275  */
276 __rte_experimental
277 int
278 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
279 
280 /**
281  * Reset the statistics of a device.
282  *
283  * @param dev_id
284  *   The identifier of the device.
285  * @return
286  *   - 0 on success
287  */
288 __rte_experimental
289 int
290 rte_bbdev_stats_reset(uint16_t dev_id);
291 
292 /** Device information supplied by the device's driver */
293 struct rte_bbdev_driver_info {
294 	/** Driver name */
295 	const char *driver_name;
296 
297 	/** Maximum number of queues supported by the device */
298 	unsigned int max_num_queues;
299 	/** Queue size limit (queue size must also be power of 2) */
300 	uint32_t queue_size_lim;
301 	/** Set if device off-loads operations to hardware */
302 	bool hardware_accelerated;
303 	/** Max value supported by queue priority for DL (downlink) */
304 	uint8_t max_dl_queue_priority;
305 	/** Max value supported by queue priority for UL (uplink) */
306 	uint8_t max_ul_queue_priority;
307 	/** Set if device supports per-queue interrupts */
308 	bool queue_intr_supported;
309 	/** Minimum alignment of buffers, in bytes */
310 	uint16_t min_alignment;
311 	/** Default queue configuration used if none is supplied */
312 	struct rte_bbdev_queue_conf default_queue_conf;
313 	/** Device operation capabilities; list terminated by RTE_BBDEV_OP_NONE */
314 	const struct rte_bbdev_op_cap *capabilities;
315 	/** Device cpu_flag requirements */
316 	const enum rte_cpu_flag_t *cpu_flag_reqs;
317 };
318 
319 /** Macro used at end of bbdev PMD list */
320 #define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
321 	{ RTE_BBDEV_OP_NONE }
322 
323 /**
324  * Device information structure used by an application to discover a device's
325  * capabilities and current configuration
326  */
327 struct rte_bbdev_info {
328 	int socket_id;  /**< NUMA socket that device is on */
329 	const char *dev_name;  /**< Unique device name */
330 	const struct rte_device *device; /**< Backing device information */
331 	uint16_t num_queues;  /**< Number of queues currently configured */
332 	bool started;  /**< Set if device is currently started */
333 	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
334 };
335 
336 /**
337  * Retrieve information about a device.
338  *
339  * @param dev_id
340  *   The identifier of the device.
341  * @param dev_info
342  *   Pointer to structure to where information will be copied. On error, this
343  *   location may or may not have been modified.
344  *
345  * @return
346  *   - 0 on success
347  *   - EINVAL if invalid parameter pointer is provided
348  */
349 __rte_experimental
350 int
351 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
352 
353 /** Queue information */
354 struct rte_bbdev_queue_info {
355 	/** Current queue configuration */
356 	struct rte_bbdev_queue_conf conf;
357 	/** Set if queue is currently started */
358 	bool started;
359 };
360 
361 /**
362  * Retrieve information about a specific queue on a device.
363  *
364  * @param dev_id
365  *   The identifier of the device.
366  * @param queue_id
367  *   The index of the queue.
368  * @param queue_info
369  *   Pointer to structure to where information will be copied. On error, this
370  *   location may or may not have been modified.
371  *
372  * @return
373  *   - 0 on success
374  *   - EINVAL if invalid parameter pointer is provided
375  */
376 __rte_experimental
377 int
378 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
379 		struct rte_bbdev_queue_info *queue_info);
380 
381 /** @internal The data structure associated with each queue of a device. */
382 struct rte_bbdev_queue_data {
383 	void *queue_private;  /**< Driver-specific per-queue data */
384 	struct rte_bbdev_queue_conf conf;  /**< Current queue configuration */
385 	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
386 	bool started;  /**< Set if queue is currently started */
387 };
388 
389 /** @internal Enqueue encode operations on a device queue; returns the number actually enqueued. */
390 typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
391 		struct rte_bbdev_queue_data *q_data,
392 		struct rte_bbdev_enc_op **ops,
393 		uint16_t num);
394 
395 /** @internal Enqueue decode operations on a device queue; returns the number actually enqueued. */
396 typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
397 		struct rte_bbdev_queue_data *q_data,
398 		struct rte_bbdev_dec_op **ops,
399 		uint16_t num);
400 
401 /** @internal Dequeue encode operations from a device queue; returns the number actually dequeued. */
402 typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
403 		struct rte_bbdev_queue_data *q_data,
404 		struct rte_bbdev_enc_op **ops, uint16_t num);
405 
406 /** @internal Dequeue decode operations from a device queue; returns the number actually dequeued. */
407 typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
408 		struct rte_bbdev_queue_data *q_data,
409 		struct rte_bbdev_dec_op **ops, uint16_t num);
410 
411 #define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */
412 
413 /**
414  * @internal The data associated with a device, with no function pointers.
415  * This structure is safe to place in shared memory to be common among
416  * different processes in a multi-process configuration. Drivers can access
417  * these fields, but should never write to them!
418  */
419 struct rte_bbdev_data {
420 	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
421 	void *dev_private;  /**< Driver-specific private data */
422 	uint16_t num_queues;  /**< Number of currently configured queues */
423 	struct rte_bbdev_queue_data *queues;  /**< Per-queue data structures */
424 	uint16_t dev_id;  /**< Device ID */
425 	int socket_id;  /**< NUMA socket that device is on */
426 	bool started;  /**< Device run-time state (set while started) */
427 	/** Counter of processes using the device */
428 	rte_atomic16_t process_cnt;
429 };
430 
431 /* Forward declarations */
432 struct rte_bbdev_ops;
433 struct rte_bbdev_callback;
434 struct rte_intr_handle;
435 
436 /** Structure to keep track of registered callbacks */
437 TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
438 
439 /**
440  * @internal The data structure associated with a device. Drivers can access
441  * these fields, but should only write to the *_ops fields.
442  */
443 struct __rte_cache_aligned rte_bbdev {
444 	/** Enqueue encode function */
445 	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
446 	/** Enqueue decode function */
447 	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
448 	/** Dequeue encode function */
449 	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
450 	/** Dequeue decode function */
451 	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
452 	/** Enqueue LDPC encode function */
453 	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
454 	/** Enqueue LDPC decode function */
455 	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
456 	/** Dequeue LDPC encode function */
457 	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
458 	/** Dequeue LDPC decode function */
459 	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
460 	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
461 	struct rte_bbdev_data *data;  /**< Pointer to device data */
462 	enum rte_bbdev_state state;  /**< If device is currently used or not */
463 	struct rte_device *device; /**< Backing device */
464 	/** User application callback for interrupts if present */
465 	struct rte_bbdev_cb_list list_cbs;
466 	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
467 };
468 
469 /** @internal array of all devices */
470 extern struct rte_bbdev rte_bbdev_devices[];
471 
472 /**
473  * Enqueue a burst of processed encode operations to a queue of the device.
474  * This function only enqueues as many operations as currently possible and
475  * does not block until @p num_ops entries in the queue are available.
476  * This function does not provide any error notification to avoid the
477  * corresponding overhead.
478  *
479  * @param dev_id
480  *   The identifier of the device.
481  * @param queue_id
482  *   The index of the queue.
483  * @param ops
484  *   Pointer array containing operations to be enqueued. Must have at least
485  *   @p num_ops entries.
486  * @param num_ops
487  *   The maximum number of operations to enqueue.
488  *
489  * @return
490  *   The number of operations actually enqueued (this is the number of processed
491  *   entries in the @p ops array).
492  */
493 __rte_experimental
494 static inline uint16_t
495 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
496 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
497 {
498 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
499 	/* Hand the burst straight to the device's encode enqueue function. */
500 	return dev->enqueue_enc_ops(&dev->data->queues[queue_id], ops, num_ops);
501 }
502 
503 /**
504  * Enqueue a burst of processed decode operations to a queue of the device.
505  * This function only enqueues as many operations as currently possible and
506  * does not block until @p num_ops entries in the queue are available.
507  * This function does not provide any error notification to avoid the
508  * corresponding overhead.
509  *
510  * @param dev_id
511  *   The identifier of the device.
512  * @param queue_id
513  *   The index of the queue.
514  * @param ops
515  *   Pointer array containing operations to be enqueued. Must have at least
516  *   @p num_ops entries.
517  * @param num_ops
518  *   The maximum number of operations to enqueue.
519  *
520  * @return
521  *   The number of operations actually enqueued (this is the number of processed
522  *   entries in the @p ops array).
523  */
524 __rte_experimental
525 static inline uint16_t
526 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
527 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
528 {
529 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
530 	/* Hand the burst straight to the device's decode enqueue function. */
531 	return dev->enqueue_dec_ops(&dev->data->queues[queue_id], ops, num_ops);
532 }
533 
534 /**
535  * Enqueue a burst of processed encode operations to a queue of the device.
536  * This function only enqueues as many operations as currently possible and
537  * does not block until @p num_ops entries in the queue are available.
538  * This function does not provide any error notification to avoid the
539  * corresponding overhead.
540  *
541  * @param dev_id
542  *   The identifier of the device.
543  * @param queue_id
544  *   The index of the queue.
545  * @param ops
546  *   Pointer array containing operations to be enqueued. Must have at least
547  *   @p num_ops entries.
548  * @param num_ops
549  *   The maximum number of operations to enqueue.
550  *
551  * @return
552  *   The number of operations actually enqueued (this is the number of processed
553  *   entries in the @p ops array).
554  */
555 __rte_experimental
556 static inline uint16_t
557 rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
558 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
559 {
560 	struct rte_bbdev *bbdev = &rte_bbdev_devices[dev_id];
561 	struct rte_bbdev_queue_data *q = &bbdev->data->queues[queue_id];
562 	return bbdev->enqueue_ldpc_enc_ops(q, ops, num_ops);
563 }
564 
565 /**
566  * Enqueue a burst of processed decode operations to a queue of the device.
567  * This function only enqueues as many operations as currently possible and
568  * does not block until @p num_ops entries in the queue are available.
569  * This function does not provide any error notification to avoid the
570  * corresponding overhead.
571  *
572  * @param dev_id
573  *   The identifier of the device.
574  * @param queue_id
575  *   The index of the queue.
576  * @param ops
577  *   Pointer array containing operations to be enqueued. Must have at least
578  *   @p num_ops entries.
579  * @param num_ops
580  *   The maximum number of operations to enqueue.
581  *
582  * @return
583  *   The number of operations actually enqueued (this is the number of processed
584  *   entries in the @p ops array).
585  */
586 __rte_experimental
587 static inline uint16_t
588 rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
589 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
590 {
591 	struct rte_bbdev *bbdev = &rte_bbdev_devices[dev_id];
592 	struct rte_bbdev_queue_data *q = &bbdev->data->queues[queue_id];
593 	return bbdev->enqueue_ldpc_dec_ops(q, ops, num_ops);
594 }
595 
596 
597 /**
598  * Dequeue a burst of processed encode operations from a queue of the device.
599  * This function returns only the current contents of the queue, and does not
600  * block until @p num_ops entries are available.
601  * This function does not provide any error notification to avoid the
602  * corresponding overhead.
603  *
604  * @param dev_id
605  *   The identifier of the device.
606  * @param queue_id
607  *   The index of the queue.
608  * @param ops
609  *   Pointer array where operations will be dequeued to. Must have at least
610  *   @p num_ops entries
611  * @param num_ops
612  *   The maximum number of operations to dequeue.
613  *
614  * @return
615  *   The number of operations actually dequeued (this is the number of entries
616  *   copied into the @p ops array).
617  */
618 __rte_experimental
619 static inline uint16_t
620 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
621 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
622 {
623 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
624 	/* Pull completed operations straight from the device's dequeue function. */
625 	return dev->dequeue_enc_ops(&dev->data->queues[queue_id], ops, num_ops);
626 }
627 
628 /**
629  * Dequeue a burst of processed decode operations from a queue of the device.
630  * This function returns only the current contents of the queue, and does not
631  * block until @p num_ops entries are available.
632  * This function does not provide any error notification to avoid the
633  * corresponding overhead.
634  *
635  * @param dev_id
636  *   The identifier of the device.
637  * @param queue_id
638  *   The index of the queue.
639  * @param ops
640  *   Pointer array where operations will be dequeued to. Must have at least
641  *   @p num_ops entries
642  * @param num_ops
643  *   The maximum number of operations to dequeue.
644  *
645  * @return
646  *   The number of operations actually dequeued (this is the number of entries
647  *   copied into the @p ops array).
648  */
649 
650 __rte_experimental
651 static inline uint16_t
652 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
653 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
654 {
655 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
656 	/* Pull completed operations straight from the device's dequeue function. */
657 	return dev->dequeue_dec_ops(&dev->data->queues[queue_id], ops, num_ops);
658 }
659 
660 
661 /**
662  * Dequeue a burst of processed encode operations from a queue of the device.
663  * This function returns only the current contents of the queue, and does not
664  * block until @p num_ops entries are available.
665  * This function does not provide any error notification to avoid the
666  * corresponding overhead.
667  *
668  * @param dev_id
669  *   The identifier of the device.
670  * @param queue_id
671  *   The index of the queue.
672  * @param ops
673  *   Pointer array where operations will be dequeued to. Must have at least
674  *   @p num_ops entries
675  * @param num_ops
676  *   The maximum number of operations to dequeue.
677  *
678  * @return
679  *   The number of operations actually dequeued (this is the number of entries
680  *   copied into the @p ops array).
681  */
682 __rte_experimental
683 static inline uint16_t
684 rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
685 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
686 {
687 	struct rte_bbdev *bbdev = &rte_bbdev_devices[dev_id];
688 	struct rte_bbdev_queue_data *q = &bbdev->data->queues[queue_id];
689 	return bbdev->dequeue_ldpc_enc_ops(q, ops, num_ops);
690 }
691 
692 /**
693  * Dequeue a burst of processed decode operations from a queue of the device.
694  * This function returns only the current contents of the queue, and does not
695  * block until @p num_ops entries are available.
696  * This function does not provide any error notification to avoid the
697  * corresponding overhead.
698  *
699  * @param dev_id
700  *   The identifier of the device.
701  * @param queue_id
702  *   The index of the queue.
703  * @param ops
704  *   Pointer array where operations will be dequeued to. Must have at least
705  *   @p num_ops entries
706  * @param num_ops
707  *   The maximum number of operations to dequeue.
708  *
709  * @return
710  *   The number of operations actually dequeued (this is the number of entries
711  *   copied into the @p ops array).
712  */
713 __rte_experimental
714 static inline uint16_t
715 rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
716 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
717 {
718 	struct rte_bbdev *bbdev = &rte_bbdev_devices[dev_id];
719 	struct rte_bbdev_queue_data *q = &bbdev->data->queues[queue_id];
720 	return bbdev->dequeue_ldpc_dec_ops(q, ops, num_ops);
721 }
722 
723 /** Definitions of device event types */
724 enum rte_bbdev_event_type {
725 	RTE_BBDEV_EVENT_UNKNOWN,  /**< Unknown event type */
726 	RTE_BBDEV_EVENT_ERROR,  /**< Error interrupt event */
727 	RTE_BBDEV_EVENT_DEQUEUE,  /**< Dequeue event */
728 	RTE_BBDEV_EVENT_MAX  /**< Max value of this enum; keep as last entry */
729 };
730 
731 /**
732  * Typedef for application callback function registered by application
733  * software for notification of device events
734  *
735  * @param dev_id
736  *   Device identifier
737  * @param event
738  *   Device event to register for notification of.
739  * @param cb_arg
740  *   User specified parameter to be passed to user's callback function.
741  * @param ret_param
742  *   To pass data back to user application.
743  */
744 typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
745 		enum rte_bbdev_event_type event, void *cb_arg,
746 		void *ret_param);
747 
748 /**
749  * Register a callback function for specific device id. Multiple callbacks can
750  * be added and will be called in the order they are added when an event is
751  * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
752  *
753  * @param dev_id
754  *   Device id.
755  * @param event
756  *   The event that the callback will be registered for.
757  * @param cb_fn
758  *   User supplied callback function to be called.
759  * @param cb_arg
760  *   Pointer to parameter that will be passed to the callback.
761  *
762  * @return
763  *   Zero on success, negative value on failure.
764  */
765 __rte_experimental
766 int
767 rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
768 		rte_bbdev_cb_fn cb_fn, void *cb_arg);
769 
770 /**
771  * Unregister a callback function for specific device id.
772  *
773  * @param dev_id
774  *   The device identifier.
775  * @param event
776  *   The event that the callback will be unregistered for.
777  * @param cb_fn
778  *   User supplied callback function to be unregistered.
779  * @param cb_arg
780  *   Pointer to the parameter supplied when registering the callback.
781  *   (void *)-1 means to remove all registered callbacks with the specified
782  *   function address.
783  *
784  * @return
785  *   - 0 on success
786  *   - EINVAL if invalid parameter pointer is provided
787  *   - EAGAIN if the provided callback pointer does not exist
788  */
789 __rte_experimental
790 int
791 rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
792 		rte_bbdev_cb_fn cb_fn, void *cb_arg);
793 
794 /**
795  * Enable a one-shot interrupt on the next operation enqueued to a particular
796  * queue. The interrupt will be triggered when the operation is ready to be
797  * dequeued. To handle the interrupt, an epoll file descriptor must be
798  * registered using rte_bbdev_queue_intr_ctl(), and then an application
799  * thread/lcore can wait for the interrupt using rte_epoll_wait().
800  *
801  * @param dev_id
802  *   The device identifier.
803  * @param queue_id
804  *   The index of the queue.
805  *
806  * @return
807  *   - 0 on success
808  *   - negative value on failure - as returned from PMD driver
809  */
810 __rte_experimental
811 int
812 rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
813 
814 /**
815  * Disable a one-shot interrupt on the next operation enqueued to a particular
816  * queue (if it has been enabled).
817  *
818  * @param dev_id
819  *   The device identifier.
820  * @param queue_id
821  *   The index of the queue.
822  *
823  * @return
824  *   - 0 on success
825  *   - negative value on failure - as returned from PMD driver
826  */
827 __rte_experimental
828 int
829 rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
830 
831 /**
832  * Control interface for per-queue interrupts.
833  *
834  * @param dev_id
835  *   The device identifier.
836  * @param queue_id
837  *   The index of the queue.
838  * @param epfd
839  *   Epoll file descriptor that will be associated with the interrupt source.
840  *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
841  *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
842  *   be used when calling rte_epoll_wait()).
843  * @param op
844  *   The operation be performed for the vector.RTE_INTR_EVENT_ADD or
845  *   RTE_INTR_EVENT_DEL.
846  * @param data
847  *   User context, that will be returned in the epdata.data field of the
848  *   rte_epoll_event structure filled in by rte_epoll_wait().
849  *
850  * @return
851  *   - 0 on success
852  *   - ENOTSUP if interrupts are not supported by the identified device
853  *   - negative value on failure - as returned from PMD driver
854  */
855 __rte_experimental
856 int
857 rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
858 		void *data);
859 
860 #ifdef __cplusplus
861 }
862 #endif
863 
864 #endif /* _RTE_BBDEV_H_ */
865