1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
3 */
4
5 #ifndef _RTE_BBDEV_H_
6 #define _RTE_BBDEV_H_
7
8 /**
9 * @file rte_bbdev.h
10 *
11 * Wireless base band device abstraction APIs.
12 *
13 * @warning
14 * @b EXPERIMENTAL:
15 * All functions in this file may be changed or removed without prior notice.
16 *
17 * This API allows an application to discover, configure and use a device to
18 * process operations. An asynchronous API (enqueue, followed by later dequeue)
19 * is used for processing operations.
20 *
21 * The functions in this API are not thread-safe when called on the same
22 * target object (a device, or a queue on a device), with the exception that
23 * one thread can enqueue operations to a queue while another thread dequeues
24 * from the same queue.
25 */
26
27 #ifdef __cplusplus
28 extern "C" {
29 #endif
30
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <string.h>
34
35 #include <rte_compat.h>
36 #include <rte_bus.h>
37 #include <rte_cpuflags.h>
38 #include <rte_memory.h>
39
40 #include "rte_bbdev_op.h"
41
42 #ifndef RTE_BBDEV_MAX_DEVS
43 #define RTE_BBDEV_MAX_DEVS 128 /**< Max number of devices */
44 #endif
45
/** Flags indicate current state of BBDEV device */
enum rte_bbdev_state {
	RTE_BBDEV_UNUSED = 0,	/**< Device slot is free / not attached */
	RTE_BBDEV_INITIALIZED	/**< Device has been set up by a driver */
};
51
/**
 * Get the total number of devices that have been successfully initialised.
 *
 * @return
 *   The total number of usable devices.
 */
__rte_experimental
uint16_t
rte_bbdev_count(void);

/**
 * Check if a device is valid.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   true if device ID is valid and device is attached, false otherwise.
 */
__rte_experimental
bool
rte_bbdev_is_valid(uint16_t dev_id);

/**
 * Get the next enabled device.
 *
 * @param dev_id
 *   The current device.
 *
 * @return
 *   - The next device, or
 *   - RTE_BBDEV_MAX_DEVS if none found
 */
__rte_experimental
uint16_t
rte_bbdev_find_next(uint16_t dev_id);

/** Iterate through all enabled devices.
 * The initial argument -1 wraps to UINT16_MAX when converted to uint16_t;
 * presumably rte_bbdev_find_next() treats it as "before the first device"
 * so iteration starts at ID 0 — confirm against the implementation.
 */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))
93
/**
 * Set up device queues.
 * This function must be called on a device before configuring individual
 * queues (rte_bbdev_queue_configure()) and before starting the device. It
 * can also be called when a device is in the stopped state. If any device
 * queues have been configured, their configuration will be cleared by a
 * call to this function.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param num_queues
 *   Number of queues to configure on device.
 * @param socket_id
 *   ID of a socket which will be used to allocate memory.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
 *   - -EBUSY if the identified device has already started
 *   - -ENOMEM if unable to allocate memory
 */
__rte_experimental
int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);

/**
 * Enable interrupts.
 * This function may be called before starting the device to enable the
 * interrupts if they are available.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EBUSY if the identified device has already started
 *   - -ENOTSUP if the interrupts are not supported by the device
 */
__rte_experimental
int
rte_bbdev_intr_enable(uint16_t dev_id);
136
/** Device queue configuration structure */
struct rte_bbdev_queue_conf {
	int socket;          /**< NUMA socket used for memory allocation */
	uint32_t queue_size; /**< Size of the queue (number of descriptors) */
	uint8_t priority;    /**< Queue priority (driver-specific range) */
	bool deferred_start; /**< Do not start queue when device is started. */
	enum rte_bbdev_op_type op_type; /**< Operation type served by this queue */
};
145
/**
 * Configure a queue on a device.
 * This function can be called after device configuration
 * (rte_bbdev_setup_queues()), and before starting. It can also be called
 * when the device or the queue is in the stopped state.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param conf
 *   The queue configuration. If NULL, a default configuration will be used.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if the identified queue size or priority are invalid
 *   - EBUSY if the identified queue or its device have already started
 */
__rte_experimental
int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf);
167
/**
 * Start a device.
 * This is the last step needed before enqueueing operations is possible.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
__rte_experimental
int
rte_bbdev_start(uint16_t dev_id);

/**
 * Stop a device.
 * The device can be reconfigured, and restarted after being stopped.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
__rte_experimental
int
rte_bbdev_stop(uint16_t dev_id);

/**
 * Close a device.
 * The device cannot be restarted without reconfiguration!
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
__rte_experimental
int
rte_bbdev_close(uint16_t dev_id);

/**
 * Start a specified queue on a device.
 * This is only needed if the queue has been stopped, or if the deferred_start
 * flag has been set when configuring the queue.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
__rte_experimental
int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);

/**
 * Stop a specified queue on a device, to allow re-configuration.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
__rte_experimental
int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
244
/** Device statistics.
 * NOTE(review): these appear to be device-wide aggregates of the per-queue
 * counters kept in struct rte_bbdev_queue_data — confirm in the
 * rte_bbdev_stats_get() implementation.
 */
struct rte_bbdev_stats {
	uint64_t enqueued_count; /**< Count of all operations enqueued */
	uint64_t dequeued_count; /**< Count of all operations dequeued */
	/** Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/** Total error count on operations dequeued */
	uint64_t dequeue_err_count;
	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
	 *  the enqueue request to its internal queues.
	 *  - For a HW device this is the cycles consumed in MMIO write
	 *  - For a SW (vdev) device, this is the processing time of the
	 *    bbdev operation
	 */
	uint64_t acc_offload_cycles;
};
261
/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param stats
 *   Pointer to structure to where statistics will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
__rte_experimental
int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);

/**
 * Reset the statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
__rte_experimental
int
rte_bbdev_stats_reset(uint16_t dev_id);
290
/** Device information supplied by the device's driver.
 * Filled in by the PMD via rte_bbdev_info_get(); read-only for applications.
 */
struct rte_bbdev_driver_info {
	/** Driver name */
	const char *driver_name;

	/** Maximum number of queues supported by the device */
	unsigned int max_num_queues;
	/** Queue size limit (queue size must also be power of 2) */
	uint32_t queue_size_lim;
	/** Set if device off-loads operation to hardware */
	bool hardware_accelerated;
	/** Max value supported by queue priority for DL (downlink) */
	uint8_t max_dl_queue_priority;
	/** Max value supported by queue priority for UL (uplink) */
	uint8_t max_ul_queue_priority;
	/** Set if device supports per-queue interrupts */
	bool queue_intr_supported;
	/** Minimum alignment of buffers, in bytes */
	uint16_t min_alignment;
	/** HARQ memory available in kB */
	uint32_t harq_buffer_size;
	/** Default queue configuration used if none is supplied */
	struct rte_bbdev_queue_conf default_queue_conf;
	/** Device operation capabilities, terminated by
	 * RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	 */
	const struct rte_bbdev_op_cap *capabilities;
	/** Device cpu_flag requirements */
	const enum rte_cpu_flag_t *cpu_flag_reqs;
};
319
/** Macro used at end of bbdev PMD list: a capability entry whose type is
 * RTE_BBDEV_OP_NONE acts as the list terminator.
 */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }
323
/**
 * Device information structure used by an application to discover a device's
 * capabilities and current configuration.
 */
struct rte_bbdev_info {
	int socket_id; /**< NUMA socket that device is on */
	const char *dev_name; /**< Unique device name */
	const struct rte_device *device; /**< Device Information */
	uint16_t num_queues; /**< Number of queues currently configured */
	bool started; /**< Set if device is currently started */
	struct rte_bbdev_driver_info drv; /**< Info from device driver */
};
336
/**
 * Retrieve information about a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param dev_info
 *   Pointer to structure to where information will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
__rte_experimental
int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
353
/** Queue information, reported by rte_bbdev_queue_info_get(). */
struct rte_bbdev_queue_info {
	/** Current queue configuration */
	struct rte_bbdev_queue_conf conf;
	/** Set if queue is currently started */
	bool started;
};
361
/**
 * Retrieve information about a specific queue on a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param queue_info
 *   Pointer to structure to where information will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
__rte_experimental
int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info);
381
/** @internal The data structure associated with each queue of a device. */
struct rte_bbdev_queue_data {
	void *queue_private; /**< Driver-specific per-queue data */
	struct rte_bbdev_queue_conf conf; /**< Current configuration */
	struct rte_bbdev_stats queue_stats; /**< Per-queue statistics */
	bool started; /**< Queue state: true once started */
};
389
/** @internal Enqueue encode operations for processing on queue of a device.
 * Returns the number of operations actually enqueued.
 */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num);

/** @internal Enqueue decode operations for processing on queue of a device.
 * Returns the number of operations actually enqueued.
 */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num);

/** @internal Dequeue encode operations from a queue of a device.
 * Returns the number of operations actually dequeued.
 */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num);

/** @internal Dequeue decode operations from a queue of a device.
 * Returns the number of operations actually dequeued.
 */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num);

#define RTE_BBDEV_NAME_MAX_LEN 64 /**< Max length of device name */
413
/**
 * @internal The data associated with a device, with no function pointers.
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration. Drivers can access
 * these fields, but should never write to them!
 */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
	void *dev_private; /**< Driver-specific private data */
	uint16_t num_queues; /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues; /**< Queue structures */
	uint16_t dev_id; /**< Device ID */
	int socket_id; /**< NUMA socket that device is on */
	bool started; /**< Device run-time state */
	uint16_t process_cnt; /**< Counter of processes using the device */
};
430
/* Forward declarations */
struct rte_bbdev_ops;
struct rte_bbdev_callback;
struct rte_intr_handle;

/** Structure to keep track of registered callbacks.
 * NOTE(review): TAILQ_HEAD is defined in <sys/queue.h>, which this header
 * does not include directly — presumably pulled in transitively; confirm.
 */
TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
438
/**
 * @internal The data structure associated with a device. Drivers can access
 * these fields, but should only write to the *_ops fields.
 */
struct __rte_cache_aligned rte_bbdev {
	/** Enqueue encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	/** Enqueue decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	/** Dequeue encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	/** Dequeue decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	/** Enqueue LDPC encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
	/** Enqueue LDPC decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
	/** Dequeue LDPC encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
	/** Dequeue LDPC decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
	const struct rte_bbdev_ops *dev_ops; /**< Functions exported by PMD */
	struct rte_bbdev_data *data; /**< Pointer to device data */
	enum rte_bbdev_state state; /**< If device is currently used or not */
	struct rte_device *device; /**< Backing device */
	/** User application callbacks for interrupts if present */
	struct rte_bbdev_cb_list list_cbs;
	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
};

/** @internal array of all devices */
extern struct rte_bbdev rte_bbdev_devices[];
471
472 /**
473 * Enqueue a burst of processed encode operations to a queue of the device.
474 * This functions only enqueues as many operations as currently possible and
475 * does not block until @p num_ops entries in the queue are available.
476 * This function does not provide any error notification to avoid the
477 * corresponding overhead.
478 *
479 * @param dev_id
480 * The identifier of the device.
481 * @param queue_id
482 * The index of the queue.
483 * @param ops
484 * Pointer array containing operations to be enqueued Must have at least
485 * @p num_ops entries
486 * @param num_ops
487 * The maximum number of operations to enqueue.
488 *
489 * @return
490 * The number of operations actually enqueued (this is the number of processed
491 * entries in the @p ops array).
492 */
493 __rte_experimental
494 static inline uint16_t
rte_bbdev_enqueue_enc_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_enc_op ** ops,uint16_t num_ops)495 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
496 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
497 {
498 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
499 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
500 return dev->enqueue_enc_ops(q_data, ops, num_ops);
501 }
502
503 /**
504 * Enqueue a burst of processed decode operations to a queue of the device.
505 * This functions only enqueues as many operations as currently possible and
506 * does not block until @p num_ops entries in the queue are available.
507 * This function does not provide any error notification to avoid the
508 * corresponding overhead.
509 *
510 * @param dev_id
511 * The identifier of the device.
512 * @param queue_id
513 * The index of the queue.
514 * @param ops
515 * Pointer array containing operations to be enqueued Must have at least
516 * @p num_ops entries
517 * @param num_ops
518 * The maximum number of operations to enqueue.
519 *
520 * @return
521 * The number of operations actually enqueued (this is the number of processed
522 * entries in the @p ops array).
523 */
524 __rte_experimental
525 static inline uint16_t
rte_bbdev_enqueue_dec_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_dec_op ** ops,uint16_t num_ops)526 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
527 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
528 {
529 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
530 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
531 return dev->enqueue_dec_ops(q_data, ops, num_ops);
532 }
533
534 /**
535 * Enqueue a burst of processed encode operations to a queue of the device.
536 * This functions only enqueues as many operations as currently possible and
537 * does not block until @p num_ops entries in the queue are available.
538 * This function does not provide any error notification to avoid the
539 * corresponding overhead.
540 *
541 * @param dev_id
542 * The identifier of the device.
543 * @param queue_id
544 * The index of the queue.
545 * @param ops
546 * Pointer array containing operations to be enqueued Must have at least
547 * @p num_ops entries
548 * @param num_ops
549 * The maximum number of operations to enqueue.
550 *
551 * @return
552 * The number of operations actually enqueued (this is the number of processed
553 * entries in the @p ops array).
554 */
555 __rte_experimental
556 static inline uint16_t
rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_enc_op ** ops,uint16_t num_ops)557 rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
558 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
559 {
560 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
561 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
562 return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
563 }
564
565 /**
566 * Enqueue a burst of processed decode operations to a queue of the device.
567 * This functions only enqueues as many operations as currently possible and
568 * does not block until @p num_ops entries in the queue are available.
569 * This function does not provide any error notification to avoid the
570 * corresponding overhead.
571 *
572 * @param dev_id
573 * The identifier of the device.
574 * @param queue_id
575 * The index of the queue.
576 * @param ops
577 * Pointer array containing operations to be enqueued Must have at least
578 * @p num_ops entries
579 * @param num_ops
580 * The maximum number of operations to enqueue.
581 *
582 * @return
583 * The number of operations actually enqueued (this is the number of processed
584 * entries in the @p ops array).
585 */
586 __rte_experimental
587 static inline uint16_t
rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_dec_op ** ops,uint16_t num_ops)588 rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
589 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
590 {
591 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
592 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
593 return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
594 }
595
596
597 /**
598 * Dequeue a burst of processed encode operations from a queue of the device.
599 * This functions returns only the current contents of the queue, and does not
600 * block until @ num_ops is available.
601 * This function does not provide any error notification to avoid the
602 * corresponding overhead.
603 *
604 * @param dev_id
605 * The identifier of the device.
606 * @param queue_id
607 * The index of the queue.
608 * @param ops
609 * Pointer array where operations will be dequeued to. Must have at least
610 * @p num_ops entries
611 * ie. A pointer to a table of void * pointers (ops) that will be filled.
612 * @param num_ops
613 * The maximum number of operations to dequeue.
614 *
615 * @return
616 * The number of operations actually dequeued (this is the number of entries
617 * copied into the @p ops array).
618 */
619 __rte_experimental
620 static inline uint16_t
rte_bbdev_dequeue_enc_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_enc_op ** ops,uint16_t num_ops)621 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
622 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
623 {
624 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
625 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
626 return dev->dequeue_enc_ops(q_data, ops, num_ops);
627 }
628
629 /**
630 * Dequeue a burst of processed decode operations from a queue of the device.
631 * This functions returns only the current contents of the queue, and does not
632 * block until @ num_ops is available.
633 * This function does not provide any error notification to avoid the
634 * corresponding overhead.
635 *
636 * @param dev_id
637 * The identifier of the device.
638 * @param queue_id
639 * The index of the queue.
640 * @param ops
641 * Pointer array where operations will be dequeued to. Must have at least
642 * @p num_ops entries
643 * ie. A pointer to a table of void * pointers (ops) that will be filled.
644 * @param num_ops
645 * The maximum number of operations to dequeue.
646 *
647 * @return
648 * The number of operations actually dequeued (this is the number of entries
649 * copied into the @p ops array).
650 */
651
652 __rte_experimental
653 static inline uint16_t
rte_bbdev_dequeue_dec_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_dec_op ** ops,uint16_t num_ops)654 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
655 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
656 {
657 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
658 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
659 return dev->dequeue_dec_ops(q_data, ops, num_ops);
660 }
661
662
663 /**
664 * Dequeue a burst of processed encode operations from a queue of the device.
665 * This functions returns only the current contents of the queue, and does not
666 * block until @ num_ops is available.
667 * This function does not provide any error notification to avoid the
668 * corresponding overhead.
669 *
670 * @param dev_id
671 * The identifier of the device.
672 * @param queue_id
673 * The index of the queue.
674 * @param ops
675 * Pointer array where operations will be dequeued to. Must have at least
676 * @p num_ops entries
677 * @param num_ops
678 * The maximum number of operations to dequeue.
679 *
680 * @return
681 * The number of operations actually dequeued (this is the number of entries
682 * copied into the @p ops array).
683 */
684 __rte_experimental
685 static inline uint16_t
rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_enc_op ** ops,uint16_t num_ops)686 rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
687 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
688 {
689 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
690 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
691 return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
692 }
693
694 /**
695 * Dequeue a burst of processed decode operations from a queue of the device.
696 * This functions returns only the current contents of the queue, and does not
697 * block until @ num_ops is available.
698 * This function does not provide any error notification to avoid the
699 * corresponding overhead.
700 *
701 * @param dev_id
702 * The identifier of the device.
703 * @param queue_id
704 * The index of the queue.
705 * @param ops
706 * Pointer array where operations will be dequeued to. Must have at least
707 * @p num_ops entries
708 * @param num_ops
709 * The maximum number of operations to dequeue.
710 *
711 * @return
712 * The number of operations actually dequeued (this is the number of entries
713 * copied into the @p ops array).
714 */
715 __rte_experimental
716 static inline uint16_t
rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_dec_op ** ops,uint16_t num_ops)717 rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
718 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
719 {
720 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
721 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
722 return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
723 }
724
/** Definitions of device event types */
enum rte_bbdev_event_type {
	RTE_BBDEV_EVENT_UNKNOWN = 0,	/**< unknown event type */
	RTE_BBDEV_EVENT_ERROR,		/**< error interrupt event */
	RTE_BBDEV_EVENT_DEQUEUE,	/**< dequeue event */
	RTE_BBDEV_EVENT_MAX		/**< max value of this enum */
};
732
/**
 * Typedef for application callback function registered by application
 * software for notification of device events.
 *
 * @param dev_id
 *   Device identifier.
 * @param event
 *   Device event to register for notification of.
 * @param cb_arg
 *   User specified parameter to be passed to user's callback function.
 * @param ret_param
 *   To pass data back to user application.
 */
typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param);
749
/**
 * Register a callback function for specific device id. Multiple callbacks can
 * be added and will be called in the order they are added when an event is
 * triggered. Callbacks are called in a separate thread created by the DPDK
 * EAL.
 *
 * @param dev_id
 *   Device id.
 * @param event
 *   The event that the callback will be registered for.
 * @param cb_fn
 *   User supplied callback function to be called.
 * @param cb_arg
 *   Pointer to parameter that will be passed to the callback.
 *
 * @return
 *   Zero on success, negative value on failure.
 */
__rte_experimental
int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);

/**
 * Unregister a callback function for specific device id.
 *
 * @param dev_id
 *   The device identifier.
 * @param event
 *   The event that the callback will be unregistered for.
 * @param cb_fn
 *   User supplied callback function to be unregistered.
 * @param cb_arg
 *   Pointer to the parameter supplied when registering the callback.
 *   (void *)-1 means to remove all registered callbacks with the specified
 *   function address.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 *   - EAGAIN if the provided callback pointer does not exist
 */
__rte_experimental
int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);
795
/**
 * Enable a one-shot interrupt on the next operation enqueued to a particular
 * queue. The interrupt will be triggered when the operation is ready to be
 * dequeued. To handle the interrupt, an epoll file descriptor must be
 * registered using rte_bbdev_queue_intr_ctl(), and then an application
 * thread/lcore can wait for the interrupt using rte_epoll_wait().
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
__rte_experimental
int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);

/**
 * Disable a one-shot interrupt on the next operation enqueued to a particular
 * queue (if it has been enabled).
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
__rte_experimental
int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);

/**
 * Control interface for per-queue interrupts.
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 * @param epfd
 *   Epoll file descriptor that will be associated with the interrupt source.
 *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
 *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
 *   be used when calling rte_epoll_wait()).
 * @param op
 *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
 *   RTE_INTR_EVENT_DEL.
 * @param data
 *   User context, that will be returned in the epdata.data field of the
 *   rte_epoll_event structure filled in by rte_epoll_wait().
 *
 * @return
 *   - 0 on success
 *   - ENOTSUP if interrupts are not supported by the identified device
 *   - negative value on failure - as returned from PMD driver
 */
__rte_experimental
int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data);
861
862 #ifdef __cplusplus
863 }
864 #endif
865
866 #endif /* _RTE_BBDEV_H_ */
867