1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
3 */
4
5 #ifndef _RTE_BBDEV_H_
6 #define _RTE_BBDEV_H_
7
8 /**
9 * @file rte_bbdev.h
10 *
11 * Wireless base band device abstraction APIs.
12 *
13 * This API allows an application to discover, configure and use a device to
14 * process operations. An asynchronous API (enqueue, followed by later dequeue)
15 * is used for processing operations.
16 *
17 * The functions in this API are not thread-safe when called on the same
18 * target object (a device, or a queue on a device), with the exception that
19 * one thread can enqueue operations to a queue while another thread dequeues
20 * from the same queue.
21 */
22
23 #ifdef __cplusplus
24 extern "C" {
25 #endif
26
27 #include <stdint.h>
28 #include <stdbool.h>
29
30 #include <rte_cpuflags.h>
31
32 #include "rte_bbdev_op.h"
33
34 #ifndef RTE_BBDEV_MAX_DEVS
35 #define RTE_BBDEV_MAX_DEVS 128 /**< Max number of devices */
36 #endif
37
/** Flags indicating the current state of a BBDEV device slot */
enum rte_bbdev_state {
	RTE_BBDEV_UNUSED,      /**< Slot is free; no device attached */
	RTE_BBDEV_INITIALIZED  /**< Device has been allocated and initialised */
};
43
/**
 * Get the total number of devices that have been successfully initialised.
 *
 * @return
 *   The total number of usable devices.
 */
uint16_t
rte_bbdev_count(void);

/**
 * Check if a device is valid.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   true if device ID is valid and device is attached, false otherwise.
 */
bool
rte_bbdev_is_valid(uint16_t dev_id);

/**
 * Get the next enabled device.
 *
 * @param dev_id
 *   The current device.
 *
 * @return
 *   - The next enabled device, or
 *   - RTE_BBDEV_MAX_DEVS if none found.
 */
uint16_t
rte_bbdev_find_next(uint16_t dev_id);

/** Iterate through all enabled devices */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))

/**
 * Set up device queues.
 * This function must be called on a device before setting up the queues and
 * starting the device. It can also be called when a device is in the stopped
 * state. If any device queues have been configured their configuration will be
 * cleared by a call to this function.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param num_queues
 *   Number of queues to configure on device.
 * @param socket_id
 *   ID of a socket which will be used to allocate memory.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
 *   - -EBUSY if the identified device has already started
 *   - -ENOMEM if unable to allocate memory
 */
int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);

/**
 * Enable interrupts.
 * This function may be called before starting the device to enable the
 * interrupts if they are available.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EBUSY if the identified device has already started
 *   - -ENOTSUP if the interrupts are not supported by the device
 */
int
rte_bbdev_intr_enable(uint16_t dev_id);
123
/** Device queue configuration structure */
struct rte_bbdev_queue_conf {
	int socket;  /**< NUMA socket used for memory allocation */
	uint32_t queue_size;  /**< Size of queue (number of entries) */
	uint8_t priority;  /**< Queue priority */
	bool deferred_start;  /**< Do not start queue when device is started. */
	enum rte_bbdev_op_type op_type;  /**< Operation type processed by queue */
};
132
/**
 * Configure a queue on a device.
 * This function can be called after device configuration, and before starting.
 * It can also be called when the device or the queue is in the stopped state.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param conf
 *   The queue configuration. If NULL, a default configuration will be used.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if the identified queue size or priority are invalid
 *   - EBUSY if the identified queue or its device have already started
 */
int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf);

/**
 * Start a device.
 * This is the last step needed before enqueueing operations is possible.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_start(uint16_t dev_id);

/**
 * Stop a device.
 * The device can be reconfigured, and restarted after being stopped.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
int
rte_bbdev_stop(uint16_t dev_id);

/**
 * Close a device.
 * The device cannot be restarted without reconfiguration!
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
int
rte_bbdev_close(uint16_t dev_id);

/**
 * Start a specified queue on a device.
 * This is only needed if the queue has been stopped, or if the deferred_start
 * flag has been set when configuring the queue.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);

/**
 * Stop a specified queue on a device, to allow reconfiguration.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
225
/** Device statistics. */
struct rte_bbdev_stats {
	uint64_t enqueued_count;  /**< Count of all operations enqueued */
	uint64_t dequeued_count;  /**< Count of all operations dequeued */
	/** Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/** Total error count on operations dequeued */
	uint64_t dequeue_err_count;
	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
	 *  the enqueue request to its internal queues.
	 *  - For a HW device this is the cycles consumed in MMIO write
	 *  - For a SW (vdev) device, this is the processing time of the
	 *    bbdev operation
	 */
	uint64_t acc_offload_cycles;
};
242
/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param stats
 *   Pointer to structure to where statistics will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);

/**
 * Reset the statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   - 0 on success
 */
int
rte_bbdev_stats_reset(uint16_t dev_id);
269
/** Device information supplied by the device's driver */
struct rte_bbdev_driver_info {
	/** Driver name */
	const char *driver_name;

	/** Maximum number of queues supported by the device */
	unsigned int max_num_queues;
	/** Queue size limit (queue size must also be power of 2) */
	uint32_t queue_size_lim;
	/** Set if device off-loads operation to hardware */
	bool hardware_accelerated;
	/** Max value supported by queue priority for DL */
	uint8_t max_dl_queue_priority;
	/** Max value supported by queue priority for UL */
	uint8_t max_ul_queue_priority;
	/** Set if device supports per-queue interrupts */
	bool queue_intr_supported;
	/** Minimum alignment of buffers, in bytes */
	uint16_t min_alignment;
	/** HARQ memory available in kB */
	uint32_t harq_buffer_size;
	/** Byte endianness (RTE_BIG_ENDIAN/RTE_LITTLE_ENDIAN) supported
	 *  for input/output data
	 */
	uint8_t data_endianness;
	/** Default queue configuration used if none is supplied */
	struct rte_bbdev_queue_conf default_queue_conf;
	/** Device operation capabilities (terminated by RTE_BBDEV_OP_NONE) */
	const struct rte_bbdev_op_cap *capabilities;
	/** Device cpu_flag requirements */
	const enum rte_cpu_flag_t *cpu_flag_reqs;
};
302
/** Macro used to terminate a bbdev PMD's capabilities list */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }
306
/**
 * Device information structure used by an application to discover a device's
 * capabilities and current configuration
 */
struct rte_bbdev_info {
	int socket_id;  /**< NUMA socket that device is on */
	const char *dev_name;  /**< Unique device name */
	const struct rte_device *device;  /**< Device Information */
	uint16_t num_queues;  /**< Number of queues currently configured */
	bool started;  /**< Set if device is currently started */
	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
};
319
/**
 * Retrieve information about a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param dev_info
 *   Pointer to structure to where information will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
335
/** Queue information */
struct rte_bbdev_queue_info {
	/** Current queue configuration */
	struct rte_bbdev_queue_conf conf;
	/** Set if queue is currently started */
	bool started;
};
343
/**
 * Retrieve information about a specific queue on a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param queue_info
 *   Pointer to structure to where information will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info);
362
/** @internal The data structure associated with each queue of a device. */
struct rte_bbdev_queue_data {
	void *queue_private;  /**< Driver-specific per-queue data */
	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
	bool started;  /**< Queue state: set if queue is started */
};
370
/** @internal Enqueue encode operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num);

/** @internal Enqueue decode operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num);

/** @internal Dequeue encode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num);

/** @internal Dequeue decode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num);

#define RTE_BBDEV_NAME_MAX_LEN 64  /**< Max length of device name */
394
/**
 * @internal The data associated with a device, with no function pointers.
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration. Drivers can access
 * these fields, but should never write to them!
 */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
	void *dev_private;  /**< Driver-specific private data */
	uint16_t num_queues;  /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
	uint16_t dev_id;  /**< Device ID */
	int socket_id;  /**< NUMA socket that device is on */
	bool started;  /**< Device run-time state */
	uint16_t process_cnt;  /**< Counter of processes using the device */
};
411
/* Forward declarations */
struct rte_bbdev_ops;
struct rte_bbdev_callback;
struct rte_intr_handle;

/** Structure to keep track of registered callbacks */
RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
419
/**
 * @internal The data structure associated with a device. Drivers can access
 * these fields, but should only write to the *_ops fields.
 */
struct __rte_cache_aligned rte_bbdev {
	/** Enqueue encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	/** Enqueue decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	/** Dequeue encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	/** Dequeue decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	/** Enqueue LDPC encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
	/** Enqueue LDPC decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
	/** Dequeue LDPC encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
	/** Dequeue LDPC decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
	struct rte_bbdev_data *data;  /**< Pointer to device data */
	enum rte_bbdev_state state;  /**< If device is currently used or not */
	struct rte_device *device;  /**< Backing device */
	/** User application callback for interrupts if present */
	struct rte_bbdev_cb_list list_cbs;
	struct rte_intr_handle *intr_handle;  /**< Device interrupt handle */
};
449
/** @internal array of all devices, indexed by device ID */
extern struct rte_bbdev rte_bbdev_devices[];
452
453 /**
454 * Enqueue a burst of processed encode operations to a queue of the device.
455 * This functions only enqueues as many operations as currently possible and
456 * does not block until @p num_ops entries in the queue are available.
457 * This function does not provide any error notification to avoid the
458 * corresponding overhead.
459 *
460 * @param dev_id
461 * The identifier of the device.
462 * @param queue_id
463 * The index of the queue.
464 * @param ops
465 * Pointer array containing operations to be enqueued Must have at least
466 * @p num_ops entries
467 * @param num_ops
468 * The maximum number of operations to enqueue.
469 *
470 * @return
471 * The number of operations actually enqueued (this is the number of processed
472 * entries in the @p ops array).
473 */
474 static inline uint16_t
rte_bbdev_enqueue_enc_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_enc_op ** ops,uint16_t num_ops)475 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
476 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
477 {
478 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
479 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
480 return dev->enqueue_enc_ops(q_data, ops, num_ops);
481 }
482
483 /**
484 * Enqueue a burst of processed decode operations to a queue of the device.
485 * This functions only enqueues as many operations as currently possible and
486 * does not block until @p num_ops entries in the queue are available.
487 * This function does not provide any error notification to avoid the
488 * corresponding overhead.
489 *
490 * @param dev_id
491 * The identifier of the device.
492 * @param queue_id
493 * The index of the queue.
494 * @param ops
495 * Pointer array containing operations to be enqueued Must have at least
496 * @p num_ops entries
497 * @param num_ops
498 * The maximum number of operations to enqueue.
499 *
500 * @return
501 * The number of operations actually enqueued (this is the number of processed
502 * entries in the @p ops array).
503 */
504 static inline uint16_t
rte_bbdev_enqueue_dec_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_dec_op ** ops,uint16_t num_ops)505 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
506 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
507 {
508 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
509 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
510 return dev->enqueue_dec_ops(q_data, ops, num_ops);
511 }
512
513 /**
514 * Enqueue a burst of processed encode operations to a queue of the device.
515 * This functions only enqueues as many operations as currently possible and
516 * does not block until @p num_ops entries in the queue are available.
517 * This function does not provide any error notification to avoid the
518 * corresponding overhead.
519 *
520 * @param dev_id
521 * The identifier of the device.
522 * @param queue_id
523 * The index of the queue.
524 * @param ops
525 * Pointer array containing operations to be enqueued Must have at least
526 * @p num_ops entries
527 * @param num_ops
528 * The maximum number of operations to enqueue.
529 *
530 * @return
531 * The number of operations actually enqueued (this is the number of processed
532 * entries in the @p ops array).
533 */
534 static inline uint16_t
rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_enc_op ** ops,uint16_t num_ops)535 rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
536 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
537 {
538 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
539 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
540 return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
541 }
542
543 /**
544 * Enqueue a burst of processed decode operations to a queue of the device.
545 * This functions only enqueues as many operations as currently possible and
546 * does not block until @p num_ops entries in the queue are available.
547 * This function does not provide any error notification to avoid the
548 * corresponding overhead.
549 *
550 * @param dev_id
551 * The identifier of the device.
552 * @param queue_id
553 * The index of the queue.
554 * @param ops
555 * Pointer array containing operations to be enqueued Must have at least
556 * @p num_ops entries
557 * @param num_ops
558 * The maximum number of operations to enqueue.
559 *
560 * @return
561 * The number of operations actually enqueued (this is the number of processed
562 * entries in the @p ops array).
563 */
564 static inline uint16_t
rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_dec_op ** ops,uint16_t num_ops)565 rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
566 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
567 {
568 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
569 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
570 return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
571 }
572
573
574 /**
575 * Dequeue a burst of processed encode operations from a queue of the device.
576 * This functions returns only the current contents of the queue, and does not
577 * block until @ num_ops is available.
578 * This function does not provide any error notification to avoid the
579 * corresponding overhead.
580 *
581 * @param dev_id
582 * The identifier of the device.
583 * @param queue_id
584 * The index of the queue.
585 * @param ops
586 * Pointer array where operations will be dequeued to. Must have at least
587 * @p num_ops entries
588 * ie. A pointer to a table of void * pointers (ops) that will be filled.
589 * @param num_ops
590 * The maximum number of operations to dequeue.
591 *
592 * @return
593 * The number of operations actually dequeued (this is the number of entries
594 * copied into the @p ops array).
595 */
596 static inline uint16_t
rte_bbdev_dequeue_enc_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_enc_op ** ops,uint16_t num_ops)597 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
598 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
599 {
600 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
601 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
602 return dev->dequeue_enc_ops(q_data, ops, num_ops);
603 }
604
605 /**
606 * Dequeue a burst of processed decode operations from a queue of the device.
607 * This functions returns only the current contents of the queue, and does not
608 * block until @ num_ops is available.
609 * This function does not provide any error notification to avoid the
610 * corresponding overhead.
611 *
612 * @param dev_id
613 * The identifier of the device.
614 * @param queue_id
615 * The index of the queue.
616 * @param ops
617 * Pointer array where operations will be dequeued to. Must have at least
618 * @p num_ops entries
619 * ie. A pointer to a table of void * pointers (ops) that will be filled.
620 * @param num_ops
621 * The maximum number of operations to dequeue.
622 *
623 * @return
624 * The number of operations actually dequeued (this is the number of entries
625 * copied into the @p ops array).
626 */
627
628 static inline uint16_t
rte_bbdev_dequeue_dec_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_dec_op ** ops,uint16_t num_ops)629 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
630 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
631 {
632 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
633 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
634 return dev->dequeue_dec_ops(q_data, ops, num_ops);
635 }
636
637
638 /**
639 * Dequeue a burst of processed encode operations from a queue of the device.
640 * This functions returns only the current contents of the queue, and does not
641 * block until @ num_ops is available.
642 * This function does not provide any error notification to avoid the
643 * corresponding overhead.
644 *
645 * @param dev_id
646 * The identifier of the device.
647 * @param queue_id
648 * The index of the queue.
649 * @param ops
650 * Pointer array where operations will be dequeued to. Must have at least
651 * @p num_ops entries
652 * @param num_ops
653 * The maximum number of operations to dequeue.
654 *
655 * @return
656 * The number of operations actually dequeued (this is the number of entries
657 * copied into the @p ops array).
658 */
659 static inline uint16_t
rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_enc_op ** ops,uint16_t num_ops)660 rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
661 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
662 {
663 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
664 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
665 return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
666 }
667
668 /**
669 * Dequeue a burst of processed decode operations from a queue of the device.
670 * This functions returns only the current contents of the queue, and does not
671 * block until @ num_ops is available.
672 * This function does not provide any error notification to avoid the
673 * corresponding overhead.
674 *
675 * @param dev_id
676 * The identifier of the device.
677 * @param queue_id
678 * The index of the queue.
679 * @param ops
680 * Pointer array where operations will be dequeued to. Must have at least
681 * @p num_ops entries
682 * @param num_ops
683 * The maximum number of operations to dequeue.
684 *
685 * @return
686 * The number of operations actually dequeued (this is the number of entries
687 * copied into the @p ops array).
688 */
689 static inline uint16_t
rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_dec_op ** ops,uint16_t num_ops)690 rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
691 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
692 {
693 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
694 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
695 return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
696 }
697
/** Definitions of device event types */
enum rte_bbdev_event_type {
	RTE_BBDEV_EVENT_UNKNOWN,  /**< unknown event type */
	RTE_BBDEV_EVENT_ERROR,  /**< error interrupt event */
	RTE_BBDEV_EVENT_DEQUEUE,  /**< dequeue event */
	RTE_BBDEV_EVENT_MAX  /**< max value of this enum */
};

/**
 * Typedef for application callback function registered by application
 * software for notification of device events
 *
 * @param dev_id
 *   Device identifier
 * @param event
 *   Device event to register for notification of.
 * @param cb_arg
 *   User specified parameter to be passed to user's callback function.
 * @param ret_param
 *   To pass data back to user application.
 */
typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param);
722
/**
 * Register a callback function for specific device id. Multiple callbacks can
 * be added and will be called in the order they are added when an event is
 * triggered. Callbacks are called in a separate thread created by the DPDK
 * EAL.
 *
 * @param dev_id
 *   Device id.
 * @param event
 *   The event that the callback will be registered for.
 * @param cb_fn
 *   User supplied callback function to be called.
 * @param cb_arg
 *   Pointer to parameter that will be passed to the callback.
 *
 * @return
 *   Zero on success, negative value on failure.
 */
int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);

/**
 * Unregister a callback function for specific device id.
 *
 * @param dev_id
 *   The device identifier.
 * @param event
 *   The event that the callback will be unregistered for.
 * @param cb_fn
 *   User supplied callback function to be unregistered.
 * @param cb_arg
 *   Pointer to the parameter supplied when registering the callback.
 *   (void *)-1 means to remove all registered callbacks with the specified
 *   function address.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 *   - EAGAIN if the provided callback pointer does not exist
 */
int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);
766
/**
 * Enable a one-shot interrupt on the next operation enqueued to a particular
 * queue. The interrupt will be triggered when the operation is ready to be
 * dequeued. To handle the interrupt, an epoll file descriptor must be
 * registered using rte_bbdev_queue_intr_ctl(), and then an application
 * thread/lcore can wait for the interrupt using rte_epoll_wait().
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);

/**
 * Disable a one-shot interrupt on the next operation enqueued to a particular
 * queue (if it has been enabled).
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);

/**
 * Control interface for per-queue interrupts.
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 * @param epfd
 *   Epoll file descriptor that will be associated with the interrupt source.
 *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
 *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
 *   be used when calling rte_epoll_wait()).
 * @param op
 *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
 *   RTE_INTR_EVENT_DEL.
 * @param data
 *   User context, that will be returned in the epdata.data field of the
 *   rte_epoll_event structure filled in by rte_epoll_wait().
 *
 * @return
 *   - 0 on success
 *   - ENOTSUP if interrupts are not supported by the identified device
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data);
829
830 #ifdef __cplusplus
831 }
832 #endif
833
834 #endif /* _RTE_BBDEV_H_ */
835