1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020 Intel Corporation.
3 */
4
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7
8 /**
9 * @file rte_cryptodev.h
10 *
11 * RTE Cryptographic Device APIs
12 *
13 * Defines RTE Crypto Device APIs for the provisioning of cipher and
14 * authentication operations.
15 */
16
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20
21 #include "rte_kvargs.h"
22 #include "rte_crypto.h"
23 #include <rte_common.h>
24 #include <rte_rcu_qsbr.h>
25
26 #include "rte_cryptodev_trace_fp.h"
27
28 extern const char **rte_cyptodev_names;
29
30 /* Logging Macros */
31
32 #define CDEV_LOG_ERR(...) \
33 RTE_LOG(ERR, CRYPTODEV, \
34 RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
35 __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
36
37 #define CDEV_LOG_INFO(...) \
38 RTE_LOG(INFO, CRYPTODEV, \
39 RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
40 RTE_FMT_TAIL(__VA_ARGS__,)))
41
42 #define CDEV_LOG_DEBUG(...) \
43 RTE_LOG(DEBUG, CRYPTODEV, \
44 RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
45 __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
46
47 #define CDEV_PMD_TRACE(...) \
48 RTE_LOG(DEBUG, CRYPTODEV, \
49 RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
50 dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
51
52 /**
 * A macro that returns a pointer to an offset from the start
 * of the crypto operation structure (rte_crypto_op)
55 *
56 * The returned pointer is cast to type t.
57 *
58 * @param c
59 * The crypto operation.
60 * @param o
61 * The offset from the start of the crypto operation.
62 * @param t
63 * The type to cast the result into.
64 */
65 #define rte_crypto_op_ctod_offset(c, t, o) \
66 ((t)((char *)(c) + (o)))
67
68 /**
69 * A macro that returns the physical address that points
70 * to an offset from the start of the crypto operation
71 * (rte_crypto_op)
72 *
73 * @param c
74 * The crypto operation.
75 * @param o
76 * The offset from the start of the crypto operation
77 * to calculate address from.
78 */
79 #define rte_crypto_op_ctophys_offset(c, o) \
80 (rte_iova_t)((c)->phys_addr + (o))
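
/*
 * Illustrative sketch (not part of the API): using the offset macros to locate
 * a cipher IV that the application has placed immediately after the symmetric
 * operation. This layout and the macro name EXAMPLE_IV_OFFSET are assumptions
 * of this example, not requirements of the API.
 *
 *	#define EXAMPLE_IV_OFFSET \
 *		(sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
 *
 *	uint8_t *iv_va =
 *		rte_crypto_op_ctod_offset(op, uint8_t *, EXAMPLE_IV_OFFSET);
 *	rte_iova_t iv_iova =
 *		rte_crypto_op_ctophys_offset(op, EXAMPLE_IV_OFFSET);
 */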
81
82 /**
83 * Crypto parameters range description
84 */
85 struct rte_crypto_param_range {
86 uint16_t min; /**< minimum size */
87 uint16_t max; /**< maximum size */
88 uint16_t increment;
/**< If a range of sizes is supported,
 * this parameter is used to indicate
 * increments in byte size that are supported
 * between the minimum and maximum.
 */
94 };
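
/*
 * Illustrative helper (not part of the API) showing one way to interpret the
 * min/max/increment semantics described above when checking whether a given
 * size is supported. The helper name is an assumption of this example.
 *
 *	static int
 *	example_size_in_range(const struct rte_crypto_param_range *range,
 *			uint16_t size)
 *	{
 *		unsigned int s;
 *
 *		if (size < range->min || size > range->max)
 *			return 0;
 *		if (range->increment == 0)
 *			return size == range->min; // only one size supported
 *		for (s = range->min; s <= range->max; s += range->increment)
 *			if (s == size)
 *				return 1;
 *		return 0;
 *	}
 */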
95
96 /**
97 * Data-unit supported lengths of cipher algorithms.
98 * A bit can represent any set of data-unit sizes
99 * (single size, multiple size, range, etc).
100 */
101 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES RTE_BIT32(0)
102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES RTE_BIT32(1)
103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES RTE_BIT32(2)
104
105 /**
106 * Symmetric Crypto Capability
107 */
108 struct rte_cryptodev_symmetric_capability {
109 enum rte_crypto_sym_xform_type xform_type;
110 /**< Transform type : Authentication / Cipher / AEAD */
111 RTE_STD_C11
112 union {
113 struct {
114 enum rte_crypto_auth_algorithm algo;
115 /**< authentication algorithm */
116 uint16_t block_size;
117 /**< algorithm block size */
118 struct rte_crypto_param_range key_size;
119 /**< auth key size range */
120 struct rte_crypto_param_range digest_size;
121 /**< digest size range */
122 struct rte_crypto_param_range aad_size;
123 /**< Additional authentication data size range */
124 struct rte_crypto_param_range iv_size;
125 /**< Initialisation vector data size range */
126 } auth;
127 /**< Symmetric Authentication transform capabilities */
128 struct {
129 enum rte_crypto_cipher_algorithm algo;
130 /**< cipher algorithm */
131 uint16_t block_size;
132 /**< algorithm block size */
133 struct rte_crypto_param_range key_size;
134 /**< cipher key size range */
135 struct rte_crypto_param_range iv_size;
136 /**< Initialisation vector data size range */
137 uint32_t dataunit_set;
138 /**<
139 * Supported data-unit lengths:
140 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
141 * or 0 for lengths defined in the algorithm standard.
142 */
143 } cipher;
144 /**< Symmetric Cipher transform capabilities */
145 struct {
146 enum rte_crypto_aead_algorithm algo;
147 /**< AEAD algorithm */
148 uint16_t block_size;
149 /**< algorithm block size */
150 struct rte_crypto_param_range key_size;
151 /**< AEAD key size range */
152 struct rte_crypto_param_range digest_size;
153 /**< digest size range */
154 struct rte_crypto_param_range aad_size;
155 /**< Additional authentication data size range */
156 struct rte_crypto_param_range iv_size;
157 /**< Initialisation vector data size range */
158 } aead;
159 };
160 };
161
162 /**
163 * Asymmetric Xform Crypto Capability
164 *
165 */
166 struct rte_cryptodev_asymmetric_xform_capability {
167 enum rte_crypto_asym_xform_type xform_type;
168 /**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
169
170 uint32_t op_types;
171 /**< bitmask for supported rte_crypto_asym_op_type */
172
173 __extension__
174 union {
175 struct rte_crypto_param_range modlen;
/**< Range of modulus length supported by modulus-based xform.
 * Value 0 means implementation default.
178 */
179 };
180 };
181
182 /**
183 * Asymmetric Crypto Capability
184 *
185 */
186 struct rte_cryptodev_asymmetric_capability {
187 struct rte_cryptodev_asymmetric_xform_capability xform_capa;
188 };
189
190
191 /** Structure used to capture a capability of a crypto device */
192 struct rte_cryptodev_capabilities {
193 enum rte_crypto_op_type op;
194 /**< Operation type */
195
196 RTE_STD_C11
197 union {
198 struct rte_cryptodev_symmetric_capability sym;
199 /**< Symmetric operation capability parameters */
200 struct rte_cryptodev_asymmetric_capability asym;
201 /**< Asymmetric operation capability parameters */
202 };
203 };
204
205 /** Structure used to describe crypto algorithms */
206 struct rte_cryptodev_sym_capability_idx {
207 enum rte_crypto_sym_xform_type type;
208 union {
209 enum rte_crypto_cipher_algorithm cipher;
210 enum rte_crypto_auth_algorithm auth;
211 enum rte_crypto_aead_algorithm aead;
212 } algo;
213 };
214
215 /**
216 * Structure used to describe asymmetric crypto xforms
217 * Each xform maps to one asym algorithm.
218 *
219 */
220 struct rte_cryptodev_asym_capability_idx {
221 enum rte_crypto_asym_xform_type type;
222 /**< Asymmetric xform (algo) type */
223 };
224
225 /**
 * Provide the capabilities available for a given device and algorithm.
227 *
228 * @param dev_id The identifier of the device.
229 * @param idx Description of crypto algorithms.
230 *
231 * @return
 * - Return description of the symmetric crypto capability if it exists.
 * - Return NULL if the capability does not exist.
234 */
235 const struct rte_cryptodev_symmetric_capability *
236 rte_cryptodev_sym_capability_get(uint8_t dev_id,
237 const struct rte_cryptodev_sym_capability_idx *idx);
238
239 /**
 * Provide the capabilities available for a given device and xform.
241 *
242 * @param dev_id The identifier of the device.
243 * @param idx Description of asym crypto xform.
244 *
245 * @return
 * - Return description of the asymmetric crypto capability if it exists.
 * - Return NULL if the capability does not exist.
248 */
249 __rte_experimental
250 const struct rte_cryptodev_asymmetric_xform_capability *
251 rte_cryptodev_asym_capability_get(uint8_t dev_id,
252 const struct rte_cryptodev_asym_capability_idx *idx);
253
254 /**
255 * Check if key size and initial vector are supported
256 * in crypto cipher capability
257 *
258 * @param capability Description of the symmetric crypto capability.
259 * @param key_size Cipher key size.
260 * @param iv_size Cipher initial vector size.
261 *
262 * @return
263 * - Return 0 if the parameters are in range of the capability.
264 * - Return -1 if the parameters are out of range of the capability.
265 */
266 int
267 rte_cryptodev_sym_capability_check_cipher(
268 const struct rte_cryptodev_symmetric_capability *capability,
269 uint16_t key_size, uint16_t iv_size);
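
/*
 * Illustrative sketch (not part of the API): query whether device dev_id
 * supports AES-CBC with a 128-bit key and a 16-byte IV. The device id and the
 * sizes used are assumptions of this example.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	if (cap == NULL ||
 *			rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		; // AES-CBC/128 with a 16-byte IV is not supported by this device
 */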
270
271 /**
272 * Check if key size and initial vector are supported
273 * in crypto auth capability
274 *
275 * @param capability Description of the symmetric crypto capability.
276 * @param key_size Auth key size.
277 * @param digest_size Auth digest size.
278 * @param iv_size Auth initial vector size.
279 *
280 * @return
281 * - Return 0 if the parameters are in range of the capability.
282 * - Return -1 if the parameters are out of range of the capability.
283 */
284 int
285 rte_cryptodev_sym_capability_check_auth(
286 const struct rte_cryptodev_symmetric_capability *capability,
287 uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
288
289 /**
290 * Check if key, digest, AAD and initial vector sizes are supported
291 * in crypto AEAD capability
292 *
293 * @param capability Description of the symmetric crypto capability.
294 * @param key_size AEAD key size.
295 * @param digest_size AEAD digest size.
296 * @param aad_size AEAD AAD size.
297 * @param iv_size AEAD IV size.
298 *
299 * @return
300 * - Return 0 if the parameters are in range of the capability.
301 * - Return -1 if the parameters are out of range of the capability.
302 */
303 int
304 rte_cryptodev_sym_capability_check_aead(
305 const struct rte_cryptodev_symmetric_capability *capability,
306 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
307 uint16_t iv_size);
308
309 /**
310 * Check if op type is supported
311 *
312 * @param capability Description of the asymmetric crypto capability.
313 * @param op_type op type
314 *
315 * @return
316 * - Return 1 if the op type is supported
317 * - Return 0 if unsupported
318 */
319 __rte_experimental
320 int
321 rte_cryptodev_asym_xform_capability_check_optype(
322 const struct rte_cryptodev_asymmetric_xform_capability *capability,
323 enum rte_crypto_asym_op_type op_type);
324
325 /**
326 * Check if modulus length is in supported range
327 *
328 * @param capability Description of the asymmetric crypto capability.
329 * @param modlen modulus length.
330 *
331 * @return
332 * - Return 0 if the parameters are in range of the capability.
333 * - Return -1 if the parameters are out of range of the capability.
334 */
335 __rte_experimental
336 int
337 rte_cryptodev_asym_xform_capability_check_modlen(
338 const struct rte_cryptodev_asymmetric_xform_capability *capability,
339 uint16_t modlen);
340
341 /**
342 * Provide the cipher algorithm enum, given an algorithm string
343 *
344 * @param algo_enum A pointer to the cipher algorithm
345 * enum to be filled
 * @param algo_string Cipher algorithm string
 *
 * @return
 * - Return -1 if the string is not valid
 * - Return 0 if the string is valid
351 */
352 int
353 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
354 const char *algo_string);
355
356 /**
357 * Provide the authentication algorithm enum, given an algorithm string
358 *
359 * @param algo_enum A pointer to the authentication algorithm
360 * enum to be filled
361 * @param algo_string Authentication algo string
362 *
363 * @return
 * - Return -1 if the string is not valid
 * - Return 0 if the string is valid
366 */
367 int
368 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
369 const char *algo_string);
370
371 /**
372 * Provide the AEAD algorithm enum, given an algorithm string
373 *
374 * @param algo_enum A pointer to the AEAD algorithm
375 * enum to be filled
376 * @param algo_string AEAD algorithm string
377 *
378 * @return
 * - Return -1 if the string is not valid
 * - Return 0 if the string is valid
381 */
382 int
383 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
384 const char *algo_string);
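
/*
 * Illustrative sketch (not part of the API): convert an algorithm name string
 * into its enum value. The spelling "aes-cbc" follows the naming used by the
 * DPDK crypto test tools; treat the exact string as an assumption here.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		; // the string is not a recognised cipher algorithm name
 */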
385
386 /**
387 * Provide the Asymmetric xform enum, given an xform string
388 *
389 * @param xform_enum A pointer to the xform type
390 * enum to be filled
391 * @param xform_string xform string
392 *
393 * @return
 * - Return -1 if the string is not valid
395 * - Return 0 if the string is valid
396 */
397 __rte_experimental
398 int
399 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
400 const char *xform_string);
401
402
403 /** Macro used at end of crypto PMD list */
404 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
405 { RTE_CRYPTO_OP_TYPE_UNDEFINED }
406
407
408 /**
409 * Crypto device supported feature flags
410 *
411 * Note:
 * New feature flags should be added to the end of the list
413 *
414 * Keep these flags synchronised with rte_cryptodev_get_feature_name()
415 */
416 #define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0)
417 /**< Symmetric crypto operations are supported */
418 #define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1)
419 /**< Asymmetric crypto operations are supported */
420 #define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2)
/**< Chaining of symmetric crypto operations is supported */
422 #define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3)
423 /**< Utilises CPU SIMD SSE instructions */
424 #define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4)
425 /**< Utilises CPU SIMD AVX instructions */
426 #define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5)
427 /**< Utilises CPU SIMD AVX2 instructions */
428 #define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6)
429 /**< Utilises CPU AES-NI instructions */
430 #define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7)
431 /**< Operations are off-loaded to an
432 * external hardware accelerator
433 */
434 #define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
435 /**< Utilises CPU SIMD AVX512 instructions */
436 #define RTE_CRYPTODEV_FF_IN_PLACE_SGL (1ULL << 9)
437 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
438 * are supported
439 */
440 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT (1ULL << 10)
441 /**< Out-of-place Scatter-gather (SGL) buffers are
442 * supported in input and output
443 */
444 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT (1ULL << 11)
445 /**< Out-of-place Scatter-gather (SGL) buffers are supported
446 * in input, combined with linear buffers (LB), with a
447 * single segment in output
448 */
449 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT (1ULL << 12)
450 /**< Out-of-place Scatter-gather (SGL) buffers are supported
451 * in output, combined with linear buffers (LB) in input
452 */
453 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT (1ULL << 13)
454 /**< Out-of-place linear buffers (LB) are supported in input and output */
455 #define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 14)
456 /**< Utilises CPU NEON instructions */
457 #define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 15)
458 /**< Utilises ARM CPU Cryptographic Extensions */
459 #define RTE_CRYPTODEV_FF_SECURITY (1ULL << 16)
460 /**< Support Security Protocol Processing */
461 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP (1ULL << 17)
462 /**< Support RSA Private Key OP with exponent */
463 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT (1ULL << 18)
464 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
465 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED (1ULL << 19)
466 /**< Support encrypted-digest operations where digest is appended to data */
467 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS (1ULL << 20)
468 /**< Support asymmetric session-less operations */
469 #define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO (1ULL << 21)
470 /**< Support symmetric cpu-crypto processing */
471 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS (1ULL << 22)
472 /**< Support symmetric session-less operations */
473 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23)
474 /**< Support operations on data which is not byte aligned */
475 #define RTE_CRYPTODEV_FF_SYM_RAW_DP (1ULL << 24)
476 /**< Support accelerator specific symmetric raw data-path APIs */
477 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS (1ULL << 25)
478 /**< Support operations on multiple data-units message */
479 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY (1ULL << 26)
480 /**< Support wrapped key in cipher xform */
481 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM (1ULL << 27)
482 /**< Support inner checksum computation/verification */
483
484 /**
485 * Get the name of a crypto device feature flag
486 *
487 * @param flag The mask describing the flag.
488 *
489 * @return
490 * The name of this flag, or NULL if it's not a valid feature flag.
491 */
492
493 extern const char *
494 rte_cryptodev_get_feature_name(uint64_t flag);
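
/*
 * Illustrative sketch (not part of the API): print the names of all feature
 * flags advertised by a device. rte_cryptodev_info_get() is declared further
 * down in this file; dev_id and the use of printf() are assumptions of this
 * example.
 *
 *	struct rte_cryptodev_info info;
 *	unsigned int i;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (i = 0; i < 64; i++) {
 *		uint64_t flag = UINT64_C(1) << i;
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((info.feature_flags & flag) && name != NULL)
 *			printf("%s\n", name);
 *	}
 */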
495
496 /** Crypto device information */
497 struct rte_cryptodev_info {
498 const char *driver_name; /**< Driver name. */
499 uint8_t driver_id; /**< Driver identifier */
500 struct rte_device *device; /**< Generic device information. */
501
502 uint64_t feature_flags;
/**< Feature flags which expose HW/SW features of the given device */
504
505 const struct rte_cryptodev_capabilities *capabilities;
/**< Array of the device's supported capabilities */
507
508 unsigned max_nb_queue_pairs;
/**< Maximum number of queue pairs supported by device. */
510
511 uint16_t min_mbuf_headroom_req;
512 /**< Minimum mbuf headroom required by device */
513
514 uint16_t min_mbuf_tailroom_req;
515 /**< Minimum mbuf tailroom required by device */
516
517 struct {
518 unsigned max_nb_sessions;
519 /**< Maximum number of sessions supported by device.
 * If 0, the device does not limit the
 * number of sessions that can be used.
522 */
523 } sym;
524 };
525
526 #define RTE_CRYPTODEV_DETACHED (0)
527 #define RTE_CRYPTODEV_ATTACHED (1)
528
529 /** Definitions of Crypto device event types */
530 enum rte_cryptodev_event_type {
531 RTE_CRYPTODEV_EVENT_UNKNOWN, /**< unknown event type */
532 RTE_CRYPTODEV_EVENT_ERROR, /**< error interrupt event */
533 RTE_CRYPTODEV_EVENT_MAX /**< max value of this enum */
534 };
535
536 /** Crypto device queue pair configuration structure. */
537 struct rte_cryptodev_qp_conf {
538 uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
struct rte_mempool *mp_session;
/**< The mempool for creating sessions in sessionless mode */
struct rte_mempool *mp_session_private;
/**< The mempool for creating session private data in sessionless mode */
543 };
544
545 /**
546 * Function type used for processing crypto ops when enqueue/dequeue burst is
547 * called.
548 *
 * The callback function is called immediately on each enqueue/dequeue burst.
550 *
551 * @param dev_id The identifier of the device.
552 * @param qp_id The index of the queue pair on which ops are
553 * enqueued/dequeued. The value must be in the
554 * range [0, nb_queue_pairs - 1] previously
555 * supplied to *rte_cryptodev_configure*.
556 * @param ops The address of an array of *nb_ops* pointers
557 * to *rte_crypto_op* structures which contain
558 * the crypto operations to be processed.
559 * @param nb_ops The number of operations to process.
560 * @param user_param The arbitrary user parameter passed in by the
561 * application when the callback was originally
562 * registered.
563 * @return The number of ops to be enqueued to the
564 * crypto device.
565 */
566 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
567 struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
568
569 /**
570 * Typedef for application callback function to be registered by application
571 * software for notification of device events
572 *
573 * @param dev_id Crypto device identifier
574 * @param event Crypto device event to register for notification of.
 * @param cb_arg User specified parameter to be passed to the
 * user's callback function.
577 */
578 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
579 enum rte_cryptodev_event_type event, void *cb_arg);
580
581
582 /** Crypto Device statistics */
583 struct rte_cryptodev_stats {
584 uint64_t enqueued_count;
585 /**< Count of all operations enqueued */
586 uint64_t dequeued_count;
587 /**< Count of all operations dequeued */
588
589 uint64_t enqueue_err_count;
590 /**< Total error count on operations enqueued */
591 uint64_t dequeue_err_count;
592 /**< Total error count on operations dequeued */
593 };
594
595 #define RTE_CRYPTODEV_NAME_MAX_LEN (64)
596 /**< Max length of name of crypto PMD */
597
598 /**
599 * Get the device identifier for the named crypto device.
600 *
601 * @param name device name to select the device structure.
602 *
603 * @return
604 * - Returns crypto device identifier on success.
605 * - Return -1 on failure to find named crypto device.
606 */
607 extern int
608 rte_cryptodev_get_dev_id(const char *name);
609
610 /**
611 * Get the crypto device name given a device identifier.
612 *
613 * @param dev_id
614 * The identifier of the device
615 *
616 * @return
617 * - Returns crypto device name.
618 * - Returns NULL if crypto device is not present.
619 */
620 extern const char *
621 rte_cryptodev_name_get(uint8_t dev_id);
622
623 /**
624 * Get the total number of crypto devices that have been successfully
625 * initialised.
626 *
627 * @return
628 * - The total number of usable crypto devices.
629 */
630 extern uint8_t
631 rte_cryptodev_count(void);
632
633 /**
 * Get the number of crypto devices of a given driver type.
 *
 * @param driver_id driver identifier.
 *
 * @return
 * Returns the number of crypto devices.
640 */
641 extern uint8_t
642 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
643
644 /**
645 * Get number and identifiers of attached crypto devices that
646 * use the same crypto driver.
647 *
648 * @param driver_name driver name.
649 * @param devices output devices identifiers.
 * @param nb_devices maximum number of devices.
 *
 * @return
 * Returns the number of attached crypto devices.
654 */
655 uint8_t
656 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
657 uint8_t nb_devices);
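
/*
 * Illustrative sketch (not part of the API): enumerate the devices bound to
 * one driver. The driver name, the array size and the use of printf() are
 * assumptions of this example.
 *
 *	uint8_t ids[8];
 *	uint8_t nb = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_DIM(ids));
 *	uint8_t i;
 *
 *	for (i = 0; i < nb; i++)
 *		printf("%s\n", rte_cryptodev_name_get(ids[i]));
 */
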
/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 * The identifier of the device.
 * @return
 * The NUMA socket id to which the device is connected, or
 * a default of zero if the socket could not be determined.
 * -1 is returned if the dev_id value is out of range.
667 */
668 extern int
669 rte_cryptodev_socket_id(uint8_t dev_id);
670
671 /** Crypto device configuration structure */
672 struct rte_cryptodev_config {
673 int socket_id; /**< Socket to allocate resources on */
674 uint16_t nb_queue_pairs;
675 /**< Number of queue pairs to configure on device */
676 uint64_t ff_disable;
/**< Feature flags to be disabled. Only the following features are
 * allowed to be disabled:
 * - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
 * - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
 * - RTE_CRYPTODEV_FF_SECURITY
 */
683 };
684
685 /**
686 * Configure a device.
687 *
688 * This function must be invoked first before any other function in the
689 * API. This function can also be re-invoked when a device is in the
690 * stopped state.
691 *
692 * @param dev_id The identifier of the device to configure.
693 * @param config The crypto device configuration structure.
694 *
695 * @return
696 * - 0: Success, device configured.
697 * - <0: Error code returned by the driver configuration function.
698 */
699 extern int
700 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
701
702 /**
 * Start a device.
 *
 * The device start step is the last one and consists of setting the configured
 * offload features and starting the transmit and the receive units of the
 * device.
708 * On success, all basic functions exported by the API (link status,
709 * receive/transmit, and so on) can be invoked.
710 *
711 * @param dev_id
712 * The identifier of the device.
713 * @return
714 * - 0: Success, device started.
715 * - <0: Error code of the driver device start function.
716 */
717 extern int
718 rte_cryptodev_start(uint8_t dev_id);
719
720 /**
 * Stop a device. The device can be restarted with a call to
722 * rte_cryptodev_start()
723 *
724 * @param dev_id The identifier of the device.
725 */
726 extern void
727 rte_cryptodev_stop(uint8_t dev_id);
728
729 /**
 * Close a device. The device cannot be restarted!
731 *
732 * @param dev_id The identifier of the device.
733 *
734 * @return
735 * - 0 on successfully closing device
736 * - <0 on failure to close device
737 */
738 extern int
739 rte_cryptodev_close(uint8_t dev_id);
740
741 /**
 * Allocate and set up a queue pair for a device.
 *
 * @param dev_id The identifier of the device.
 * @param queue_pair_id The index of the queue pair to set up. The
747 * value must be in the range [0, nb_queue_pair
748 * - 1] previously supplied to
749 * rte_cryptodev_configure().
750 * @param qp_conf The pointer to the configuration data to be
751 * used for the queue pair.
752 * @param socket_id The *socket_id* argument is the socket
753 * identifier in case of NUMA. The value can be
754 * *SOCKET_ID_ANY* if there is no NUMA constraint
755 * for the DMA memory allocated for the receive
756 * queue pair.
757 *
758 * @return
759 * - 0: Success, queue pair correctly set up.
760 * - <0: Queue pair configuration failed
761 */
762 extern int
763 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
764 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
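
/*
 * Illustrative bring-up sketch (not part of the API): configure a device with
 * one queue pair and start it. The mempools (sess_mp, sess_priv_mp), the
 * descriptor count and the error handling are assumptions of this example.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *		.ff_disable = 0,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,			// created beforehand
 *		.mp_session_private = sess_priv_mp,	// created beforehand
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *			rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *				conf.socket_id) < 0 ||
 *			rte_cryptodev_start(dev_id) < 0)
 *		; // device bring-up failed
 */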
765
766 /**
 * Get the setup status of a queue pair on a specific crypto device.
 *
 * @param dev_id Crypto device identifier.
 * @param queue_pair_id The index of the queue pair to query. The
771 * value must be in the range [0, nb_queue_pair
772 * - 1] previously supplied to
773 * rte_cryptodev_configure().
774 * @return
775 * - 0: qp was not configured
776 * - 1: qp was configured
777 * - -EINVAL: device was not configured
778 */
779 __rte_experimental
780 int
781 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
782
783 /**
784 * Get the number of queue pairs on a specific crypto device
785 *
786 * @param dev_id Crypto device identifier.
787 * @return
788 * - The number of configured queue pairs.
789 */
790 extern uint16_t
791 rte_cryptodev_queue_pair_count(uint8_t dev_id);
792
793
794 /**
795 * Retrieve the general I/O statistics of a device.
796 *
797 * @param dev_id The identifier of the device.
798 * @param stats A pointer to a structure of type
799 * *rte_cryptodev_stats* to be filled with the
800 * values of device counters.
801 * @return
802 * - Zero if successful.
803 * - Non-zero otherwise.
804 */
805 extern int
806 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
807
808 /**
809 * Reset the general I/O statistics of a device.
810 *
811 * @param dev_id The identifier of the device.
812 */
813 extern void
814 rte_cryptodev_stats_reset(uint8_t dev_id);
815
816 /**
817 * Retrieve the contextual information of a device.
818 *
819 * @param dev_id The identifier of the device.
820 * @param dev_info A pointer to a structure of type
821 * *rte_cryptodev_info* to be filled with the
822 * contextual information of the device.
823 *
824 * @note The capabilities field of dev_info is set to point to the first
825 * element of an array of struct rte_cryptodev_capabilities. The element after
 * the last valid element has its op field set to
827 * RTE_CRYPTO_OP_TYPE_UNDEFINED.
828 */
829 extern void
830 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
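
/*
 * Illustrative sketch (not part of the API): walk the capability array
 * returned through rte_cryptodev_info_get(), stopping at the
 * RTE_CRYPTO_OP_TYPE_UNDEFINED terminator noted above. dev_id is an
 * assumption of this example.
 *
 *	struct rte_cryptodev_info info;
 *	const struct rte_cryptodev_capabilities *cap;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (cap = info.capabilities;
 *			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
 *		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
 *				cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD)
 *			; // inspect cap->sym.aead key/digest/aad/iv ranges
 *	}
 */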
831
832
833 /**
834 * Register a callback function for specific device id.
835 *
836 * @param dev_id Device id.
 * @param event Event of interest.
838 * @param cb_fn User supplied callback function to be called.
839 * @param cb_arg Pointer to the parameters for the registered
840 * callback.
841 *
842 * @return
843 * - On success, zero.
844 * - On failure, a negative value.
845 */
846 extern int
847 rte_cryptodev_callback_register(uint8_t dev_id,
848 enum rte_cryptodev_event_type event,
849 rte_cryptodev_cb_fn cb_fn, void *cb_arg);
850
851 /**
852 * Unregister a callback function for specific device id.
853 *
854 * @param dev_id The device identifier.
 * @param event Event of interest.
856 * @param cb_fn User supplied callback function to be called.
857 * @param cb_arg Pointer to the parameters for the registered
858 * callback.
859 *
860 * @return
861 * - On success, zero.
862 * - On failure, a negative value.
863 */
864 extern int
865 rte_cryptodev_callback_unregister(uint8_t dev_id,
866 enum rte_cryptodev_event_type event,
867 rte_cryptodev_cb_fn cb_fn, void *cb_arg);
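
/*
 * Illustrative sketch (not part of the API): register a handler for error
 * interrupt events on a device. The handler body and the use of printf()
 * are assumptions of this example.
 *
 *	static void
 *	example_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *			printf("error event on crypto device %u\n", dev_id);
 *	}
 *
 *	...
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			example_event_cb, NULL);
 */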
868
869 struct rte_cryptodev_callback;
870
871 /** Structure to keep track of registered callbacks */
872 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
873
874 /**
875 * Structure used to hold information about the callbacks to be called for a
876 * queue pair on enqueue/dequeue.
877 */
878 struct rte_cryptodev_cb {
879 struct rte_cryptodev_cb *next;
880 /**< Pointer to next callback */
881 rte_cryptodev_callback_fn fn;
882 /**< Pointer to callback function */
883 void *arg;
884 /**< Pointer to argument */
885 };
886
887 /**
888 * @internal
889 * Structure used to hold information about the RCU for a queue pair.
890 */
891 struct rte_cryptodev_cb_rcu {
892 struct rte_cryptodev_cb *next;
893 /**< Pointer to next callback */
894 struct rte_rcu_qsbr *qsbr;
895 /**< RCU QSBR variable per queue pair */
896 };
897
/**
 * Get the security context for a crypto device.
 *
 * @param dev_id The device identifier.
 *
 * @return
 * - NULL on error.
 * - Pointer to the security context on success.
 */
void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id);
900
901 /** Cryptodev symmetric crypto session
902 * Each session is derived from a fixed xform chain. Therefore each session
903 * has a fixed algo, key, op-type, digest_len etc.
904 */
905 struct rte_cryptodev_sym_session {
906 uint64_t opaque_data;
907 /**< Can be used for external metadata */
908 uint16_t nb_drivers;
909 /**< number of elements in sess_data array */
910 uint16_t user_data_sz;
911 /**< session user data will be placed after sess_data */
912 __extension__ struct {
913 void *data;
914 uint16_t refcnt;
915 } sess_data[0];
916 /**< Driver specific session material, variable size */
917 };
918
919 /**
920 * Create a symmetric session mempool.
921 *
922 * @param name
923 * The unique mempool name.
924 * @param nb_elts
925 * The number of elements in the mempool.
926 * @param elt_size
927 * The size of the element. This value will be ignored if it is smaller than
 * the minimum session header size required for the system. For users who
 * want to use the same mempool for symmetric sessions and session private data,
 * it can be the maximum of all existing devices' private data and session
931 * header sizes.
932 * @param cache_size
933 * The number of per-lcore cache elements
934 * @param priv_size
935 * The private data size of each session.
936 * @param socket_id
937 * The *socket_id* argument is the socket identifier in the case of
938 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
939 * constraint for the reserved zone.
940 *
941 * @return
 * - On success returns the created session mempool pointer
 * - On failure returns NULL
944 */
945 __rte_experimental
946 struct rte_mempool *
947 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
948 uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
949 int socket_id);
950
951 /**
952 * Create an asymmetric session mempool.
953 *
954 * @param name
955 * The unique mempool name.
956 * @param nb_elts
957 * The number of elements in the mempool.
958 * @param cache_size
959 * The number of per-lcore cache elements
960 * @param user_data_size
961 * The size of user data to be placed after session private data.
962 * @param socket_id
963 * The *socket_id* argument is the socket identifier in the case of
964 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
965 * constraint for the reserved zone.
966 *
967 * @return
968 * - On success return mempool
969 * - On failure returns NULL
970 */
971 __rte_experimental
972 struct rte_mempool *
973 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
974 uint32_t cache_size, uint16_t user_data_size, int socket_id);
975
976 /**
977 * Create symmetric crypto session header (generic with no private data)
978 *
979 * @param mempool Symmetric session mempool to allocate session
980 * objects from
981 * @return
982 * - On success return pointer to sym-session
983 * - On failure returns NULL
984 */
985 struct rte_cryptodev_sym_session *
986 rte_cryptodev_sym_session_create(struct rte_mempool *mempool);
987
988 /**
989 * Create and initialise an asymmetric crypto session structure.
990 * Calls the PMD to configure the private session data.
991 *
992 * @param dev_id ID of device that we want the session to be used on
993 * @param xforms Asymmetric crypto transform operations to apply on flow
994 * processed with this session
995 * @param mp mempool to allocate asymmetric session
996 * objects from
997 * @param session void ** for session to be used
998 *
999 * @return
1000 * - 0 on success.
1001 * - -EINVAL on invalid arguments.
1002 * - -ENOMEM on memory error for session allocation.
1003 * - -ENOTSUP if device doesn't support session configuration.
1004 */
1005 __rte_experimental
1006 int
1007 rte_cryptodev_asym_session_create(uint8_t dev_id,
1008 struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1009 void **session);
1010
1011 /**
1012 * Frees symmetric crypto session header, after checking that all
1013 * the device private data has been freed, returning it
1014 * to its original mempool.
1015 *
1016 * @param sess Session header to be freed.
1017 *
1018 * @return
1019 * - 0 if successful.
1020 * - -EINVAL if session is NULL.
1021 * - -EBUSY if not all device private data has been freed.
1022 */
1023 int
1024 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);
1025
1026 /**
1027 * Clears and frees asymmetric crypto session header and private data,
1028 * returning it to its original mempool.
1029 *
1030 * @param dev_id ID of device that uses the asymmetric session.
1031 * @param sess Session header to be freed.
1032 *
1033 * @return
1034 * - 0 if successful.
1035 * - -EINVAL if device is invalid or session is NULL.
1036 */
1037 __rte_experimental
1038 int
1039 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1040
1041 /**
1042 * Fill out private data for the device id, based on its device type.
1043 *
1044 * @param dev_id ID of device that we want the session to be used on
1045 * @param sess Session where the private data will be attached to
1046 * @param xforms Symmetric crypto transform operations to apply on flow
1047 * processed with this session
1048 * @param mempool Mempool where the private data is allocated.
1049 *
1050 * @return
1051 * - On success, zero.
1052 * - -EINVAL if input parameters are invalid.
1053 * - -ENOTSUP if crypto device does not support the crypto transform or
1054 * does not support symmetric operations.
1055 * - -ENOMEM if the private session could not be allocated.
1056 */
1057 int
1058 rte_cryptodev_sym_session_init(uint8_t dev_id,
1059 struct rte_cryptodev_sym_session *sess,
1060 struct rte_crypto_sym_xform *xforms,
1061 struct rte_mempool *mempool);
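
/*
 * Illustrative sketch (not part of the API): allocate a session header from a
 * session mempool and initialise it for AES-CBC encryption on one device. The
 * mempools (sess_mp, sess_priv_mp), the key buffer and iv_offset are
 * assumptions of this example.
 *
 *	static const uint8_t key[16] = { 0 };	// placeholder key material
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = sizeof(key) },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	if (sess == NULL ||
 *			rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *				sess_priv_mp) < 0)
 *		; // session setup failed
 *	...
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */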
1062
1063 /**
1064 * Frees private data for the device id, based on its device type,
1065 * returning it to its mempool. It is the application's responsibility
1066 * to ensure that private session data is not cleared while there are
1067 * still in-flight operations using it.
1068 *
1069 * @param dev_id ID of device that uses the session.
1070 * @param sess Session containing the reference to the private data
1071 *
1072 * @return
1073 * - 0 if successful.
1074 * - -EINVAL if device is invalid or session is NULL.
1075 * - -ENOTSUP if crypto device does not support symmetric operations.
1076 */
1077 int
1078 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1079 struct rte_cryptodev_sym_session *sess);
1080
1081 /**
1082 * Get the size of the header session, for all registered drivers excluding
1083 * the user data size.
1084 *
1085 * @return
1086 * Size of the symmetric header session.
1087 */
1088 unsigned int
1089 rte_cryptodev_sym_get_header_session_size(void);
1090
1091 /**
1092 * Get the size of the header session from created session.
1093 *
1094 * @param sess
1095 * The sym cryptodev session pointer
1096 *
1097 * @return
1098 * - If sess is not NULL, return the size of the header session including
1099 * the private data size defined within sess.
1100 * - If sess is NULL, return 0.
1101 */
1102 __rte_experimental
1103 unsigned int
1104 rte_cryptodev_sym_get_existing_header_session_size(
1105 struct rte_cryptodev_sym_session *sess);
1106
1107 /**
1108 * Get the size of the asymmetric session header.
1109 *
1110 * @return
1111 * Size of the asymmetric header session.
1112 */
1113 __rte_experimental
1114 unsigned int
1115 rte_cryptodev_asym_get_header_session_size(void);
1116
1117 /**
1118 * Get the size of the private symmetric session data
1119 * for a device.
1120 *
1121 * @param dev_id The device identifier.
1122 *
1123 * @return
1124 * - Size of the private data, if successful
1125 * - 0 if device is invalid or does not have private
1126 * symmetric session
1127 */
1128 unsigned int
1129 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1130
1131 /**
1132 * Get the size of the private data for asymmetric session
1133 * on device
1134 *
1135 * @param dev_id The device identifier.
1136 *
1137 * @return
1138 * - Size of the asymmetric private data, if successful
1139 * - 0 if device is invalid or does not have private session
1140 */
1141 __rte_experimental
1142 unsigned int
1143 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1144
1145 /**
 * Validate whether the crypto device index refers to a valid, attached crypto device.
1147 *
1148 * @param dev_id Crypto device index.
1149 *
1150 * @return
 * - 1 if the device index is valid, 0 otherwise.
1152 */
1153 unsigned int
1154 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1155
1156 /**
1157 * Provide driver identifier.
1158 *
1159 * @param name
1160 * The pointer to a driver name.
1161 * @return
 * The driver type identifier or -1 if no driver is found
1163 */
1164 int rte_cryptodev_driver_id_get(const char *name);
1165
1166 /**
1167 * Provide driver name.
1168 *
1169 * @param driver_id
1170 * The driver identifier.
1171 * @return
 * The driver name or NULL if no driver is found
1173 */
1174 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1175
1176 /**
1177 * Store user data in a session.
1178 *
1179 * @param sess Session pointer allocated by
1180 * *rte_cryptodev_sym_session_create*.
1181 * @param data Pointer to the user data.
1182 * @param size Size of the user data.
1183 *
1184 * @return
1185 * - On success, zero.
1186 * - On failure, a negative value.
1187 */
1188 __rte_experimental
1189 int
1190 rte_cryptodev_sym_session_set_user_data(
1191 struct rte_cryptodev_sym_session *sess,
1192 void *data,
1193 uint16_t size);
1194
1195 /**
1196 * Get user data stored in a session.
1197 *
1198 * @param sess Session pointer allocated by
1199 * *rte_cryptodev_sym_session_create*.
1200 *
1201 * @return
1202 * - On success return pointer to user data.
1203 * - On failure returns NULL.
1204 */
1205 __rte_experimental
1206 void *
1207 rte_cryptodev_sym_session_get_user_data(
1208 struct rte_cryptodev_sym_session *sess);
1209
1210 /**
1211 * Store user data in an asymmetric session.
1212 *
1213 * @param sess Session pointer allocated by
1214 * *rte_cryptodev_asym_session_create*.
1215 * @param data Pointer to the user data.
1216 * @param size Size of the user data.
1217 *
1218 * @return
1219 * - On success, zero.
1220 * - -EINVAL if the session pointer is invalid.
1221 * - -ENOMEM if the available user data size is smaller than the size parameter.
1222 */
1223 __rte_experimental
1224 int
1225 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1226
1227 /**
1228 * Get user data stored in an asymmetric session.
1229 *
1230 * @param sess Session pointer allocated by
1231 * *rte_cryptodev_asym_session_create*.
1232 *
1233 * @return
1234 * - On success return pointer to user data.
1235 * - On failure returns NULL.
1236 */
1237 __rte_experimental
1238 void *
1239 rte_cryptodev_asym_session_get_user_data(void *sess);
1240
1241 /**
1242 * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1243 * on user provided data.
1244 *
1245 * @param dev_id The device identifier.
1246 * @param sess Cryptodev session structure
1247 * @param ofs Start and stop offsets for auth and cipher operations
1248 * @param vec Vectorized operation descriptor
1249 *
1250 * @return
1251 * - Returns number of successfully processed packets.
1252 */
1253 __rte_experimental
1254 uint32_t
1255 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1256 struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1257 struct rte_crypto_sym_vec *vec);
1258
1259 /**
1260 * Get the size of the raw data-path context buffer.
1261 *
1262 * @param dev_id The device identifier.
1263 *
1264 * @return
1265 * - If the device supports raw data-path APIs, return the context size.
1266 * - If the device does not support the APIs, return -1.
1267 */
1268 __rte_experimental
1269 int
1270 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1271
1272 /**
1273 * Union of different crypto session types, including session-less xform
1274 * pointer.
1275 */
1276 union rte_cryptodev_session_ctx {
1277 struct rte_cryptodev_sym_session *crypto_sess;
1278 struct rte_crypto_sym_xform *xform;
1279 struct rte_security_session *sec_sess;
1280 };
1281
1282 /**
1283 * Enqueue a vectorized operation descriptor into the device queue but the
1284 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1285 * is called.
1286 *
1287 * @param qp Driver specific queue pair data.
1288 * @param drv_ctx Driver specific context data.
1289 * @param vec Vectorized operation descriptor.
1290 * @param ofs Start and stop offsets for auth and cipher
1291 * operations.
1292 * @param user_data The array of user data for dequeue later.
1293 * @param enqueue_status Driver written value to specify the
1294 * enqueue status. Possible values:
1295 * - 1: The number of operations returned are
1296 * enqueued successfully.
1297 * - 0: The number of operations returned are
1298 * cached into the queue but are not processed
1299 * until rte_cryptodev_raw_enqueue_done() is
1300 * called.
1301 * - negative integer: Error occurred.
1302 * @return
1303 * - The number of operations in the descriptor successfully enqueued or
 * cached into the queue but not enqueued yet, depending on the
1305 * "enqueue_status" value.
1306 */
1307 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1308 void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1309 union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1310
1311 /**
1312 * Enqueue single raw data vector into the device queue but the driver may or
1313 * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1314 *
1315 * @param qp Driver specific queue pair data.
1316 * @param drv_ctx Driver specific context data.
1317 * @param data_vec The buffer data vector.
1318 * @param n_data_vecs Number of buffer data vectors.
1319 * @param ofs Start and stop offsets for auth and cipher
1320 * operations.
1321 * @param iv IV virtual and IOVA addresses
1322 * @param digest digest virtual and IOVA addresses
1323 * @param aad_or_auth_iv AAD or auth IV virtual and IOVA addresses,
1324 * depends on the algorithm used.
1325 * @param user_data The user data.
1326 * @return
1327 * - 1: The data vector is enqueued successfully.
1328 * - 0: The data vector is cached into the queue but is not processed
1329 * until rte_cryptodev_raw_enqueue_done() is called.
1330 * - negative integer: failure.
1331 */
1332 typedef int (*cryptodev_sym_raw_enqueue_t)(
1333 void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1334 uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1335 struct rte_crypto_va_iova_ptr *iv,
1336 struct rte_crypto_va_iova_ptr *digest,
1337 struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1338 void *user_data);
1339
1340 /**
1341 * Inform the cryptodev queue pair to start processing or finish dequeuing all
1342 * enqueued/dequeued operations.
1343 *
1344 * @param qp Driver specific queue pair data.
1345 * @param drv_ctx Driver specific context data.
1346 * @param n The total number of processed operations.
1347 * @return
1348 * - On success return 0.
1349 * - On failure return negative integer.
1350 */
1351 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1352 uint32_t n);
1353
1354 /**
 * Typedef of a user-provided function for the driver to obtain the dequeue count.
1356 * The function may return a fixed number or the number parsed from the user
1357 * data stored in the first processed operation.
1358 *
1359 * @param user_data Dequeued user data.
1360 * @return
1361 * - The number of operations to be dequeued.
1362 **/
1363 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1364
1365 /**
 * Typedef of a user-provided function to post-process a dequeued operation,
 * for example to fill in its status.
1368 *
1369 * @param user_data Dequeued user data.
1370 * @param index Index number of the processed descriptor.
1371 * @param is_op_success Operation status provided by the driver.
1372 **/
1373 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1374 uint32_t index, uint8_t is_op_success);
1375
1376 /**
1377 * Dequeue a burst of symmetric crypto processing.
1378 *
1379 * @param qp Driver specific queue pair data.
1380 * @param drv_ctx Driver specific context data.
1381 * @param get_dequeue_count User provided callback function to
1382 * obtain dequeue operation count.
1383 * @param max_nb_to_dequeue When get_dequeue_count is NULL this
1384 * value is used to pass the maximum
1385 * number of operations to be dequeued.
1386 * @param post_dequeue User provided callback function to
1387 * post-process a dequeued operation.
 * @param out_user_data User data pointer array to be retrieved
 * from the device queue. If
 * *is_user_data_array* is set, there
 * must be enough room to store all
 * user data.
1393 * @param is_user_data_array Set 1 if every dequeued user data will
1394 * be written into out_user_data array.
1395 * Set 0 if only the first user data will
1396 * be written into out_user_data array.
 * @param n_success Driver written value to specify the
1398 * total successful operations count.
1399 * @param dequeue_status Driver written value to specify the
1400 * dequeue status. Possible values:
1401 * - 1: Successfully dequeued the number
1402 * of operations returned. The user
1403 * data previously set during enqueue
1404 * is stored in the "out_user_data".
1405 * - 0: The number of operations returned
1406 * are completed and the user data is
1407 * stored in the "out_user_data", but
1408 * they are not freed from the queue
1409 * until
1410 * rte_cryptodev_raw_dequeue_done()
1411 * is called.
1412 * - negative integer: Error occurred.
1413 * @return
1414 * - The number of operations dequeued or completed but not freed from the
 * queue, depending on the "dequeue_status" value.
1416 */
1417 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1418 uint8_t *drv_ctx,
1419 rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1420 uint32_t max_nb_to_dequeue,
1421 rte_cryptodev_raw_post_dequeue_t post_dequeue,
1422 void **out_user_data, uint8_t is_user_data_array,
1423 uint32_t *n_success, int *dequeue_status);
1424
1425 /**
1426 * Dequeue a symmetric crypto processing.
1427 *
1428 * @param qp Driver specific queue pair data.
1429 * @param drv_ctx Driver specific context data.
1430 * @param dequeue_status Driver written value to specify the
1431 * dequeue status. Possible values:
 * - 1: Successfully dequeued an operation.
1433 * The user data is returned.
1434 * - 0: The first operation in the queue
1435 * is completed and the user data
1436 * previously set during enqueue is
1437 * returned, but it is not freed from
1438 * the queue until
1439 * rte_cryptodev_raw_dequeue_done() is
1440 * called.
1441 * - negative integer: Error occurred.
1442 * @param op_status Driver written value to specify
1443 * operation status.
1444 * @return
1445 * - The user data pointer retrieved from device queue or NULL if no
1446 * operation is ready for dequeue.
1447 */
1448 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1449 void *qp, uint8_t *drv_ctx, int *dequeue_status,
1450 enum rte_crypto_op_status *op_status);
1451
1452 /**
 * Context data for the raw data-path API crypto processing. The buffer for this
 * structure is to be allocated by the user application with a size equal to
 * or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
1456 */
1457 struct rte_crypto_raw_dp_ctx {
1458 void *qp_data;
1459
1460 cryptodev_sym_raw_enqueue_t enqueue;
1461 cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1462 cryptodev_sym_raw_operation_done_t enqueue_done;
1463 cryptodev_sym_raw_dequeue_t dequeue;
1464 cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1465 cryptodev_sym_raw_operation_done_t dequeue_done;
1466
1467 /* Driver specific context data */
1468 __extension__ uint8_t drv_ctx_data[];
1469 };
1470
1471 /**
1472 * Configure raw data-path context data.
1473 *
1474 * NOTE:
1475 * After the context data is configured, the user should call
1476 * rte_cryptodev_raw_attach_session() before using it in
1477 * rte_cryptodev_raw_enqueue/dequeue function call.
1478 *
1479 * @param dev_id The device identifier.
1480 * @param qp_id The index of the queue pair from which to
1481 * retrieve processed packets. The value must be
1482 * in the range [0, nb_queue_pair - 1] previously
1483 * supplied to rte_cryptodev_configure().
1484 * @param ctx The raw data-path context data.
1485 * @param sess_type session type.
1486 * @param session_ctx Session context data.
1487 * @param is_update Set 0 if it is to initialize the ctx.
1488 * Set 1 if ctx is initialized and only to update
1489 * session context data.
1490 * @return
1491 * - On success return 0.
1492 * - On failure return negative integer.
1493 */
1494 __rte_experimental
1495 int
1496 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1497 struct rte_crypto_raw_dp_ctx *ctx,
1498 enum rte_crypto_op_sess_type sess_type,
1499 union rte_cryptodev_session_ctx session_ctx,
1500 uint8_t is_update);
1501
1502 /**
1503 * Enqueue a vectorized operation descriptor into the device queue but the
1504 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1505 * is called.
1506 *
1507 * @param ctx The initialized raw data-path context data.
1508 * @param vec Vectorized operation descriptor.
1509 * @param ofs Start and stop offsets for auth and cipher
1510 * operations.
1511 * @param user_data The array of user data for dequeue later.
1512 * @param enqueue_status Driver written value to specify the
1513 * enqueue status. Possible values:
1514 * - 1: The number of operations returned are
1515 * enqueued successfully.
1516 * - 0: The number of operations returned are
1517 * cached into the queue but are not processed
1518 * until rte_cryptodev_raw_enqueue_done() is
1519 * called.
1520 * - negative integer: Error occurred.
1521 * @return
1522 * - The number of operations in the descriptor successfully enqueued or
 * cached into the queue but not enqueued yet, depending on the
1524 * "enqueue_status" value.
1525 */
1526 __rte_experimental
1527 uint32_t
1528 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1529 struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1530 void **user_data, int *enqueue_status);
1531
1532 /**
1533 * Enqueue single raw data vector into the device queue but the driver may or
1534 * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1535 *
1536 * @param ctx The initialized raw data-path context data.
1537 * @param data_vec The buffer data vector.
1538 * @param n_data_vecs Number of buffer data vectors.
1539 * @param ofs Start and stop offsets for auth and cipher
1540 * operations.
1541 * @param iv IV virtual and IOVA addresses
1542 * @param digest digest virtual and IOVA addresses
1543 * @param aad_or_auth_iv AAD or auth IV virtual and IOVA addresses,
1544 * depends on the algorithm used.
1545 * @param user_data The user data.
1546 * @return
1547 * - 1: The data vector is enqueued successfully.
1548 * - 0: The data vector is cached into the queue but is not processed
1549 * until rte_cryptodev_raw_enqueue_done() is called.
1550 * - negative integer: failure.
1551 */
1552 __rte_experimental
1553 static __rte_always_inline int
rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1555 struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1556 union rte_crypto_sym_ofs ofs,
1557 struct rte_crypto_va_iova_ptr *iv,
1558 struct rte_crypto_va_iova_ptr *digest,
1559 struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1560 void *user_data)
1561 {
1562 return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1563 n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1564 }
1565
1566 /**
 * Start processing all enqueued operations from the last
1568 * rte_cryptodev_configure_raw_dp_ctx() call.
1569 *
1570 * @param ctx The initialized raw data-path context data.
1571 * @param n The number of operations cached.
1572 * @return
1573 * - On success return 0.
1574 * - On failure return negative integer.
1575 */
1576 __rte_experimental
1577 int
1578 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1579 uint32_t n);
1580
1581 /**
1582 * Dequeue a burst of symmetric crypto processing.
1583 *
1584 * @param ctx The initialized raw data-path context
1585 * data.
1586 * @param get_dequeue_count User provided callback function to
1587 * obtain dequeue operation count.
1588 * @param max_nb_to_dequeue When get_dequeue_count is NULL this
1589 * value is used to pass the maximum
1590 * number of operations to be dequeued.
1591 * @param post_dequeue User provided callback function to
1592 * post-process a dequeued operation.
 * @param out_user_data User data pointer array to be retrieved
 * from the device queue. If
 * *is_user_data_array* is set, there
 * must be enough room to store all
 * user data.
1598 * @param is_user_data_array Set 1 if every dequeued user data will
1599 * be written into out_user_data array.
1600 * Set 0 if only the first user data will
1601 * be written into out_user_data array.
 * @param n_success Driver written value to specify the
1603 * total successful operations count.
1604 * @param dequeue_status Driver written value to specify the
1605 * dequeue status. Possible values:
1606 * - 1: Successfully dequeued the number
1607 * of operations returned. The user
1608 * data previously set during enqueue
1609 * is stored in the "out_user_data".
1610 * - 0: The number of operations returned
1611 * are completed and the user data is
1612 * stored in the "out_user_data", but
1613 * they are not freed from the queue
1614 * until
1615 * rte_cryptodev_raw_dequeue_done()
1616 * is called.
1617 * - negative integer: Error occurred.
1618 * @return
1619 * - The number of operations dequeued or completed but not freed from the
 * queue, depending on the "dequeue_status" value.
1621 */
1622 __rte_experimental
1623 uint32_t
1624 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1625 rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1626 uint32_t max_nb_to_dequeue,
1627 rte_cryptodev_raw_post_dequeue_t post_dequeue,
1628 void **out_user_data, uint8_t is_user_data_array,
1629 uint32_t *n_success, int *dequeue_status);
1630
1631 /**
1632 * Dequeue a symmetric crypto processing.
1633 *
1634 * @param ctx The initialized raw data-path context
1635 * data.
1636 * @param dequeue_status Driver written value to specify the
1637 * dequeue status. Possible values:
 * - 1: Successfully dequeued an operation.
1639 * The user data is returned.
1640 * - 0: The first operation in the queue
1641 * is completed and the user data
1642 * previously set during enqueue is
1643 * returned, but it is not freed from
1644 * the queue until
1645 * rte_cryptodev_raw_dequeue_done() is
1646 * called.
1647 * - negative integer: Error occurred.
1648 * @param op_status Driver written value to specify
1649 * operation status.
1650 * @return
1651 * - The user data pointer retrieved from device queue or NULL if no
1652 * operation is ready for dequeue.
1653 */
1654 __rte_experimental
1655 static __rte_always_inline void *
rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1657 int *dequeue_status, enum rte_crypto_op_status *op_status)
1658 {
1659 return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1660 op_status);
1661 }
1662
1663 /**
1664 * Inform the queue pair that dequeue operations are finished.
1665 *
1666 * @param ctx The initialized raw data-path context data.
1667 * @param n The number of operations.
1668 * @return
1669 * - On success return 0.
1670 * - On failure return negative integer.
1671 */
1672 __rte_experimental
1673 int
1674 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1675 uint32_t n);
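/*
 * Usage sketch (illustrative only): poll for one result with
 * rte_cryptodev_raw_dequeue() and, when the driver reports dequeue_status 0
 * (completed but still held in the queue), release it with
 * rte_cryptodev_raw_dequeue_done(). "ctx" is an already configured raw
 * data-path context and "handle_result" is a hypothetical application
 * function.
 *
 *	int deq_status;
 *	enum rte_crypto_op_status op_status;
 *	void *user_data;
 *
 *	user_data = rte_cryptodev_raw_dequeue(ctx, &deq_status, &op_status);
 *	if (user_data != NULL) {
 *		handle_result(user_data,
 *			op_status == RTE_CRYPTO_OP_STATUS_SUCCESS);
 *		if (deq_status == 0)
 *			rte_cryptodev_raw_dequeue_done(ctx, 1);
 *	}
 */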
1676
1677 /**
1678 * Add a user callback for a given crypto device and queue pair which will be
1679 * called on crypto ops enqueue.
1680 *
1681 * This API configures a function to be called for each burst of crypto ops
1682 * received on a given crypto device queue pair. The return value is a pointer
1683 * that can be used later to remove the callback using
1684 * rte_cryptodev_remove_enq_callback().
1685 *
1686 * Callbacks registered by the application do not survive
1687 * rte_cryptodev_configure(), as it reinitializes the callback list.
1688 * It is the user's responsibility to remove all installed callbacks before
1689 * calling rte_cryptodev_configure() to avoid possible memory leakage.
1690 * The application is expected to call the add API after rte_cryptodev_configure().
1691 *
1692 * Multiple functions can be registered per queue pair and they are called
1693 * in the order they were added. The API does not restrict the maximum number
1694 * of callbacks.
1695 *
1696 * @param dev_id The identifier of the device.
1697 * @param qp_id The index of the queue pair on which ops are
1698 * to be enqueued for processing. The value
1699 * must be in the range [0, nb_queue_pairs - 1]
1700 * previously supplied to
1701 * *rte_cryptodev_configure*.
1702 * @param cb_fn The callback function
1703 * @param cb_arg A generic pointer parameter which will be passed
1704 * to each invocation of the callback function on
1705 * this crypto device and queue pair.
1706 *
1707 * @return
1708 * - NULL on error; rte_errno will contain the error code.
1709 * - On success, a pointer value which can later be used to remove the
1710 * callback.
1711 */
1712
1713 __rte_experimental
1714 struct rte_cryptodev_cb *
1715 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1716 uint16_t qp_id,
1717 rte_cryptodev_callback_fn cb_fn,
1718 void *cb_arg);
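/*
 * Usage sketch (illustrative only): register an enqueue callback that counts
 * submitted operations, and remove it again before the device is
 * reconfigured. The callback is assumed to match the
 * rte_cryptodev_callback_fn type declared earlier in this header;
 * "count_enqueued" and "enq_counter" are hypothetical names.
 *
 *	static uint64_t enq_counter;
 *
 *	static uint16_t
 *	count_enqueued(uint16_t dev_id, uint16_t qp_id,
 *			struct rte_crypto_op **ops, uint16_t nb_ops,
 *			void *user_param)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*(uint64_t *)user_param += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb;
 *
 *	cb = rte_cryptodev_add_enq_callback(dev_id, qp_id,
 *			count_enqueued, &enq_counter);
 *	if (cb == NULL)
 *		rte_panic("cannot add enqueue callback: %d\n", rte_errno);
 *
 *	rte_cryptodev_remove_enq_callback(dev_id, qp_id, cb);
 */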
1719
1720 /**
1721 * Remove a user callback function for given crypto device and queue pair.
1722 *
1723 * This function is used to remove enqueue callbacks that were added to a
1724 * crypto device queue pair using rte_cryptodev_add_enq_callback().
1725 *
1728 * @param dev_id The identifier of the device.
1729 * @param qp_id The index of the queue pair on which ops are
1730 * to be enqueued. The value must be in the
1731 * range [0, nb_queue_pairs - 1] previously
1732 * supplied to *rte_cryptodev_configure*.
1733 * @param cb Pointer to user supplied callback created via
1734 * rte_cryptodev_add_enq_callback().
1735 *
1736 * @return
1737 * - 0: Success. Callback was removed.
1738 * - <0: The dev_id or the qp_id is out of range, or the callback
1739 * is NULL or not found for the crypto device queue pair.
1740 */
1741
1742 __rte_experimental
1743 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1744 uint16_t qp_id,
1745 struct rte_cryptodev_cb *cb);
1746
1747 /**
1748 * Add a user callback for a given crypto device and queue pair which will be
1749 * called on crypto ops dequeue.
1750 *
1751 * This API configures a function to be called for each burst of crypto ops
1752 * received on a given crypto device queue pair. The return value is a pointer
1753 * that can be used later to remove the callback using
1754 * rte_cryptodev_remove_deq_callback().
1755 *
1756 * Callbacks registered by the application do not survive
1757 * rte_cryptodev_configure(), as it reinitializes the callback list.
1758 * It is the user's responsibility to remove all installed callbacks before
1759 * calling rte_cryptodev_configure() to avoid possible memory leakage.
1760 * The application is expected to call the add API after rte_cryptodev_configure().
1761 *
1762 * Multiple functions can be registered per queue pair and they are called
1763 * in the order they were added. The API does not restrict the maximum number
1764 * of callbacks.
1765 *
1766 * @param dev_id The identifier of the device.
1767 * @param qp_id The index of the queue pair on which ops are
1768 * to be dequeued. The value must be in the
1769 * range [0, nb_queue_pairs - 1] previously
1770 * supplied to *rte_cryptodev_configure*.
1771 * @param cb_fn The callback function
1772 * @param cb_arg A generic pointer parameter which will be passed
1773 * to each invocation of the callback function on
1774 * this crypto device and queue pair.
1775 *
1776 * @return
1777 * - NULL on error; rte_errno will contain the error code.
1778 * - On success, a pointer value which can later be used to remove the
1779 * callback.
1780 */
1781
1782 __rte_experimental
1783 struct rte_cryptodev_cb *
1784 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1785 uint16_t qp_id,
1786 rte_cryptodev_callback_fn cb_fn,
1787 void *cb_arg);
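/*
 * Usage sketch (illustrative only): register a dequeue callback that counts
 * how many of the dequeued operations completed successfully. The callback
 * is assumed to match the rte_cryptodev_callback_fn type declared earlier
 * in this header; "count_success" and "ok_counter" are hypothetical names.
 *
 *	static uint64_t ok_counter;
 *
 *	static uint16_t
 *	count_success(uint16_t dev_id, uint16_t qp_id,
 *			struct rte_crypto_op **ops, uint16_t nb_ops,
 *			void *user_param)
 *	{
 *		uint64_t *ok = user_param;
 *		uint16_t i;
 *
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		for (i = 0; i < nb_ops; i++)
 *			if (ops[i]->status == RTE_CRYPTO_OP_STATUS_SUCCESS)
 *				(*ok)++;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb;
 *
 *	cb = rte_cryptodev_add_deq_callback(dev_id, qp_id,
 *			count_success, &ok_counter);
 */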
1788
1789 /**
1790 * Remove a user callback function for given crypto device and queue pair.
1791 *
1792 * This function is used to remove dequeue callbacks that were added to a
1793 * crypto device queue pair using rte_cryptodev_add_deq_callback().
1794 *
1797 * @param dev_id The identifier of the device.
1798 * @param qp_id The index of the queue pair on which ops are
1799 * to be dequeued. The value must be in the
1800 * range [0, nb_queue_pairs - 1] previously
1801 * supplied to *rte_cryptodev_configure*.
1802 * @param cb Pointer to user supplied callback created via
1803 * rte_cryptodev_add_deq_callback().
1804 *
1805 * @return
1806 * - 0: Success. Callback was removed.
1807 * - <0: The dev_id or the qp_id is out of range, or the callback
1808 * is NULL or not found for the crypto device queue pair.
1809 */
1810 __rte_experimental
1811 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1812 uint16_t qp_id,
1813 struct rte_cryptodev_cb *cb);
1814
1815 #include <rte_cryptodev_core.h>
1816 /**
1817 *
1818 * Dequeue a burst of processed crypto operations from a queue on the crypto
1819 * device. The dequeued operations are stored in *rte_crypto_op* structures
1820 * whose pointers are supplied in the *ops* array.
1821 *
1822 * The rte_cryptodev_dequeue_burst() function returns the number of ops
1823 * actually dequeued, which is the number of *rte_crypto_op* data structures
1824 * effectively supplied into the *ops* array.
1825 *
1826 * A return value equal to *nb_ops* indicates that the queue contained
1827 * at least *nb_ops* operations, and this is likely to signify that other
1828 * processed operations remain in the device's output queue. Applications
1829 * implementing a "retrieve as many processed operations as possible" policy
1830 * can check this specific case and keep invoking the
1831 * rte_cryptodev_dequeue_burst() function until a value less than
1832 * *nb_ops* is returned.
1833 *
1834 * The rte_cryptodev_dequeue_burst() function does not provide any error
1835 * notification to avoid the corresponding overhead.
1836 *
1837 * @param dev_id The symmetric crypto device identifier
1838 * @param qp_id The index of the queue pair from which to
1839 * retrieve processed packets. The value must be
1840 * in the range [0, nb_queue_pairs - 1] previously
1841 * supplied to rte_cryptodev_configure().
1842 * @param ops The address of an array of pointers to
1843 * *rte_crypto_op* structures that must be
1844 * large enough to store *nb_ops* pointers in it.
1845 * @param nb_ops The maximum number of operations to dequeue.
1846 *
1847 * @return
1848 * - The number of operations actually dequeued, which is the number
1849 * of pointers to *rte_crypto_op* structures effectively supplied to the
1850 * *ops* array.
1851 */
1852 static inline uint16_t
1853 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1854 struct rte_crypto_op **ops, uint16_t nb_ops)
1855 {
1856 const struct rte_crypto_fp_ops *fp_ops;
1857 void *qp;
1858
1859 rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1860
1861 fp_ops = &rte_crypto_fp_ops[dev_id];
1862 qp = fp_ops->qp.data[qp_id];
1863
1864 nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1865
1866 #ifdef RTE_CRYPTO_CALLBACKS
1867 if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1868 struct rte_cryptodev_cb_rcu *list;
1869 struct rte_cryptodev_cb *cb;
1870
1871 /* __ATOMIC_RELEASE memory order was used when the
1872 * callback was inserted into the list.
1873 * Since there is a clear dependency between loading
1874 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1875 * not required.
1876 */
1877 list = &fp_ops->qp.deq_cb[qp_id];
1878 rte_rcu_qsbr_thread_online(list->qsbr, 0);
1879 cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1880
1881 while (cb != NULL) {
1882 nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1883 cb->arg);
1884 cb = cb->next;
1885 }
1886
1887 rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1888 }
1889 #endif
1890 return nb_ops;
1891 }
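/*
 * Usage sketch (illustrative only): the "retrieve as many processed
 * operations as possible" policy described above keeps calling
 * rte_cryptodev_dequeue_burst() until fewer than the requested number of
 * operations is returned. "process_completed_op" is a hypothetical
 * application function.
 *
 *	#define DEQ_BURST_SIZE 32
 *	struct rte_crypto_op *deq_ops[DEQ_BURST_SIZE];
 *	uint16_t nb_deq, i;
 *
 *	do {
 *		nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *				deq_ops, DEQ_BURST_SIZE);
 *		for (i = 0; i < nb_deq; i++)
 *			process_completed_op(deq_ops[i]);
 *	} while (nb_deq == DEQ_BURST_SIZE);
 */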
1892
1893 /**
1894 * Enqueue a burst of operations for processing on a crypto device.
1895 *
1896 * The rte_cryptodev_enqueue_burst() function is invoked to place
1897 * crypto operations on the queue *qp_id* of the device designated by
1898 * its *dev_id*.
1899 *
1900 * The *nb_ops* parameter is the number of operations to process which are
1901 * supplied in the *ops* array of *rte_crypto_op* structures.
1902 *
1903 * The rte_cryptodev_enqueue_burst() function returns the number of
1904 * operations it actually enqueued for processing. A return value equal to
1905 * *nb_ops* means that all packets have been enqueued.
1906 *
1907 * @param dev_id The identifier of the device.
1908 * @param qp_id The index of the queue pair which packets are
1909 * to be enqueued for processing. The value
1910 * must be in the range [0, nb_queue_pairs - 1]
1911 * previously supplied to
1912 * *rte_cryptodev_configure*.
1913 * @param ops The address of an array of *nb_ops* pointers
1914 * to *rte_crypto_op* structures which contain
1915 * the crypto operations to be processed.
1916 * @param nb_ops The number of operations to process.
1917 *
1918 * @return
1919 * The number of operations actually enqueued on the crypto device. The return
1920 * value can be less than the value of the *nb_ops* parameter when the
1921 * crypto device's queue is full or if invalid parameters are specified in
1922 * a *rte_crypto_op*.
1923 */
1924 static inline uint16_t
1925 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1926 struct rte_crypto_op **ops, uint16_t nb_ops)
1927 {
1928 const struct rte_crypto_fp_ops *fp_ops;
1929 void *qp;
1930
1931 fp_ops = &rte_crypto_fp_ops[dev_id];
1932 qp = fp_ops->qp.data[qp_id];
1933 #ifdef RTE_CRYPTO_CALLBACKS
1934 if (unlikely(fp_ops->qp.enq_cb != NULL)) {
1935 struct rte_cryptodev_cb_rcu *list;
1936 struct rte_cryptodev_cb *cb;
1937
1938 /* __ATOMIC_RELEASE memory order was used when the
1939 * callback was inserted into the list.
1940 * Since there is a clear dependency between loading
1941 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1942 * not required.
1943 */
1944 list = &fp_ops->qp.enq_cb[qp_id];
1945 rte_rcu_qsbr_thread_online(list->qsbr, 0);
1946 cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1947
1948 while (cb != NULL) {
1949 nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1950 cb->arg);
1951 cb = cb->next;
1952 }
1953
1954 rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1955 }
1956 #endif
1957
1958 rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1959 return fp_ops->enqueue_burst(qp, ops, nb_ops);
1960 }
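/*
 * Usage sketch (illustrative only): since the return value may be smaller
 * than *nb_ops* when the device queue is full, a caller that must submit
 * every prepared operation can retry from the first operation that was not
 * accepted. "prep_ops" and "nb_prepared" are hypothetical names for an
 * array of prepared operations and its length.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_prepared)
 *		sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *				&prep_ops[sent], nb_prepared - sent);
 */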
1961
1962
1963
1964 #ifdef __cplusplus
1965 }
1966 #endif
1967
1968 #endif /* _RTE_CRYPTODEV_H_ */
1969