/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation.
 */

#ifndef _RTE_CRYPTODEV_H_
#define _RTE_CRYPTODEV_H_

/**
 * @file rte_cryptodev.h
 *
 * RTE Cryptographic Device APIs
 *
 * Defines RTE Crypto Device APIs for the provisioning of cipher and
 * authentication operations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include "rte_kvargs.h"
#include "rte_crypto.h"
#include "rte_dev.h"
#include <rte_common.h>
#include <rte_config.h>
#include <rte_rcu_qsbr.h>

#include "rte_cryptodev_trace_fp.h"

extern const char **rte_cyptodev_names;

/* Logging Macros */

#define CDEV_LOG_ERR(...) \
	RTE_LOG(ERR, CRYPTODEV, \
		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define CDEV_LOG_INFO(...) \
	RTE_LOG(INFO, CRYPTODEV, \
		RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
			RTE_FMT_TAIL(__VA_ARGS__,)))

#define CDEV_LOG_DEBUG(...) \
	RTE_LOG(DEBUG, CRYPTODEV, \
		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define CDEV_PMD_TRACE(...) \
	RTE_LOG(DEBUG, CRYPTODEV, \
		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))

/**
 * A macro that points to an offset from the start
 * of the crypto operation structure (rte_crypto_op)
 *
 * The returned pointer is cast to type t.
 *
 * @param c
 *   The crypto operation.
 * @param o
 *   The offset from the start of the crypto operation.
 * @param t
 *   The type to cast the result into.
 */
#define rte_crypto_op_ctod_offset(c, t, o)	\
	((t)((char *)(c) + (o)))

/**
 * A macro that returns the physical address that points
 * to an offset from the start of the crypto operation
 * (rte_crypto_op)
 *
 * @param c
 *   The crypto operation.
 * @param o
 *   The offset from the start of the crypto operation
 *   to calculate address from.
 */
#define rte_crypto_op_ctophys_offset(c, o)	\
	(rte_iova_t)((c)->phys_addr + (o))

/**
 * Crypto parameters range description
 */
struct rte_crypto_param_range {
	uint16_t min;	/**< minimum size */
	uint16_t max;	/**< maximum size */
	uint16_t increment;
	/**< if a range of sizes are supported,
	 * this parameter is used to indicate
	 * increments in byte size that are supported
	 * between the minimum and maximum
	 */
};
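/*
 * Illustrative sketch (not part of the API): rte_crypto_op_ctod_offset() and
 * rte_crypto_op_ctophys_offset() are typically used to reach per-operation
 * data, such as an IV, laid out after the fixed-size op and symmetric-op
 * structures. The IV_OFFSET value below is an application-chosen layout,
 * shown only as an example.
 *
 *	#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
 *			sizeof(struct rte_crypto_sym_op))
 *
 *	struct rte_crypto_op *op;	// previously allocated from an op pool
 *	uint8_t *iv = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *	rte_iova_t iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
 */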
/**
 * Data-unit supported lengths of cipher algorithms.
 * A bit can represent any set of data-unit sizes
 * (single size, multiple size, range, etc).
 */
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES	RTE_BIT32(0)
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES	RTE_BIT32(1)
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES	RTE_BIT32(2)

/**
 * Symmetric Crypto Capability
 */
struct rte_cryptodev_symmetric_capability {
	enum rte_crypto_sym_xform_type xform_type;
	/**< Transform type : Authentication / Cipher / AEAD */
	RTE_STD_C11
	union {
		struct {
			enum rte_crypto_auth_algorithm algo;
			/**< authentication algorithm */
			uint16_t block_size;
			/**< algorithm block size */
			struct rte_crypto_param_range key_size;
			/**< auth key size range */
			struct rte_crypto_param_range digest_size;
			/**< digest size range */
			struct rte_crypto_param_range aad_size;
			/**< Additional authentication data size range */
			struct rte_crypto_param_range iv_size;
			/**< Initialisation vector data size range */
		} auth;
		/**< Symmetric Authentication transform capabilities */
		struct {
			enum rte_crypto_cipher_algorithm algo;
			/**< cipher algorithm */
			uint16_t block_size;
			/**< algorithm block size */
			struct rte_crypto_param_range key_size;
			/**< cipher key size range */
			struct rte_crypto_param_range iv_size;
			/**< Initialisation vector data size range */
			uint32_t dataunit_set;
			/**<
			 * Supported data-unit lengths:
			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
			 * or 0 for lengths defined in the algorithm standard.
			 */
		} cipher;
		/**< Symmetric Cipher transform capabilities */
		struct {
			enum rte_crypto_aead_algorithm algo;
			/**< AEAD algorithm */
			uint16_t block_size;
			/**< algorithm block size */
			struct rte_crypto_param_range key_size;
			/**< AEAD key size range */
			struct rte_crypto_param_range digest_size;
			/**< digest size range */
			struct rte_crypto_param_range aad_size;
			/**< Additional authentication data size range */
			struct rte_crypto_param_range iv_size;
			/**< Initialisation vector data size range */
		} aead;
	};
};

/**
 * Asymmetric Xform Crypto Capability
 */
struct rte_cryptodev_asymmetric_xform_capability {
	enum rte_crypto_asym_xform_type xform_type;
	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */

	uint32_t op_types;
	/**< bitmask for supported rte_crypto_asym_op_type */

	__extension__
	union {
		struct rte_crypto_param_range modlen;
		/**< Range of modulus length supported by modulus based xform.
		 * Value 0 means implementation default
		 */
	};
};

/**
 * Asymmetric Crypto Capability
 */
struct rte_cryptodev_asymmetric_capability {
	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
};


/** Structure used to capture a capability of a crypto device */
struct rte_cryptodev_capabilities {
	enum rte_crypto_op_type op;
	/**< Operation type */

	RTE_STD_C11
	union {
		struct rte_cryptodev_symmetric_capability sym;
		/**< Symmetric operation capability parameters */
		struct rte_cryptodev_asymmetric_capability asym;
		/**< Asymmetric operation capability parameters */
	};
};

/** Structure used to describe crypto algorithms */
struct rte_cryptodev_sym_capability_idx {
	enum rte_crypto_sym_xform_type type;
	union {
		enum rte_crypto_cipher_algorithm cipher;
		enum rte_crypto_auth_algorithm auth;
		enum rte_crypto_aead_algorithm aead;
	} algo;
};

/**
 * Structure used to describe asymmetric crypto xforms
 * Each xform maps to one asym algorithm.
 */
struct rte_cryptodev_asym_capability_idx {
	enum rte_crypto_asym_xform_type type;
	/**< Asymmetric xform (algo) type */
};

/**
 * Provide capabilities available for defined device and algorithm
 *
 * @param	dev_id		The identifier of the device.
 * @param	idx		Description of crypto algorithms.
 *
 * @return
 *   - Return description of the symmetric crypto capability if it exists.
 *   - Return NULL if the capability does not exist.
 */
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx);

/**
 * Provide capabilities available for defined device and xform
 *
 * @param	dev_id		The identifier of the device.
 * @param	idx		Description of asym crypto xform.
 *
 * @return
 *   - Return description of the asymmetric crypto capability if it exists.
 *   - Return NULL if the capability does not exist.
 */
__rte_experimental
const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx);

/**
 * Check if key size and initial vector are supported
 * in crypto cipher capability
 *
 * @param	capability	Description of the symmetric crypto capability.
 * @param	key_size	Cipher key size.
 * @param	iv_size		Cipher initial vector size.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size);

/**
 * Check if key size and initial vector are supported
 * in crypto auth capability
 *
 * @param	capability	Description of the symmetric crypto capability.
 * @param	key_size	Auth key size.
 * @param	digest_size	Auth digest size.
 * @param	iv_size		Auth initial vector size.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);

/**
 * Check if key, digest, AAD and initial vector sizes are supported
 * in crypto AEAD capability
 *
 * @param	capability	Description of the symmetric crypto capability.
 * @param	key_size	AEAD key size.
 * @param	digest_size	AEAD digest size.
 * @param	aad_size	AEAD AAD size.
 * @param	iv_size		AEAD IV size.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size);

/**
 * Check if op type is supported
 *
 * @param	capability	Description of the asymmetric crypto capability.
 * @param	op_type		op type
 *
 * @return
 *   - Return 1 if the op type is supported
 *   - Return 0 if unsupported
 */
__rte_experimental
int
rte_cryptodev_asym_xform_capability_check_optype(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		enum rte_crypto_asym_op_type op_type);

/**
 * Check if modulus length is in supported range
 *
 * @param	capability	Description of the asymmetric crypto capability.
 * @param	modlen		modulus length.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
__rte_experimental
int
rte_cryptodev_asym_xform_capability_check_modlen(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		uint16_t modlen);

/**
 * Provide the cipher algorithm enum, given an algorithm string
 *
 * @param	algo_enum	A pointer to the cipher algorithm
 *				enum to be filled
 * @param	algo_string	Cipher algorithm string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string);

/**
 * Provide the authentication algorithm enum, given an algorithm string
 *
 * @param	algo_enum	A pointer to the authentication algorithm
 *				enum to be filled
 * @param	algo_string	Authentication algo string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string);

/**
 * Provide the AEAD algorithm enum, given an algorithm string
 *
 * @param	algo_enum	A pointer to the AEAD algorithm
 *				enum to be filled
 * @param	algo_string	AEAD algorithm string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string);

/**
 * Provide the Asymmetric xform enum, given an xform string
 *
 * @param	xform_enum	A pointer to the xform type
 *				enum to be filled
 * @param	xform_string	xform string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
__rte_experimental
int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string);
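/*
 * Illustrative sketch (not part of the API): querying a device for AES-CBC
 * cipher support and validating a 128-bit key with a 16-byte IV. Device id 0
 * is assumed to be a valid, attached crypto device.
 *
 *	const struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(0, &cap_idx);
 *
 *	if (cap != NULL &&
 *			rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0) {
 *		// AES-CBC with a 16-byte key and 16-byte IV is supported
 *	}
 */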
/** Macro used at end of crypto PMD list */
#define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }


/**
 * Crypto device supported feature flags
 *
 * Note:
 * New feature flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_cryptodev_get_feature_name()
 */
#define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
/**< Symmetric crypto operations are supported */
#define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
/**< Asymmetric crypto operations are supported */
#define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
/**< Chaining of symmetric crypto operations is supported */
#define RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
/**< Utilises CPU SIMD SSE instructions */
#define RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
/**< Utilises CPU SIMD AVX instructions */
#define RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
/**< Utilises CPU SIMD AVX2 instructions */
#define RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
/**< Utilises CPU AES-NI instructions */
#define RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
/**< Operations are off-loaded to an
 * external hardware accelerator
 */
#define RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
/**< Utilises CPU SIMD AVX512 instructions */
#define RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
/**< In-place Scatter-gather (SGL) buffers, with multiple segments,
 * are supported
 */
#define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
/**< Out-of-place Scatter-gather (SGL) buffers are
 * supported in input and output
 */
#define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in input, combined with linear buffers (LB), with a
 * single segment in output
 */
#define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in output, combined with linear buffers (LB) in input
 */
#define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
/**< Out-of-place linear buffers (LB) are supported in input and output */
#define RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
/**< Utilises CPU NEON instructions */
#define RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
/**< Utilises ARM CPU Cryptographic Extensions */
#define RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
/**< Support Security Protocol Processing */
#define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
/**< Support RSA Private Key OP with exponent */
#define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
/**< Support RSA Private Key OP with CRT (quintuple) Keys */
#define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
/**< Support encrypted-digest operations where digest is appended to data */
#define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
/**< Support asymmetric session-less operations */
#define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
/**< Support symmetric cpu-crypto processing */
#define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
/**< Support symmetric session-less operations */
#define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
/**< Support operations on data which is not byte aligned */
#define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
/**< Support accelerator specific symmetric raw data-path APIs */
#define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
/**< Support operations on a message consisting of multiple data-units */
#define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
/**< Support wrapped key in cipher xform */
#define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
/**< Support inner checksum computation/verification */

/**
 * Get the name of a crypto device feature flag
 *
 * @param	flag	The mask describing the flag.
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
extern const char *
rte_cryptodev_get_feature_name(uint64_t flag);

/** Crypto device information */
struct rte_cryptodev_info {
	const char *driver_name;	/**< Driver name. */
	uint8_t driver_id;		/**< Driver identifier */
	struct rte_device *device;	/**< Generic device information. */

	uint64_t feature_flags;
	/**< Feature flags exposing HW/SW features for the given device */

	const struct rte_cryptodev_capabilities *capabilities;
	/**< Array of the device's supported capabilities */

	unsigned max_nb_queue_pairs;
	/**< Maximum number of queue pairs supported by device. */

	uint16_t min_mbuf_headroom_req;
	/**< Minimum mbuf headroom required by device */

	uint16_t min_mbuf_tailroom_req;
	/**< Minimum mbuf tailroom required by device */

	struct {
		unsigned max_nb_sessions;
		/**< Maximum number of sessions supported by device.
		 * If 0, the device does not have any limitation in
		 * number of sessions that can be used.
		 */
	} sym;
};

#define RTE_CRYPTODEV_DETACHED  (0)
#define RTE_CRYPTODEV_ATTACHED  (1)

/** Definitions of Crypto device event types */
enum rte_cryptodev_event_type {
	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
};

/** Crypto device queue pair configuration structure. */
struct rte_cryptodev_qp_conf {
	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
	struct rte_mempool *mp_session;
	/**< The mempool for creating sessions in sessionless mode */
	struct rte_mempool *mp_session_private;
	/**< The mempool for creating session private data in sessionless mode */
};

/**
 * Function type used for processing crypto ops when enqueue/dequeue burst is
 * called.
 *
 * The callback function is called on enqueue/dequeue burst immediately.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				enqueued/dequeued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	ops		The address of an array of *nb_ops* pointers
 *				to *rte_crypto_op* structures which contain
 *				the crypto operations to be processed.
 * @param	nb_ops		The number of operations to process.
 * @param	user_param	The arbitrary user parameter passed in by the
 *				application when the callback was originally
 *				registered.
 * @return	The number of ops to be enqueued to the crypto device.
 */
typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
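/*
 * Illustrative sketch (not part of the API): testing a device's feature
 * flags, here whether operation chaining and hardware acceleration are both
 * advertised. Device id 0 is assumed to be a valid crypto device.
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(0, &info);
 *	if ((info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING) &&
 *			(info.feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED)) {
 *		// chained cipher+auth operations run on a hardware accelerator
 *	}
 */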
/**
 * Typedef for application callback function to be registered by application
 * software for notification of device events
 *
 * @param	dev_id	Crypto device identifier
 * @param	event	Crypto device event to register for notification of.
 * @param	cb_arg	User specified parameter to be passed to the user's
 *			callback function.
 */
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
		enum rte_cryptodev_event_type event, void *cb_arg);


/** Crypto Device statistics */
struct rte_cryptodev_stats {
	uint64_t enqueued_count;
	/**< Count of all operations enqueued */
	uint64_t dequeued_count;
	/**< Count of all operations dequeued */

	uint64_t enqueue_err_count;
	/**< Total error count on operations enqueued */
	uint64_t dequeue_err_count;
	/**< Total error count on operations dequeued */
};

#define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
/**< Max length of name of crypto PMD */

/**
 * Get the device identifier for the named crypto device.
 *
 * @param	name	device name to select the device structure.
 *
 * @return
 *   - Returns crypto device identifier on success.
 *   - Return -1 on failure to find named crypto device.
 */
extern int
rte_cryptodev_get_dev_id(const char *name);

/**
 * Get the crypto device name given a device identifier.
 *
 * @param dev_id
 *   The identifier of the device
 *
 * @return
 *   - Returns crypto device name.
 *   - Returns NULL if crypto device is not present.
 */
extern const char *
rte_cryptodev_name_get(uint8_t dev_id);

/**
 * Get the total number of crypto devices that have been successfully
 * initialised.
 *
 * @return
 *   - The total number of usable crypto devices.
 */
extern uint8_t
rte_cryptodev_count(void);

/**
 * Get the number of crypto devices that use a given driver.
 *
 * @param	driver_id	driver identifier.
 *
 * @return
 *   Returns the number of crypto devices.
 */
extern uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id);

/**
 * Get number and identifiers of attached crypto devices that
 * use the same crypto driver.
 *
 * @param	driver_name	driver name.
 * @param	devices		output devices identifiers.
 * @param	nb_devices	maximum number of devices.
 *
 * @return
 *   Returns the number of attached crypto devices.
 */
uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices);
/*
 * Return the NUMA socket to which a device is connected
 *
 * @param dev_id
 *   The identifier of the device
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 if the dev_id value is out of range.
 */
extern int
rte_cryptodev_socket_id(uint8_t dev_id);

/** Crypto device configuration structure */
struct rte_cryptodev_config {
	int socket_id;			/**< Socket to allocate resources on */
	uint16_t nb_queue_pairs;
	/**< Number of queue pairs to configure on device */
	uint64_t ff_disable;
	/**< Feature flags to be disabled. Only the following features are
	 * allowed to be disabled,
	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
	 *  - RTE_CRYPTODEV_FF_SECURITY
	 */
};
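/*
 * Illustrative sketch (not part of the API): locating a device by name and
 * discovering its NUMA socket. The device name "crypto_aesni_mb0" is only an
 * example of a vdev name an application might have created.
 *
 *	int dev_id = rte_cryptodev_get_dev_id("crypto_aesni_mb0");
 *
 *	if (dev_id >= 0) {
 *		int socket = rte_cryptodev_socket_id((uint8_t)dev_id);
 *		// allocate mempools and queue pairs on "socket"
 *	}
 */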
/**
 * Configure a device.
 *
 * This function must be invoked first before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * @param	dev_id		The identifier of the device to configure.
 * @param	config		The crypto device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
extern int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);

/**
 * Start a device.
 *
 * The device start step is the last one and consists of setting the configured
 * offload features and in starting the transmit and the receive units of the
 * device.
 * On success, all basic functions exported by the API (link status,
 * receive/transmit, and so on) can be invoked.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   - 0: Success, device started.
 *   - <0: Error code of the driver device start function.
 */
extern int
rte_cryptodev_start(uint8_t dev_id);

/**
 * Stop a device. The device can be restarted with a call to
 * rte_cryptodev_start()
 *
 * @param	dev_id		The identifier of the device.
 */
extern void
rte_cryptodev_stop(uint8_t dev_id);

/**
 * Close a device. The device cannot be restarted!
 *
 * @param	dev_id		The identifier of the device.
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 */
extern int
rte_cryptodev_close(uint8_t dev_id);

/**
 * Allocate and set up a receive queue pair for a device.
 *
 * @param	dev_id		The identifier of the device.
 * @param	queue_pair_id	The index of the queue pair to set up. The
 *				value must be in the range [0, nb_queue_pair
 *				- 1] previously supplied to
 *				rte_cryptodev_configure().
 * @param	qp_conf		The pointer to the configuration data to be
 *				used for the queue pair.
 * @param	socket_id	The *socket_id* argument is the socket
 *				identifier in case of NUMA. The value can be
 *				*SOCKET_ID_ANY* if there is no NUMA constraint
 *				for the DMA memory allocated for the receive
 *				queue pair.
 *
 * @return
 *   - 0: Success, queue pair correctly set up.
 *   - <0: Queue pair configuration failed
 */
extern int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);

/**
 * Get the status of a queue pair set up on a specific crypto device
 *
 * @param	dev_id		Crypto device identifier.
 * @param	queue_pair_id	The index of the queue pair. The value must be
 *				in the range [0, nb_queue_pair - 1] previously
 *				supplied to rte_cryptodev_configure().
 * @return
 *   - 0: qp was not configured
 *   - 1: qp was configured
 *   - -EINVAL: device was not configured
 */
__rte_experimental
int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);

/**
 * Get the number of queue pairs on a specific crypto device
 *
 * @param	dev_id		Crypto device identifier.
 * @return
 *   - The number of configured queue pairs.
 */
extern uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id);
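/*
 * Illustrative sketch (not part of the API): minimal bring-up of a crypto
 * device with a single queue pair. The descriptor count, device id and the
 * session mempools (sess_mp, sess_priv_mp) are placeholders an application
 * would supply.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = SOCKET_ID_ANY,
 *		.nb_queue_pairs = 1,
 *		.ff_disable = 0,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *			rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *					SOCKET_ID_ANY) < 0 ||
 *			rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "crypto device setup failed\n");
 */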
/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param	dev_id		The identifier of the device.
 * @param	stats		A pointer to a structure of type
 *				*rte_cryptodev_stats* to be filled with the
 *				values of device counters.
 * @return
 *   - Zero if successful.
 *   - Non-zero otherwise.
 */
extern int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);

/**
 * Reset the general I/O statistics of a device.
 *
 * @param	dev_id		The identifier of the device.
 */
extern void
rte_cryptodev_stats_reset(uint8_t dev_id);

/**
 * Retrieve the contextual information of a device.
 *
 * @param	dev_id		The identifier of the device.
 * @param	dev_info	A pointer to a structure of type
 *				*rte_cryptodev_info* to be filled with the
 *				contextual information of the device.
 *
 * @note The capabilities field of dev_info is set to point to the first
 * element of an array of struct rte_cryptodev_capabilities. The element after
 * the last valid element has its op field set to
 * RTE_CRYPTO_OP_TYPE_UNDEFINED.
 */
extern void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);


/**
 * Register a callback function for specific device id.
 *
 * @param	dev_id		Device id.
 * @param	event		Event of interest.
 * @param	cb_fn		User supplied callback function to be called.
 * @param	cb_arg		Pointer to the parameters for the registered
 *				callback.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
extern int
rte_cryptodev_callback_register(uint8_t dev_id,
		enum rte_cryptodev_event_type event,
		rte_cryptodev_cb_fn cb_fn, void *cb_arg);

/**
 * Unregister a callback function for specific device id.
 *
 * @param	dev_id		The device identifier.
 * @param	event		Event of interest.
 * @param	cb_fn		User supplied callback function to be called.
 * @param	cb_arg		Pointer to the parameters for the registered
 *				callback.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
extern int
rte_cryptodev_callback_unregister(uint8_t dev_id,
		enum rte_cryptodev_event_type event,
		rte_cryptodev_cb_fn cb_fn, void *cb_arg);

struct rte_cryptodev_callback;

/** Structure to keep track of registered callbacks */
RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);

/**
 * Structure used to hold information about the callbacks to be called for a
 * queue pair on enqueue/dequeue.
 */
struct rte_cryptodev_cb {
	struct rte_cryptodev_cb *next;
	/**< Pointer to next callback */
	rte_cryptodev_callback_fn fn;
	/**< Pointer to callback function */
	void *arg;
	/**< Pointer to argument */
};

/**
 * @internal
 * Structure used to hold information about the RCU for a queue pair.
 */
struct rte_cryptodev_cb_rcu {
	struct rte_cryptodev_cb *next;
	/**< Pointer to next callback */
	struct rte_rcu_qsbr *qsbr;
	/**< RCU QSBR variable per queue pair */
};

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id);
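/*
 * Illustrative sketch (not part of the API): registering a handler for
 * device error events. The handler and the "failed" flag are placeholders.
 *
 *	static void
 *	crypto_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		int *dev_failed = cb_arg;
 *
 *		RTE_SET_USED(dev_id);
 *		if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *			*dev_failed = 1;
 *	}
 *
 *	static int failed;
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			crypto_event_cb, &failed);
 */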
/** Cryptodev symmetric crypto session
 * Each session is derived from a fixed xform chain. Therefore each session
 * has a fixed algo, key, op-type, digest_len etc.
 */
struct rte_cryptodev_sym_session {
	uint64_t opaque_data;
	/**< Can be used for external metadata */
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
	__extension__ struct {
		void *data;
		uint16_t refcnt;
	} sess_data[0];
	/**< Driver specific session material, variable size */
};

/**
 * Create a symmetric session mempool.
 *
 * @param name
 *   The unique mempool name.
 * @param nb_elts
 *   The number of elements in the mempool.
 * @param elt_size
 *   The size of the element. This value will be ignored if it is smaller than
 *   the minimum session header size required for the system. For users who
 *   want to use the same mempool for sym session and session private data it
 *   can be the maximum value of all existing devices' private data and session
 *   header sizes.
 * @param cache_size
 *   The number of per-lcore cache elements
 * @param priv_size
 *   The private data size of each session.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 *
 * @return
 *  - On success returns the created session mempool
 *  - On failure returns NULL
 */
__rte_experimental
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
	int socket_id);

/**
 * Create an asymmetric session mempool.
 *
 * @param name
 *   The unique mempool name.
 * @param nb_elts
 *   The number of elements in the mempool.
 * @param cache_size
 *   The number of per-lcore cache elements
 * @param user_data_size
 *   The size of user data to be placed after session private data.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 *
 * @return
 *  - On success return mempool
 *  - On failure returns NULL
 */
__rte_experimental
struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t cache_size, uint16_t user_data_size, int socket_id);

/**
 * Create symmetric crypto session header (generic with no private data)
 *
 * @param	mempool		Symmetric session mempool to allocate session
 *				objects from
 * @return
 *  - On success return pointer to sym-session
 *  - On failure returns NULL
 */
struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mempool);

/**
 * Create and initialise an asymmetric crypto session structure.
 * Calls the PMD to configure the private session data.
 *
 * @param	dev_id		ID of device that we want the session to be used on
 * @param	xforms		Asymmetric crypto transform operations to apply on flow
 *				processed with this session
 * @param	mp		mempool to allocate asymmetric session
 *				objects from
 * @param	session		void ** for session to be used
 *
 * @return
 *  - 0 on success.
 *  - -EINVAL on invalid arguments.
 *  - -ENOMEM on memory error for session allocation.
 *  - -ENOTSUP if device doesn't support session configuration.
 */
__rte_experimental
int
rte_cryptodev_asym_session_create(uint8_t dev_id,
		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
		void **session);
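/*
 * Illustrative sketch (not part of the API): typical symmetric session setup.
 * The xform contents, pool sizing, the "key" buffer and IV_OFFSET are
 * placeholders; error handling is omitted for brevity.
 *
 *	unsigned int priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *pool = rte_cryptodev_sym_session_pool_create(
 *			"sym_sess_pool", 1024,
 *			rte_cryptodev_sym_get_header_session_size(),
 *			128, 0, SOCKET_ID_ANY);
 *	struct rte_mempool *priv_pool = rte_mempool_create(
 *			"sym_sess_priv_pool", 1024, priv_sz, 128, 0,
 *			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess =
 *			rte_cryptodev_sym_session_create(pool);
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_pool);
 */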
/**
 * Frees symmetric crypto session header, after checking that all
 * the device private data has been freed, returning it
 * to its original mempool.
 *
 * @param	sess	Session header to be freed.
 *
 * @return
 *  - 0 if successful.
 *  - -EINVAL if session is NULL.
 *  - -EBUSY if not all device private data has been freed.
 */
int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);

/**
 * Clears and frees asymmetric crypto session header and private data,
 * returning it to its original mempool.
 *
 * @param	dev_id	ID of device that uses the asymmetric session.
 * @param	sess	Session header to be freed.
 *
 * @return
 *  - 0 if successful.
 *  - -EINVAL if device is invalid or session is NULL.
 */
__rte_experimental
int
rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);

/**
 * Fill out private data for the device id, based on its device type.
 *
 * @param	dev_id		ID of device that we want the session to be used on
 * @param	sess		Session where the private data will be attached to
 * @param	xforms		Symmetric crypto transform operations to apply on flow
 *				processed with this session
 * @param	mempool		Mempool where the private data is allocated.
 *
 * @return
 *  - On success, zero.
 *  - -EINVAL if input parameters are invalid.
 *  - -ENOTSUP if crypto device does not support the crypto transform or
 *    does not support symmetric operations.
 *  - -ENOMEM if the private session could not be allocated.
 */
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
			struct rte_cryptodev_sym_session *sess,
			struct rte_crypto_sym_xform *xforms,
			struct rte_mempool *mempool);

/**
 * Frees private data for the device id, based on its device type,
 * returning it to its mempool. It is the application's responsibility
 * to ensure that private session data is not cleared while there are
 * still in-flight operations using it.
 *
 * @param	dev_id		ID of device that uses the session.
 * @param	sess		Session containing the reference to the private data
 *
 * @return
 *  - 0 if successful.
 *  - -EINVAL if device is invalid or session is NULL.
 *  - -ENOTSUP if crypto device does not support symmetric operations.
 */
int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
			struct rte_cryptodev_sym_session *sess);

/**
 * Get the size of the header session, for all registered drivers excluding
 * the user data size.
 *
 * @return
 *   Size of the symmetric header session.
 */
unsigned int
rte_cryptodev_sym_get_header_session_size(void);

/**
 * Get the size of the header session from created session.
 *
 * @param sess
 *   The sym cryptodev session pointer
 *
 * @return
 *   - If sess is not NULL, return the size of the header session including
 *     the private data size defined within sess.
 *   - If sess is NULL, return 0.
 */
__rte_experimental
unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess);
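/*
 * Illustrative sketch (not part of the API): creating and releasing an
 * asymmetric session. The modexp parameter buffers (mod, exp) and their
 * lengths, the pool sizing and the device id are placeholders; error
 * handling is omitted.
 *
 *	struct rte_mempool *asym_pool = rte_cryptodev_asym_session_pool_create(
 *			"asym_sess_pool", 128, 32, 0, SOCKET_ID_ANY);
 *	struct rte_crypto_asym_xform xform = {
 *		.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *		.modex = {
 *			.modulus = { .data = mod, .length = mod_len },
 *			.exponent = { .data = exp, .length = exp_len },
 *		},
 *	};
 *	void *asym_sess;
 *
 *	rte_cryptodev_asym_session_create(dev_id, &xform, asym_pool, &asym_sess);
 *	// ... perform modexp operations ...
 *	rte_cryptodev_asym_session_free(dev_id, asym_sess);
 */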
/**
 * Get the size of the asymmetric session header.
 *
 * @return
 *   Size of the asymmetric header session.
 */
__rte_experimental
unsigned int
rte_cryptodev_asym_get_header_session_size(void);

/**
 * Get the size of the private symmetric session data
 * for a device.
 *
 * @param	dev_id		The device identifier.
 *
 * @return
 *   - Size of the private data, if successful
 *   - 0 if device is invalid or does not have private
 *     symmetric session
 */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);

/**
 * Get the size of the private data for asymmetric session
 * on device
 *
 * @param	dev_id		The device identifier.
 *
 * @return
 *   - Size of the asymmetric private data, if successful
 *   - 0 if device is invalid or does not have private session
 */
__rte_experimental
unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);

/**
 * Check if the crypto device index corresponds to a valid, attached
 * crypto device.
 *
 * @param	dev_id	Crypto device index.
 *
 * @return
 *   - 1 if the device index is valid, 0 otherwise.
 */
unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id);

/**
 * Provide driver identifier.
 *
 * @param name
 *   The pointer to a driver name.
 * @return
 *   The driver type identifier or -1 if no driver found
 */
int rte_cryptodev_driver_id_get(const char *name);

/**
 * Provide driver name.
 *
 * @param driver_id
 *   The driver identifier.
 * @return
 *   The driver name or NULL if no driver found
 */
const char *rte_cryptodev_driver_name_get(uint8_t driver_id);

/**
 * Store user data in a session.
 *
 * @param	sess		Session pointer allocated by
 *				*rte_cryptodev_sym_session_create*.
 * @param	data		Pointer to the user data.
 * @param	size		Size of the user data.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
__rte_experimental
int
rte_cryptodev_sym_session_set_user_data(
					struct rte_cryptodev_sym_session *sess,
					void *data,
					uint16_t size);

/**
 * Get user data stored in a session.
 *
 * @param	sess		Session pointer allocated by
 *				*rte_cryptodev_sym_session_create*.
 *
 * @return
 *  - On success return pointer to user data.
 *  - On failure returns NULL.
 */
__rte_experimental
void *
rte_cryptodev_sym_session_get_user_data(
					struct rte_cryptodev_sym_session *sess);

/**
 * Store user data in an asymmetric session.
 *
 * @param	sess		Session pointer allocated by
 *				*rte_cryptodev_asym_session_create*.
 * @param	data		Pointer to the user data.
 * @param	size		Size of the user data.
 *
 * @return
 *  - On success, zero.
 *  - -EINVAL if the session pointer is invalid.
 *  - -ENOMEM if the available user data size is smaller than the size parameter.
 */
__rte_experimental
int
rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);

/**
 * Get user data stored in an asymmetric session.
 *
 * @param	sess		Session pointer allocated by
 *				*rte_cryptodev_asym_session_create*.
 *
 * @return
 *  - On success return pointer to user data.
 *  - On failure returns NULL.
 */
__rte_experimental
void *
rte_cryptodev_asym_session_get_user_data(void *sess);
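/*
 * Illustrative sketch (not part of the API): attaching per-flow application
 * state to a symmetric session via user data. "struct flow_ctx" is a
 * hypothetical application type; the session mempool must have been created
 * with enough user data room (the pool's priv_size parameter).
 *
 *	struct flow_ctx ctx = { .flow_id = 7 };
 *
 *	rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *	// ... later, e.g. when completing an operation ...
 *	struct flow_ctx *p = rte_cryptodev_sym_session_get_user_data(sess);
 */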
/**
 * Perform actual crypto processing (encrypt/digest or auth/decrypt)
 * on user provided data.
 *
 * @param	dev_id	The device identifier.
 * @param	sess	Cryptodev session structure
 * @param	ofs	Start and stop offsets for auth and cipher operations
 * @param	vec	Vectorized operation descriptor
 *
 * @return
 *  - Returns number of successfully processed packets.
 */
__rte_experimental
uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec);

/**
 * Get the size of the raw data-path context buffer.
 *
 * @param	dev_id		The device identifier.
 *
 * @return
 *   - If the device supports raw data-path APIs, return the context size.
 *   - If the device does not support the APIs, return -1.
 */
__rte_experimental
int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);

/**
 * Union of different crypto session types, including session-less xform
 * pointer.
 */
union rte_cryptodev_session_ctx {
	struct rte_cryptodev_sym_session *crypto_sess;
	struct rte_crypto_sym_xform *xform;
	struct rte_security_session *sec_sess;
};

/**
 * Enqueue a vectorized operation descriptor into the device queue but the
 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
 * is called.
 *
 * @param	qp		Driver specific queue pair data.
 * @param	drv_ctx		Driver specific context data.
 * @param	vec		Vectorized operation descriptor.
 * @param	ofs		Start and stop offsets for auth and cipher
 *				operations.
 * @param	user_data	The array of user data for dequeue later.
 * @param	enqueue_status	Driver written value to specify the
 *				enqueue status. Possible values:
 *				- 1: The number of operations returned are
 *				     enqueued successfully.
 *				- 0: The number of operations returned are
 *				     cached into the queue but are not processed
 *				     until rte_cryptodev_raw_enqueue_done() is
 *				     called.
 *				- negative integer: Error occurred.
 * @return
 *   - The number of operations in the descriptor successfully enqueued or
 *     cached into the queue but not enqueued yet, depends on the
 *     "enqueue_status" value.
 */
typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);

/**
 * Enqueue single raw data vector into the device queue but the driver may or
 * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
 *
 * @param	qp		Driver specific queue pair data.
 * @param	drv_ctx		Driver specific context data.
 * @param	data_vec	The buffer data vector.
 * @param	n_data_vecs	Number of buffer data vectors.
 * @param	ofs		Start and stop offsets for auth and cipher
 *				operations.
 * @param	iv		IV virtual and IOVA addresses
 * @param	digest		digest virtual and IOVA addresses
 * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
 *				depends on the algorithm used.
 * @param	user_data	The user data.
 * @return
 *   - 1: The data vector is enqueued successfully.
 *   - 0: The data vector is cached into the queue but is not processed
 *     until rte_cryptodev_raw_enqueue_done() is called.
 *   - negative integer: failure.
 */
typedef int (*cryptodev_sym_raw_enqueue_t)(
	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data);

/**
 * Inform the cryptodev queue pair to start processing or finish dequeuing all
 * enqueued/dequeued operations.
 *
 * @param	qp		Driver specific queue pair data.
 * @param	drv_ctx		Driver specific context data.
 * @param	n		The total number of processed operations.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
	uint32_t n);

/**
 * Typedef of the user-provided callback for the driver to obtain the dequeue
 * count. The function may return a fixed number or the number parsed from the
 * user data stored in the first processed operation.
 *
 * @param	user_data	Dequeued user data.
 * @return
 *  - The number of operations to be dequeued.
 **/
typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);

/**
 * Typedef of the user-provided callback to handle post-dequeue work, such
 * as filling status.
 *
 * @param	user_data	Dequeued user data.
 * @param	index		Index number of the processed descriptor.
 * @param	is_op_success	Operation status provided by the driver.
 **/
typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
	uint32_t index, uint8_t is_op_success);

/**
 * Dequeue a burst of symmetric crypto processing.
 *
 * @param	qp			Driver specific queue pair data.
 * @param	drv_ctx			Driver specific context data.
 * @param	get_dequeue_count	User provided callback function to
 *					obtain dequeue operation count.
 * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
 *					value is used to pass the maximum
 *					number of operations to be dequeued.
 * @param	post_dequeue		User provided callback function to
 *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from device queue. In case
 *					*is_user_data_array* is set there
 *					should be enough room to store all
 *					user data.
 * @param	is_user_data_array	Set 1 if every dequeued user data will
 *					be written into out_user_data array.
 *					Set 0 if only the first user data will
 *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
 *					total successful operations count.
 * @param	dequeue_status		Driver written value to specify the
 *					dequeue status. Possible values:
 *					- 1: Successfully dequeued the number
 *					     of operations returned. The user
 *					     data previously set during enqueue
 *					     is stored in the "out_user_data".
 *					- 0: The number of operations returned
 *					     are completed and the user data is
 *					     stored in the "out_user_data", but
 *					     they are not freed from the queue
 *					     until
 *					     rte_cryptodev_raw_dequeue_done()
 *					     is called.
 *					- negative integer: Error occurred.
 * @return
 *   - The number of operations dequeued or completed but not freed from the
 *     queue, depends on "dequeue_status" value.
 */
typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
	uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status);

/**
 * Dequeue a symmetric crypto processing.
 *
 * @param	qp			Driver specific queue pair data.
 * @param	drv_ctx			Driver specific context data.
 * @param	dequeue_status		Driver written value to specify the
 *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
 *					     The user data is returned.
 *					- 0: The first operation in the queue
 *					     is completed and the user data
 *					     previously set during enqueue is
 *					     returned, but it is not freed from
 *					     the queue until
 *					     rte_cryptodev_raw_dequeue_done() is
 *					     called.
 *					- negative integer: Error occurred.
 * @param	op_status		Driver written value to specify
 *					operation status.
 * @return
 *   - The user data pointer retrieved from device queue or NULL if no
 *     operation is ready for dequeue.
 */
typedef void * (*cryptodev_sym_raw_dequeue_t)(
	void *qp, uint8_t *drv_ctx, int *dequeue_status,
	enum rte_crypto_op_status *op_status);

/**
 * Context data for raw data-path API crypto process. The buffer of this
 * structure is to be allocated by the user application with a size equal to
 * or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
 */
struct rte_crypto_raw_dp_ctx {
	void *qp_data;

	cryptodev_sym_raw_enqueue_t enqueue;
	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
	cryptodev_sym_raw_operation_done_t enqueue_done;
	cryptodev_sym_raw_dequeue_t dequeue;
	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
	cryptodev_sym_raw_operation_done_t dequeue_done;

	/* Driver specific context data */
	__extension__ uint8_t drv_ctx_data[];
};

/**
 * Configure raw data-path context data.
 *
 * NOTE:
 * After the context data is configured, the user should call
 * rte_cryptodev_raw_attach_session() before using it in
 * rte_cryptodev_raw_enqueue/dequeue function call.
 *
 * @param	dev_id		The device identifier.
 * @param	qp_id		The index of the queue pair from which to
 *				retrieve processed packets. The value must be
 *				in the range [0, nb_queue_pair - 1] previously
 *				supplied to rte_cryptodev_configure().
 * @param	ctx		The raw data-path context data.
 * @param	sess_type	session type.
 * @param	session_ctx	Session context data.
 * @param	is_update	Set 0 if it is to initialize the ctx.
 *				Set 1 if ctx is initialized and only to update
 *				session context data.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
__rte_experimental
int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update);

/**
 * Enqueue a vectorized operation descriptor into the device queue but the
 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
 * is called.
 *
 * @param	ctx		The initialized raw data-path context data.
 * @param	vec		Vectorized operation descriptor.
 * @param	ofs		Start and stop offsets for auth and cipher
 *				operations.
 * @param	user_data	The array of user data for dequeue later.
 * @param	enqueue_status	Driver written value to specify the
 *				enqueue status. Possible values:
 *				- 1: The number of operations returned are
 *				     enqueued successfully.
 *				- 0: The number of operations returned are
 *				     cached into the queue but are not processed
 *				     until rte_cryptodev_raw_enqueue_done() is
 *				     called.
 *				- negative integer: Error occurred.
 * @return
 *   - The number of operations in the descriptor successfully enqueued or
 *     cached into the queue but not enqueued yet, depends on the
 *     "enqueue_status" value.
 */
__rte_experimental
uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, int *enqueue_status);

/**
 * Enqueue single raw data vector into the device queue but the driver may or
 * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
 *
 * @param	ctx		The initialized raw data-path context data.
 * @param	data_vec	The buffer data vector.
 * @param	n_data_vecs	Number of buffer data vectors.
 * @param	ofs		Start and stop offsets for auth and cipher
 *				operations.
 * @param	iv		IV virtual and IOVA addresses
 * @param	digest		digest virtual and IOVA addresses
 * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
 *				depends on the algorithm used.
 * @param	user_data	The user data.
 * @return
 *   - 1: The data vector is enqueued successfully.
 *   - 0: The data vector is cached into the queue but is not processed
 *     until rte_cryptodev_raw_enqueue_done() is called.
 *   - negative integer: failure.
 */
__rte_experimental
static __rte_always_inline int
rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
}

/**
 * Start processing all enqueued operations from last
 * rte_cryptodev_configure_raw_dp_ctx() call.
 *
 * @param	ctx	The initialized raw data-path context data.
 * @param	n	The number of operations cached.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
__rte_experimental
int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n);

/**
 * Dequeue a burst of symmetric crypto processing.
 *
 * @param	ctx			The initialized raw data-path context
 *					data.
 * @param	get_dequeue_count	User provided callback function to
 *					obtain dequeue operation count.
 * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
 *					value is used to pass the maximum
 *					number of operations to be dequeued.
 * @param	post_dequeue		User provided callback function to
 *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from device queue. In case
 *					*is_user_data_array* is set there
 *					should be enough room to store all
 *					user data.
 * @param	is_user_data_array	Set 1 if every dequeued user data will
 *					be written into out_user_data array.
 *					Set 0 if only the first user data will
 *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
 *					total successful operations count.
 * @param	dequeue_status		Driver written value to specify the
 *					dequeue status. Possible values:
 *					- 1: Successfully dequeued the number
 *					     of operations returned. The user
 *					     data previously set during enqueue
 *					     is stored in the "out_user_data".
 *					- 0: The number of operations returned
 *					     are completed and the user data is
 *					     stored in the "out_user_data", but
 *					     they are not freed from the queue
 *					     until
 *					     rte_cryptodev_raw_dequeue_done()
 *					     is called.
 *					- negative integer: Error occurred.
 * @return
 *   - The number of operations dequeued or completed but not freed from the
 *     queue, depends on "dequeue_status" value.
 */
__rte_experimental
uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status);

/**
 * Dequeue a symmetric crypto processing.
 *
 * @param	ctx			The initialized raw data-path context
 *					data.
 * @param	dequeue_status		Driver written value to specify the
 *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
 *					     The user data is returned.
 *					- 0: The first operation in the queue
 *					     is completed and the user data
 *					     previously set during enqueue is
 *					     returned, but it is not freed from
 *					     the queue until
 *					     rte_cryptodev_raw_dequeue_done() is
 *					     called.
 *					- negative integer: Error occurred.
 * @param	op_status		Driver written value to specify
 *					operation status.
 * @return
 *   - The user data pointer retrieved from device queue or NULL if no
 *     operation is ready for dequeue.
 */
__rte_experimental
static __rte_always_inline void *
rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
	int *dequeue_status, enum rte_crypto_op_status *op_status)
{
	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
			op_status);
}

/**
 * Inform the queue pair that dequeue operations are finished.
 *
 * @param	ctx	The initialized raw data-path context data.
 * @param	n	The number of operations.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
__rte_experimental
int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n);
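/*
 * Illustrative sketch (not part of the API): outline of a raw data-path
 * round trip on one queue pair. The session, symmetric vector, offsets,
 * user data arrays, post-dequeue callback (post_deq_cb) and status variables
 * are placeholders; error handling is omitted.
 *
 *	int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, size, 0);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			user_data, &enq_status);
 *	if (enq_status == 0)
 *		rte_cryptodev_raw_enqueue_done(ctx, n);
 *
 *	uint32_t n_ok, deq;
 *	deq = rte_cryptodev_raw_dequeue_burst(ctx, NULL, n, post_deq_cb,
 *			out_user_data, 1, &n_ok, &deq_status);
 *	if (deq_status == 0)
 *		rte_cryptodev_raw_dequeue_done(ctx, deq);
 */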
/**
 * Inform the queue pair that dequeue operations are finished.
 *
 * @param	ctx	The initialized raw data-path context data.
 * @param	n	The number of operations.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
__rte_experimental
int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n);

/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops enqueue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * received on a given crypto device queue pair. The return value is a pointer
 * that can be used later to remove the callback using
 * rte_cryptodev_remove_enq_callback().
 *
 * Callbacks registered by the application do not survive
 * rte_cryptodev_configure(), as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leaks.
 * The application is expected to call this add API after
 * rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair and they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be enqueued for processing. The value
 *				must be in the range [0, nb_queue_pairs - 1]
 *				previously supplied to
 *				*rte_cryptodev_configure*.
 * @param	cb_fn		The callback function.
 * @param	cb_arg		A generic pointer parameter which will be passed
 *				to each invocation of the callback function on
 *				this crypto device and queue pair.
 *
 * @return
 *   - NULL on error; rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
__rte_experimental
struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg);

/**
 * Remove a user callback function for a given crypto device and queue pair.
 *
 * This function is used to remove enqueue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_enq_callback().
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be enqueued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	cb		Pointer to user supplied callback created via
 *				rte_cryptodev_add_enq_callback().
 *
 * @return
 *   -  0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *         is NULL or not found for the crypto device queue pair.
 */
__rte_experimental
int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				      uint16_t qp_id,
				      struct rte_cryptodev_cb *cb);
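
/*
 * Usage sketch (illustrative only): registering and later removing an
 * enqueue callback. app_enq_cb() stands for an application-defined function
 * matching the rte_cryptodev_callback_fn prototype defined earlier in this
 * file, and app_ctx is arbitrary application state; both are hypothetical
 * names used only in this example.
 *
 *	struct rte_cryptodev_cb *cb;
 *
 *	cb = rte_cryptodev_add_enq_callback(dev_id, qp_id, app_enq_cb, &app_ctx);
 *	if (cb == NULL) {
 *		// registration failed; rte_errno holds the error code
 *	}
 *
 *	// ... enqueue bursts as usual; app_enq_cb() runs on every burst ...
 *
 *	// Remove the callback before the device is reconfigured, since
 *	// rte_cryptodev_configure() reinitializes the callback list.
 *	rte_cryptodev_remove_enq_callback(dev_id, qp_id, cb);
 */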
/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops dequeue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * received on a given crypto device queue pair. The return value is a pointer
 * that can be used later to remove the callback using
 * rte_cryptodev_remove_deq_callback().
 *
 * Callbacks registered by the application do not survive
 * rte_cryptodev_configure(), as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leaks.
 * The application is expected to call this add API after
 * rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair and they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be dequeued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	cb_fn		The callback function.
 * @param	cb_arg		A generic pointer parameter which will be passed
 *				to each invocation of the callback function on
 *				this crypto device and queue pair.
 *
 * @return
 *   - NULL on error; rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
__rte_experimental
struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg);

/**
 * Remove a user callback function for a given crypto device and queue pair.
 *
 * This function is used to remove dequeue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_deq_callback().
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be dequeued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	cb		Pointer to user supplied callback created via
 *				rte_cryptodev_add_deq_callback().
 *
 * @return
 *   -  0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *         is NULL or not found for the crypto device queue pair.
 */
__rte_experimental
int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				      uint16_t qp_id,
				      struct rte_cryptodev_cb *cb);
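
/*
 * Usage sketch (illustrative only): dequeue callbacks follow the same
 * lifecycle as enqueue callbacks. app_deq_cb() stands for an
 * application-defined function matching the rte_cryptodev_callback_fn
 * prototype; it and app_ctx are hypothetical names used only here.
 *
 *	struct rte_cryptodev_cb *deq_cb;
 *
 *	deq_cb = rte_cryptodev_add_deq_callback(dev_id, qp_id,
 *			app_deq_cb, &app_ctx);
 *
 *	// ... dequeue bursts; app_deq_cb() sees every dequeued burst ...
 *
 *	rte_cryptodev_remove_deq_callback(dev_id, qp_id, deq_cb);
 *	// Only after all enqueue and dequeue callbacks have been removed is
 *	// it safe to call rte_cryptodev_configure() again.
 */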
#include <rte_cryptodev_core.h>
/**
 * Dequeue a burst of processed crypto operations from a queue on the crypto
 * device. The dequeued operations are stored in *rte_crypto_op* structures
 * whose pointers are supplied in the *ops* array.
 *
 * The rte_cryptodev_dequeue_burst() function returns the number of ops
 * actually dequeued, which is the number of *rte_crypto_op* data structures
 * effectively supplied into the *ops* array.
 *
 * A return value equal to *nb_ops* indicates that the queue contained
 * at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the device's output queue. Applications
 * implementing a "retrieve as many processed operations as possible" policy
 * can check this specific case and keep invoking the
 * rte_cryptodev_dequeue_burst() function until a value less than
 * *nb_ops* is returned.
 *
 * The rte_cryptodev_dequeue_burst() function does not provide any error
 * notification to avoid the corresponding overhead.
 *
 * @param	dev_id		The symmetric crypto device identifier.
 * @param	qp_id		The index of the queue pair from which to
 *				retrieve processed operations. The value must
 *				be in the range [0, nb_queue_pairs - 1]
 *				previously supplied to
 *				rte_cryptodev_configure().
 * @param	ops		The address of an array of pointers to
 *				*rte_crypto_op* structures that must be
 *				large enough to store *nb_ops* pointers in it.
 * @param	nb_ops		The maximum number of operations to dequeue.
 *
 * @return
 *   - The number of operations actually dequeued, which is the number
 *     of pointers to *rte_crypto_op* structures effectively supplied to the
 *     *ops* array.
 */
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	const struct rte_crypto_fp_ops *fp_ops;
	void *qp;

	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);

	fp_ops = &rte_crypto_fp_ops[dev_id];
	qp = fp_ops->qp.data[qp_id];

	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);

#ifdef RTE_CRYPTO_CALLBACKS
	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
		struct rte_cryptodev_cb_rcu *list;
		struct rte_cryptodev_cb *cb;

		/* __ATOMIC_RELEASE memory order was used when the
		 * callback was inserted into the list.
		 * Since there is a clear dependency between loading
		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
		 * not required.
		 */
		list = &fp_ops->qp.deq_cb[qp_id];
		rte_rcu_qsbr_thread_online(list->qsbr, 0);
		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);

		while (cb != NULL) {
			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
					cb->arg);
			cb = cb->next;
		}

		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
	}
#endif
	return nb_ops;
}
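
/*
 * Usage sketch (illustrative only): a typical poll-mode loop that drains all
 * processed operations, following the "retrieve as many processed operations
 * as possible" policy described above. The burst is assumed to have been
 * submitted with rte_cryptodev_enqueue_burst(), declared just below;
 * "deq_ops" is an application-managed array of BURST_SZ rte_crypto_op
 * pointers, with BURST_SZ an application-chosen constant.
 *
 *	uint16_t nb_deq;
 *
 *	do {
 *		nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *				deq_ops, BURST_SZ);
 *		// Process deq_ops[0..nb_deq - 1] here, checking each
 *		// operation's status field for per-op success or failure.
 *	} while (nb_deq == BURST_SZ);
 */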
/**
 * Enqueue a burst of operations for processing on a crypto device.
 *
 * The rte_cryptodev_enqueue_burst() function is invoked to place
 * crypto operations on the queue *qp_id* of the device designated by
 * its *dev_id*.
 *
 * The *nb_ops* parameter is the number of operations to process which are
 * supplied in the *ops* array of *rte_crypto_op* structures.
 *
 * The rte_cryptodev_enqueue_burst() function returns the number of
 * operations it actually enqueued for processing. A return value equal to
 * *nb_ops* means that all operations have been enqueued.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which operations
 *				are to be enqueued for processing. The value
 *				must be in the range [0, nb_queue_pairs - 1]
 *				previously supplied to
 *				*rte_cryptodev_configure*.
 * @param	ops		The address of an array of *nb_ops* pointers
 *				to *rte_crypto_op* structures which contain
 *				the crypto operations to be processed.
 * @param	nb_ops		The number of operations to process.
 *
 * @return
 * The number of operations actually enqueued on the crypto device. The return
 * value can be less than the value of the *nb_ops* parameter when the
 * crypto device's queue is full or if invalid parameters are specified in
 * a *rte_crypto_op*.
 */
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	const struct rte_crypto_fp_ops *fp_ops;
	void *qp;

	fp_ops = &rte_crypto_fp_ops[dev_id];
	qp = fp_ops->qp.data[qp_id];
#ifdef RTE_CRYPTO_CALLBACKS
	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
		struct rte_cryptodev_cb_rcu *list;
		struct rte_cryptodev_cb *cb;

		/* __ATOMIC_RELEASE memory order was used when the
		 * callback was inserted into the list.
		 * Since there is a clear dependency between loading
		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
		 * not required.
		 */
		list = &fp_ops->qp.enq_cb[qp_id];
		rte_rcu_qsbr_thread_online(list->qsbr, 0);
		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);

		while (cb != NULL) {
			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
					cb->arg);
			cb = cb->next;
		}

		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
	}
#endif

	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
	return fp_ops->enqueue_burst(qp, ops, nb_ops);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTODEV_H_ */