/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <[email protected]>
 * Copyright (c) 2002 David S. Miller ([email protected])
 * Copyright (c) 2005 Herbert Xu <[email protected]>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <[email protected]>
 * and Nettle, by Niels Möller.
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/completion.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
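/*
 * Example (illustrative sketch): a module implementing an algorithm uses
 * this macro once per algorithm name, so that lookups of "crypto-<name>"
 * can autoload the module; the algorithm name "aes" below is just an
 * example::
 *
 *	MODULE_ALIAS_CRYPTO("aes");
 */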
/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via an instruction set or similar means.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API.
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

/*
 * Set if the algorithm has a ->setkey() method but can be used without
 * calling it first, i.e. there is a default key.
 */
#define CRYPTO_ALG_OPTIONAL_KEY		0x00004000

/*
 * Don't trigger module loading.
 */
#define CRYPTO_NOLOAD			0x00008000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_NEED_KEY		0x00000001

#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_RES_MASK		0xfff00000

#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS	0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN	0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED	0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN	0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS	0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		128

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_tfm;
struct crypto_type;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};

struct ablkcipher_request {
	struct crypto_async_request base;

	unsigned int nbytes;

	void *info;

	struct scatterlist *src;
	struct scatterlist *dst;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 *		 smallest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 *		 largest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 *	    program a supplied key into the hardware or store the key in the
 *	    transformation context for programming it later. Note that this
 *	    function does modify the transformation context. This function can
 *	    be called multiple times during the existence of the transformation
 *	    object, so one must make sure the key is properly reprogrammed into
 *	    the hardware. This function is also responsible for checking the
 *	    key length for validity.
 *	    In case a software fallback was put in place in the @cra_init
 *	    call, this function might need to use the fallback if the
 *	    algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 *	     the supplied scatterlist containing the blocks of data. The crypto
 *	     API consumer is responsible for aligning the entries of the
 *	     scatterlist properly and making sure the chunks are correctly
 *	     sized. In case a software fallback was put in place in the
 *	     @cra_init call, this function might need to use the fallback if
 *	     the algorithm doesn't support all of the key sizes. In case the
 *	     key was stored in transformation context, the key might need to be
 *	     re-programmed into the hardware in this function. This function
 *	     shall not modify the transformation context, as this function may
 *	     be called in parallel with the same transformation object.
 * @decrypt: Decrypt a scatterlist of blocks. This is a reverse counterpart to
 *	     @encrypt and the conditions are exactly the same.
 * @ivsize: IV size applicable for transformation. The consumer must provide an
 *	    IV of exactly that size to perform the encrypt or decrypt operation.
 *
 * All fields except @ivsize are mandatory and must be filled.
 */
struct ablkcipher_alg {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *		     the smallest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
 *		     include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *		     the largest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MAX_KEY_SIZE"
 *		     include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *		program a supplied key into the hardware or store the key in the
 *		transformation context for programming it later. Note that this
 *		function does modify the transformation context. This function
 *		can be called multiple times during the existence of the
 *		transformation object, so one must make sure the key is properly
 *		reprogrammed into the hardware. This function is also
 *		responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *		 single block of data, which must be @cra_blocksize big. This
 *		 always operates on a full @cra_blocksize and it is not possible
 *		 to encrypt a block of smaller size. The supplied buffers must
 *		 therefore also be at least of @cra_blocksize size. Both the
 *		 input and output buffers are always aligned to @cra_alignmask.
 *		 In case either of the input or output buffer supplied by the
 *		 user of the crypto API is not aligned to @cra_alignmask, the
 *		 crypto API will re-align the buffers. The re-alignment means
 *		 that a new buffer will be allocated, the data will be copied
 *		 into the new buffer, then the processing will happen on the
 *		 new buffer, then the data will be copied back into the
 *		 original buffer and finally the new buffer will be freed. In
 *		 case a software fallback was put in place in the @cra_init
 *		 call, this function might need to use the fallback if the
 *		 algorithm doesn't support all of the key sizes. In case the
 *		 key was stored in transformation context, the key might need
 *		 to be re-programmed into the hardware in this function. This
 *		 function shall not modify the transformation context, as this
 *		 function may be called in parallel with the same
 *		 transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *		 @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
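/*
 * To make the callback contract concrete, the following is a hedged sketch
 * of a toy (and cryptographically useless) single-block XOR cipher. All
 * names and sizes are hypothetical. The key is kept in the per-tfm context
 * obtained via crypto_tfm_ctx(), declared later in this header; note that
 * cia_encrypt/cia_decrypt only read the context, as required above::
 *
 *	#define XOR_DEMO_BLOCK_SIZE	16
 *
 *	struct xor_demo_ctx {
 *		u8 key[XOR_DEMO_BLOCK_SIZE];
 *	};
 *
 *	static int xor_demo_setkey(struct crypto_tfm *tfm, const u8 *key,
 *				   unsigned int keylen)
 *	{
 *		struct xor_demo_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		if (keylen != XOR_DEMO_BLOCK_SIZE)
 *			return -EINVAL;
 *		memcpy(ctx->key, key, keylen);
 *		return 0;
 *	}
 *
 *	static void xor_demo_encrypt(struct crypto_tfm *tfm, u8 *dst,
 *				     const u8 *src)
 *	{
 *		const struct xor_demo_ctx *ctx = crypto_tfm_ctx(tfm);
 *		int i;
 *
 *		for (i = 0; i < XOR_DEMO_BLOCK_SIZE; i++)
 *			dst[i] = src[i] ^ ctx->key[i];
 *	}
 *
 *	static void xor_demo_decrypt(struct crypto_tfm *tfm, u8 *dst,
 *				     const u8 *src)
 *	{
 *		xor_demo_encrypt(tfm, dst, src);  // XOR is an involution
 *	}
 *
 * How these hooks are wired into a struct crypto_alg is sketched after that
 * structure's definition below.
 */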
/**
 * struct compress_alg - compression/decompression algorithm
 * @coa_compress: Compress a buffer of specified length, storing the resulting
 *		  data in the specified buffer. Return the length of the
 *		  compressed data in dlen.
 * @coa_decompress: Decompress the source buffer, storing the uncompressed
 *		    data in the specified buffer. The length of the data is
 *		    returned in dlen.
 *
 * All fields are mandatory.
 */
struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};

#ifdef CONFIG_CRYPTO_STATS
/*
 * struct crypto_istat_aead - statistics for AEAD algorithm
 * @encrypt_cnt:	number of encrypt requests
 * @encrypt_tlen:	total data size handled by encrypt requests
 * @decrypt_cnt:	number of decrypt requests
 * @decrypt_tlen:	total data size handled by decrypt requests
 * @err_cnt:		number of errors for AEAD requests
 */
struct crypto_istat_aead {
	atomic64_t encrypt_cnt;
	atomic64_t encrypt_tlen;
	atomic64_t decrypt_cnt;
	atomic64_t decrypt_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_akcipher - statistics for akcipher algorithm
 * @encrypt_cnt:	number of encrypt requests
 * @encrypt_tlen:	total data size handled by encrypt requests
 * @decrypt_cnt:	number of decrypt requests
 * @decrypt_tlen:	total data size handled by decrypt requests
 * @verify_cnt:		number of verify operations
 * @sign_cnt:		number of sign requests
 * @err_cnt:		number of errors for akcipher requests
 */
struct crypto_istat_akcipher {
	atomic64_t encrypt_cnt;
	atomic64_t encrypt_tlen;
	atomic64_t decrypt_cnt;
	atomic64_t decrypt_tlen;
	atomic64_t verify_cnt;
	atomic64_t sign_cnt;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_cipher - statistics for cipher algorithm
 * @encrypt_cnt:	number of encrypt requests
 * @encrypt_tlen:	total data size handled by encrypt requests
 * @decrypt_cnt:	number of decrypt requests
 * @decrypt_tlen:	total data size handled by decrypt requests
 * @err_cnt:		number of errors for cipher requests
 */
struct crypto_istat_cipher {
	atomic64_t encrypt_cnt;
	atomic64_t encrypt_tlen;
	atomic64_t decrypt_cnt;
	atomic64_t decrypt_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_compress - statistics for compress algorithm
 * @compress_cnt:	number of compress requests
 * @compress_tlen:	total data size handled by compress requests
 * @decompress_cnt:	number of decompress requests
 * @decompress_tlen:	total data size handled by decompress requests
 * @err_cnt:		number of errors for compress requests
 */
struct crypto_istat_compress {
	atomic64_t compress_cnt;
	atomic64_t compress_tlen;
	atomic64_t decompress_cnt;
	atomic64_t decompress_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_hash - statistics for hash algorithm
 * @hash_cnt:	number of hash requests
 * @hash_tlen:	total data size hashed
 * @err_cnt:	number of errors for hash requests
 */
struct crypto_istat_hash {
	atomic64_t hash_cnt;
	atomic64_t hash_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_kpp - statistics for KPP algorithm
 * @setsecret_cnt:		number of setsecret operations
 * @generate_public_key_cnt:	number of generate_public_key operations
 * @compute_shared_secret_cnt:	number of compute_shared_secret operations
 * @err_cnt:			number of errors for KPP requests
 */
struct crypto_istat_kpp {
	atomic64_t setsecret_cnt;
	atomic64_t generate_public_key_cnt;
	atomic64_t compute_shared_secret_cnt;
	atomic64_t err_cnt;
};
/*
 * struct crypto_istat_rng - statistics for RNG algorithm
 * @generate_cnt:	number of RNG generate requests
 * @generate_tlen:	total size of data generated by the RNG
 * @seed_cnt:		number of times the RNG was seeded
 * @err_cnt:		number of errors for RNG requests
 */
struct crypto_istat_rng {
	atomic64_t generate_cnt;
	atomic64_t generate_tlen;
	atomic64_t seed_cnt;
	atomic64_t err_cnt;
};
#endif /* CONFIG_CRYPTO_STATS */

#define cra_ablkcipher	cra_u.ablkcipher
#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	       used for fine-tuning the description of the transformation
 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value.
 *		   In case of a HASH transformation, it is possible for a
 *		   smaller block than @cra_blocksize to be passed to the crypto
 *		   API for transformation; in case of any other transformation
 *		   type, an error will be returned upon any attempt to
 *		   transform smaller than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The
 *		   data buffer containing the input data for the algorithm must
 *		   be aligned to this alignment mask. The data buffer for the
 *		   output data must be aligned to this alignment mask. Note that
 *		   the Crypto API will do the re-alignment in software, but
 *		   only under special conditions and there is a performance hit.
 *		   The re-alignment happens at these occasions for different
 *		   @cra_u types: cipher -- For both input data and output data
 *		   buffer; ahash -- For output hash destination buf; shash --
 *		   For output hash destination buf.
 *		   This is needed on hardware which is flawed by design and
 *		   cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with the same @cra_name are
 *		  available to the Crypto API, the kernel will use the one with
 *		  the highest @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation. This can be
 *		     any arbitrary value, but in the usual case, this contains
 *		     the name of the chip or provider and the name of the
 *		     transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options, such as
 *	      &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation
 *	   selected by @cra_type and @cra_flags above, the associated structure
 *	   must be filled with callbacks. This field might be empty. This is
 *	   the case for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	      is called only once at the instantiation time, right after the
 *	      transformation context was allocated. In case the cryptographic
 *	      hardware has some special requirements which need to be handled
 *	      by software, this function shall check for the precise
 *	      requirement of the transformation and put any software fallbacks
 *	      in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	      counterpart to @cra_init, used to remove various changes set in
 *	      @cra_init.
 * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
 *		      definition. See @struct @ablkcipher_alg.
 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
 *		  definition. See @struct @cipher_alg.
 * @cra_u.compress: Union member which contains a (de)compression algorithm.
 *		    See @struct @compress_alg.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * @stats: union of all possible crypto_istat_xxx structures
 * @stats.aead:		statistics for AEAD algorithm
 * @stats.akcipher:	statistics for akcipher algorithm
 * @stats.cipher:	statistics for cipher algorithm
 * @stats.compress:	statistics for compress algorithm
 * @stats.hash:		statistics for hash algorithm
 * @stats.rng:		statistics for rng algorithm
 * @stats.kpp:		statistics for KPP algorithm
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	refcount_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct ablkcipher_alg ablkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;

#ifdef CONFIG_CRYPTO_STATS
	union {
		struct crypto_istat_aead aead;
		struct crypto_istat_akcipher akcipher;
		struct crypto_istat_cipher cipher;
		struct crypto_istat_compress compress;
		struct crypto_istat_hash hash;
		struct crypto_istat_rng rng;
		struct crypto_istat_kpp kpp;
	} stats;
#endif /* CONFIG_CRYPTO_STATS */

} CRYPTO_MINALIGN_ATTR;
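/*
 * Continuing the hypothetical XOR sketch from above, a module would wire
 * the cia_* hooks into @cra_u.cipher and register the result with
 * crypto_register_alg(), declared further below. All names, sizes and the
 * priority are illustrative only::
 *
 *	static struct crypto_alg xor_demo_alg = {
 *		.cra_name		= "xor-demo",
 *		.cra_driver_name	= "xor-demo-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= XOR_DEMO_BLOCK_SIZE,
 *		.cra_ctxsize		= sizeof(struct xor_demo_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_u			= {
 *			.cipher = {
 *				.cia_min_keysize = XOR_DEMO_BLOCK_SIZE,
 *				.cia_max_keysize = XOR_DEMO_BLOCK_SIZE,
 *				.cia_setkey	 = xor_demo_setkey,
 *				.cia_encrypt	 = xor_demo_encrypt,
 *				.cia_decrypt	 = xor_demo_decrypt,
 *			}
 *		}
 *	};
 *
 *	static int __init xor_demo_init(void)
 *	{
 *		return crypto_register_alg(&xor_demo_alg);
 *	}
 *
 *	static void __exit xor_demo_exit(void)
 *	{
 *		crypto_unregister_alg(&xor_demo_alg);
 *	}
 *
 *	module_init(xor_demo_init);
 *	module_exit(xor_demo_exit);
 *	MODULE_ALIAS_CRYPTO("xor-demo");
 */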
#ifdef CONFIG_CRYPTO_STATS
void crypto_stats_init(struct crypto_alg *alg);
void crypto_stats_get(struct crypto_alg *alg);
void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
#else
static inline void crypto_stats_init(struct crypto_alg *alg)
{}
static inline void crypto_stats_get(struct crypto_alg *alg)
{}
static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
{}
static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
{}
#endif

/*
 * A helper struct for waiting for completion of async crypto ops
 */
struct crypto_wait {
	struct completion completion;
	int err;
};

/*
 * Macro for declaring a crypto op async wait object on stack
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions
 */
void crypto_req_done(struct crypto_async_request *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	}

	return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}
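/*
 * The helpers above are commonly used to drive an asynchronous request
 * synchronously: crypto_req_done() is installed as the completion callback
 * with &wait as its data, and crypto_wait_req() turns -EINPROGRESS or
 * -EBUSY into a blocking wait for that completion. A hedged sketch of the
 * pattern, with the request setup elided (a fuller ablkcipher example
 * appears later in this header)::
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					CRYPTO_TFM_REQ_MAY_SLEEP,
 *					crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 */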
/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct ablkcipher_tfm {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);

	struct crypto_ablkcipher *base;

	unsigned int ivsize;
	unsigned int reqsize;
};

struct cipher_tfm {
	int (*cit_setkey)(struct crypto_tfm *tfm,
			  const u8 *key, unsigned int keylen);
	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_tfm {
	int (*cot_compress)(struct crypto_tfm *tfm,
			    const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen);
	int (*cot_decompress)(struct crypto_tfm *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen);
};

#define crt_ablkcipher	crt_u.ablkcipher
#define crt_cipher	crt_u.cipher
#define crt_compress	crt_u.compress

struct crypto_tfm {

	u32 crt_flags;

	union {
		struct ablkcipher_tfm ablkcipher;
		struct cipher_tfm cipher;
		struct compress_tfm compress;
	} crt_u;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
	struct crypto_tfm base;
};

struct crypto_cipher {
	struct crypto_tfm base;
};

struct crypto_comp {
	struct crypto_tfm base;
};

enum {
	CRYPTOA_UNSPEC,
	CRYPTOA_ALG,
	CRYPTOA_TYPE,
	CRYPTOA_U32,
	__CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

struct crypto_attr_u32 {
	u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}
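/*
 * For illustration, a hedged sketch of how an implementation's @cra_init
 * hook might use these helpers: crypto_tfm_ctx() reaches the private
 * per-tfm context, and the query helpers are handy for diagnostics. All
 * names here are hypothetical::
 *
 *	static int demo_cra_init(struct crypto_tfm *tfm)
 *	{
 *		struct demo_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		ctx->rounds = 10;
 *		pr_debug("init %s (%s), blocksize %u\n",
 *			 crypto_tfm_alg_name(tfm),
 *			 crypto_tfm_alg_driver_name(tfm),
 *			 crypto_tfm_alg_blocksize(tfm));
 *		return 0;
 *	}
 */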
/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_ablkcipher *)tfm;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * Asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately, before the operation has completed.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished if it invoked multiple requests in parallel.
 * This state information is unused by the kernel crypto API.
 */

static inline struct crypto_tfm *crypto_ablkcipher_tfm(
	struct crypto_ablkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->ivsize;
}

/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
					       u32 flags)
{
	crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
						 u32 flags)
{
	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}

/**
 * crypto_ablkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ablkcipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
 * providing a 16 byte key for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);

	return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
 * @req: ablkcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_ablkcipher handle that is registered with the given
 * ablkcipher_request data structure.
 *
 * Return: crypto_ablkcipher handle
 */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
	struct ablkcipher_request *req)
{
	return __crypto_ablkcipher_cast(req->base.tfm);
}

/**
 * crypto_ablkcipher_encrypt() - encrypt plaintext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Encrypt plaintext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	struct crypto_alg *alg = crt->base->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crt->encrypt(req);
	crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
	return ret;
}

/**
 * crypto_ablkcipher_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	struct crypto_alg *alg = crt->base->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crt->decrypt(req);
	crypto_stats_ablkcipher_decrypt(nbytes, ret, alg);
	return ret;
}

/**
 * DOC: Asynchronous Cipher Request Handle
 *
 * The ablkcipher_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple ablkcipher_request instances), pointer
 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the ablkcipher_request_* API calls in a similar way as the
 * ablkcipher handle does to the crypto_ablkcipher_* API calls.
 */

/**
 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_ablkcipher_reqsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->reqsize;
}

/**
 * ablkcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ablkcipher handle in the request
 * data structure with a different one.
 */
static inline void ablkcipher_request_set_tfm(
	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
	req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
}

static inline struct ablkcipher_request *ablkcipher_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct ablkcipher_request, base);
}

/**
 * ablkcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ablkcipher
 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of memory
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
	struct crypto_ablkcipher *tfm, gfp_t gfp)
{
	struct ablkcipher_request *req;

	req = kmalloc(sizeof(struct ablkcipher_request) +
		      crypto_ablkcipher_reqsize(tfm), gfp);

	if (likely(req))
		ablkcipher_request_set_tfm(req, tfm);

	return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
	kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once
 * the cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template::
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
	struct ablkcipher_request *req,
	u32 flags, crypto_completion_t compl, void *data)
{
	req->base.complete = compl;
	req->base.data = data;
	req->base.flags = flags;
}

/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *	by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the plaintext.
 */
static inline void ablkcipher_request_set_crypt(
	struct ablkcipher_request *req,
	struct scatterlist *src, struct scatterlist *dst,
	unsigned int nbytes, void *iv)
{
	req->src = src;
	req->dst = dst;
	req->nbytes = nbytes;
	req->info = iv;
}
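/*
 * Putting the pieces together, a hedged end-to-end sketch of one
 * synchronous-style, in-place encryption over the asynchronous API. The
 * @tfm handle is assumed to have been obtained and keyed elsewhere, and
 * the IV buffer must match crypto_ablkcipher_ivsize(tfm). The sketch
 * needs <linux/scatterlist.h> for sg_init_one()::
 *
 *	static int demo_encrypt(struct crypto_ablkcipher *tfm,
 *				u8 *buf, unsigned int len, u8 *iv)
 *	{
 *		struct ablkcipher_request *req;
 *		struct scatterlist sg;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int err;
 *
 *		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *		if (!req)
 *			return -ENOMEM;
 *
 *		// Complete via crypto_req_done() so crypto_wait_req() can block.
 *		ablkcipher_request_set_callback(req,
 *				CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				CRYPTO_TFM_REQ_MAY_SLEEP,
 *				crypto_req_done, &wait);
 *
 *		// Encrypt in place: @buf is both source and destination.
 *		sg_init_one(&sg, buf, len);
 *		ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *
 *		err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 *
 *		ablkcipher_request_free(req);
 *		return err;
 *	}
 */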
/**
 * DOC: Single Block Cipher API
 *
 * The single block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
 *
 * Using the single block cipher API calls, operations with the basic cipher
 * primitive can be implemented. These cipher primitives exclude any block
 * chaining operations including IV handling.
 *
 * The purpose of this single block cipher API is to support the implementation
 * of templates or other concepts that only need to perform the cipher operation
 * on one block at a time. Templates invoke the underlying cipher primitive
 * block-wise and process either the input or the output data of these cipher
 * operations.
 */

static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_cipher *)tfm;
}

static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
	return __crypto_cipher_cast(tfm);
}

/**
 * crypto_alloc_cipher() - allocate single block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a single block cipher. The returned struct
 * crypto_cipher is the cipher handle that is required for any subsequent API
 * invocation for that single block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
							u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_cipher() - zeroize and free the single block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
	crypto_free_tfm(crypto_cipher_tfm(tfm));
}

/**
 * crypto_has_cipher() - Search for the availability of a single block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the single block cipher is known to the kernel crypto API;
 *	   false otherwise
 */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->crt_cipher;
}

/**
 * crypto_cipher_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the single block cipher referenced with the cipher handle
 * tfm is returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}

static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}

static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}

static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
					   u32 flags)
{
	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}

static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
					     u32 flags)
{
	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}

/**
 * crypto_cipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the single block cipher referenced by the
 * cipher handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
 * providing a 16 byte key for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
						  key, keylen);
}

/**
 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the ciphertext
 * @src: buffer holding the plaintext to be encrypted
 *
 * Invoke the encryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

/**
 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the plaintext
 * @src: buffer holding the ciphertext to be decrypted
 *
 * Invoke the decryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}
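/*
 * A short usage sketch for the single block cipher API, encrypting one
 * 16-byte block with AES-128; the function name is illustrative::
 *
 *	static int demo_one_block(const u8 *key, const u8 *in, u8 *out)
 *	{
 *		struct crypto_cipher *tfm;
 *		int err;
 *
 *		tfm = crypto_alloc_cipher("aes", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		err = crypto_cipher_setkey(tfm, key, 16);	// AES-128
 *		if (!err)
 *			crypto_cipher_encrypt_one(tfm, out, in);
 *
 *		crypto_free_cipher(tfm);
 *		return err;
 *	}
 */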
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
	       CRYPTO_ALG_TYPE_MASK);
	return __crypto_comp_cast(tfm);
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
	return &crypto_comp_tfm(tfm)->crt_compress;
}

static inline int crypto_comp_compress(struct crypto_comp *tfm,
				       const u8 *src, unsigned int slen,
				       u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
						  src, slen, dst, dlen);
}

static inline int crypto_comp_decompress(struct crypto_comp *tfm,
					 const u8 *src, unsigned int slen,
					 u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
						    src, slen, dst, dlen);
}
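/*
 * A hedged sketch of the (de)compression interface. "deflate" is used as a
 * commonly registered algorithm name; on entry *dlen holds the capacity of
 * @dst and on success it is updated to the number of bytes produced::
 *
 *	static int demo_compress(const u8 *src, unsigned int slen,
 *				 u8 *dst, unsigned int *dlen)
 *	{
 *		struct crypto_comp *tfm;
 *		int err;
 *
 *		tfm = crypto_alloc_comp("deflate", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		err = crypto_comp_compress(tfm, src, slen, dst, dlen);
 *
 *		crypto_free_comp(tfm);
 *		return err;
 *	}
 */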
#endif	/* _LINUX_CRYPTO_H */