/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <[email protected]>
 * Copyright (c) 2002 David S. Miller ([email protected])
 * Copyright (c) 2005 Herbert Xu <[email protected]>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <[email protected]>
 * and Nettle, by Niels Möller.
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/completion.h>

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set if the algorithm (or an algorithm which it uses) requires another
 * algorithm of the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm, it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/*
 * Set this bit if the algorithm is hardware accelerated but not available
 * to userspace directly, e.g. through a CPU instruction set.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API.
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

/*
 * Set if the algorithm has a ->setkey() method but can be used without
 * calling it first, i.e. there is a default key.
 */
#define CRYPTO_ALG_OPTIONAL_KEY	0x00004000

/*
 * Don't trigger module loading.
 */
#define CRYPTO_NOLOAD			0x00008000

/*
 * The algorithm may allocate memory during request processing, i.e. during
 * encryption, decryption, or hashing. Users can request an algorithm with this
 * flag unset if they can't handle memory allocation failures; see the example
 * following this definition.
 *
 * This flag is currently only implemented for algorithms of type "skcipher",
 * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not
 * have this flag set even if they allocate memory.
 *
 * In some edge cases, algorithms can allocate memory regardless of this flag.
 * To avoid these cases, users must obey the following usage constraints:
 *  skcipher:
 *   - The IV buffer and all scatterlist elements must be aligned to the
 *     algorithm's alignmask.
 *   - If the data were to be divided into chunks of size
 *     crypto_skcipher_walksize() (with any remainder going at the end), no
 *     chunk can cross a page boundary or a scatterlist element boundary.
 *  aead:
 *   - The IV buffer and all scatterlist elements must be aligned to the
 *     algorithm's alignmask.
 *   - The first scatterlist element must contain all the associated data,
 *     and its pages must be !PageHighMem.
 *   - If the plaintext/ciphertext were to be divided into chunks of size
 *     crypto_aead_walksize() (with the remainder going at the end), no chunk
 *     can cross a page boundary or a scatterlist element boundary.
 *  ahash:
 *   - The result buffer must be aligned to the algorithm's alignmask.
 *   - crypto_ahash_finup() must not be used unless the algorithm implements
 *     ->finup() natively.
 */
#define CRYPTO_ALG_ALLOCATES_MEMORY	0x00010000
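/*
 * Example (illustrative sketch, not part of this header): a user that cannot
 * tolerate allocation failures can put CRYPTO_ALG_ALLOCATES_MEMORY into the
 * allocation mask so that only implementations with the flag clear are
 * selected. crypto_alloc_skcipher() is declared in <crypto/skcipher.h>;
 * "xts(aes)" is merely an example algorithm name.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0,
 *				    CRYPTO_ALG_ALLOCATES_MEMORY);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */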
/*
 * Mark an algorithm as a service implementation only usable by a
 * template and never by a normal user of the kernel crypto API.
 * This is intended to be used by algorithms that are themselves
 * not FIPS-approved but may instead be used to implement parts of
 * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
 */
#define CRYPTO_ALG_FIPS_INTERNAL	0x00020000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_NEED_KEY		0x00000001

#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS	0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		128

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. On architectures that support non-cache coherent
 * DMA, such as ARM or arm64, it also takes into account the minimal alignment
 * that is required to ensure that the context struct member does not share any
 * cachelines with the rest of the struct. This is needed to ensure that cache
 * maintenance for non-coherent DMA (cache invalidation in particular) does not
 * affect data that may be accessed by the CPU concurrently.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct scatterlist;
struct crypto_async_request;
struct crypto_tfm;
struct crypto_type;

typedef void (*crypto_completion_t)(void *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};
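/*
 * Example (illustrative sketch): a callback matching crypto_completion_t.
 * The struct my_op and my_op_done() names are hypothetical. Note that a
 * request accepted with CRYPTO_TFM_REQ_MAY_BACKLOG may first complete with
 * -EINPROGRESS (meaning it left the backlog and started processing) before
 * the callback runs again with the final status.
 *
 *	struct my_op {
 *		struct completion done;
 *		int err;
 *	};
 *
 *	static void my_op_done(void *data, int err)
 *	{
 *		struct my_op *op = data;
 *
 *		if (err == -EINPROGRESS)
 *			return;
 *
 *		op->err = err;
 *		complete(&op->done);
 *	}
 */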
/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *		     the smallest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
 *		     include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *		     the largest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MAX_KEY_SIZE"
 *		     include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *		program a supplied key into the hardware or store the key in
 *		the transformation context for programming it later. Note that
 *		this function does modify the transformation context. This
 *		function can be called multiple times during the existence of
 *		the transformation object, so one must make sure the key is
 *		properly reprogrammed into the hardware. This function is also
 *		responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *		 single block of data, which must be @cra_blocksize big. This
 *		 always operates on a full @cra_blocksize and it is not
 *		 possible to encrypt a block of smaller size. The supplied
 *		 buffers must therefore also be at least of @cra_blocksize
 *		 size. Both the input and output buffers are always aligned to
 *		 @cra_alignmask. In case either of the buffers supplied by the
 *		 user of the crypto API is not aligned to @cra_alignmask, the
 *		 crypto API will re-align the buffers. The re-alignment means
 *		 that a new buffer will be allocated, the data will be copied
 *		 into the new buffer, the processing will happen on the new
 *		 buffer, the data will be copied back into the original buffer,
 *		 and finally the new buffer will be freed. In case a software
 *		 fallback was put in place in the @cra_init call, this function
 *		 might need to use the fallback if the algorithm doesn't
 *		 support all of the key sizes. In case the key was stored in
 *		 the transformation context, the key might need to be
 *		 re-programmed into the hardware in this function. This
 *		 function shall not modify the transformation context, as this
 *		 function may be called in parallel with the same
 *		 transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *		 @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled. A sketch of how these
 * callbacks might look in a driver follows the structure definition.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
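/*
 * Example (illustrative sketch): cipher_alg callbacks for a hypothetical
 * single-block AES engine. The mydev_* names and struct mydev_ctx are made
 * up; AES_KEYSIZE_* come from <crypto/aes.h>, and crypto_tfm_ctx() is the
 * usual accessor for the tfm context (see <crypto/algapi.h> in recent
 * kernels). Only the signatures and the key-length check reflect what this
 * structure actually requires.
 *
 *	static int mydev_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
 *				    unsigned int keylen)
 *	{
 *		struct mydev_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256)
 *			return -EINVAL;
 *
 *		memcpy(ctx->key, key, keylen);
 *		ctx->keylen = keylen;
 *		return 0;
 *	}
 *
 *	static void mydev_aes_encrypt(struct crypto_tfm *tfm, u8 *dst,
 *				      const u8 *src)
 *	{
 *		mydev_one_block(crypto_tfm_ctx(tfm), dst, src, MYDEV_ENC);
 *	}
 *
 *	static void mydev_aes_decrypt(struct crypto_tfm *tfm, u8 *dst,
 *				      const u8 *src)
 *	{
 *		mydev_one_block(crypto_tfm_ctx(tfm), dst, src, MYDEV_DEC);
 *	}
 *
 * These callbacks are wired up through the cipher member of the cra_u union
 * of struct crypto_alg, as sketched after that structure below.
 */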
/**
 * struct compress_alg - compression/decompression algorithm
 * @coa_compress: Compress a buffer of specified length, storing the resulting
 *		  data in the specified buffer. Return the length of the
 *		  compressed data in dlen.
 * @coa_decompress: Decompress the source buffer, storing the uncompressed
 *		    data in the specified buffer. The length of the data is
 *		    returned in dlen.
 *
 * All fields are mandatory.
 */
struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};

#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	       used for fine-tuning the description of the transformation
 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value. In case
 *		   of a HASH transformation, it is possible for a smaller block
 *		   than @cra_blocksize to be passed to the crypto API for
 *		   transformation. In case of any other transformation type,
 *		   an error will be returned upon any attempt to transform
 *		   chunks smaller than @cra_blocksize.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffers. The
 *		   data buffer containing the input data for the algorithm must
 *		   be aligned to this alignment mask. The data buffer for the
 *		   output data must be aligned to this alignment mask. Note
 *		   that the Crypto API will do the re-alignment in software,
 *		   but only under special conditions and there is a performance
 *		   hit. The re-alignment happens at these occasions for
 *		   different @cra_u types: cipher -- For both input data and
 *		   output data buffer; ahash -- For output hash destination
 *		   buf; shash -- For output hash destination buf. This is
 *		   needed on hardware which is flawed by design and cannot pick
 *		   data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with the same @cra_name are
 *		  available to the Crypto API, the kernel will use the one with
 *		  the highest @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation. This can be
 *		     any arbitrary value, but in the usual case, this contains
 *		     the name of the chip or provider and the name of the
 *		     transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options, such as
 *	      &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation
 *	   selected by @cra_type and @cra_flags above, the associated structure
 *	   must be filled with callbacks. This field might be empty. This is
 *	   the case for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	      is used to initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation time,
 *	      right after the transformation context was allocated. In case the
 *	      cryptographic hardware has some special requirements which need
 *	      to be handled by software, this function shall check for the
 *	      precise requirement of the transformation and put any software
 *	      fallbacks in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	      counterpart to @cra_init, used to remove various changes set in
 *	      @cra_init.
 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
 *		  definition. See @struct @cipher_alg.
 * @cra_u.compress: Union member which contains a (de)compression algorithm.
 *		    See @struct @compress_alg.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 * An example initialization of this structure is sketched below it.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	refcount_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;
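/*
 * Example (illustrative sketch): a crypto_alg wiring up the hypothetical
 * mydev_aes_*() callbacks from the sketch after struct cipher_alg above.
 * The driver name, priority, and struct mydev_ctx are assumptions; the
 * AES_* constants come from <crypto/aes.h>.
 *
 *	static struct crypto_alg mydev_alg = {
 *		.cra_name		= "aes",
 *		.cra_driver_name	= "aes-mydev",
 *		.cra_priority		= 300,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= AES_BLOCK_SIZE,
 *		.cra_ctxsize		= sizeof(struct mydev_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_cipher		= {
 *			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 *			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 *			.cia_setkey		= mydev_aes_setkey,
 *			.cia_encrypt		= mydev_aes_encrypt,
 *			.cia_decrypt		= mydev_aes_decrypt,
 *		},
 *	};
 *
 * Registration would then typically happen from module init/exit using
 * crypto_register_alg(&mydev_alg) and crypto_unregister_alg(&mydev_alg),
 * declared later in this header.
 */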
/*
 * A helper struct for waiting for completion of async crypto ops.
 */
struct crypto_wait {
	struct completion completion;
	int err;
};

/*
 * Macro for declaring a crypto op async wait object on the stack.
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions.
 */
void crypto_req_done(void *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	}

	return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}
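/*
 * Example (illustrative sketch): the canonical pattern for driving an
 * asynchronous request to completion synchronously with the helpers above.
 * The skcipher request API used here is declared in <crypto/skcipher.h>;
 * req is assumed to be a fully set-up skcipher request.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					   CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * crypto_req_done() records the final status in wait->err and completes the
 * completion; crypto_wait_req() turns -EINPROGRESS and -EBUSY into a sleep
 * until that happens, then returns the final status.
 */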
/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
void crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
void crypto_unregister_algs(struct crypto_alg *algs, int count);

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct crypto_tfm {
	u32 crt_flags;

	int node;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_comp {
	struct crypto_tfm base;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

int crypto_comp_compress(struct crypto_comp *tfm,
			 const u8 *src, unsigned int slen,
			 u8 *dst, unsigned int *dlen);

int crypto_comp_decompress(struct crypto_comp *tfm,
			   const u8 *src, unsigned int slen,
			   u8 *dst, unsigned int *dlen);
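/*
 * Example (illustrative sketch): one-shot compression through the
 * crypto_comp interface declared above. "deflate" is merely an example
 * algorithm name; src/slen/dst/dst_size are caller-provided, and dst must
 * be sized for the worst-case output of the chosen algorithm.
 *
 *	struct crypto_comp *tfm;
 *	unsigned int dlen = dst_size;
 *	int err;
 *
 *	tfm = crypto_alloc_comp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_comp_compress(tfm, src, slen, dst, &dlen);
 *	crypto_free_comp(tfm);
 *	if (err)
 *		return err;
 *
 * On success, dlen holds the length of the compressed data.
 */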
#endif	/* _LINUX_CRYPTO_H */