/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/types.h>

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_AKCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_SIG		0x00000007
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set if the algorithm (or an algorithm which it uses) requires another
 * algorithm of the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/*
 * Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via an instruction set or similar mechanism.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API.
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

/*
 * Set if the algorithm has a ->setkey() method but can be used without
 * calling it first, i.e. there is a default key.
 */
#define CRYPTO_ALG_OPTIONAL_KEY		0x00004000

/*
 * Don't trigger module loading.
 */
#define CRYPTO_NOLOAD			0x00008000

/*
 * The algorithm may allocate memory during request processing, i.e. during
 * encryption, decryption, or hashing. Users can request an algorithm with this
 * flag unset if they can't handle memory allocation failures.
 *
 * This flag is currently only implemented for algorithms of type "skcipher",
 * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not
 * have this flag set even if they allocate memory.
 *
 * In some edge cases, algorithms can allocate memory regardless of this flag.
 * To avoid these cases, users must obey the following usage constraints:
 * skcipher:
 *	- The IV buffer and all scatterlist elements must be aligned to the
 *	  algorithm's alignmask.
 *	- If the data were to be divided into chunks of size
 *	  crypto_skcipher_walksize() (with any remainder going at the end), no
 *	  chunk can cross a page boundary or a scatterlist element boundary.
 * aead:
 *	- The IV buffer and all scatterlist elements must be aligned to the
 *	  algorithm's alignmask.
 *	- The first scatterlist element must contain all the associated data,
 *	  and its pages must be !PageHighMem.
 *	- If the plaintext/ciphertext were to be divided into chunks of size
 *	  crypto_aead_walksize() (with the remainder going at the end), no chunk
 *	  can cross a page boundary or a scatterlist element boundary.
 * ahash:
 *	- The result buffer must be aligned to the algorithm's alignmask.
 *	- crypto_ahash_finup() must not be used unless the algorithm implements
 *	  ->finup() natively.
 */
#define CRYPTO_ALG_ALLOCATES_MEMORY	0x00010000
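
/*
 * Usage sketch (illustrative only): a user that cannot tolerate allocation
 * failures requests an implementation with CRYPTO_ALG_ALLOCATES_MEMORY clear
 * by passing the flag in the mask, but not in the type, when allocating the
 * transform, e.g. via crypto_alloc_base() declared further down. The
 * algorithm name "aes" is just an example.
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("aes", CRYPTO_ALG_TYPE_CIPHER,
 *				CRYPTO_ALG_TYPE_MASK |
 *				CRYPTO_ALG_ALLOCATES_MEMORY);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */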

/*
 * Mark an algorithm as a service implementation only usable by a
 * template and never by a normal user of the kernel crypto API.
 * This is intended to be used by algorithms that are themselves
 * not FIPS-approved but may instead be used to implement parts of
 * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
 */
#define CRYPTO_ALG_FIPS_INTERNAL	0x00020000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_NEED_KEY		0x00000001

#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS	0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		128

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. On architectures that support non-cache coherent
 * DMA, such as ARM or arm64, it also takes into account the minimal alignment
 * that is required to ensure that the context struct member does not share any
 * cachelines with the rest of the struct. This is needed to ensure that cache
 * maintenance for non-coherent DMA (cache invalidation in particular) does not
 * affect data that may be accessed by the CPU concurrently.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct crypto_tfm;
struct crypto_type;
struct module;

typedef void (*crypto_completion_t)(void *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};
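
/*
 * Sketch of a completion callback matching crypto_completion_t. The
 * struct my_state type is hypothetical; the -EINPROGRESS notification is
 * only delivered for requests that were accepted onto a backlog. Most
 * users can instead pair crypto_req_done() with a struct crypto_wait,
 * both provided later in this header.
 *
 *	static void my_done(void *data, int err)
 *	{
 *		struct my_state *st = data;
 *
 *		if (err == -EINPROGRESS)
 *			return;
 *
 *		st->err = err;
 *		complete(&st->done);
 *	}
 */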

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *		     the smallest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
 *		     include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *		     the largest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MAX_KEY_SIZE"
 *		     include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *		program a supplied key into the hardware or store the key in the
 *		transformation context for programming it later. Note that this
 *		function does modify the transformation context. This function
 *		can be called multiple times during the existence of the
 *		transformation object, so one must make sure the key is properly
 *		reprogrammed into the hardware. This function is also
 *		responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *		 single block of data, which must be @cra_blocksize big. This
 *		 always operates on a full @cra_blocksize and it is not possible
 *		 to encrypt a block of smaller size. The supplied buffers must
 *		 therefore also be at least of @cra_blocksize size. Both the
 *		 input and output buffers are always aligned to @cra_alignmask.
 *		 In case either the input or the output buffer supplied by the
 *		 user of the crypto API is not aligned to @cra_alignmask, the
 *		 crypto API will re-align the buffers. The re-alignment means
 *		 that a new buffer will be allocated, the data will be copied
 *		 into the new buffer, the processing will happen on the new
 *		 buffer, the data will be copied back into the original buffer
 *		 and finally the new buffer will be freed. In case a software
 *		 fallback was put in place in the @cra_init call, this function
 *		 might need to use the fallback if the algorithm doesn't support
 *		 all of the key sizes. In case the key was stored in the
 *		 transformation context, the key might need to be re-programmed
 *		 into the hardware in this function. This function shall not
 *		 modify the transformation context, as this function may be
 *		 called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *		 @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

/**
 * struct compress_alg - compression/decompression algorithm
 * @coa_compress: Compress a buffer of specified length, storing the resulting
 *		  data in the specified buffer. Return the length of the
 *		  compressed data in dlen.
 * @coa_decompress: Decompress the source buffer, storing the uncompressed
 *		    data in the specified buffer. The length of the data is
 *		    returned in dlen.
 *
 * All fields are mandatory.
 */
struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};

#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See the CRYPTO_ALG_*
 *	       flags in include/linux/crypto.h for the flags which go in here.
 *	       Those are used for fine-tuning the description of the
 *	       transformation algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. Users must respect this value. For HASH
 *		   transformations, it is possible for a smaller block than
 *		   @cra_blocksize to be passed to the crypto API for
 *		   transformation; for any other transformation type, an error
 *		   will be returned upon any attempt to transform chunks
 *		   smaller than @cra_blocksize.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffers. Both
 *		   the buffer containing the input data and the buffer for the
 *		   output data must be aligned to this mask. Note that the
 *		   Crypto API will do the re-alignment in software, but only
 *		   under special conditions and there is a performance hit.
 *		   The re-alignment happens at these occasions for different
 *		   @cra_u types: cipher -- For both input data and output data
 *		   buffer; ahash -- For output hash destination buf; shash --
 *		   For output hash destination buf.
 *		   This is needed on hardware which is flawed by design and
 *		   cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with the same @cra_name are
 *		  available to the Crypto API, the kernel will use the one with
 *		  the highest @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation. This can be
 *		     any arbitrary value, but in the usual case, this contains
 *		     the name of the chip or provider and the name of the
 *		     transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options, such as
 *	      &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation
 *	   selected by @cra_type and @cra_flags above, the associated structure
 *	   must be filled with callbacks. This field might be empty. This is
 *	   the case for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	      is used to initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation time,
 *	      right after the transformation context was allocated. In case the
 *	      cryptographic hardware has some special requirements which need
 *	      to be handled by software, this function shall check for the
 *	      precise requirement of the transformation and put any software
 *	      fallbacks in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	      counterpart to @cra_init, used to remove various changes set in
 *	      @cra_init.
 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
 *		  definition. See @struct @cipher_alg.
 * @cra_u.compress: Union member which contains a (de)compression algorithm.
 *		    See @struct @compress_alg.
 * @cra_module: Owner of this transformation implementation. Set to
 *		THIS_MODULE.
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	refcount_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;
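
/*
 * Registration sketch for a single-block cipher. Everything named foo_*
 * here, as well as the key sizes, block size and priority, is an assumption
 * made for illustration; crypto_register_alg()/crypto_unregister_alg() are
 * the management functions named in the DOC section above.
 *
 *	static struct crypto_alg foo_alg = {
 *		.cra_name	 = "foo",
 *		.cra_driver_name = "foo-generic",
 *		.cra_priority	 = 100,
 *		.cra_flags	 = CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize	 = 16,
 *		.cra_ctxsize	 = sizeof(struct foo_ctx),
 *		.cra_module	 = THIS_MODULE,
 *		.cra_cipher	 = {
 *			.cia_min_keysize = 16,
 *			.cia_max_keysize = 32,
 *			.cia_setkey	 = foo_setkey,
 *			.cia_encrypt	 = foo_encrypt,
 *			.cia_decrypt	 = foo_decrypt,
 *		},
 *	};
 *
 *	err = crypto_register_alg(&foo_alg);
 */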

/*
 * A helper struct for waiting for completion of async crypto ops
 */
struct crypto_wait {
	struct completion completion;
	int err;
};

/*
 * Macro for declaring a crypto op async wait object on stack
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions
 */
void crypto_req_done(void *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	}

	return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}
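
/*
 * Typical waiting pattern (sketch): tie an async request's completion to
 * a crypto_wait via crypto_req_done(), then let crypto_wait_req() turn
 * -EINPROGRESS/-EBUSY into a synchronous wait. The skcipher request used
 * here is only an example of an async crypto op; it comes from the
 * skcipher API, not from this header.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */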

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct crypto_tfm {
	refcount_t refcnt;

	u32 crt_flags;

	int node;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_comp {
	struct crypto_tfm base;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

int crypto_comp_compress(struct crypto_comp *tfm,
			 const u8 *src, unsigned int slen,
			 u8 *dst, unsigned int *dlen);

int crypto_comp_decompress(struct crypto_comp *tfm,
			   const u8 *src, unsigned int slen,
			   u8 *dst, unsigned int *dlen);
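
/*
 * Compression usage sketch: allocate a transform by algorithm name,
 * compress into a caller-provided buffer, and release the transform.
 * The algorithm name "lzo" and the src/dst buffers are illustrative;
 * on entry dlen holds the destination capacity, and on success it is
 * updated to the size of the compressed output.
 *
 *	struct crypto_comp *comp;
 *	unsigned int dlen = sizeof(dst);
 *	int err;
 *
 *	comp = crypto_alloc_comp("lzo", 0, 0);
 *	if (IS_ERR(comp))
 *		return PTR_ERR(comp);
 *
 *	err = crypto_comp_compress(comp, src, slen, dst, &dlen);
 *	crypto_free_comp(comp);
 */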

#endif	/* _LINUX_CRYPTO_H */