xref: /linux-6.15/include/linux/crypto.h (revision 76d09ea7)
1 /*
2  * Scatterlist Cryptographic API.
3  *
4  * Copyright (c) 2002 James Morris <[email protected]>
5  * Copyright (c) 2002 David S. Miller ([email protected])
6  * Copyright (c) 2005 Herbert Xu <[email protected]>
7  *
8  * Portions derived from Cryptoapi, by Alexander Kjeldaas <[email protected]>
9  * and Nettle, by Niels Möller.
10  *
11  * This program is free software; you can redistribute it and/or modify it
12  * under the terms of the GNU General Public License as published by the Free
13  * Software Foundation; either version 2 of the License, or (at your option)
14  * any later version.
15  *
16  */
17 #ifndef _LINUX_CRYPTO_H
18 #define _LINUX_CRYPTO_H
19 
20 #include <linux/atomic.h>
21 #include <linux/kernel.h>
22 #include <linux/list.h>
23 #include <linux/bug.h>
24 #include <linux/slab.h>
25 #include <linux/string.h>
26 #include <linux/uaccess.h>
27 #include <linux/completion.h>
28 
29 /*
30  * Autoloaded crypto modules should only use a prefixed name to avoid allowing
31  * arbitrary modules to be loaded. Loading from userspace may still need the
32  * unprefixed names, so those aliases are retained as well.
33  * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
34  * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
35  * expands twice on the same line. Instead, use a separate base name for the
36  * alias.
37  */
38 #define MODULE_ALIAS_CRYPTO(name)	\
39 		__MODULE_INFO(alias, alias_userspace, name);	\
40 		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
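
/*
 * Illustrative usage sketch (the algorithm name "aes" is only an example):
 * a module providing "aes" would add
 *
 *	MODULE_ALIAS_CRYPTO("aes");
 *
 * which creates both the unprefixed "aes" alias for explicit userspace
 * requests and the "crypto-aes" alias used for in-kernel autoloading.
 */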
41 
42 /*
43  * Algorithm masks and types.
44  */
45 #define CRYPTO_ALG_TYPE_MASK		0x0000000f
46 #define CRYPTO_ALG_TYPE_CIPHER		0x00000001
47 #define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
48 #define CRYPTO_ALG_TYPE_AEAD		0x00000003
49 #define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
50 #define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
51 #define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
52 #define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
53 #define CRYPTO_ALG_TYPE_KPP		0x00000008
54 #define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
55 #define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
56 #define CRYPTO_ALG_TYPE_RNG		0x0000000c
57 #define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
58 #define CRYPTO_ALG_TYPE_DIGEST		0x0000000e
59 #define CRYPTO_ALG_TYPE_HASH		0x0000000e
60 #define CRYPTO_ALG_TYPE_SHASH		0x0000000e
61 #define CRYPTO_ALG_TYPE_AHASH		0x0000000f
62 
63 #define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
64 #define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
65 #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c
66 #define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e
67 
68 #define CRYPTO_ALG_LARVAL		0x00000010
69 #define CRYPTO_ALG_DEAD			0x00000020
70 #define CRYPTO_ALG_DYING		0x00000040
71 #define CRYPTO_ALG_ASYNC		0x00000080
72 
73 /*
74  * Set this bit if and only if the algorithm requires another algorithm of
75  * the same type to handle corner cases.
76  */
77 #define CRYPTO_ALG_NEED_FALLBACK	0x00000100
78 
79 /*
80  * This bit is set for symmetric key ciphers that have already been wrapped
81  * with a generic IV generator to prevent them from being wrapped again.
82  */
83 #define CRYPTO_ALG_GENIV		0x00000200
84 
85 /*
86  * Set if the algorithm has passed automated run-time testing.  Note that
87  * if there is no run-time testing for a given algorithm it is considered
88  * to have passed.
89  */
90 
91 #define CRYPTO_ALG_TESTED		0x00000400
92 
93 /*
94  * Set if the algorithm is an instance that is built from templates.
95  */
96 #define CRYPTO_ALG_INSTANCE		0x00000800
97 
98 /* Set this bit if the provided algorithm is hardware accelerated but
99  * not available to userspace via an instruction set or similar means.
100  */
101 #define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000
102 
103 /*
104  * Mark a cipher as a service implementation only usable by another
105  * cipher and never by a normal user of the kernel crypto API
106  */
107 #define CRYPTO_ALG_INTERNAL		0x00002000
108 
109 /*
110  * Set if the algorithm has a ->setkey() method but can be used without
111  * calling it first, i.e. there is a default key.
112  */
113 #define CRYPTO_ALG_OPTIONAL_KEY		0x00004000
114 
115 /*
116  * Don't trigger module loading
117  */
118 #define CRYPTO_NOLOAD			0x00008000
119 
120 /*
121  * Transform masks and values (for crt_flags).
122  */
123 #define CRYPTO_TFM_NEED_KEY		0x00000001
124 
125 #define CRYPTO_TFM_REQ_MASK		0x000fff00
126 #define CRYPTO_TFM_RES_MASK		0xfff00000
127 
128 #define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
129 #define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
130 #define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
131 #define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
132 #define CRYPTO_TFM_RES_BAD_KEY_LEN   	0x00200000
133 #define CRYPTO_TFM_RES_BAD_KEY_SCHED 	0x00400000
134 #define CRYPTO_TFM_RES_BAD_BLOCK_LEN 	0x00800000
135 #define CRYPTO_TFM_RES_BAD_FLAGS 	0x01000000
136 
137 /*
138  * Miscellaneous stuff.
139  */
140 #define CRYPTO_MAX_ALG_NAME		128
141 
142 /*
143  * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
144  * declaration) is used to ensure that the crypto_tfm context structure is
145  * aligned correctly for the given architecture so that there are no alignment
146  * faults for C data types.  In particular, this is required on platforms such
147  * as arm where pointers are 32-bit aligned but there are data types such as
148  * u64 which require 64-bit alignment.
149  */
150 #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
151 
152 #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
153 
154 struct scatterlist;
155 struct crypto_ablkcipher;
156 struct crypto_async_request;
157 struct crypto_blkcipher;
158 struct crypto_tfm;
159 struct crypto_type;
160 struct skcipher_givcrypt_request;
161 
162 typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
163 
164 /**
165  * DOC: Block Cipher Context Data Structures
166  *
167  * These data structures define the operating context for each block cipher
168  * type.
169  */
170 
171 struct crypto_async_request {
172 	struct list_head list;
173 	crypto_completion_t complete;
174 	void *data;
175 	struct crypto_tfm *tfm;
176 
177 	u32 flags;
178 };
179 
180 struct ablkcipher_request {
181 	struct crypto_async_request base;
182 
183 	unsigned int nbytes;
184 
185 	void *info;
186 
187 	struct scatterlist *src;
188 	struct scatterlist *dst;
189 
190 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
191 };
192 
193 struct blkcipher_desc {
194 	struct crypto_blkcipher *tfm;
195 	void *info;
196 	u32 flags;
197 };
198 
199 struct cipher_desc {
200 	struct crypto_tfm *tfm;
201 	void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
202 	unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
203 			     const u8 *src, unsigned int nbytes);
204 	void *info;
205 };
206 
207 /**
208  * DOC: Block Cipher Algorithm Definitions
209  *
210  * These data structures define modular crypto algorithm implementations,
211  * managed via crypto_register_alg() and crypto_unregister_alg().
212  */
213 
214 /**
215  * struct ablkcipher_alg - asynchronous block cipher definition
216  * @min_keysize: Minimum key size supported by the transformation. This is the
217  *		 smallest key length supported by this transformation algorithm.
218  *		 This must be set to one of the pre-defined values as this is
219  *		 not hardware specific. Possible values for this field can be
220  *		 found via git grep "_MIN_KEY_SIZE" include/crypto/
221  * @max_keysize: Maximum key size supported by the transformation. This is the
222  *		 largest key length supported by this transformation algorithm.
223  *		 This must be set to one of the pre-defined values as this is
224  *		 not hardware specific. Possible values for this field can be
225  *		 found via git grep "_MAX_KEY_SIZE" include/crypto/
226  * @setkey: Set key for the transformation. This function is used to either
227  *	    program a supplied key into the hardware or store the key in the
228  *	    transformation context for programming it later. Note that this
229  *	    function does modify the transformation context. This function can
230  *	    be called multiple times during the existence of the transformation
231  *	    object, so one must make sure the key is properly reprogrammed into
232  *	    the hardware. This function is also responsible for checking the key
233  *	    length for validity. In case a software fallback was put in place in
234  *	    the @cra_init call, this function might need to use the fallback if
235  *	    the algorithm doesn't support all of the key sizes.
236  * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
237  *	     the supplied scatterlist containing the blocks of data. The crypto
238  *	     API consumer is responsible for aligning the entries of the
239  *	     scatterlist properly and making sure the chunks are correctly
240  *	     sized. In case a software fallback was put in place in the
241  *	     @cra_init call, this function might need to use the fallback if
242  *	     the algorithm doesn't support all of the key sizes. In case the
243  *	     key was stored in transformation context, the key might need to be
244  *	     re-programmed into the hardware in this function. This function
245  *	     shall not modify the transformation context, as this function may
246  *	     be called in parallel with the same transformation object.
247  * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
248  *	     and the conditions are exactly the same.
249  * @givencrypt: Update the IV for encryption. With this callback, a cipher
250  *	        implementation may provide its own way of updating the IV
251  *	        for encryption.
252  * @givdecrypt: Update the IV for decryption. This is the reverse of
253  *	        @givencrypt.
254  * @geniv: The transformation implementation may use an "IV generator" provided
255  *	   by the kernel crypto API. Several use cases have a predefined
256  *	   approach for how IVs are to be updated. For such use cases, the kernel
257  *	   crypto API provides ready-to-use implementations that can be
258  *	   referenced with this variable.
259  * @ivsize: IV size applicable for transformation. The consumer must provide an
260  *	    IV of exactly that size to perform the encrypt or decrypt operation.
261  *
262  * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
263  * mandatory and must be filled.
264  */
265 struct ablkcipher_alg {
266 	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
267 	              unsigned int keylen);
268 	int (*encrypt)(struct ablkcipher_request *req);
269 	int (*decrypt)(struct ablkcipher_request *req);
270 	int (*givencrypt)(struct skcipher_givcrypt_request *req);
271 	int (*givdecrypt)(struct skcipher_givcrypt_request *req);
272 
273 	const char *geniv;
274 
275 	unsigned int min_keysize;
276 	unsigned int max_keysize;
277 	unsigned int ivsize;
278 };
279 
280 /**
281  * struct blkcipher_alg - synchronous block cipher definition
282  * @min_keysize: see struct ablkcipher_alg
283  * @max_keysize: see struct ablkcipher_alg
284  * @setkey: see struct ablkcipher_alg
285  * @encrypt: see struct ablkcipher_alg
286  * @decrypt: see struct ablkcipher_alg
287  * @geniv: see struct ablkcipher_alg
288  * @ivsize: see struct ablkcipher_alg
289  *
290  * All fields except @geniv and @ivsize are mandatory and must be filled.
291  */
292 struct blkcipher_alg {
293 	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
294 	              unsigned int keylen);
295 	int (*encrypt)(struct blkcipher_desc *desc,
296 		       struct scatterlist *dst, struct scatterlist *src,
297 		       unsigned int nbytes);
298 	int (*decrypt)(struct blkcipher_desc *desc,
299 		       struct scatterlist *dst, struct scatterlist *src,
300 		       unsigned int nbytes);
301 
302 	const char *geniv;
303 
304 	unsigned int min_keysize;
305 	unsigned int max_keysize;
306 	unsigned int ivsize;
307 };
308 
309 /**
310  * struct cipher_alg - single-block symmetric ciphers definition
311  * @cia_min_keysize: Minimum key size supported by the transformation. This is
312  *		     the smallest key length supported by this transformation
313  *		     algorithm. This must be set to one of the pre-defined
314  *		     values as this is not hardware specific. Possible values
315  *		     for this field can be found via git grep "_MIN_KEY_SIZE"
316  *		     include/crypto/
317  * @cia_max_keysize: Maximum key size supported by the transformation. This is
318  *		    the largest key length supported by this transformation
319  *		    algorithm. This must be set to one of the pre-defined values
320  *		    as this is not hardware specific. Possible values for this
321  *		    field can be found via git grep "_MAX_KEY_SIZE"
322  *		    include/crypto/
323  * @cia_setkey: Set key for the transformation. This function is used to either
324  *	        program a supplied key into the hardware or store the key in the
325  *	        transformation context for programming it later. Note that this
326  *	        function does modify the transformation context. This function
327  *	        can be called multiple times during the existence of the
328  *	        transformation object, so one must make sure the key is properly
329  *	        reprogrammed into the hardware. This function is also
330  *	        responsible for checking the key length for validity.
331  * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
332  *		 single block of data, which must be @cra_blocksize big. This
333  *		 always operates on a full @cra_blocksize and it is not possible
334  *		 to encrypt a block of smaller size. The supplied buffers must
335  *		 therefore also be at least of @cra_blocksize size. Both the
336  *		 input and output buffers are always aligned to @cra_alignmask.
337  *		 In case either of the input or output buffer supplied by user
338  *		 of the crypto API is not aligned to @cra_alignmask, the crypto
339  *		 API will re-align the buffers. The re-alignment means that a
340  *		 new buffer will be allocated, the data will be copied into the
341  *		 new buffer, then the processing will happen on the new buffer,
342  *		 then the data will be copied back into the original buffer and
343  *		 finally the new buffer will be freed. In case a software
344  *		 fallback was put in place in the @cra_init call, this function
345  *		 might need to use the fallback if the algorithm doesn't support
346  *		 all of the key sizes. In case the key was stored in
347  *		 transformation context, the key might need to be re-programmed
348  *		 into the hardware in this function. This function shall not
349  *		 modify the transformation context, as this function may be
350  *		 called in parallel with the same transformation object.
351  * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
352  *		 @cia_encrypt, and the conditions are exactly the same.
353  *
354  * All fields are mandatory and must be filled.
355  */
356 struct cipher_alg {
357 	unsigned int cia_min_keysize;
358 	unsigned int cia_max_keysize;
359 	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
360 	                  unsigned int keylen);
361 	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
362 	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
363 };
364 
365 struct compress_alg {
366 	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
367 			    unsigned int slen, u8 *dst, unsigned int *dlen);
368 	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
369 			      unsigned int slen, u8 *dst, unsigned int *dlen);
370 };
371 
372 
373 #define cra_ablkcipher	cra_u.ablkcipher
374 #define cra_blkcipher	cra_u.blkcipher
375 #define cra_cipher	cra_u.cipher
376 #define cra_compress	cra_u.compress
377 
378 /**
379  * struct crypto_alg - definition of a cryptographic cipher algorithm
380  * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
381  *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
382  *	       used for fine-tuning the description of the transformation
383  *	       algorithm.
384  * @cra_blocksize: Minimum block size of this transformation. The size in bytes
385  *		   of the smallest possible unit which can be transformed with
386  *		   this algorithm. The users must respect this value.
387  *		   In case of a HASH transformation, it is possible for a smaller
388  *		   block than @cra_blocksize to be passed to the crypto API for
389  *		   transformation; in case of any other transformation type, an
390  *		   error will be returned upon any attempt to transform chunks
391  *		   smaller than @cra_blocksize.
392  * @cra_ctxsize: Size of the operational context of the transformation. This
393  *		 value informs the kernel crypto API about the memory size
394  *		 needed to be allocated for the transformation context.
395  * @cra_alignmask: Alignment mask for the input and output data buffer. The data
396  *		   buffer containing the input data for the algorithm must be
397  *		   aligned to this alignment mask. The data buffer for the
398  *		   output data must be aligned to this alignment mask. Note that
399  *		   the Crypto API will do the re-alignment in software, but
400  *		   only under special conditions and there is a performance hit.
401  *		   The re-alignment happens on these occasions for different
402  *		   @cra_u types: cipher -- For both input data and output data
403  *		   buffer; ahash -- For output hash destination buf; shash --
404  *		   For output hash destination buf.
405  *		   This is needed on hardware which is flawed by design and
406  *		   cannot pick data from arbitrary addresses.
407  * @cra_priority: Priority of this transformation implementation. In case
408  *		  multiple transformations with same @cra_name are available to
409  *		  the Crypto API, the kernel will use the one with highest
410  *		  @cra_priority.
411  * @cra_name: Generic name (usable by multiple implementations) of the
412  *	      transformation algorithm. This is the name of the transformation
413  *	      itself. This field is used by the kernel when looking up the
414  *	      providers of particular transformation.
415  * @cra_driver_name: Unique name of the transformation provider. This is the
416  *		     name of the provider of the transformation. This can be any
417  *		     arbitrary value, but in the usual case, this contains the
418  *		     name of the chip or provider and the name of the
419  *		     transformation algorithm.
420  * @cra_type: Type of the cryptographic transformation. This is a pointer to
421  *	      struct crypto_type, which implements callbacks common for all
422  *	      transformation types. There are multiple options:
423  *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
424  *	      &crypto_ahash_type, &crypto_rng_type.
425  *	      This field might be empty. In that case, there are no common
426  *	      callbacks. This is the case for: cipher, compress, shash.
427  * @cra_u: Callbacks implementing the transformation. This is a union of
428  *	   multiple structures. Depending on the type of transformation selected
429  *	   by @cra_type and @cra_flags above, the associated structure must be
430  *	   filled with callbacks. This field might be empty. This is the case
431  *	   for ahash, shash.
432  * @cra_init: Initialize the cryptographic transformation object. This function
433  *	      is used to initialize the cryptographic transformation object.
434  *	      This function is called only once at the instantiation time, right
435  *	      after the transformation context was allocated. In case the
436  *	      cryptographic hardware has some special requirements which need to
437  *	      be handled by software, this function shall check for the precise
438  *	      requirement of the transformation and put any software fallbacks
439  *	      in place.
440  * @cra_exit: Deinitialize the cryptographic transformation object. This is a
441  *	      counterpart to @cra_init, used to remove various changes set in
442  *	      @cra_init.
443  * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
444  *		      definition. See @struct @ablkcipher_alg.
445  * @cra_u.blkcipher: Union member which contains a synchronous block cipher
446  *		     definition. See @struct @blkcipher_alg.
447  * @cra_u.cipher: Union member which contains a single-block symmetric cipher
448  *		  definition. See @struct @cipher_alg.
449  * @cra_u.compress: Union member which contains a (de)compression algorithm.
450  *		    See @struct @compress_alg.
451  * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
452  * @cra_list: internally used
453  * @cra_users: internally used
454  * @cra_refcnt: internally used
455  * @cra_destroy: internally used
456  *
457  * All following statistics are for this crypto_alg
458  * @encrypt_cnt:	number of encrypt requests
459  * @decrypt_cnt:	number of decrypt requests
460  * @compress_cnt:	number of compress requests
461  * @decompress_cnt:	number of decompress requests
462  * @generate_cnt:	number of RNG generate requests
463  * @seed_cnt:		number of times the rng was seeded
464  * @hash_cnt:		number of hash requests
465  * @sign_cnt:		number of sign requests
466  * @setsecret_cnt:	number of setsecret operations
467  * @generate_public_key_cnt:	number of generate_public_key operations
468  * @verify_cnt:			number of verify operations
469  * @compute_shared_secret_cnt:	number of compute_shared_secret operations
470  * @encrypt_tlen:	total data size handled by encrypt requests
471  * @decrypt_tlen:	total data size handled by decrypt requests
472  * @compress_tlen:	total data size handled by compress requests
473  * @decompress_tlen:	total data size handled by decompress requests
474  * @generate_tlen:	total data size of generated data by the RNG
475  * @hash_tlen:		total data size hashed
476  * @akcipher_err_cnt:	number of errors for akcipher requests
477  * @cipher_err_cnt:	number of errors for cipher requests
478  * @compress_err_cnt:	number of errors for compress requests
479  * @aead_err_cnt:	number of errors for aead requests
480  * @hash_err_cnt:	number of errors for hash requests
481  * @rng_err_cnt:	number of errors for rng requests
482  * @kpp_err_cnt:	number of errors for kpp requests
483  *
484  * The struct crypto_alg describes a generic Crypto API algorithm and is common
485  * for all of the transformations. Any variable not documented here shall not
486  * be used by a cipher implementation as it is internal to the Crypto API.
487  */
488 struct crypto_alg {
489 	struct list_head cra_list;
490 	struct list_head cra_users;
491 
492 	u32 cra_flags;
493 	unsigned int cra_blocksize;
494 	unsigned int cra_ctxsize;
495 	unsigned int cra_alignmask;
496 
497 	int cra_priority;
498 	refcount_t cra_refcnt;
499 
500 	char cra_name[CRYPTO_MAX_ALG_NAME];
501 	char cra_driver_name[CRYPTO_MAX_ALG_NAME];
502 
503 	const struct crypto_type *cra_type;
504 
505 	union {
506 		struct ablkcipher_alg ablkcipher;
507 		struct blkcipher_alg blkcipher;
508 		struct cipher_alg cipher;
509 		struct compress_alg compress;
510 	} cra_u;
511 
512 	int (*cra_init)(struct crypto_tfm *tfm);
513 	void (*cra_exit)(struct crypto_tfm *tfm);
514 	void (*cra_destroy)(struct crypto_alg *alg);
515 
516 	struct module *cra_module;
517 
518 #ifdef CONFIG_CRYPTO_STATS
519 	union {
520 		atomic64_t encrypt_cnt;
521 		atomic64_t compress_cnt;
522 		atomic64_t generate_cnt;
523 		atomic64_t hash_cnt;
524 		atomic64_t setsecret_cnt;
525 	};
526 	union {
527 		atomic64_t encrypt_tlen;
528 		atomic64_t compress_tlen;
529 		atomic64_t generate_tlen;
530 		atomic64_t hash_tlen;
531 	};
532 	union {
533 		atomic64_t akcipher_err_cnt;
534 		atomic64_t cipher_err_cnt;
535 		atomic64_t compress_err_cnt;
536 		atomic64_t aead_err_cnt;
537 		atomic64_t hash_err_cnt;
538 		atomic64_t rng_err_cnt;
539 		atomic64_t kpp_err_cnt;
540 	};
541 	union {
542 		atomic64_t decrypt_cnt;
543 		atomic64_t decompress_cnt;
544 		atomic64_t seed_cnt;
545 		atomic64_t generate_public_key_cnt;
546 	};
547 	union {
548 		atomic64_t decrypt_tlen;
549 		atomic64_t decompress_tlen;
550 	};
551 	union {
552 		atomic64_t verify_cnt;
553 		atomic64_t compute_shared_secret_cnt;
554 	};
555 	atomic64_t sign_cnt;
556 #endif /* CONFIG_CRYPTO_STATS */
557 
558 } CRYPTO_MINALIGN_ATTR;
559 
560 /*
561  * A helper struct for waiting for completion of async crypto ops
562  */
563 struct crypto_wait {
564 	struct completion completion;
565 	int err;
566 };
567 
568 /*
569  * Macro for declaring a crypto op async wait object on stack
570  */
571 #define DECLARE_CRYPTO_WAIT(_wait) \
572 	struct crypto_wait _wait = { \
573 		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
574 
575 /*
576  * Async ops completion helper functioons
577  */
578 void crypto_req_done(struct crypto_async_request *req, int err);
579 
580 static inline int crypto_wait_req(int err, struct crypto_wait *wait)
581 {
582 	switch (err) {
583 	case -EINPROGRESS:
584 	case -EBUSY:
585 		wait_for_completion(&wait->completion);
586 		reinit_completion(&wait->completion);
587 		err = wait->err;
588 		break;
589 	}
590 
591 	return err;
592 }
593 
594 static inline void crypto_init_wait(struct crypto_wait *wait)
595 {
596 	init_completion(&wait->completion);
597 }
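
/*
 * Typical usage sketch of the wait helpers (assuming an asynchronous
 * request "req", e.g. an ablkcipher_request, has been set up elsewhere):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int rc;
 *
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					CRYPTO_TFM_REQ_MAY_SLEEP,
 *					crypto_req_done, &wait);
 *	rc = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 *
 * crypto_req_done() completes the wait and records the final status, and
 * crypto_wait_req() converts -EINPROGRESS/-EBUSY into the real return code.
 */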
598 
599 /*
600  * Algorithm registration interface.
601  */
602 int crypto_register_alg(struct crypto_alg *alg);
603 int crypto_unregister_alg(struct crypto_alg *alg);
604 int crypto_register_algs(struct crypto_alg *algs, int count);
605 int crypto_unregister_algs(struct crypto_alg *algs, int count);
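
/*
 * Minimal registration sketch for a single-block cipher. This is purely
 * illustrative: "example", "example-generic", struct example_ctx and the
 * example_*() callbacks are assumed to be provided by the implementation.
 *
 *	static struct crypto_alg example_alg = {
 *		.cra_name		= "example",
 *		.cra_driver_name	= "example-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct example_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_u			= { .cipher = {
 *			.cia_min_keysize	= 16,
 *			.cia_max_keysize	= 32,
 *			.cia_setkey		= example_setkey,
 *			.cia_encrypt		= example_encrypt,
 *			.cia_decrypt		= example_decrypt,
 *		} },
 *	};
 *
 * The module would then call crypto_register_alg(&example_alg) from its init
 * function and crypto_unregister_alg(&example_alg) from its exit function.
 */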
606 
607 /*
608  * Algorithm query interface.
609  */
610 int crypto_has_alg(const char *name, u32 type, u32 mask);
611 
612 /*
613  * Transforms: user-instantiated objects which encapsulate algorithms
614  * and core processing logic.  Managed via crypto_alloc_*() and
615  * crypto_free_*(), as well as the various helpers below.
616  */
617 
618 struct ablkcipher_tfm {
619 	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
620 	              unsigned int keylen);
621 	int (*encrypt)(struct ablkcipher_request *req);
622 	int (*decrypt)(struct ablkcipher_request *req);
623 
624 	struct crypto_ablkcipher *base;
625 
626 	unsigned int ivsize;
627 	unsigned int reqsize;
628 };
629 
630 struct blkcipher_tfm {
631 	void *iv;
632 	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
633 		      unsigned int keylen);
634 	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
635 		       struct scatterlist *src, unsigned int nbytes);
636 	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
637 		       struct scatterlist *src, unsigned int nbytes);
638 };
639 
640 struct cipher_tfm {
641 	int (*cit_setkey)(struct crypto_tfm *tfm,
642 	                  const u8 *key, unsigned int keylen);
643 	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
644 	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
645 };
646 
647 struct compress_tfm {
648 	int (*cot_compress)(struct crypto_tfm *tfm,
649 	                    const u8 *src, unsigned int slen,
650 	                    u8 *dst, unsigned int *dlen);
651 	int (*cot_decompress)(struct crypto_tfm *tfm,
652 	                      const u8 *src, unsigned int slen,
653 	                      u8 *dst, unsigned int *dlen);
654 };
655 
656 #define crt_ablkcipher	crt_u.ablkcipher
657 #define crt_blkcipher	crt_u.blkcipher
658 #define crt_cipher	crt_u.cipher
659 #define crt_compress	crt_u.compress
660 
661 struct crypto_tfm {
662 
663 	u32 crt_flags;
664 
665 	union {
666 		struct ablkcipher_tfm ablkcipher;
667 		struct blkcipher_tfm blkcipher;
668 		struct cipher_tfm cipher;
669 		struct compress_tfm compress;
670 	} crt_u;
671 
672 	void (*exit)(struct crypto_tfm *tfm);
673 
674 	struct crypto_alg *__crt_alg;
675 
676 	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
677 };
678 
679 struct crypto_ablkcipher {
680 	struct crypto_tfm base;
681 };
682 
683 struct crypto_blkcipher {
684 	struct crypto_tfm base;
685 };
686 
687 struct crypto_cipher {
688 	struct crypto_tfm base;
689 };
690 
691 struct crypto_comp {
692 	struct crypto_tfm base;
693 };
694 
695 enum {
696 	CRYPTOA_UNSPEC,
697 	CRYPTOA_ALG,
698 	CRYPTOA_TYPE,
699 	CRYPTOA_U32,
700 	__CRYPTOA_MAX,
701 };
702 
703 #define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
704 
705 /* Maximum number of (rtattr) parameters for each template. */
706 #define CRYPTO_MAX_ATTRS 32
707 
708 struct crypto_attr_alg {
709 	char name[CRYPTO_MAX_ALG_NAME];
710 };
711 
712 struct crypto_attr_type {
713 	u32 type;
714 	u32 mask;
715 };
716 
717 struct crypto_attr_u32 {
718 	u32 num;
719 };
720 
721 /*
722  * Transform user interface.
723  */
724 
725 struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
726 void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
727 
728 static inline void crypto_free_tfm(struct crypto_tfm *tfm)
729 {
730 	return crypto_destroy_tfm(tfm, tfm);
731 }
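
/*
 * Usage sketch: a transform can be allocated by algorithm name and released
 * again as follows ("cbc(aes)" is only an example name; errors follow the
 * usual ERR_PTR() convention):
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */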
732 
733 int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
734 
735 /*
736  * Transform helpers which query the underlying algorithm.
737  */
738 static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
739 {
740 	return tfm->__crt_alg->cra_name;
741 }
742 
743 static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
744 {
745 	return tfm->__crt_alg->cra_driver_name;
746 }
747 
748 static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
749 {
750 	return tfm->__crt_alg->cra_priority;
751 }
752 
753 static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
754 {
755 	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
756 }
757 
758 static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
759 {
760 	return tfm->__crt_alg->cra_blocksize;
761 }
762 
763 static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
764 {
765 	return tfm->__crt_alg->cra_alignmask;
766 }
767 
768 static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
769 {
770 	return tfm->crt_flags;
771 }
772 
773 static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
774 {
775 	tfm->crt_flags |= flags;
776 }
777 
778 static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
779 {
780 	tfm->crt_flags &= ~flags;
781 }
782 
783 static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
784 {
785 	return tfm->__crt_ctx;
786 }
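
/*
 * An algorithm implementation typically fetches its private state, whose size
 * it declared via @cra_ctxsize, like this (struct example_ctx is an assumed
 * driver-defined type):
 *
 *	struct example_ctx *ctx = crypto_tfm_ctx(tfm);
 */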
787 
788 static inline unsigned int crypto_tfm_ctx_alignment(void)
789 {
790 	struct crypto_tfm *tfm;
791 	return __alignof__(tfm->__crt_ctx);
792 }
793 
794 /*
795  * API wrappers.
796  */
797 static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
798 	struct crypto_tfm *tfm)
799 {
800 	return (struct crypto_ablkcipher *)tfm;
801 }
802 
803 static inline u32 crypto_skcipher_type(u32 type)
804 {
805 	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
806 	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
807 	return type;
808 }
809 
810 static inline u32 crypto_skcipher_mask(u32 mask)
811 {
812 	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
813 	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
814 	return mask;
815 }
816 
817 /**
818  * DOC: Asynchronous Block Cipher API
819  *
820  * Asynchronous block cipher API is used with the ciphers of type
821  * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
822  *
823  * Asynchronous cipher operations imply that the function invocation for a
824  * cipher request returns immediately before the completion of the operation.
825  * The cipher request is scheduled as a separate kernel thread and therefore
826  * load-balanced on the different CPUs via the process scheduler. To allow
827  * the kernel crypto API to inform the caller about the completion of a cipher
828  * request, the caller must provide a callback function. That function is
829  * invoked with the cipher handle when the request completes.
830  *
831  * To support the asynchronous operation, more information than just the
832  * cipher handle must be supplied to the kernel crypto API. That additional
833  * information is given by filling in the ablkcipher_request data structure.
834  *
835  * For the asynchronous block cipher API, the state is maintained with the tfm
836  * cipher handle. A single tfm can be used across multiple calls and in
837  * parallel. For asynchronous block cipher calls, context data supplied and
838  * only used by the caller can be referenced in the request data structure in
839  * addition to the IV used for the cipher request. The maintenance of such
840  * state information would be important for a crypto driver implementer to
841  * have, because when calling the callback function upon completion of the
842  * cipher operation, that callback function may need some information about
843  * which operation just finished if it invoked multiple operations in
844  * parallel. This state information is unused by the kernel crypto API.
845  */
846 
847 static inline struct crypto_tfm *crypto_ablkcipher_tfm(
848 	struct crypto_ablkcipher *tfm)
849 {
850 	return &tfm->base;
851 }
852 
853 /**
854  * crypto_free_ablkcipher() - zeroize and free cipher handle
855  * @tfm: cipher handle to be freed
856  */
857 static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
858 {
859 	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
860 }
861 
862 /**
863  * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
864  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
865  *	      ablkcipher
866  * @type: specifies the type of the cipher
867  * @mask: specifies the mask for the cipher
868  *
869  * Return: true when the ablkcipher is known to the kernel crypto API; false
870  *	   otherwise
871  */
872 static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
873 					u32 mask)
874 {
875 	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
876 			      crypto_skcipher_mask(mask));
877 }
878 
879 static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
880 	struct crypto_ablkcipher *tfm)
881 {
882 	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
883 }
884 
885 /**
886  * crypto_ablkcipher_ivsize() - obtain IV size
887  * @tfm: cipher handle
888  *
889  * The size of the IV for the ablkcipher referenced by the cipher handle is
890  * returned. This IV size may be zero if the cipher does not need an IV.
891  *
892  * Return: IV size in bytes
893  */
894 static inline unsigned int crypto_ablkcipher_ivsize(
895 	struct crypto_ablkcipher *tfm)
896 {
897 	return crypto_ablkcipher_crt(tfm)->ivsize;
898 }
899 
900 /**
901  * crypto_ablkcipher_blocksize() - obtain block size of cipher
902  * @tfm: cipher handle
903  *
904  * The block size for the ablkcipher referenced with the cipher handle is
905  * returned. The caller may use that information to allocate appropriate
906  * memory for the data returned by the encryption or decryption operation
907  *
908  * Return: block size of cipher
909  */
910 static inline unsigned int crypto_ablkcipher_blocksize(
911 	struct crypto_ablkcipher *tfm)
912 {
913 	return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
914 }
915 
916 static inline unsigned int crypto_ablkcipher_alignmask(
917 	struct crypto_ablkcipher *tfm)
918 {
919 	return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
920 }
921 
922 static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
923 {
924 	return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
925 }
926 
927 static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
928 					       u32 flags)
929 {
930 	crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
931 }
932 
933 static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
934 						 u32 flags)
935 {
936 	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
937 }
938 
939 /**
940  * crypto_ablkcipher_setkey() - set key for cipher
941  * @tfm: cipher handle
942  * @key: buffer holding the key
943  * @keylen: length of the key in bytes
944  *
945  * The caller provided key is set for the ablkcipher referenced by the cipher
946  * handle.
947  *
948  * Note, the key length determines the cipher variant. Many block ciphers come
949  * in several variants depending on the key size, such as AES-128 vs AES-192
950  * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
951  * is performed.
952  *
953  * Return: 0 if the setting of the key was successful; < 0 if an error occurred
954  */
955 static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
956 					   const u8 *key, unsigned int keylen)
957 {
958 	struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
959 
960 	return crt->setkey(crt->base, key, keylen);
961 }
962 
963 /**
964  * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
965  * @req: ablkcipher_request out of which the cipher handle is to be obtained
966  *
967  * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
968  * data structure.
969  *
970  * Return: crypto_ablkcipher handle
971  */
972 static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
973 	struct ablkcipher_request *req)
974 {
975 	return __crypto_ablkcipher_cast(req->base.tfm);
976 }
977 
978 static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req,
979 						  int ret)
980 {
981 #ifdef CONFIG_CRYPTO_STATS
982 	struct ablkcipher_tfm *crt =
983 		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
984 
985 	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
986 		atomic64_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
987 	} else {
988 		atomic64_inc(&crt->base->base.__crt_alg->encrypt_cnt);
989 		atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen);
990 	}
991 #endif
992 }
993 
994 static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req,
995 						  int ret)
996 {
997 #ifdef CONFIG_CRYPTO_STATS
998 	struct ablkcipher_tfm *crt =
999 		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
1000 
1001 	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
1002 		atomic64_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
1003 	} else {
1004 		atomic64_inc(&crt->base->base.__crt_alg->decrypt_cnt);
1005 		atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen);
1006 	}
1007 #endif
1008 }
1009 
1010 /**
1011  * crypto_ablkcipher_encrypt() - encrypt plaintext
1012  * @req: reference to the ablkcipher_request handle that holds all information
1013  *	 needed to perform the cipher operation
1014  *
1015  * Encrypt plaintext data using the ablkcipher_request handle. That data
1016  * structure and how it is filled with data is discussed with the
1017  * ablkcipher_request_* functions.
1018  *
1019  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1020  */
1021 static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
1022 {
1023 	struct ablkcipher_tfm *crt =
1024 		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
1025 	int ret;
1026 
1027 	ret = crt->encrypt(req);
1028 	crypto_stat_ablkcipher_encrypt(req, ret);
1029 	return ret;
1030 }
1031 
1032 /**
1033  * crypto_ablkcipher_decrypt() - decrypt ciphertext
1034  * @req: reference to the ablkcipher_request handle that holds all information
1035  *	 needed to perform the cipher operation
1036  *
1037  * Decrypt ciphertext data using the ablkcipher_request handle. That data
1038  * structure and how it is filled with data is discussed with the
1039  * ablkcipher_request_* functions.
1040  *
1041  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1042  */
1043 static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
1044 {
1045 	struct ablkcipher_tfm *crt =
1046 		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
1047 	int ret;
1048 
1049 	ret = crt->decrypt(req);
1050 	crypto_stat_ablkcipher_decrypt(req, ret);
1051 	return ret;
1052 }
1053 
1054 /**
1055  * DOC: Asynchronous Cipher Request Handle
1056  *
1057  * The ablkcipher_request data structure contains all pointers to data
1058  * required for the asynchronous cipher operation. This includes the cipher
1059  * handle (which can be used by multiple ablkcipher_request instances), pointer
1060  * to plaintext and ciphertext, asynchronous callback function, etc. It acts
1061  * as a handle to the ablkcipher_request_* API calls in a similar way as
1062  * ablkcipher handle to the crypto_ablkcipher_* API calls.
1063  */
1064 
1065 /**
1066  * crypto_ablkcipher_reqsize() - obtain size of the request data structure
1067  * @tfm: cipher handle
1068  *
1069  * Return: number of bytes
1070  */
1071 static inline unsigned int crypto_ablkcipher_reqsize(
1072 	struct crypto_ablkcipher *tfm)
1073 {
1074 	return crypto_ablkcipher_crt(tfm)->reqsize;
1075 }
1076 
1077 /**
1078  * ablkcipher_request_set_tfm() - update cipher handle reference in request
1079  * @req: request handle to be modified
1080  * @tfm: cipher handle that shall be added to the request handle
1081  *
1082  * Allow the caller to replace the existing ablkcipher handle in the request
1083  * data structure with a different one.
1084  */
1085 static inline void ablkcipher_request_set_tfm(
1086 	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
1087 {
1088 	req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
1089 }
1090 
1091 static inline struct ablkcipher_request *ablkcipher_request_cast(
1092 	struct crypto_async_request *req)
1093 {
1094 	return container_of(req, struct ablkcipher_request, base);
1095 }
1096 
1097 /**
1098  * ablkcipher_request_alloc() - allocate request data structure
1099  * @tfm: cipher handle to be registered with the request
1100  * @gfp: memory allocation flag that is handed to kmalloc by the API call.
1101  *
1102  * Allocate the request data structure that must be used with the ablkcipher
1103  * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
1104  * handle is registered in the request data structure.
1105  *
1106  * Return: allocated request handle in case of success, or NULL if out of memory
1107  */
1108 static inline struct ablkcipher_request *ablkcipher_request_alloc(
1109 	struct crypto_ablkcipher *tfm, gfp_t gfp)
1110 {
1111 	struct ablkcipher_request *req;
1112 
1113 	req = kmalloc(sizeof(struct ablkcipher_request) +
1114 		      crypto_ablkcipher_reqsize(tfm), gfp);
1115 
1116 	if (likely(req))
1117 		ablkcipher_request_set_tfm(req, tfm);
1118 
1119 	return req;
1120 }
1121 
1122 /**
1123  * ablkcipher_request_free() - zeroize and free request data structure
1124  * @req: request data structure cipher handle to be freed
1125  */
1126 static inline void ablkcipher_request_free(struct ablkcipher_request *req)
1127 {
1128 	kzfree(req);
1129 }
1130 
1131 /**
1132  * ablkcipher_request_set_callback() - set asynchronous callback function
1133  * @req: request handle
1134  * @flags: specify zero or an ORing of the flags
1135  *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
1136  *	   increase the wait queue beyond the initial maximum size;
1137  *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1138  * @compl: callback function pointer to be registered with the request handle
1139  * @data: The data pointer refers to memory that is not used by the kernel
1140  *	  crypto API, but provided to the callback function for it to use. Here,
1141  *	  the caller can provide a reference to memory the callback function can
1142  *	  operate on. As the callback function is invoked asynchronously to the
1143  *	  related functionality, it may need to access data structures of the
1144  *	  related functionality which can be referenced using this pointer. The
1145  *	  callback function can access the memory via the "data" field in the
1146  *	  crypto_async_request data structure provided to the callback function.
1147  *
1148  * This function allows setting the callback function that is triggered once the
1149  * cipher operation completes.
1150  *
1151  * The callback function is registered with the ablkcipher_request handle and
1152  * must comply with the following template::
1153  *
1154  *	void callback_function(struct crypto_async_request *req, int error)
1155  */
1156 static inline void ablkcipher_request_set_callback(
1157 	struct ablkcipher_request *req,
1158 	u32 flags, crypto_completion_t compl, void *data)
1159 {
1160 	req->base.complete = compl;
1161 	req->base.data = data;
1162 	req->base.flags = flags;
1163 }
1164 
1165 /**
1166  * ablkcipher_request_set_crypt() - set data buffers
1167  * @req: request handle
1168  * @src: source scatter / gather list
1169  * @dst: destination scatter / gather list
1170  * @nbytes: number of bytes to process from @src
1171  * @iv: IV for the cipher operation which must comply with the IV size defined
1172  *      by crypto_ablkcipher_ivsize
1173  *
1174  * This function allows setting of the source data and destination data
1175  * scatter / gather lists.
1176  *
1177  * For encryption, the source is treated as the plaintext and the
1178  * destination is the ciphertext. For a decryption operation, the use is
1179  * reversed - the source is the ciphertext and the destination is the plaintext.
1180  */
1181 static inline void ablkcipher_request_set_crypt(
1182 	struct ablkcipher_request *req,
1183 	struct scatterlist *src, struct scatterlist *dst,
1184 	unsigned int nbytes, void *iv)
1185 {
1186 	req->src = src;
1187 	req->dst = dst;
1188 	req->nbytes = nbytes;
1189 	req->info = iv;
1190 }
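
/*
 * Putting the pieces together, a minimal asynchronous encryption sketch.
 * This assumes a struct crypto_ablkcipher *tfm was obtained elsewhere, and
 * that key/keylen, src/dst scatterlists, nbytes and an IV of
 * crypto_ablkcipher_ivsize(tfm) bytes already exist:
 *
 *	struct ablkcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int rc;
 *
 *	rc = crypto_ablkcipher_setkey(tfm, key, keylen);
 *	if (rc)
 *		return rc;
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *					crypto_req_done, &wait);
 *	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	rc = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 *
 *	ablkcipher_request_free(req);
 */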
1191 
1192 /**
1193  * DOC: Synchronous Block Cipher API
1194  *
1195  * The synchronous block cipher API is used with the ciphers of type
1196  * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
1197  *
1198  * Synchronous calls have a context in the tfm. But since a single tfm can be
1199  * used in multiple calls and in parallel, this info should not be changeable
1200  * (unless a lock is used). This applies, for example, to the symmetric key.
1201  * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
1202  * structure for the synchronous blkcipher API. So, it is the only state info
1203  * that can be kept for synchronous calls without using a big lock across a tfm.
1204  *
1205  * The block cipher API allows the use of a complete cipher, i.e. a cipher
1206  * consisting of a template (a block chaining mode) and a single block cipher
1207  * primitive (e.g. AES).
1208  *
1209  * The plaintext data buffer and the ciphertext data buffer are pointed to
1210  * by using scatter/gather lists. The cipher operation is performed
1211  * on all segments of the provided scatter/gather lists.
1212  *
1213  * The kernel crypto API supports a cipher operation "in-place" which means that
1214  * the caller may provide the same scatter/gather list for the plaintext and
1215  * cipher text. After the completion of the cipher operation, the plaintext
1216  * data is replaced with the ciphertext data in case of an encryption and vice
1217  * versa for a decryption. The caller must ensure that the scatter/gather lists
1218  * for the output data point to sufficiently large buffers, i.e. multiples of
1219  * the block size of the cipher.
1220  */
1221 
1222 static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
1223 	struct crypto_tfm *tfm)
1224 {
1225 	return (struct crypto_blkcipher *)tfm;
1226 }
1227 
1228 static inline struct crypto_blkcipher *crypto_blkcipher_cast(
1229 	struct crypto_tfm *tfm)
1230 {
1231 	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
1232 	return __crypto_blkcipher_cast(tfm);
1233 }
1234 
1235 /**
1236  * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
1237  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1238  *	      blkcipher cipher
1239  * @type: specifies the type of the cipher
1240  * @mask: specifies the mask for the cipher
1241  *
1242  * Allocate a cipher handle for a block cipher. The returned struct
1243  * crypto_blkcipher is the cipher handle that is required for any subsequent
1244  * API invocation for that block cipher.
1245  *
1246  * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1247  *	   of an error, PTR_ERR() returns the error code.
1248  */
1249 static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
1250 	const char *alg_name, u32 type, u32 mask)
1251 {
1252 	type &= ~CRYPTO_ALG_TYPE_MASK;
1253 	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
1254 	mask |= CRYPTO_ALG_TYPE_MASK;
1255 
1256 	return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
1257 }
1258 
1259 static inline struct crypto_tfm *crypto_blkcipher_tfm(
1260 	struct crypto_blkcipher *tfm)
1261 {
1262 	return &tfm->base;
1263 }
1264 
1265 /**
1266  * crypto_free_blkcipher() - zeroize and free the block cipher handle
1267  * @tfm: cipher handle to be freed
1268  */
1269 static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
1270 {
1271 	crypto_free_tfm(crypto_blkcipher_tfm(tfm));
1272 }
1273 
1274 /**
1275  * crypto_has_blkcipher() - Search for the availability of a block cipher
1276  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1277  *	      block cipher
1278  * @type: specifies the type of the cipher
1279  * @mask: specifies the mask for the cipher
1280  *
1281  * Return: true when the block cipher is known to the kernel crypto API; false
1282  *	   otherwise
1283  */
1284 static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
1285 {
1286 	type &= ~CRYPTO_ALG_TYPE_MASK;
1287 	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
1288 	mask |= CRYPTO_ALG_TYPE_MASK;
1289 
1290 	return crypto_has_alg(alg_name, type, mask);
1291 }
1292 
1293 /**
1294  * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
1295  * @tfm: cipher handle
1296  *
1297  * Return: The character string holding the name of the cipher
1298  */
1299 static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
1300 {
1301 	return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
1302 }
1303 
1304 static inline struct blkcipher_tfm *crypto_blkcipher_crt(
1305 	struct crypto_blkcipher *tfm)
1306 {
1307 	return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
1308 }
1309 
1310 static inline struct blkcipher_alg *crypto_blkcipher_alg(
1311 	struct crypto_blkcipher *tfm)
1312 {
1313 	return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
1314 }
1315 
1316 /**
1317  * crypto_blkcipher_ivsize() - obtain IV size
1318  * @tfm: cipher handle
1319  *
1320  * The size of the IV for the block cipher referenced by the cipher handle is
1321  * returned. This IV size may be zero if the cipher does not need an IV.
1322  *
1323  * Return: IV size in bytes
1324  */
1325 static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
1326 {
1327 	return crypto_blkcipher_alg(tfm)->ivsize;
1328 }
1329 
1330 /**
1331  * crypto_blkcipher_blocksize() - obtain block size of cipher
1332  * @tfm: cipher handle
1333  *
1334  * The block size for the block cipher referenced with the cipher handle is
1335  * returned. The caller may use that information to allocate appropriate
1336  * memory for the data returned by the encryption or decryption operation.
1337  *
1338  * Return: block size of cipher
1339  */
1340 static inline unsigned int crypto_blkcipher_blocksize(
1341 	struct crypto_blkcipher *tfm)
1342 {
1343 	return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
1344 }
1345 
1346 static inline unsigned int crypto_blkcipher_alignmask(
1347 	struct crypto_blkcipher *tfm)
1348 {
1349 	return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
1350 }
1351 
1352 static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
1353 {
1354 	return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
1355 }
1356 
1357 static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
1358 					      u32 flags)
1359 {
1360 	crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
1361 }
1362 
1363 static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
1364 						u32 flags)
1365 {
1366 	crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
1367 }
1368 
1369 /**
1370  * crypto_blkcipher_setkey() - set key for cipher
1371  * @tfm: cipher handle
1372  * @key: buffer holding the key
1373  * @keylen: length of the key in bytes
1374  *
1375  * The caller provided key is set for the block cipher referenced by the cipher
1376  * handle.
1377  *
1378  * Note, the key length determines the cipher variant. Many block ciphers come
1379  * in several variants depending on the key size, such as AES-128 vs AES-192
1380  * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
1381  * is performed.
1382  *
1383  * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1384  */
1385 static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
1386 					  const u8 *key, unsigned int keylen)
1387 {
1388 	return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
1389 						 key, keylen);
1390 }
1391 
1392 /**
1393  * crypto_blkcipher_encrypt() - encrypt plaintext
1394  * @desc: reference to the block cipher handle with meta data
1395  * @dst: scatter/gather list that is filled by the cipher operation with the
1396  *	ciphertext
1397  * @src: scatter/gather list that holds the plaintext
1398  * @nbytes: number of bytes of the plaintext to encrypt.
1399  *
1400  * Encrypt plaintext data using the IV set by the caller with a preceding
1401  * call of crypto_blkcipher_set_iv.
1402  *
1403  * The blkcipher_desc data structure must be filled by the caller and can
1404  * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1405  * with the block cipher handle; desc.flags is filled with either
1406  * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1407  *
1408  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1409  */
1410 static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
1411 					   struct scatterlist *dst,
1412 					   struct scatterlist *src,
1413 					   unsigned int nbytes)
1414 {
1415 	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
1416 	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
1417 }
1418 
1419 /**
1420  * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
1421  * @desc: reference to the block cipher handle with meta data
1422  * @dst: scatter/gather list that is filled by the cipher operation with the
1423  *	ciphertext
1424  * @src: scatter/gather list that holds the plaintext
1425  * @nbytes: number of bytes of the plaintext to encrypt.
1426  *
1427  * Encrypt plaintext data with the use of an IV that is solely used for this
1428  * cipher operation. Any previously set IV is not used.
1429  *
1430  * The blkcipher_desc data structure must be filled by the caller and can
1431  * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1432  * with the block cipher handle; desc.info is filled with the IV to be used for
1433  * the current operation; desc.flags is filled with either
1434  * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1435  *
1436  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1437  */
1438 static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
1439 					      struct scatterlist *dst,
1440 					      struct scatterlist *src,
1441 					      unsigned int nbytes)
1442 {
1443 	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
1444 }
1445 
1446 /**
1447  * crypto_blkcipher_decrypt() - decrypt ciphertext
1448  * @desc: reference to the block cipher handle with meta data
1449  * @dst: scatter/gather list that is filled by the cipher operation with the
1450  *	plaintext
1451  * @src: scatter/gather list that holds the ciphertext
1452  * @nbytes: number of bytes of the ciphertext to decrypt.
1453  *
1454  * Decrypt ciphertext data using the IV set by the caller with a preceding
1455  * call of crypto_blkcipher_set_iv.
1456  *
1457  * The blkcipher_desc data structure must be filled by the caller as documented
1458  * for the crypto_blkcipher_encrypt call above.
1459  *
1460  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1461  *
1462  */
1463 static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
1464 					   struct scatterlist *dst,
1465 					   struct scatterlist *src,
1466 					   unsigned int nbytes)
1467 {
1468 	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
1469 	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1470 }
1471 
1472 /**
1473  * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
1474  * @desc: reference to the block cipher handle with meta data
1475  * @dst: scatter/gather list that is filled by the cipher operation with the
1476  *	plaintext
1477  * @src: scatter/gather list that holds the ciphertext
1478  * @nbytes: number of bytes of the ciphertext to decrypt.
1479  *
1480  * Decrypt ciphertext data with the use of an IV that is solely used for this
1481  * cipher operation. Any previously set IV is not used.
1482  *
1483  * The blkcipher_desc data structure must be filled by the caller as documented
1484  * for the crypto_blkcipher_encrypt_iv call above.
1485  *
1486  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1487  */
1488 static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
1489 					      struct scatterlist *dst,
1490 					      struct scatterlist *src,
1491 					      unsigned int nbytes)
1492 {
1493 	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1494 }
1495 
1496 /**
1497  * crypto_blkcipher_set_iv() - set IV for cipher
1498  * @tfm: cipher handle
1499  * @src: buffer holding the IV
1500  * @len: length of the IV in bytes
1501  *
1502  * The caller provided IV is set for the block cipher referenced by the cipher
1503  * handle.
1504  */
1505 static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
1506 					   const u8 *src, unsigned int len)
1507 {
1508 	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
1509 }
1510 
1511 /**
1512  * crypto_blkcipher_get_iv() - obtain IV from cipher
1513  * @tfm: cipher handle
1514  * @dst: buffer filled with the IV
1515  * @len: length of the buffer dst
1516  *
1517  * The caller can obtain the IV set for the block cipher referenced by the
1518  * cipher handle and store it into the user-provided buffer. If the buffer
1519  * is not large enough, the IV is truncated to fit the buffer.
1520  */
1521 static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
1522 					   u8 *dst, unsigned int len)
1523 {
1524 	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
1525 }
1526 
1527 /**
1528  * DOC: Single Block Cipher API
1529  *
1530  * The single block cipher API is used with the ciphers of type
1531  * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
1532  *
1533  * The single block cipher API performs operations on the bare cipher
1534  * primitive. These cipher primitives exclude any block chaining operations,
1535  * including IV handling.
1536  *
1537  * The purpose of this single block cipher API is to support the implementation
1538  * of templates or other concepts that only need to perform the cipher operation
1539  * on one block at a time. Templates invoke the underlying cipher primitive
1540  * block-wise and process either the input or the output data of these cipher
1541  * operations.
1542  */
1543 
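/*
 * A minimal single block cipher round trip, as a sketch.  The algorithm name
 * "aes" and the 16-byte key (which selects AES-128, see crypto_cipher_setkey()
 * below) are assumptions; in and out must each be at least one block in size.
 *
 *	struct crypto_cipher *tfm;
 *	u8 key[16], in[16], out[16];
 *	int ret;
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	ret = crypto_cipher_setkey(tfm, key, sizeof(key));
 *	if (!ret)
 *		crypto_cipher_encrypt_one(tfm, out, in);
 *
 *	crypto_free_cipher(tfm);
 */
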
1544 static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
1545 {
1546 	return (struct crypto_cipher *)tfm;
1547 }
1548 
1549 static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
1550 {
1551 	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
1552 	return __crypto_cipher_cast(tfm);
1553 }
1554 
1555 /**
1556  * crypto_alloc_cipher() - allocate single block cipher handle
1557  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1558  *	     single block cipher
1559  * @type: specifies the type of the cipher
1560  * @mask: specifies the mask for the cipher
1561  *
1562  * Allocate a cipher handle for a single block cipher. The returned struct
1563  * crypto_cipher is the cipher handle that is required for any subsequent API
1564  * invocation for that single block cipher.
1565  *
1566  * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1567  *	   of an error, PTR_ERR() returns the error code.
1568  */
1569 static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
1570 							u32 type, u32 mask)
1571 {
1572 	type &= ~CRYPTO_ALG_TYPE_MASK;
1573 	type |= CRYPTO_ALG_TYPE_CIPHER;
1574 	mask |= CRYPTO_ALG_TYPE_MASK;
1575 
1576 	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
1577 }
1578 
1579 static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
1580 {
1581 	return &tfm->base;
1582 }
1583 
1584 /**
1585  * crypto_free_cipher() - zeroize and free the single block cipher handle
1586  * @tfm: cipher handle to be freed
1587  */
1588 static inline void crypto_free_cipher(struct crypto_cipher *tfm)
1589 {
1590 	crypto_free_tfm(crypto_cipher_tfm(tfm));
1591 }
1592 
1593 /**
1594  * crypto_has_cipher() - Search for the availability of a single block cipher
1595  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1596  *	     single block cipher
1597  * @type: specifies the type of the cipher
1598  * @mask: specifies the mask for the cipher
1599  *
1600  * Return: true when the single block cipher is known to the kernel crypto API;
1601  *	   false otherwise
1602  */
1603 static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
1604 {
1605 	type &= ~CRYPTO_ALG_TYPE_MASK;
1606 	type |= CRYPTO_ALG_TYPE_CIPHER;
1607 	mask |= CRYPTO_ALG_TYPE_MASK;
1608 
1609 	return crypto_has_alg(alg_name, type, mask);
1610 }
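
/*
 * A sketch of probing for availability before allocating a handle; the
 * algorithm name "aes" is an assumption.
 *
 *	if (!crypto_has_cipher("aes", 0, 0))
 *		return -ENOENT;
 */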
1611 
1612 static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
1613 {
1614 	return &crypto_cipher_tfm(tfm)->crt_cipher;
1615 }
1616 
1617 /**
1618  * crypto_cipher_blocksize() - obtain block size for cipher
1619  * @tfm: cipher handle
1620  *
1621  * The block size for the single block cipher referenced with the cipher handle
1622  * tfm is returned. The caller may use that information to allocate appropriate
1623  * memory for the data returned by the encryption or decryption operation.
1624  *
1625  * Return: block size of cipher
1626  */
1627 static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
1628 {
1629 	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
1630 }
1631 
1632 static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
1633 {
1634 	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
1635 }
1636 
1637 static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
1638 {
1639 	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
1640 }
1641 
1642 static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
1643 					   u32 flags)
1644 {
1645 	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
1646 }
1647 
1648 static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
1649 					     u32 flags)
1650 {
1651 	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
1652 }
1653 
1654 /**
1655  * crypto_cipher_setkey() - set key for cipher
1656  * @tfm: cipher handle
1657  * @key: buffer holding the key
1658  * @keylen: length of the key in bytes
1659  *
1660  * The caller provided key is set for the single block cipher referenced by the
1661  * cipher handle.
1662  *
1663  * Note, the key length determines the cipher variant. Many block ciphers come
1664  * in different variants depending on the key size, such as AES-128 vs. AES-192
1665  * vs. AES-256. When providing a 16-byte key to an AES cipher handle, AES-128
1666  * is used.
1667  *
1668  * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1669  */
1670 static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
1671                                        const u8 *key, unsigned int keylen)
1672 {
1673 	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
1674 						  key, keylen);
1675 }
1676 
1677 /**
1678  * crypto_cipher_encrypt_one() - encrypt one block of plaintext
1679  * @tfm: cipher handle
1680  * @dst: points to the buffer that will be filled with the ciphertext
1681  * @src: buffer holding the plaintext to be encrypted
1682  *
1683  * Invoke the encryption operation on one block. The caller must ensure that
1684  * the plaintext and ciphertext buffers are at least one block in size.
1685  */
1686 static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
1687 					     u8 *dst, const u8 *src)
1688 {
1689 	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
1690 						dst, src);
1691 }
1692 
1693 /**
1694  * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
1695  * @tfm: cipher handle
1696  * @dst: points to the buffer that will be filled with the plaintext
1697  * @src: buffer holding the ciphertext to be decrypted
1698  *
1699  * Invoke the decryption operation on one block. The caller must ensure that
1700  * the plaintext and ciphertext buffers are at least one block in size.
1701  */
1702 static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1703 					     u8 *dst, const u8 *src)
1704 {
1705 	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
1706 						dst, src);
1707 }
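
/*
 * A sketch of processing a larger buffer block by block, the way templates
 * drive the single block cipher primitive.  The variables tfm, buf and len
 * are assumptions, len is assumed to be a multiple of the block size, and
 * chaining/IV handling is deliberately omitted here.
 *
 *	unsigned int bs = crypto_cipher_blocksize(tfm);
 *	unsigned int i;
 *
 *	for (i = 0; i + bs <= len; i += bs)
 *		crypto_cipher_encrypt_one(tfm, buf + i, buf + i);
 */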
1708 
1709 static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
1710 {
1711 	return (struct crypto_comp *)tfm;
1712 }
1713 
1714 static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
1715 {
1716 	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
1717 	       CRYPTO_ALG_TYPE_MASK);
1718 	return __crypto_comp_cast(tfm);
1719 }
1720 
1721 static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
1722 						    u32 type, u32 mask)
1723 {
1724 	type &= ~CRYPTO_ALG_TYPE_MASK;
1725 	type |= CRYPTO_ALG_TYPE_COMPRESS;
1726 	mask |= CRYPTO_ALG_TYPE_MASK;
1727 
1728 	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
1729 }
1730 
1731 static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
1732 {
1733 	return &tfm->base;
1734 }
1735 
1736 static inline void crypto_free_comp(struct crypto_comp *tfm)
1737 {
1738 	crypto_free_tfm(crypto_comp_tfm(tfm));
1739 }
1740 
1741 static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
1742 {
1743 	type &= ~CRYPTO_ALG_TYPE_MASK;
1744 	type |= CRYPTO_ALG_TYPE_COMPRESS;
1745 	mask |= CRYPTO_ALG_TYPE_MASK;
1746 
1747 	return crypto_has_alg(alg_name, type, mask);
1748 }
1749 
1750 static inline const char *crypto_comp_name(struct crypto_comp *tfm)
1751 {
1752 	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
1753 }
1754 
1755 static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
1756 {
1757 	return &crypto_comp_tfm(tfm)->crt_compress;
1758 }
1759 
1760 static inline int crypto_comp_compress(struct crypto_comp *tfm,
1761                                        const u8 *src, unsigned int slen,
1762                                        u8 *dst, unsigned int *dlen)
1763 {
1764 	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
1765 						  src, slen, dst, dlen);
1766 }
1767 
1768 static inline int crypto_comp_decompress(struct crypto_comp *tfm,
1769                                          const u8 *src, unsigned int slen,
1770                                          u8 *dst, unsigned int *dlen)
1771 {
1772 	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
1773 						    src, slen, dst, dlen);
1774 }
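
/*
 * A compress/decompress round trip with the synchronous crypto_comp
 * interface, as a sketch.  The algorithm name "deflate" and the buffer sizes
 * are assumptions.  On input, *dlen holds the size of the destination buffer;
 * on success it is updated to the number of bytes actually written.
 *
 *	struct crypto_comp *tfm;
 *	u8 src[128], zbuf[256], back[128];
 *	unsigned int zlen = sizeof(zbuf), blen = sizeof(back);
 *	int ret;
 *
 *	tfm = crypto_alloc_comp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	ret = crypto_comp_compress(tfm, src, sizeof(src), zbuf, &zlen);
 *	if (!ret)
 *		ret = crypto_comp_decompress(tfm, zbuf, zlen, back, &blen);
 *
 *	crypto_free_comp(tfm);
 */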
1775 
1776 #endif	/* _LINUX_CRYPTO_H */
1777 
1778