/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <[email protected]>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/align.h>
#include <linux/cache.h>
#include <linux/crypto.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/types.h>

#include <asm/unaligned.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		127
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15
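
/*
 * Illustrative sketch (not part of the original header): a caller that
 * needs a temporary block big enough for any cipher can combine these
 * maxima to size and align an on-stack buffer. Buffer names are
 * hypothetical.
 *
 *	u8 buf[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
 *	u8 *tmp = PTR_ALIGN(&buf[0], MAX_CIPHER_ALIGNMASK + 1);
 */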

#ifdef ARCH_DMA_MINALIGN
#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN
#else
#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN
#endif

#define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))

struct crypto_aead;
struct crypto_instance;
struct module;
struct notifier_block;
struct rtattr;
struct seq_file;
struct sk_buff;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);
#ifdef CONFIG_CRYPTO_STATS
	int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
#endif

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;

	union {
		/* Node in list of instances after registration. */
		struct hlist_node list;
		/* List of attached spawns before registration. */
		struct crypto_spawn *spawns;
	};

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	union {
		/* Back pointer to instance after registration. */
		struct crypto_instance *inst;
		/* Spawn list pointer prior to registration. */
		struct crypto_spawn *next;
	};
	const struct crypto_type *frontend;
	u32 mask;
	bool dead;
	bool registered;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);
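
/*
 * A minimal registration sketch, assuming a hypothetical "example"
 * template with a create callback example_create():
 *
 *	static struct crypto_template example_tmpl = {
 *		.name	= "example",
 *		.create	= example_create,
 *		.module	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return crypto_register_template(&example_tmpl);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		crypto_unregister_template(&example_tmpl);
 *	}
 */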

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);

int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
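
/*
 * Inside a template's ->create() callback, an inner algorithm is
 * typically bound with crypto_grab_spawn() and released again on the
 * error path with crypto_drop_spawn(). A sketch with hypothetical names
 * and abbreviated error handling:
 *
 *	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
 *	int err;
 *
 *	err = crypto_grab_spawn(spawn, inst, cipher_name, 0, mask);
 *	if (err)
 *		goto err_free_inst;
 */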

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
				 struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
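
/*
 * Typical driver-side queue usage (a sketch; the device structure "dd"
 * and its lock are assumptions, not defined here):
 *
 *	crypto_init_queue(&dd->queue, 50);
 *
 *	spin_lock_irqsave(&dd->lock, flags);
 *	err = crypto_enqueue_request(&dd->queue, &req->base);
 *	spin_unlock_irqrestore(&dd->lock, flags);
 *
 * Once max_qlen is reached, a request without CRYPTO_TFM_REQ_MAY_BACKLOG
 * is refused, while one with the flag set is still queued and the call
 * returns -EBUSY to signal that it was backlogged.
 */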

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;
		unsigned long l;

		while (size > 0) {
			l = get_unaligned(d) ^ get_unaligned(s++);
			put_unaligned(l, d++);
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;
		unsigned long l;

		while (size > 0) {
			l = get_unaligned(s1++) ^ get_unaligned(s2++);
			put_unaligned(l, d++);
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}
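
/*
 * Together with crypto_inc(), these helpers cover the common CTR-style
 * pattern: bump a big-endian counter block, then XOR the generated
 * keystream into the data, either in place or into a separate buffer.
 * Sketch only; AES_BLOCK_SIZE and the buffer names are assumptions:
 *
 *	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 *	crypto_xor(data, keystream, AES_BLOCK_SIZE);
 *	crypto_xor_cpy(dst, src, keystream, AES_BLOCK_SIZE);
 */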

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
					 unsigned int align)
{
	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
}

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx_align(tfm, crypto_tfm_alg_alignmask(tfm) + 1);
}
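
/*
 * A driver relying on crypto_tfm_ctx_aligned() has to reserve worst-case
 * padding in its context size so the aligned pointer still lands inside
 * the allocation. A hedged sketch with a hypothetical context struct:
 *
 *	.cra_alignmask	= 15,
 *	.cra_ctxsize	= sizeof(struct example_ctx) + 15,
 *
 *	struct example_ctx *ctx = crypto_tfm_ctx_aligned(tfm);
 */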

static inline unsigned int crypto_dma_align(void)
{
	return CRYPTO_DMA_ALIGN;
}

static inline unsigned int crypto_dma_padding(void)
{
	return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx_align(tfm, crypto_dma_align());
}
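
/*
 * The DMA variant pairs crypto_dma_padding() with crypto_tfm_ctx_dma()
 * so the context is aligned for DMA. A sketch with a hypothetical
 * context struct, sizing the context at algorithm setup time:
 *
 *	alg->base.cra_ctxsize = sizeof(struct example_ctx) +
 *				crypto_dma_padding();
 *
 *	struct example_ctx *ctx = crypto_tfm_ctx_dma(tfm);
 */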

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}
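
/*
 * The canonical dequeue loop in a driver pulls the backlog pointer
 * before dequeuing, then tells the owner of a backlogged request that
 * it is now in progress (locking elided, sketch only):
 *
 *	backlog = crypto_get_backlog(&dd->queue);
 *	async_req = crypto_dequeue_request(&dd->queue);
 *
 *	if (backlog)
 *		crypto_request_complete(backlog, -EINPROGRESS);
 */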

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
	return (algt->type ^ off) & algt->mask & off;
}

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS	\
	(CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |	\
	 CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
	return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
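
/*
 * In a template's ->create() this is typically used as follows (sketch;
 * crypto_grab_skcipher() is declared elsewhere, names hypothetical):
 *
 *	algt = crypto_get_attr_type(tb);
 *	if (IS_ERR(algt))
 *		return PTR_ERR(algt);
 *	mask = crypto_algt_inherited_mask(algt);
 *	err = crypto_grab_skcipher(spawn, inst, name, 0, mask);
 *
 * with the instance then propagating the inner algorithm's inherited
 * flags into its own cra_flags.
 */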

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
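
/*
 * The usual application is constant-time authentication tag comparison,
 * e.g. on an AEAD decryption path (sketch, buffer names hypothetical):
 *
 *	if (crypto_memneq(computed_tag, received_tag, authsize))
 *		return -EBADMSG;
 */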

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};
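
/*
 * Consumers of these events register an ordinary notifier block, e.g.
 * (callback name hypothetical):
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call	= example_event,
 *	};
 *
 *	crypto_register_notifier(&example_nb);
 *
 * example_event() is then invoked with one of the CRYPTO_MSG_* values
 * above as its action argument.
 */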

static inline void crypto_request_complete(struct crypto_async_request *req,
					   int err)
{
	req->complete(req->data, err);
}

#endif	/* _CRYPTO_ALGAPI_H */