xref: /linux-6.15/crypto/cryptd.c (revision 84ede58d)
1 /*
2  * Software async crypto daemon.
3  *
4  * Copyright (c) 2006 Herbert Xu <[email protected]>
5  *
6  * Added AEAD support to cryptd.
7  *    Authors: Tadeusz Struk ([email protected])
8  *             Adrian Hoban <[email protected]>
9  *             Gabriele Paoloni <[email protected]>
10  *             Aidan O'Mahony ([email protected])
11  *    Copyright (c) 2010, Intel Corporation.
12  *
13  * This program is free software; you can redistribute it and/or modify it
14  * under the terms of the GNU General Public License as published by the Free
15  * Software Foundation; either version 2 of the License, or (at your option)
16  * any later version.
17  *
18  */
19 
20 #include <crypto/internal/hash.h>
21 #include <crypto/internal/aead.h>
22 #include <crypto/internal/skcipher.h>
23 #include <crypto/cryptd.h>
24 #include <linux/atomic.h>
25 #include <linux/err.h>
26 #include <linux/init.h>
27 #include <linux/kernel.h>
28 #include <linux/list.h>
29 #include <linux/module.h>
30 #include <linux/scatterlist.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33 #include <linux/workqueue.h>
34 
35 static unsigned int cryptd_max_cpu_qlen = 1000;
36 module_param(cryptd_max_cpu_qlen, uint, 0);
37 MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Maximum per-CPU cryptd queue length");
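/*
 * With permissions of 0 the parameter is not exposed in sysfs and can only
 * be set at load time, for example (value purely illustrative):
 *
 *	modprobe cryptd cryptd_max_cpu_qlen=2000
 *
 * or cryptd.cryptd_max_cpu_qlen=2000 on the kernel command line when
 * cryptd is built in.
 */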
38 
39 static struct workqueue_struct *cryptd_wq;
40 
41 struct cryptd_cpu_queue {
42 	struct crypto_queue queue;
43 	struct work_struct work;
44 };
45 
46 struct cryptd_queue {
47 	struct cryptd_cpu_queue __percpu *cpu_queue;
48 };
49 
50 struct cryptd_instance_ctx {
51 	struct crypto_spawn spawn;
52 	struct cryptd_queue *queue;
53 };
54 
55 struct skcipherd_instance_ctx {
56 	struct crypto_skcipher_spawn spawn;
57 	struct cryptd_queue *queue;
58 };
59 
60 struct hashd_instance_ctx {
61 	struct crypto_shash_spawn spawn;
62 	struct cryptd_queue *queue;
63 };
64 
65 struct aead_instance_ctx {
66 	struct crypto_aead_spawn aead_spawn;
67 	struct cryptd_queue *queue;
68 };
69 
70 struct cryptd_skcipher_ctx {
71 	atomic_t refcnt;
72 	struct crypto_sync_skcipher *child;
73 };
74 
75 struct cryptd_skcipher_request_ctx {
76 	crypto_completion_t complete;
77 };
78 
79 struct cryptd_hash_ctx {
80 	atomic_t refcnt;
81 	struct crypto_shash *child;
82 };
83 
84 struct cryptd_hash_request_ctx {
85 	crypto_completion_t complete;
86 	struct shash_desc desc;
87 };
88 
89 struct cryptd_aead_ctx {
90 	atomic_t refcnt;
91 	struct crypto_aead *child;
92 };
93 
94 struct cryptd_aead_request_ctx {
95 	crypto_completion_t complete;
96 };
97 
98 static void cryptd_queue_worker(struct work_struct *work);
99 
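/*
 * Allocate one request queue and one work item per possible CPU, each
 * queue capped at max_cpu_qlen entries.
 */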
100 static int cryptd_init_queue(struct cryptd_queue *queue,
101 			     unsigned int max_cpu_qlen)
102 {
103 	int cpu;
104 	struct cryptd_cpu_queue *cpu_queue;
105 
106 	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
107 	if (!queue->cpu_queue)
108 		return -ENOMEM;
109 	for_each_possible_cpu(cpu) {
110 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
111 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
112 		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
113 	}
114 	pr_info("cryptd: max_cpu_qlen set to %u\n", max_cpu_qlen);
115 	return 0;
116 }
117 
118 static void cryptd_fini_queue(struct cryptd_queue *queue)
119 {
120 	int cpu;
121 	struct cryptd_cpu_queue *cpu_queue;
122 
123 	for_each_possible_cpu(cpu) {
124 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
125 		BUG_ON(cpu_queue->queue.qlen);
126 	}
127 	free_percpu(queue->cpu_queue);
128 }
129 
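/*
 * Place the request on the local CPU's queue and kick that CPU's work
 * item.  If the transform context is refcounted (refcnt != 0, i.e. the
 * tfm was allocated through the cryptd_alloc_* helpers), take an extra
 * reference per accepted request so the tfm outlives its queued work.
 */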
130 static int cryptd_enqueue_request(struct cryptd_queue *queue,
131 				  struct crypto_async_request *request)
132 {
133 	int cpu, err;
134 	struct cryptd_cpu_queue *cpu_queue;
135 	atomic_t *refcnt;
136 
137 	cpu = get_cpu();
138 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
139 	err = crypto_enqueue_request(&cpu_queue->queue, request);
140 
141 	refcnt = crypto_tfm_ctx(request->tfm);
142 
143 	if (err == -ENOSPC)
144 		goto out_put_cpu;
145 
146 	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
147 
148 	if (!atomic_read(refcnt))
149 		goto out_put_cpu;
150 
151 	atomic_inc(refcnt);
152 
153 out_put_cpu:
154 	put_cpu();
155 
156 	return err;
157 }
158 
159 /* Called in workqueue context: do one real crypto operation (via
160  * req->complete) and reschedule itself if there is more work to
161  * do. */
162 static void cryptd_queue_worker(struct work_struct *work)
163 {
164 	struct cryptd_cpu_queue *cpu_queue;
165 	struct crypto_async_request *req, *backlog;
166 
167 	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
168 	/*
169 	 * Only handle one request at a time to avoid hogging crypto workqueue.
170 	 * preempt_disable/enable is used to prevent being preempted by
171 	 * cryptd_enqueue_request(). local_bh_disable/enable is used to keep
172 	 * cryptd_enqueue_request() from being entered from softirq context.
173 	 */
174 	local_bh_disable();
175 	preempt_disable();
176 	backlog = crypto_get_backlog(&cpu_queue->queue);
177 	req = crypto_dequeue_request(&cpu_queue->queue);
178 	preempt_enable();
179 	local_bh_enable();
180 
181 	if (!req)
182 		return;
183 
184 	if (backlog)
185 		backlog->complete(backlog, -EINPROGRESS);
186 	req->complete(req, 0);
187 
188 	if (cpu_queue->queue.qlen)
189 		queue_work(cryptd_wq, &cpu_queue->work);
190 }
191 
192 static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
193 {
194 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
195 	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
196 	return ictx->queue;
197 }
198 
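/*
 * Carry the CRYPTO_ALG_INTERNAL bit over from the template attributes so
 * that cryptd can also wrap algorithms that are marked internal-only
 * (such as the bare SIMD implementations registered by accelerated
 * drivers).
 */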
199 static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
200 					 u32 *mask)
201 {
202 	struct crypto_attr_type *algt;
203 
204 	algt = crypto_get_attr_type(tb);
205 	if (IS_ERR(algt))
206 		return;
207 
208 	*type |= algt->type & CRYPTO_ALG_INTERNAL;
209 	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
210 }
211 
212 static int cryptd_init_instance(struct crypto_instance *inst,
213 				struct crypto_alg *alg)
214 {
215 	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
216 		     "cryptd(%s)",
217 		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
218 		return -ENAMETOOLONG;
219 
220 	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
221 
222 	inst->alg.cra_priority = alg->cra_priority + 50;
223 	inst->alg.cra_blocksize = alg->cra_blocksize;
224 	inst->alg.cra_alignmask = alg->cra_alignmask;
225 
226 	return 0;
227 }
228 
229 static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
230 				   unsigned int tail)
231 {
232 	char *p;
233 	struct crypto_instance *inst;
234 	int err;
235 
236 	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
237 	if (!p)
238 		return ERR_PTR(-ENOMEM);
239 
240 	inst = (void *)(p + head);
241 
242 	err = cryptd_init_instance(inst, alg);
243 	if (err)
244 		goto out_free_inst;
245 
246 out:
247 	return p;
248 
249 out_free_inst:
250 	kfree(p);
251 	p = ERR_PTR(err);
252 	goto out;
253 }
254 
255 static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
256 				  const u8 *key, unsigned int keylen)
257 {
258 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
259 	struct crypto_sync_skcipher *child = ctx->child;
260 	int err;
261 
262 	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
263 	crypto_sync_skcipher_set_flags(child,
264 				       crypto_skcipher_get_flags(parent) &
265 					 CRYPTO_TFM_REQ_MASK);
266 	err = crypto_sync_skcipher_setkey(child, key, keylen);
267 	crypto_skcipher_set_flags(parent,
268 				  crypto_sync_skcipher_get_flags(child) &
269 					  CRYPTO_TFM_RES_MASK);
270 	return err;
271 }
272 
273 static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
274 {
275 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
276 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
277 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
278 	int refcnt = atomic_read(&ctx->refcnt);
279 
280 	local_bh_disable();
281 	rctx->complete(&req->base, err);
282 	local_bh_enable();
283 
284 	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
285 		crypto_free_skcipher(tfm);
286 }
287 
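/*
 * Workqueue callbacks: run the real operation synchronously on the child
 * tfm, then complete the original request.  An err of -EINPROGRESS only
 * signals that a backlogged request has been accepted, so it is passed
 * straight through to the completion handler.
 */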
288 static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
289 				    int err)
290 {
291 	struct skcipher_request *req = skcipher_request_cast(base);
292 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
293 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
294 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
295 	struct crypto_sync_skcipher *child = ctx->child;
296 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
297 
298 	if (unlikely(err == -EINPROGRESS))
299 		goto out;
300 
301 	skcipher_request_set_sync_tfm(subreq, child);
302 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
303 				      NULL, NULL);
304 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
305 				   req->iv);
306 
307 	err = crypto_skcipher_encrypt(subreq);
308 	skcipher_request_zero(subreq);
309 
310 	req->base.complete = rctx->complete;
311 
312 out:
313 	cryptd_skcipher_complete(req, err);
314 }
315 
316 static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
317 				    int err)
318 {
319 	struct skcipher_request *req = skcipher_request_cast(base);
320 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
321 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
322 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
323 	struct crypto_sync_skcipher *child = ctx->child;
324 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
325 
326 	if (unlikely(err == -EINPROGRESS))
327 		goto out;
328 
329 	skcipher_request_set_sync_tfm(subreq, child);
330 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
331 				      NULL, NULL);
332 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
333 				   req->iv);
334 
335 	err = crypto_skcipher_decrypt(subreq);
336 	skcipher_request_zero(subreq);
337 
338 	req->base.complete = rctx->complete;
339 
340 out:
341 	cryptd_skcipher_complete(req, err);
342 }
343 
344 static int cryptd_skcipher_enqueue(struct skcipher_request *req,
345 				   crypto_completion_t compl)
346 {
347 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
348 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
349 	struct cryptd_queue *queue;
350 
351 	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
352 	rctx->complete = req->base.complete;
353 	req->base.complete = compl;
354 
355 	return cryptd_enqueue_request(queue, &req->base);
356 }
357 
358 static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
359 {
360 	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
361 }
362 
363 static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
364 {
365 	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
366 }
367 
368 static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
369 {
370 	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
371 	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
372 	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
373 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
374 	struct crypto_skcipher *cipher;
375 
376 	cipher = crypto_spawn_skcipher(spawn);
377 	if (IS_ERR(cipher))
378 		return PTR_ERR(cipher);
379 
380 	ctx->child = (struct crypto_sync_skcipher *)cipher;
381 	crypto_skcipher_set_reqsize(
382 		tfm, sizeof(struct cryptd_skcipher_request_ctx));
383 	return 0;
384 }
385 
386 static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
387 {
388 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
389 
390 	crypto_free_sync_skcipher(ctx->child);
391 }
392 
393 static void cryptd_skcipher_free(struct skcipher_instance *inst)
394 {
395 	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
396 
397 	crypto_drop_skcipher(&ctx->spawn);
398 }
399 
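/*
 * Build a cryptd(<skcipher>) instance: its encrypt/decrypt entry points
 * only enqueue the request; the per-CPU worker later performs the real
 * operation on the wrapped algorithm through the callbacks above.
 */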
400 static int cryptd_create_skcipher(struct crypto_template *tmpl,
401 				  struct rtattr **tb,
402 				  struct cryptd_queue *queue)
403 {
404 	struct skcipherd_instance_ctx *ctx;
405 	struct skcipher_instance *inst;
406 	struct skcipher_alg *alg;
407 	const char *name;
408 	u32 type;
409 	u32 mask;
410 	int err;
411 
412 	type = 0;
413 	mask = CRYPTO_ALG_ASYNC;
414 
415 	cryptd_check_internal(tb, &type, &mask);
416 
417 	name = crypto_attr_alg_name(tb[1]);
418 	if (IS_ERR(name))
419 		return PTR_ERR(name);
420 
421 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
422 	if (!inst)
423 		return -ENOMEM;
424 
425 	ctx = skcipher_instance_ctx(inst);
426 	ctx->queue = queue;
427 
428 	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
429 	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
430 	if (err)
431 		goto out_free_inst;
432 
433 	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
434 	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
435 	if (err)
436 		goto out_drop_skcipher;
437 
438 	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
439 				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
440 
441 	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
442 	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
443 	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
444 	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
445 
446 	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
447 
448 	inst->alg.init = cryptd_skcipher_init_tfm;
449 	inst->alg.exit = cryptd_skcipher_exit_tfm;
450 
451 	inst->alg.setkey = cryptd_skcipher_setkey;
452 	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
453 	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
454 
455 	inst->free = cryptd_skcipher_free;
456 
457 	err = skcipher_register_instance(tmpl, inst);
458 	if (err) {
459 out_drop_skcipher:
460 		crypto_drop_skcipher(&ctx->spawn);
461 out_free_inst:
462 		kfree(inst);
463 	}
464 	return err;
465 }
466 
467 static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
468 {
469 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
470 	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
471 	struct crypto_shash_spawn *spawn = &ictx->spawn;
472 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
473 	struct crypto_shash *hash;
474 
475 	hash = crypto_spawn_shash(spawn);
476 	if (IS_ERR(hash))
477 		return PTR_ERR(hash);
478 
479 	ctx->child = hash;
480 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
481 				 sizeof(struct cryptd_hash_request_ctx) +
482 				 crypto_shash_descsize(hash));
483 	return 0;
484 }
485 
486 static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
487 {
488 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
489 
490 	crypto_free_shash(ctx->child);
491 }
492 
493 static int cryptd_hash_setkey(struct crypto_ahash *parent,
494 				   const u8 *key, unsigned int keylen)
495 {
496 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
497 	struct crypto_shash *child = ctx->child;
498 	int err;
499 
500 	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
501 	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
502 				      CRYPTO_TFM_REQ_MASK);
503 	err = crypto_shash_setkey(child, key, keylen);
504 	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
505 				       CRYPTO_TFM_RES_MASK);
506 	return err;
507 }
508 
509 static int cryptd_hash_enqueue(struct ahash_request *req,
510 				crypto_completion_t compl)
511 {
512 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
513 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
514 	struct cryptd_queue *queue =
515 		cryptd_get_queue(crypto_ahash_tfm(tfm));
516 
517 	rctx->complete = req->base.complete;
518 	req->base.complete = compl;
519 
520 	return cryptd_enqueue_request(queue, &req->base);
521 }
522 
523 static void cryptd_hash_complete(struct ahash_request *req, int err)
524 {
525 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
526 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
527 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
528 	int refcnt = atomic_read(&ctx->refcnt);
529 
530 	local_bh_disable();
531 	rctx->complete(&req->base, err);
532 	local_bh_enable();
533 
534 	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
535 		crypto_free_ahash(tfm);
536 }
537 
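/*
 * Workqueue handlers for the ahash operations.  Each one runs the
 * corresponding shash operation on the child tfm, using the shash_desc
 * embedded in the request context, and then completes the original
 * request.
 */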
538 static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
539 {
540 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
541 	struct crypto_shash *child = ctx->child;
542 	struct ahash_request *req = ahash_request_cast(req_async);
543 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
544 	struct shash_desc *desc = &rctx->desc;
545 
546 	if (unlikely(err == -EINPROGRESS))
547 		goto out;
548 
549 	desc->tfm = child;
550 
551 	err = crypto_shash_init(desc);
552 
553 	req->base.complete = rctx->complete;
554 
555 out:
556 	cryptd_hash_complete(req, err);
557 }
558 
559 static int cryptd_hash_init_enqueue(struct ahash_request *req)
560 {
561 	return cryptd_hash_enqueue(req, cryptd_hash_init);
562 }
563 
564 static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
565 {
566 	struct ahash_request *req = ahash_request_cast(req_async);
567 	struct cryptd_hash_request_ctx *rctx;
568 
569 	rctx = ahash_request_ctx(req);
570 
571 	if (unlikely(err == -EINPROGRESS))
572 		goto out;
573 
574 	err = shash_ahash_update(req, &rctx->desc);
575 
576 	req->base.complete = rctx->complete;
577 
578 out:
579 	cryptd_hash_complete(req, err);
580 }
581 
582 static int cryptd_hash_update_enqueue(struct ahash_request *req)
583 {
584 	return cryptd_hash_enqueue(req, cryptd_hash_update);
585 }
586 
587 static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
588 {
589 	struct ahash_request *req = ahash_request_cast(req_async);
590 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
591 
592 	if (unlikely(err == -EINPROGRESS))
593 		goto out;
594 
595 	err = crypto_shash_final(&rctx->desc, req->result);
596 
597 	req->base.complete = rctx->complete;
598 
599 out:
600 	cryptd_hash_complete(req, err);
601 }
602 
603 static int cryptd_hash_final_enqueue(struct ahash_request *req)
604 {
605 	return cryptd_hash_enqueue(req, cryptd_hash_final);
606 }
607 
608 static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
609 {
610 	struct ahash_request *req = ahash_request_cast(req_async);
611 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
612 
613 	if (unlikely(err == -EINPROGRESS))
614 		goto out;
615 
616 	err = shash_ahash_finup(req, &rctx->desc);
617 
618 	req->base.complete = rctx->complete;
619 
620 out:
621 	cryptd_hash_complete(req, err);
622 }
623 
624 static int cryptd_hash_finup_enqueue(struct ahash_request *req)
625 {
626 	return cryptd_hash_enqueue(req, cryptd_hash_finup);
627 }
628 
629 static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
630 {
631 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
632 	struct crypto_shash *child = ctx->child;
633 	struct ahash_request *req = ahash_request_cast(req_async);
634 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
635 	struct shash_desc *desc = &rctx->desc;
636 
637 	if (unlikely(err == -EINPROGRESS))
638 		goto out;
639 
640 	desc->tfm = child;
641 
642 	err = shash_ahash_digest(req, desc);
643 
644 	req->base.complete = rctx->complete;
645 
646 out:
647 	cryptd_hash_complete(req, err);
648 }
649 
650 static int cryptd_hash_digest_enqueue(struct ahash_request *req)
651 {
652 	return cryptd_hash_enqueue(req, cryptd_hash_digest);
653 }
654 
655 static int cryptd_hash_export(struct ahash_request *req, void *out)
656 {
657 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
658 
659 	return crypto_shash_export(&rctx->desc, out);
660 }
661 
662 static int cryptd_hash_import(struct ahash_request *req, const void *in)
663 {
664 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
665 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
666 	struct shash_desc *desc = cryptd_shash_desc(req);
667 
668 	desc->tfm = ctx->child;
669 
670 	return crypto_shash_import(desc, in);
671 }
672 
673 static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
674 			      struct cryptd_queue *queue)
675 {
676 	struct hashd_instance_ctx *ctx;
677 	struct ahash_instance *inst;
678 	struct shash_alg *salg;
679 	struct crypto_alg *alg;
680 	u32 type = 0;
681 	u32 mask = 0;
682 	int err;
683 
684 	cryptd_check_internal(tb, &type, &mask);
685 
686 	salg = shash_attr_alg(tb[1], type, mask);
687 	if (IS_ERR(salg))
688 		return PTR_ERR(salg);
689 
690 	alg = &salg->base;
691 	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
692 				     sizeof(*ctx));
693 	err = PTR_ERR(inst);
694 	if (IS_ERR(inst))
695 		goto out_put_alg;
696 
697 	ctx = ahash_instance_ctx(inst);
698 	ctx->queue = queue;
699 
700 	err = crypto_init_shash_spawn(&ctx->spawn, salg,
701 				      ahash_crypto_instance(inst));
702 	if (err)
703 		goto out_free_inst;
704 
705 	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
706 		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
707 				   CRYPTO_ALG_OPTIONAL_KEY));
708 
709 	inst->alg.halg.digestsize = salg->digestsize;
710 	inst->alg.halg.statesize = salg->statesize;
711 	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
712 
713 	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
714 	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
715 
716 	inst->alg.init   = cryptd_hash_init_enqueue;
717 	inst->alg.update = cryptd_hash_update_enqueue;
718 	inst->alg.final  = cryptd_hash_final_enqueue;
719 	inst->alg.finup  = cryptd_hash_finup_enqueue;
720 	inst->alg.export = cryptd_hash_export;
721 	inst->alg.import = cryptd_hash_import;
722 	if (crypto_shash_alg_has_setkey(salg))
723 		inst->alg.setkey = cryptd_hash_setkey;
724 	inst->alg.digest = cryptd_hash_digest_enqueue;
725 
726 	err = ahash_register_instance(tmpl, inst);
727 	if (err) {
728 		crypto_drop_shash(&ctx->spawn);
729 out_free_inst:
730 		kfree(inst);
731 	}
732 
733 out_put_alg:
734 	crypto_mod_put(alg);
735 	return err;
736 }
737 
738 static int cryptd_aead_setkey(struct crypto_aead *parent,
739 			      const u8 *key, unsigned int keylen)
740 {
741 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
742 	struct crypto_aead *child = ctx->child;
743 
744 	return crypto_aead_setkey(child, key, keylen);
745 }
746 
747 static int cryptd_aead_setauthsize(struct crypto_aead *parent,
748 				   unsigned int authsize)
749 {
750 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
751 	struct crypto_aead *child = ctx->child;
752 
753 	return crypto_aead_setauthsize(child, authsize);
754 }
755 
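/*
 * Common workqueue path for AEAD requests: redirect the request to the
 * child tfm, run the requested encrypt/decrypt operation and complete the
 * original request, dropping the tfm reference taken at enqueue time.
 */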
756 static void cryptd_aead_crypt(struct aead_request *req,
757 			struct crypto_aead *child,
758 			int err,
759 			int (*crypt)(struct aead_request *req))
760 {
761 	struct cryptd_aead_request_ctx *rctx;
762 	struct cryptd_aead_ctx *ctx;
763 	crypto_completion_t compl;
764 	struct crypto_aead *tfm;
765 	int refcnt;
766 
767 	rctx = aead_request_ctx(req);
768 	compl = rctx->complete;
769 
770 	tfm = crypto_aead_reqtfm(req);
771 
772 	if (unlikely(err == -EINPROGRESS))
773 		goto out;
774 	aead_request_set_tfm(req, child);
775 	err = crypt(req);
776 
777 out:
778 	ctx = crypto_aead_ctx(tfm);
779 	refcnt = atomic_read(&ctx->refcnt);
780 
781 	local_bh_disable();
782 	compl(&req->base, err);
783 	local_bh_enable();
784 
785 	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
786 		crypto_free_aead(tfm);
787 }
788 
789 static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
790 {
791 	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
792 	struct crypto_aead *child = ctx->child;
793 	struct aead_request *req;
794 
795 	req = container_of(areq, struct aead_request, base);
796 	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
797 }
798 
799 static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
800 {
801 	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
802 	struct crypto_aead *child = ctx->child;
803 	struct aead_request *req;
804 
805 	req = container_of(areq, struct aead_request, base);
806 	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
807 }
808 
809 static int cryptd_aead_enqueue(struct aead_request *req,
810 				    crypto_completion_t compl)
811 {
812 	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
813 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
814 	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
815 
816 	rctx->complete = req->base.complete;
817 	req->base.complete = compl;
818 	return cryptd_enqueue_request(queue, &req->base);
819 }
820 
821 static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
822 {
823 	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
824 }
825 
826 static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
827 {
828 	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
829 }
830 
831 static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
832 {
833 	struct aead_instance *inst = aead_alg_instance(tfm);
834 	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
835 	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
836 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
837 	struct crypto_aead *cipher;
838 
839 	cipher = crypto_spawn_aead(spawn);
840 	if (IS_ERR(cipher))
841 		return PTR_ERR(cipher);
842 
843 	ctx->child = cipher;
844 	crypto_aead_set_reqsize(
845 		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
846 			 crypto_aead_reqsize(cipher)));
847 	return 0;
848 }
849 
850 static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
851 {
852 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
853 	crypto_free_aead(ctx->child);
854 }
855 
856 static int cryptd_create_aead(struct crypto_template *tmpl,
857 			      struct rtattr **tb,
858 			      struct cryptd_queue *queue)
859 {
860 	struct aead_instance_ctx *ctx;
861 	struct aead_instance *inst;
862 	struct aead_alg *alg;
863 	const char *name;
864 	u32 type = 0;
865 	u32 mask = CRYPTO_ALG_ASYNC;
866 	int err;
867 
868 	cryptd_check_internal(tb, &type, &mask);
869 
870 	name = crypto_attr_alg_name(tb[1]);
871 	if (IS_ERR(name))
872 		return PTR_ERR(name);
873 
874 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
875 	if (!inst)
876 		return -ENOMEM;
877 
878 	ctx = aead_instance_ctx(inst);
879 	ctx->queue = queue;
880 
881 	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
882 	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
883 	if (err)
884 		goto out_free_inst;
885 
886 	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
887 	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
888 	if (err)
889 		goto out_drop_aead;
890 
891 	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
892 				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
893 	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
894 
895 	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
896 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
897 
898 	inst->alg.init = cryptd_aead_init_tfm;
899 	inst->alg.exit = cryptd_aead_exit_tfm;
900 	inst->alg.setkey = cryptd_aead_setkey;
901 	inst->alg.setauthsize = cryptd_aead_setauthsize;
902 	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
903 	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
904 
905 	err = aead_register_instance(tmpl, inst);
906 	if (err) {
907 out_drop_aead:
908 		crypto_drop_aead(&ctx->aead_spawn);
909 out_free_inst:
910 		kfree(inst);
911 	}
912 	return err;
913 }
914 
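/*
 * Single queue shared by every cryptd instance; each request is enqueued
 * on the submitting CPU's per-CPU sub-queue.
 */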
915 static struct cryptd_queue queue;
916 
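/*
 * Template entry point: dispatch on the algorithm type requested by the
 * caller.  Skcipher lookups reach this through the legacy BLKCIPHER type
 * bits once the type mask is applied, hence that case label.
 */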
917 static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
918 {
919 	struct crypto_attr_type *algt;
920 
921 	algt = crypto_get_attr_type(tb);
922 	if (IS_ERR(algt))
923 		return PTR_ERR(algt);
924 
925 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
926 	case CRYPTO_ALG_TYPE_BLKCIPHER:
927 		return cryptd_create_skcipher(tmpl, tb, &queue);
928 	case CRYPTO_ALG_TYPE_HASH:
929 		return cryptd_create_hash(tmpl, tb, &queue);
930 	case CRYPTO_ALG_TYPE_AEAD:
931 		return cryptd_create_aead(tmpl, tb, &queue);
932 	}
933 
934 	return -EINVAL;
935 }
936 
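/*
 * Template-level free callback.  It covers the ahash and aead instances;
 * skcipher instances are released through cryptd_skcipher_free(), which
 * cryptd_create_skcipher() installs as inst->free.
 */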
937 static void cryptd_free(struct crypto_instance *inst)
938 {
939 	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
940 	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
941 	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
942 
943 	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
944 	case CRYPTO_ALG_TYPE_AHASH:
945 		crypto_drop_shash(&hctx->spawn);
946 		kfree(ahash_instance(inst));
947 		return;
948 	case CRYPTO_ALG_TYPE_AEAD:
949 		crypto_drop_aead(&aead_ctx->aead_spawn);
950 		kfree(aead_instance(inst));
951 		return;
952 	default:
953 		crypto_drop_spawn(&ctx->spawn);
954 		kfree(inst);
955 	}
956 }
957 
958 static struct crypto_template cryptd_tmpl = {
959 	.name = "cryptd",
960 	.create = cryptd_create,
961 	.free = cryptd_free,
962 	.module = THIS_MODULE,
963 };
964 
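/*
 * The helpers below give callers an explicit handle on a cryptd-wrapped
 * transform.  A minimal usage sketch (the driver name is purely
 * illustrative):
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("__cbc-aes-aesni",
 *				     CRYPTO_ALG_INTERNAL,
 *				     CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 * Requests are then submitted against &ctfm->base like any other
 * crypto_skcipher; cryptd_skcipher_child() exposes the synchronous child
 * for callers that can run it directly, and the handle is released with
 * cryptd_free_skcipher().
 */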
965 struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
966 					      u32 type, u32 mask)
967 {
968 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
969 	struct cryptd_skcipher_ctx *ctx;
970 	struct crypto_skcipher *tfm;
971 
972 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
973 		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
974 		return ERR_PTR(-EINVAL);
975 
976 	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
977 	if (IS_ERR(tfm))
978 		return ERR_CAST(tfm);
979 
980 	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
981 		crypto_free_skcipher(tfm);
982 		return ERR_PTR(-EINVAL);
983 	}
984 
985 	ctx = crypto_skcipher_ctx(tfm);
986 	atomic_set(&ctx->refcnt, 1);
987 
988 	return container_of(tfm, struct cryptd_skcipher, base);
989 }
990 EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
991 
992 struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
993 {
994 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
995 
996 	return &ctx->child->base;
997 }
998 EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
999 
1000 bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
1001 {
1002 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
1003 
1004 	return atomic_read(&ctx->refcnt) - 1;
1005 }
1006 EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
1007 
1008 void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
1009 {
1010 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
1011 
1012 	if (atomic_dec_and_test(&ctx->refcnt))
1013 		crypto_free_skcipher(&tfm->base);
1014 }
1015 EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
1016 
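/*
 * Hash counterparts of the skcipher helpers: cryptd_alloc_ahash() wraps a
 * shash as "cryptd(<name>)", cryptd_ahash_child() and cryptd_shash_desc()
 * expose the underlying shash and the per-request descriptor, and
 * cryptd_ahash_queued() reports whether requests are still pending before
 * the handle is dropped with cryptd_free_ahash().
 */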
1017 struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
1018 					u32 type, u32 mask)
1019 {
1020 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1021 	struct cryptd_hash_ctx *ctx;
1022 	struct crypto_ahash *tfm;
1023 
1024 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1025 		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1026 		return ERR_PTR(-EINVAL);
1027 	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
1028 	if (IS_ERR(tfm))
1029 		return ERR_CAST(tfm);
1030 	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1031 		crypto_free_ahash(tfm);
1032 		return ERR_PTR(-EINVAL);
1033 	}
1034 
1035 	ctx = crypto_ahash_ctx(tfm);
1036 	atomic_set(&ctx->refcnt, 1);
1037 
1038 	return __cryptd_ahash_cast(tfm);
1039 }
1040 EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
1041 
1042 struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
1043 {
1044 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1045 
1046 	return ctx->child;
1047 }
1048 EXPORT_SYMBOL_GPL(cryptd_ahash_child);
1049 
1050 struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
1051 {
1052 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
1053 	return &rctx->desc;
1054 }
1055 EXPORT_SYMBOL_GPL(cryptd_shash_desc);
1056 
1057 bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
1058 {
1059 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1060 
1061 	return atomic_read(&ctx->refcnt) - 1;
1062 }
1063 EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
1064 
1065 void cryptd_free_ahash(struct cryptd_ahash *tfm)
1066 {
1067 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1068 
1069 	if (atomic_dec_and_test(&ctx->refcnt))
1070 		crypto_free_ahash(&tfm->base);
1071 }
1072 EXPORT_SYMBOL_GPL(cryptd_free_ahash);
1073 
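/*
 * AEAD counterparts of the helpers above: cryptd_aead_child() returns the
 * wrapped implementation and cryptd_free_aead() drops the reference taken
 * at allocation time.
 */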
1074 struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
1075 						  u32 type, u32 mask)
1076 {
1077 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1078 	struct cryptd_aead_ctx *ctx;
1079 	struct crypto_aead *tfm;
1080 
1081 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1082 		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1083 		return ERR_PTR(-EINVAL);
1084 	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
1085 	if (IS_ERR(tfm))
1086 		return ERR_CAST(tfm);
1087 	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1088 		crypto_free_aead(tfm);
1089 		return ERR_PTR(-EINVAL);
1090 	}
1091 
1092 	ctx = crypto_aead_ctx(tfm);
1093 	atomic_set(&ctx->refcnt, 1);
1094 
1095 	return __cryptd_aead_cast(tfm);
1096 }
1097 EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
1098 
1099 struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
1100 {
1101 	struct cryptd_aead_ctx *ctx;
1102 	ctx = crypto_aead_ctx(&tfm->base);
1103 	return ctx->child;
1104 }
1105 EXPORT_SYMBOL_GPL(cryptd_aead_child);
1106 
1107 bool cryptd_aead_queued(struct cryptd_aead *tfm)
1108 {
1109 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1110 
1111 	return atomic_read(&ctx->refcnt) - 1;
1112 }
1113 EXPORT_SYMBOL_GPL(cryptd_aead_queued);
1114 
1115 void cryptd_free_aead(struct cryptd_aead *tfm)
1116 {
1117 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1118 
1119 	if (atomic_dec_and_test(&ctx->refcnt))
1120 		crypto_free_aead(&tfm->base);
1121 }
1122 EXPORT_SYMBOL_GPL(cryptd_free_aead);
1123 
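/*
 * The workqueue and the per-CPU queues must exist before the template can
 * be instantiated.  cryptd_exit() tears the workqueue down first so that
 * no worker can touch the queues while they are being freed.
 */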
1124 static int __init cryptd_init(void)
1125 {
1126 	int err;
1127 
1128 	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
1129 				    1);
1130 	if (!cryptd_wq)
1131 		return -ENOMEM;
1132 
1133 	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
1134 	if (err)
1135 		goto err_destroy_wq;
1136 
1137 	err = crypto_register_template(&cryptd_tmpl);
1138 	if (err)
1139 		goto err_fini_queue;
1140 
1141 	return 0;
1142 
1143 err_fini_queue:
1144 	cryptd_fini_queue(&queue);
1145 err_destroy_wq:
1146 	destroy_workqueue(cryptd_wq);
1147 	return err;
1148 }
1149 
1150 static void __exit cryptd_exit(void)
1151 {
1152 	destroy_workqueue(cryptd_wq);
1153 	cryptd_fini_queue(&queue);
1154 	crypto_unregister_template(&cryptd_tmpl);
1155 }
1156 
1157 subsys_initcall(cryptd_init);
1158 module_exit(cryptd_exit);
1159 
1160 MODULE_LICENSE("GPL");
1161 MODULE_DESCRIPTION("Software async crypto daemon");
1162 MODULE_ALIAS_CRYPTO("cryptd");
1163