/* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */

/*-
 * The author of this code is Angelos D. Keromytis ([email protected])
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014-2021 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

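/*
 * Per-session state for the three kinds of transforms this driver can
 * run: keyed hashes/HMACs, ciphers, and compression.  A swcr_session
 * embeds all three; only the members relevant to the session's mode
 * are initialized.
 */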
struct swcr_auth {
        void *sw_ictx;
        void *sw_octx;
        struct auth_hash *sw_axf;
        uint16_t sw_mlen;
};

struct swcr_encdec {
        void *sw_kschedule;
        struct enc_xform *sw_exf;
};

struct swcr_compdec {
        struct comp_algo *sw_cxf;
};

struct swcr_session {
        struct mtx swcr_lock;
        int (*swcr_process)(struct swcr_session *, struct cryptop *);

        struct swcr_auth swcr_auth;
        struct swcr_encdec swcr_encdec;
        struct swcr_compdec swcr_compdec;
};

static int32_t swcr_id;

static void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

        return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
        unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
        unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
        const struct crypto_session_params *csp;
        struct swcr_encdec *sw;
        struct enc_xform *exf;
        size_t inlen, outlen;
        int i, blks, resid;
        struct crypto_buffer_cursor cc_in, cc_out;
        const unsigned char *inblk;
        unsigned char *outblk;
        int error;
        bool encrypting;

        error = 0;

        sw = &ses->swcr_encdec;
        exf = sw->sw_exf;
        csp = crypto_get_params(crp->crp_session);

        if (exf->native_blocksize == 0) {
                /* Check for non-padded data */
                if ((crp->crp_payload_length % exf->blocksize) != 0)
                        return (EINVAL);

                blks = exf->blocksize;
        } else
                blks = exf->native_blocksize;

        if (exf == &enc_xform_aes_icm &&
            (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
                return (EINVAL);

        if (crp->crp_cipher_key != NULL) {
                error = exf->setkey(sw->sw_kschedule,
                    crp->crp_cipher_key, csp->csp_cipher_klen);
                if (error)
                        return (error);
        }

        crypto_read_iv(crp, iv);

        if (exf->reinit) {
                /*
                 * xforms that provide a reinit method perform all IV
                 * handling themselves.
                 */
                exf->reinit(sw->sw_kschedule, iv, csp->csp_ivlen);
        }

        ivp = iv;

        crypto_cursor_init(&cc_in, &crp->crp_buf);
        crypto_cursor_advance(&cc_in, crp->crp_payload_start);
        inblk = crypto_cursor_segment(&cc_in, &inlen);
        if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
                crypto_cursor_init(&cc_out, &crp->crp_obuf);
                crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
        } else
                cc_out = cc_in;
        outblk = crypto_cursor_segment(&cc_out, &outlen);

        resid = crp->crp_payload_length;
        encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

        /*
         * Loop through encrypting blocks.  'inlen' is the remaining
         * length of the current segment in the input buffer.
         * 'outlen' is the remaining length of the current segment in
         * the output buffer.
         */
        while (resid >= blks) {
                /*
                 * If the current block is not contained within the
                 * current input/output segment, use 'blk' as a local
                 * buffer.
                 */
                if (inlen < blks) {
                        crypto_cursor_copydata(&cc_in, blks, blk);
                        inblk = blk;
                }
                if (outlen < blks)
                        outblk = blk;

                /*
                 * Ciphers without a 'reinit' hook are assumed to be
                 * used in CBC mode where the chaining is done here.
                 */
                if (exf->reinit != NULL) {
                        if (encrypting)
                                exf->encrypt(sw->sw_kschedule, inblk, outblk);
                        else
                                exf->decrypt(sw->sw_kschedule, inblk, outblk);
                } else if (encrypting) {
                        /* XOR with previous block */
                        for (i = 0; i < blks; i++)
                                outblk[i] = inblk[i] ^ ivp[i];

                        exf->encrypt(sw->sw_kschedule, outblk, outblk);

                        /*
                         * Keep encrypted block for XOR'ing
                         * with next block
                         */
                        memcpy(iv, outblk, blks);
                        ivp = iv;
                } else {        /* decrypt */
                        /*
                         * Keep encrypted block for XOR'ing
                         * with next block
                         */
                        nivp = (ivp == iv) ? iv2 : iv;
                        memcpy(nivp, inblk, blks);

                        exf->decrypt(sw->sw_kschedule, inblk, outblk);

                        /* XOR with previous block */
                        for (i = 0; i < blks; i++)
                                outblk[i] ^= ivp[i];

                        ivp = nivp;
                }

                if (inlen < blks) {
                        inblk = crypto_cursor_segment(&cc_in, &inlen);
                } else {
                        crypto_cursor_advance(&cc_in, blks);
                        inlen -= blks;
                        inblk += blks;
                }

                if (outlen < blks) {
                        crypto_cursor_copyback(&cc_out, blks, blk);
                        outblk = crypto_cursor_segment(&cc_out, &outlen);
                } else {
                        crypto_cursor_advance(&cc_out, blks);
                        outlen -= blks;
                        outblk += blks;
                }

                resid -= blks;
        }

        /* Handle trailing partial block for stream ciphers. */
        if (resid > 0) {
                KASSERT(exf->native_blocksize != 0,
                    ("%s: partial block of %d bytes for cipher %s",
                    __func__, resid, exf->name));
                KASSERT(exf->reinit != NULL,
                    ("%s: partial block cipher %s without reinit hook",
                    __func__, exf->name));
                KASSERT(resid < blks, ("%s: partial block too big", __func__));

                inblk = crypto_cursor_segment(&cc_in, &inlen);
                outblk = crypto_cursor_segment(&cc_out, &outlen);
                if (inlen < resid) {
                        crypto_cursor_copydata(&cc_in, resid, blk);
                        inblk = blk;
                }
                if (outlen < resid)
                        outblk = blk;
                if (encrypting)
                        exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
                            resid);
                else
                        exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
                            resid);
                if (outlen < resid)
                        crypto_cursor_copyback(&cc_out, resid, blk);
        }

        explicit_bzero(blk, sizeof(blk));
        explicit_bzero(iv, sizeof(iv));
        explicit_bzero(iv2, sizeof(iv2));
        return (0);
}

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

        switch (axf->type) {
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_224_HMAC:
        case CRYPTO_SHA2_256_HMAC:
        case CRYPTO_SHA2_384_HMAC:
        case CRYPTO_SHA2_512_HMAC:
        case CRYPTO_NULL_HMAC:
        case CRYPTO_RIPEMD160_HMAC:
                hmac_init_ipad(axf, key, klen, sw->sw_ictx);
                hmac_init_opad(axf, key, klen, sw->sw_octx);
                break;
        case CRYPTO_POLY1305:
        case CRYPTO_BLAKE2B:
        case CRYPTO_BLAKE2S:
                axf->Setkey(sw->sw_ictx, key, klen);
                axf->Init(sw->sw_ictx);
                break;
        default:
                panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
        }
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
        u_char aalg[HASH_MAX_LEN];
        const struct crypto_session_params *csp;
        struct swcr_auth *sw;
        struct auth_hash *axf;
        union authctx ctx;
        int err;

        sw = &ses->swcr_auth;

        axf = sw->sw_axf;

        csp = crypto_get_params(crp->crp_session);
        if (crp->crp_auth_key != NULL) {
                swcr_authprepare(axf, sw, crp->crp_auth_key,
                    csp->csp_auth_klen);
        }

        bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

        if (crp->crp_aad != NULL)
                err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
        else
                err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
                    axf->Update, &ctx);
        if (err)
                goto out;

        if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
            CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
                err = crypto_apply_buf(&crp->crp_obuf,
                    crp->crp_payload_output_start, crp->crp_payload_length,
                    axf->Update, &ctx);
        else
                err = crypto_apply(crp, crp->crp_payload_start,
                    crp->crp_payload_length, axf->Update, &ctx);
        if (err)
                goto out;

        if (csp->csp_flags & CSP_F_ESN)
                axf->Update(&ctx, crp->crp_esn, 4);

        axf->Final(aalg, &ctx);
        if (sw->sw_octx != NULL) {
                bcopy(sw->sw_octx, &ctx, axf->ctxsize);
                axf->Update(&ctx, aalg, axf->hashsize);
                axf->Final(aalg, &ctx);
        }

        if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
                u_char uaalg[HASH_MAX_LEN];

                crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
                if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
                        err = EBADMSG;
                explicit_bzero(uaalg, sizeof(uaalg));
        } else {
                /* Inject the authentication data */
                crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
        }
        explicit_bzero(aalg, sizeof(aalg));
out:
        explicit_bzero(&ctx, sizeof(ctx));
        return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);   /* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);      /* GCM: associated data <= 2^64-1 */

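/*
 * Compute or verify a GMAC digest over the request payload using the
 * session's AES key; used for CRYPTO_AES_NIST_GMAC digest sessions.
 */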
static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
        uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
        u_char *blk = (u_char *)blkbuf;
        u_char tag[GMAC_DIGEST_LEN];
        u_char iv[AES_BLOCK_LEN];
        struct crypto_buffer_cursor cc;
        const u_char *inblk;
        union authctx ctx;
        struct swcr_auth *swa;
        struct auth_hash *axf;
        uint32_t *blkp;
        size_t len;
        int blksz, error, ivlen, resid;

        swa = &ses->swcr_auth;
        axf = swa->sw_axf;

        bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
        blksz = GMAC_BLOCK_LEN;
        KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
            __func__));

        /* Initialize the IV */
        ivlen = AES_GCM_IV_LEN;
        crypto_read_iv(crp, iv);

        axf->Reinit(&ctx, iv, ivlen);
        crypto_cursor_init(&cc, &crp->crp_buf);
        crypto_cursor_advance(&cc, crp->crp_payload_start);
        for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
                inblk = crypto_cursor_segment(&cc, &len);
                if (len >= blksz) {
                        len = rounddown(MIN(len, resid), blksz);
                        crypto_cursor_advance(&cc, len);
                } else {
                        len = blksz;
                        crypto_cursor_copydata(&cc, len, blk);
                        inblk = blk;
                }
                axf->Update(&ctx, inblk, len);
        }
        if (resid > 0) {
                memset(blk, 0, blksz);
                crypto_cursor_copydata(&cc, resid, blk);
                axf->Update(&ctx, blk, blksz);
        }

        /* length block */
        memset(blk, 0, blksz);
        blkp = (uint32_t *)blk + 1;
        *blkp = htobe32(crp->crp_payload_length * 8);
        axf->Update(&ctx, blk, blksz);

        /* Finalize MAC */
        axf->Final(tag, &ctx);

        error = 0;
        if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
                u_char tag2[GMAC_DIGEST_LEN];

                crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
                    tag2);
                if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
                        error = EBADMSG;
                explicit_bzero(tag2, sizeof(tag2));
        } else {
                /* Inject the authentication data */
                crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
        }
        explicit_bzero(blkbuf, sizeof(blkbuf));
        explicit_bzero(tag, sizeof(tag));
        explicit_bzero(iv, sizeof(iv));
        return (error);
}

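/*
 * Perform AES-GCM AEAD: authenticate the AAD and payload, and encrypt
 * or (only after the tag has been verified) decrypt the payload,
 * either in place or into the separate output buffer.
 */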
static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
        uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
        u_char *blk = (u_char *)blkbuf;
        u_char tag[GMAC_DIGEST_LEN];
        struct crypto_buffer_cursor cc_in, cc_out;
        const u_char *inblk;
        u_char *outblk;
        union authctx ctx;
        struct swcr_auth *swa;
        struct swcr_encdec *swe;
        struct auth_hash *axf;
        struct enc_xform *exf;
        uint32_t *blkp;
        size_t len;
        int blksz, error, ivlen, r, resid;

        swa = &ses->swcr_auth;
        axf = swa->sw_axf;

        bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
        blksz = GMAC_BLOCK_LEN;
        KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
            __func__));

        swe = &ses->swcr_encdec;
        exf = swe->sw_exf;
        KASSERT(axf->blocksize == exf->native_blocksize,
            ("%s: blocksize mismatch", __func__));

        if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
                return (EINVAL);

        ivlen = AES_GCM_IV_LEN;

        /* Supply MAC with IV */
        axf->Reinit(&ctx, crp->crp_iv, ivlen);

        /* Supply MAC with AAD */
        if (crp->crp_aad != NULL) {
                len = rounddown(crp->crp_aad_length, blksz);
                if (len != 0)
                        axf->Update(&ctx, crp->crp_aad, len);
                if (crp->crp_aad_length != len) {
                        memset(blk, 0, blksz);
                        memcpy(blk, (char *)crp->crp_aad + len,
                            crp->crp_aad_length - len);
                        axf->Update(&ctx, blk, blksz);
                }
        } else {
                crypto_cursor_init(&cc_in, &crp->crp_buf);
                crypto_cursor_advance(&cc_in, crp->crp_aad_start);
                for (resid = crp->crp_aad_length; resid >= blksz;
                    resid -= len) {
                        inblk = crypto_cursor_segment(&cc_in, &len);
                        if (len >= blksz) {
                                len = rounddown(MIN(len, resid), blksz);
                                crypto_cursor_advance(&cc_in, len);
                        } else {
                                len = blksz;
                                crypto_cursor_copydata(&cc_in, len, blk);
                                inblk = blk;
                        }
                        axf->Update(&ctx, inblk, len);
                }
                if (resid > 0) {
                        memset(blk, 0, blksz);
                        crypto_cursor_copydata(&cc_in, resid, blk);
                        axf->Update(&ctx, blk, blksz);
                }
        }

        if (crp->crp_cipher_key != NULL)
                exf->setkey(swe->sw_kschedule, crp->crp_cipher_key,
                    crypto_get_params(crp->crp_session)->csp_cipher_klen);
        exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen);

        /* Do encryption with MAC */
        crypto_cursor_init(&cc_in, &crp->crp_buf);
        crypto_cursor_advance(&cc_in, crp->crp_payload_start);
        if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
                crypto_cursor_init(&cc_out, &crp->crp_obuf);
                crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
        } else
                cc_out = cc_in;
        for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
                inblk = crypto_cursor_segment(&cc_in, &len);
                if (len < blksz) {
                        crypto_cursor_copydata(&cc_in, blksz, blk);
                        inblk = blk;
                } else {
                        crypto_cursor_advance(&cc_in, blksz);
                }
                if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                        outblk = crypto_cursor_segment(&cc_out, &len);
                        if (len < blksz)
                                outblk = blk;
                        exf->encrypt(swe->sw_kschedule, inblk, outblk);
                        axf->Update(&ctx, outblk, blksz);
                        if (outblk == blk)
                                crypto_cursor_copyback(&cc_out, blksz, blk);
                        else
                                crypto_cursor_advance(&cc_out, blksz);
                } else {
                        axf->Update(&ctx, inblk, blksz);
                }
        }
        if (resid > 0) {
                crypto_cursor_copydata(&cc_in, resid, blk);
                if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                        exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
                        crypto_cursor_copyback(&cc_out, resid, blk);
                }
                axf->Update(&ctx, blk, resid);
        }

        /* length block */
        memset(blk, 0, blksz);
        blkp = (uint32_t *)blk + 1;
        *blkp = htobe32(crp->crp_aad_length * 8);
        blkp = (uint32_t *)blk + 3;
        *blkp = htobe32(crp->crp_payload_length * 8);
        axf->Update(&ctx, blk, blksz);

        /* Finalize MAC */
        axf->Final(tag, &ctx);

        /* Validate tag */
        error = 0;
        if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                u_char tag2[GMAC_DIGEST_LEN];

                crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

                r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
                explicit_bzero(tag2, sizeof(tag2));
                if (r != 0) {
                        error = EBADMSG;
                        goto out;
                }

                /* tag matches, decrypt data */
                crypto_cursor_init(&cc_in, &crp->crp_buf);
                crypto_cursor_advance(&cc_in, crp->crp_payload_start);
                for (resid = crp->crp_payload_length; resid > blksz;
                    resid -= blksz) {
                        inblk = crypto_cursor_segment(&cc_in, &len);
                        if (len < blksz) {
                                crypto_cursor_copydata(&cc_in, blksz, blk);
                                inblk = blk;
                        } else
                                crypto_cursor_advance(&cc_in, blksz);
                        outblk = crypto_cursor_segment(&cc_out, &len);
                        if (len < blksz)
                                outblk = blk;
                        exf->decrypt(swe->sw_kschedule, inblk, outblk);
                        if (outblk == blk)
                                crypto_cursor_copyback(&cc_out, blksz, blk);
                        else
                                crypto_cursor_advance(&cc_out, blksz);
                }
                if (resid > 0) {
                        crypto_cursor_copydata(&cc_in, resid, blk);
                        exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
                        crypto_cursor_copyback(&cc_out, resid, blk);
                }
        } else {
                /* Inject the authentication data */
                crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
        }

out:
        explicit_bzero(blkbuf, sizeof(blkbuf));
        explicit_bzero(tag, sizeof(tag));

        return (error);
}

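/*
 * Construct the 16-byte B_0 block that starts the CCM CBC-MAC input
 * (see RFC 3610).  As a worked example: with a 12-byte nonce (so
 * L = 3), a 16-byte tag, and a non-empty AAD, the flags byte is
 * (1 << 6) | (((16 - 2) / 2) << 3) | (3 - 1) = 0x7a.
 */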
static void
build_ccm_b0(const char *nonce, u_int nonce_length, u_int aad_length,
    u_int data_length, u_int tag_length, uint8_t *b0)
{
        uint8_t *bp;
        uint8_t flags, L;

        KASSERT(nonce_length >= 7 && nonce_length <= 13,
            ("nonce_length must be between 7 and 13 bytes"));

        /*
         * Need to determine the L field value.  This is the number of
         * bytes needed to specify the length of the message; the length
         * is whatever is left in the 16 bytes after specifying flags and
         * the nonce.
         */
        L = 15 - nonce_length;

        flags = ((aad_length > 0) << 6) +
            (((tag_length - 2) / 2) << 3) +
            L - 1;

        /*
         * Now we need to set up the first block, which has flags, nonce,
         * and the message length.
         */
        b0[0] = flags;
        memcpy(b0 + 1, nonce, nonce_length);
        bp = b0 + 1 + nonce_length;

        /* Copy the L bytes of data_length, most significant byte first. */
        for (uint8_t *dst = b0 + CCM_CBC_BLOCK_LEN - 1; dst >= bp; dst--) {
                *dst = data_length;
                data_length >>= 8;
        }
}

/* NB: OCF only supports AAD lengths < 2^32. */
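/*
 * Encode the AAD length prefix for the CBC-MAC input: lengths below
 * 0xff00 use the 2-byte big-endian form; anything larger uses the
 * 6-byte form 0xff 0xfe || 32-bit big-endian length.
 */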
static int
build_ccm_aad_length(u_int aad_length, uint8_t *blk)
{
        if (aad_length < ((1 << 16) - (1 << 8))) {
                be16enc(blk, aad_length);
                return (sizeof(uint16_t));
        } else {
                blk[0] = 0xff;
                blk[1] = 0xfe;
                be32enc(blk + 2, aad_length);
                return (2 + sizeof(uint32_t));
        }
}

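/*
 * Compute or verify a CCM CBC-MAC digest over the payload alone
 * (CRYPTO_AES_CCM_CBC_MAC digest sessions); the payload is fed in as
 * the AAD of a CCM message that carries no encrypted data.
 */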
static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
        u_char iv[AES_BLOCK_LEN];
        u_char blk[CCM_CBC_BLOCK_LEN];
        u_char tag[AES_CBC_MAC_HASH_LEN];
        union authctx ctx;
        const struct crypto_session_params *csp;
        struct swcr_auth *swa;
        struct auth_hash *axf;
        int error, ivlen, len;

        csp = crypto_get_params(crp->crp_session);
        swa = &ses->swcr_auth;
        axf = swa->sw_axf;

        bcopy(swa->sw_ictx, &ctx, axf->ctxsize);

        /* Initialize the IV */
        ivlen = csp->csp_ivlen;
        crypto_read_iv(crp, iv);

        /* Supply MAC with IV */
        axf->Reinit(&ctx, crp->crp_iv, ivlen);

        /* Supply MAC with b0. */
        build_ccm_b0(crp->crp_iv, ivlen, crp->crp_payload_length, 0,
            swa->sw_mlen, blk);
        axf->Update(&ctx, blk, CCM_CBC_BLOCK_LEN);

        len = build_ccm_aad_length(crp->crp_payload_length, blk);
        axf->Update(&ctx, blk, len);

        crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
            axf->Update, &ctx);

        /* Finalize MAC */
        axf->Final(tag, &ctx);

        error = 0;
        if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
                u_char tag2[AES_CBC_MAC_HASH_LEN];

                crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
                    tag2);
                if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
                        error = EBADMSG;
                explicit_bzero(tag2, sizeof(tag2));
        } else {
                /* Inject the authentication data */
                crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
        }
        explicit_bzero(tag, sizeof(tag));
        explicit_bzero(blk, sizeof(blk));
        explicit_bzero(iv, sizeof(iv));
        return (error);
}

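/*
 * Perform AES-CCM AEAD.  Because the CBC-MAC is computed over the
 * plaintext, decryption runs the cipher twice: once to generate the
 * tag for comparison and again, after the tag verifies, to produce
 * the output.
 */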
static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
        const struct crypto_session_params *csp;
        uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
        u_char *blk = (u_char *)blkbuf;
        u_char tag[AES_CBC_MAC_HASH_LEN];
        struct crypto_buffer_cursor cc_in, cc_out;
        const u_char *inblk;
        u_char *outblk;
        union authctx ctx;
        struct swcr_auth *swa;
        struct swcr_encdec *swe;
        struct auth_hash *axf;
        struct enc_xform *exf;
        size_t len;
        int blksz, error, ivlen, r, resid;

        csp = crypto_get_params(crp->crp_session);
        swa = &ses->swcr_auth;
        axf = swa->sw_axf;

        bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
        blksz = AES_BLOCK_LEN;
        KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
            __func__));

        swe = &ses->swcr_encdec;
        exf = swe->sw_exf;
        KASSERT(axf->blocksize == exf->native_blocksize,
            ("%s: blocksize mismatch", __func__));

        if (crp->crp_payload_length > ccm_max_payload_length(csp))
                return (EMSGSIZE);

        if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
                return (EINVAL);

        ivlen = csp->csp_ivlen;

        /* Supply MAC with IV */
        axf->Reinit(&ctx, crp->crp_iv, ivlen);

        /* Supply MAC with b0. */
        _Static_assert(sizeof(blkbuf) >= CCM_CBC_BLOCK_LEN,
            "blkbuf too small for b0");
        build_ccm_b0(crp->crp_iv, ivlen, crp->crp_aad_length,
            crp->crp_payload_length, swa->sw_mlen, blk);
        axf->Update(&ctx, blk, CCM_CBC_BLOCK_LEN);

        /* Supply MAC with AAD */
        if (crp->crp_aad_length != 0) {
                len = build_ccm_aad_length(crp->crp_aad_length, blk);
                axf->Update(&ctx, blk, len);
                if (crp->crp_aad != NULL)
                        axf->Update(&ctx, crp->crp_aad,
                            crp->crp_aad_length);
                else
                        crypto_apply(crp, crp->crp_aad_start,
                            crp->crp_aad_length, axf->Update, &ctx);

                /* Pad the AAD (including length field) to a full block. */
                len = (len + crp->crp_aad_length) % CCM_CBC_BLOCK_LEN;
                if (len != 0) {
                        len = CCM_CBC_BLOCK_LEN - len;
                        memset(blk, 0, CCM_CBC_BLOCK_LEN);
                        axf->Update(&ctx, blk, len);
                }
        }

        if (crp->crp_cipher_key != NULL)
                exf->setkey(swe->sw_kschedule, crp->crp_cipher_key,
                    crypto_get_params(crp->crp_session)->csp_cipher_klen);
        exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen);

        /* Do encryption/decryption with MAC */
        crypto_cursor_init(&cc_in, &crp->crp_buf);
        crypto_cursor_advance(&cc_in, crp->crp_payload_start);
        if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
                crypto_cursor_init(&cc_out, &crp->crp_obuf);
                crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
        } else
                cc_out = cc_in;
        for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
                inblk = crypto_cursor_segment(&cc_in, &len);
                if (len < blksz) {
                        crypto_cursor_copydata(&cc_in, blksz, blk);
                        inblk = blk;
                } else
                        crypto_cursor_advance(&cc_in, blksz);
                if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                        outblk = crypto_cursor_segment(&cc_out, &len);
                        if (len < blksz)
                                outblk = blk;
                        axf->Update(&ctx, inblk, blksz);
                        exf->encrypt(swe->sw_kschedule, inblk, outblk);
                        if (outblk == blk)
                                crypto_cursor_copyback(&cc_out, blksz, blk);
                        else
                                crypto_cursor_advance(&cc_out, blksz);
                } else {
                        /*
                         * One of the problems with CCM+CBC is that
                         * the authentication is done on the
                         * unencrypted data.  As a result, we have to
                         * decrypt the data twice: once to generate
                         * the tag and a second time after the tag is
                         * verified.
                         */
                        exf->decrypt(swe->sw_kschedule, inblk, blk);
                        axf->Update(&ctx, blk, blksz);
                }
        }
        if (resid > 0) {
                crypto_cursor_copydata(&cc_in, resid, blk);
                if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                        axf->Update(&ctx, blk, resid);
                        exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
                        crypto_cursor_copyback(&cc_out, resid, blk);
                } else {
                        exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
                        axf->Update(&ctx, blk, resid);
                }
        }

        /* Finalize MAC */
        axf->Final(tag, &ctx);

        /* Validate tag */
        error = 0;
        if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                u_char tag2[AES_CBC_MAC_HASH_LEN];

                crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
                    tag2);

                r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
                explicit_bzero(tag2, sizeof(tag2));
                if (r != 0) {
                        error = EBADMSG;
                        goto out;
                }

                /* tag matches, decrypt data */
                exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen);
                crypto_cursor_init(&cc_in, &crp->crp_buf);
                crypto_cursor_advance(&cc_in, crp->crp_payload_start);
                for (resid = crp->crp_payload_length; resid > blksz;
                    resid -= blksz) {
                        inblk = crypto_cursor_segment(&cc_in, &len);
                        if (len < blksz) {
                                crypto_cursor_copydata(&cc_in, blksz, blk);
                                inblk = blk;
                        } else
                                crypto_cursor_advance(&cc_in, blksz);
                        outblk = crypto_cursor_segment(&cc_out, &len);
                        if (len < blksz)
                                outblk = blk;
                        exf->decrypt(swe->sw_kschedule, inblk, outblk);
                        if (outblk == blk)
                                crypto_cursor_copyback(&cc_out, blksz, blk);
                        else
                                crypto_cursor_advance(&cc_out, blksz);
                }
                if (resid > 0) {
                        crypto_cursor_copydata(&cc_in, resid, blk);
                        exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
                        crypto_cursor_copyback(&cc_out, resid, blk);
                }
        } else {
                /* Inject the authentication data */
                crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
        }

out:
        explicit_bzero(blkbuf, sizeof(blkbuf));
        explicit_bzero(tag, sizeof(tag));
        return (error);
}

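/*
 * Perform ChaCha20-Poly1305 AEAD (RFC 8439): a one-time Poly1305 key
 * is derived from the cipher key and nonce for every request, and the
 * AAD and ciphertext are MAC'ed with zero padding to 16 bytes plus a
 * trailing block holding the two lengths.
 */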
static int
swcr_chacha20_poly1305(struct swcr_session *ses, struct cryptop *crp)
{
        const struct crypto_session_params *csp;
        uint64_t blkbuf[howmany(CHACHA20_NATIVE_BLOCK_LEN, sizeof(uint64_t))];
        u_char *blk = (u_char *)blkbuf;
        u_char tag[POLY1305_HASH_LEN];
        struct crypto_buffer_cursor cc_in, cc_out;
        const u_char *inblk;
        u_char *outblk;
        uint64_t *blkp;
        union authctx ctx;
        struct swcr_auth *swa;
        struct swcr_encdec *swe;
        struct auth_hash *axf;
        struct enc_xform *exf;
        size_t len;
        int blksz, error, r, resid;

        swa = &ses->swcr_auth;
        axf = swa->sw_axf;

        swe = &ses->swcr_encdec;
        exf = swe->sw_exf;
        blksz = exf->native_blocksize;
        KASSERT(blksz <= sizeof(blkbuf), ("%s: blocksize mismatch", __func__));

        if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
                return (EINVAL);

        csp = crypto_get_params(crp->crp_session);

        /* Generate Poly1305 key. */
        if (crp->crp_cipher_key != NULL)
                axf->Setkey(&ctx, crp->crp_cipher_key, csp->csp_cipher_klen);
        else
                axf->Setkey(&ctx, csp->csp_cipher_key, csp->csp_cipher_klen);
        axf->Reinit(&ctx, crp->crp_iv, csp->csp_ivlen);

        /* Supply MAC with AAD */
        if (crp->crp_aad != NULL)
                axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
        else
                crypto_apply(crp, crp->crp_aad_start,
                    crp->crp_aad_length, axf->Update, &ctx);
        if (crp->crp_aad_length % 16 != 0) {
                /* padding1 */
                memset(blk, 0, 16);
                axf->Update(&ctx, blk, 16 - crp->crp_aad_length % 16);
        }

        if (crp->crp_cipher_key != NULL)
                exf->setkey(swe->sw_kschedule, crp->crp_cipher_key,
                    csp->csp_cipher_klen);
        exf->reinit(swe->sw_kschedule, crp->crp_iv, csp->csp_ivlen);

        /* Do encryption with MAC */
        crypto_cursor_init(&cc_in, &crp->crp_buf);
        crypto_cursor_advance(&cc_in, crp->crp_payload_start);
        if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
                crypto_cursor_init(&cc_out, &crp->crp_obuf);
                crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
        } else
                cc_out = cc_in;
        for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
                inblk = crypto_cursor_segment(&cc_in, &len);
                if (len < blksz) {
                        crypto_cursor_copydata(&cc_in, blksz, blk);
                        inblk = blk;
                } else
                        crypto_cursor_advance(&cc_in, blksz);
                if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                        outblk = crypto_cursor_segment(&cc_out, &len);
                        if (len < blksz)
                                outblk = blk;
                        exf->encrypt(swe->sw_kschedule, inblk, outblk);
                        axf->Update(&ctx, outblk, blksz);
                        if (outblk == blk)
                                crypto_cursor_copyback(&cc_out, blksz, blk);
                        else
                                crypto_cursor_advance(&cc_out, blksz);
                } else {
                        axf->Update(&ctx, inblk, blksz);
                }
        }
        if (resid > 0) {
                crypto_cursor_copydata(&cc_in, resid, blk);
                if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                        exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
                        crypto_cursor_copyback(&cc_out, resid, blk);
                }
                axf->Update(&ctx, blk, resid);
                if (resid % 16 != 0) {
                        /* padding2 */
                        memset(blk, 0, 16);
                        axf->Update(&ctx, blk, 16 - resid % 16);
                }
        }

        /* lengths */
        blkp = (uint64_t *)blk;
        blkp[0] = htole64(crp->crp_aad_length);
        blkp[1] = htole64(crp->crp_payload_length);
        axf->Update(&ctx, blk, sizeof(uint64_t) * 2);

        /* Finalize MAC */
        axf->Final(tag, &ctx);

        /* Validate tag */
        error = 0;
        if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                u_char tag2[POLY1305_HASH_LEN];

                crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

                r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
                explicit_bzero(tag2, sizeof(tag2));
                if (r != 0) {
                        error = EBADMSG;
                        goto out;
                }

                /* tag matches, decrypt data */
                crypto_cursor_init(&cc_in, &crp->crp_buf);
                crypto_cursor_advance(&cc_in, crp->crp_payload_start);
                for (resid = crp->crp_payload_length; resid > blksz;
                    resid -= blksz) {
                        inblk = crypto_cursor_segment(&cc_in, &len);
                        if (len < blksz) {
                                crypto_cursor_copydata(&cc_in, blksz, blk);
                                inblk = blk;
                        } else
                                crypto_cursor_advance(&cc_in, blksz);
                        outblk = crypto_cursor_segment(&cc_out, &len);
                        if (len < blksz)
                                outblk = blk;
                        exf->decrypt(swe->sw_kschedule, inblk, outblk);
                        if (outblk == blk)
                                crypto_cursor_copyback(&cc_out, blksz, blk);
                        else
                                crypto_cursor_advance(&cc_out, blksz);
                }
                if (resid > 0) {
                        crypto_cursor_copydata(&cc_in, resid, blk);
                        exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
                        crypto_cursor_copyback(&cc_out, resid, blk);
                }
        } else {
                /* Inject the authentication data */
                crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
        }

out:
        explicit_bzero(blkbuf, sizeof(blkbuf));
        explicit_bzero(tag, sizeof(tag));
        explicit_bzero(&ctx, sizeof(ctx));
        return (error);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
        int error;

        if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                error = swcr_encdec(ses, crp);
                if (error == 0)
                        error = swcr_authcompute(ses, crp);
        } else {
                error = swcr_authcompute(ses, crp);
                if (error == 0)
                        error = swcr_encdec(ses, crp);
        }
        return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
        uint8_t *data, *out;
        struct comp_algo *cxf;
        int adj;
        uint32_t result;

        cxf = ses->swcr_compdec.sw_cxf;

        /*
         * The compression routines operate on a single contiguous
         * buffer, so if the payload is not already contiguous (e.g. it
         * is spread across mbufs), copy it into a temporary buffer
         * first.
         */

        data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
        if (data == NULL)
                return (EINVAL);
        crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
            data);

        if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
                result = cxf->compress(data, crp->crp_payload_length, &out);
        else
                result = cxf->decompress(data, crp->crp_payload_length, &out);

        free(data, M_CRYPTO_DATA);
        if (result == 0)
                return (EINVAL);
        crp->crp_olen = result;

        /* Check the compressed size when doing compression */
        if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
                if (result >= crp->crp_payload_length) {
                        /* Compression was useless, we lost time */
                        free(out, M_CRYPTO_DATA);
                        return (0);
                }
        }

        /*
         * Copy back the (de)compressed data.  For mbufs, m_copyback()
         * extends the chain as necessary.
         */
        crypto_copyback(crp, crp->crp_payload_start, result, out);
        if (result < crp->crp_payload_length) {
                switch (crp->crp_buf.cb_type) {
                case CRYPTO_BUF_MBUF:
                case CRYPTO_BUF_SINGLE_MBUF:
                        adj = result - crp->crp_payload_length;
                        m_adj(crp->crp_buf.cb_mbuf, adj);
                        break;
                case CRYPTO_BUF_UIO: {
                        struct uio *uio = crp->crp_buf.cb_uio;
                        int ind;

                        adj = crp->crp_payload_length - result;
                        ind = uio->uio_iovcnt - 1;

                        while (adj > 0 && ind >= 0) {
                                if (adj < uio->uio_iov[ind].iov_len) {
                                        uio->uio_iov[ind].iov_len -= adj;
                                        break;
                                }

                                adj -= uio->uio_iov[ind].iov_len;
                                uio->uio_iov[ind].iov_len = 0;
                                ind--;
                                uio->uio_iovcnt--;
                        }
                }
                        break;
                case CRYPTO_BUF_VMPAGE:
                        adj = crp->crp_payload_length - result;
                        crp->crp_buf.cb_vm_page_len -= adj;
                        break;
                default:
                        break;
                }
        }
        free(out, M_CRYPTO_DATA);
        return (0);
}

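/*
 * Allocate the cipher's key schedule and, if the key is already known,
 * expand it now; otherwise key expansion is deferred until a request
 * supplies the key.
 */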
static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
        struct swcr_encdec *swe;
        struct enc_xform *txf;
        int error;

        swe = &ses->swcr_encdec;
        txf = crypto_cipher(csp);
        if (txf->ctxsize != 0) {
                swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
                    M_NOWAIT);
                if (swe->sw_kschedule == NULL)
                        return (ENOMEM);
        }
        if (csp->csp_cipher_key != NULL) {
                error = txf->setkey(swe->sw_kschedule,
                    csp->csp_cipher_key, csp->csp_cipher_klen);
                if (error)
                        return (error);
        }
        swe->sw_exf = txf;
        return (0);
}

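/*
 * Allocate and initialize the hash context(s) for an auth transform
 * and, for digest-mode sessions, select the matching process handler.
 */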
static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
        struct swcr_auth *swa;
        struct auth_hash *axf;

        swa = &ses->swcr_auth;

        axf = crypto_auth_hash(csp);
        swa->sw_axf = axf;
        if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
                return (EINVAL);
        if (csp->csp_auth_mlen == 0)
                swa->sw_mlen = axf->hashsize;
        else
                swa->sw_mlen = csp->csp_auth_mlen;
        swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
        if (swa->sw_ictx == NULL)
                return (ENOBUFS);

        switch (csp->csp_auth_alg) {
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_224_HMAC:
        case CRYPTO_SHA2_256_HMAC:
        case CRYPTO_SHA2_384_HMAC:
        case CRYPTO_SHA2_512_HMAC:
        case CRYPTO_NULL_HMAC:
        case CRYPTO_RIPEMD160_HMAC:
                swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
                    M_NOWAIT);
                if (swa->sw_octx == NULL)
                        return (ENOBUFS);

                if (csp->csp_auth_key != NULL) {
                        swcr_authprepare(axf, swa, csp->csp_auth_key,
                            csp->csp_auth_klen);
                }

                if (csp->csp_mode == CSP_MODE_DIGEST)
                        ses->swcr_process = swcr_authcompute;
                break;
        case CRYPTO_SHA1:
        case CRYPTO_SHA2_224:
        case CRYPTO_SHA2_256:
        case CRYPTO_SHA2_384:
        case CRYPTO_SHA2_512:
                axf->Init(swa->sw_ictx);
                if (csp->csp_mode == CSP_MODE_DIGEST)
                        ses->swcr_process = swcr_authcompute;
                break;
        case CRYPTO_AES_NIST_GMAC:
                axf->Init(swa->sw_ictx);
                axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
                    csp->csp_auth_klen);
                if (csp->csp_mode == CSP_MODE_DIGEST)
                        ses->swcr_process = swcr_gmac;
                break;
        case CRYPTO_POLY1305:
        case CRYPTO_BLAKE2B:
        case CRYPTO_BLAKE2S:
                /*
                 * Blake2b and Blake2s support an optional key but do
                 * not require one.
                 */
                if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
                        axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
                            csp->csp_auth_klen);
                axf->Init(swa->sw_ictx);
                if (csp->csp_mode == CSP_MODE_DIGEST)
                        ses->swcr_process = swcr_authcompute;
                break;
        case CRYPTO_AES_CCM_CBC_MAC:
                axf->Init(swa->sw_ictx);
                axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
                    csp->csp_auth_klen);
                if (csp->csp_mode == CSP_MODE_DIGEST)
                        ses->swcr_process = swcr_ccm_cbc_mac;
                break;
        }

        return (0);
}

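/*
 * Set up an AES-GCM AEAD session: select the GMAC transform matching
 * the AES key size, then set up the cipher side.
 */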
static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
        struct swcr_auth *swa;
        struct auth_hash *axf;

        if (csp->csp_ivlen != AES_GCM_IV_LEN)
                return (EINVAL);

        /* First, setup the auth side. */
        swa = &ses->swcr_auth;
        switch (csp->csp_cipher_klen * 8) {
        case 128:
                axf = &auth_hash_nist_gmac_aes_128;
                break;
        case 192:
                axf = &auth_hash_nist_gmac_aes_192;
                break;
        case 256:
                axf = &auth_hash_nist_gmac_aes_256;
                break;
        default:
                return (EINVAL);
        }
        swa->sw_axf = axf;
        if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
                return (EINVAL);
        if (csp->csp_auth_mlen == 0)
                swa->sw_mlen = axf->hashsize;
        else
                swa->sw_mlen = csp->csp_auth_mlen;
        swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
        if (swa->sw_ictx == NULL)
                return (ENOBUFS);
        axf->Init(swa->sw_ictx);
        if (csp->csp_cipher_key != NULL)
                axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
                    csp->csp_cipher_klen);

        /* Second, setup the cipher side. */
        return (swcr_setup_cipher(ses, csp));
}

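/*
 * Set up an AES-CCM AEAD session: select the CBC-MAC transform
 * matching the AES key size, then set up the cipher side.
 */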
static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
        struct swcr_auth *swa;
        struct auth_hash *axf;

        /* First, setup the auth side. */
        swa = &ses->swcr_auth;
        switch (csp->csp_cipher_klen * 8) {
        case 128:
                axf = &auth_hash_ccm_cbc_mac_128;
                break;
        case 192:
                axf = &auth_hash_ccm_cbc_mac_192;
                break;
        case 256:
                axf = &auth_hash_ccm_cbc_mac_256;
                break;
        default:
                return (EINVAL);
        }
        swa->sw_axf = axf;
        if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
                return (EINVAL);
        if (csp->csp_auth_mlen == 0)
                swa->sw_mlen = axf->hashsize;
        else
                swa->sw_mlen = csp->csp_auth_mlen;
        swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
        if (swa->sw_ictx == NULL)
                return (ENOBUFS);
        axf->Init(swa->sw_ictx);
        if (csp->csp_cipher_key != NULL)
                axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
                    csp->csp_cipher_klen);

        /* Second, setup the cipher side. */
        return (swcr_setup_cipher(ses, csp));
}

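/*
 * Set up a ChaCha20-Poly1305 AEAD session.  No persistent auth context
 * is allocated because the Poly1305 state is regenerated from the key
 * and nonce on every request.
 */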
static int
swcr_setup_chacha20_poly1305(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
        struct swcr_auth *swa;
        struct auth_hash *axf;

        /* First, setup the auth side. */
        swa = &ses->swcr_auth;
        axf = &auth_hash_chacha20_poly1305;
        swa->sw_axf = axf;
        if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
                return (EINVAL);
        if (csp->csp_auth_mlen == 0)
                swa->sw_mlen = axf->hashsize;
        else
                swa->sw_mlen = csp->csp_auth_mlen;

        /* The auth state is regenerated for each nonce. */

        /* Second, setup the cipher side. */
        return (swcr_setup_cipher(ses, csp));
}

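/*
 * Check whether this driver can handle the requested auth algorithm
 * and its key and IV parameters.
 */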
static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
        struct auth_hash *axf;

        axf = crypto_auth_hash(csp);
        if (axf == NULL)
                return (false);
        switch (csp->csp_auth_alg) {
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_224_HMAC:
        case CRYPTO_SHA2_256_HMAC:
        case CRYPTO_SHA2_384_HMAC:
        case CRYPTO_SHA2_512_HMAC:
        case CRYPTO_NULL_HMAC:
        case CRYPTO_RIPEMD160_HMAC:
                break;
        case CRYPTO_AES_NIST_GMAC:
                switch (csp->csp_auth_klen * 8) {
                case 128:
                case 192:
                case 256:
                        break;
                default:
                        return (false);
                }
                if (csp->csp_auth_key == NULL)
                        return (false);
                if (csp->csp_ivlen != AES_GCM_IV_LEN)
                        return (false);
                break;
        case CRYPTO_POLY1305:
                if (csp->csp_auth_klen != POLY1305_KEY_LEN)
                        return (false);
                break;
        case CRYPTO_AES_CCM_CBC_MAC:
                switch (csp->csp_auth_klen * 8) {
                case 128:
                case 192:
                case 256:
                        break;
                default:
                        return (false);
                }
                if (csp->csp_auth_key == NULL)
                        return (false);
                break;
        }
        return (true);
}

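/*
 * Check whether this driver can handle the requested cipher algorithm
 * with the requested IV length.
 */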
static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
        struct enc_xform *txf;

        txf = crypto_cipher(csp);
        if (txf == NULL)
                return (false);
        if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
            txf->ivsize != csp->csp_ivlen)
                return (false);
        return (true);
}

#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

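/*
 * Validate session parameters for each supported mode.  Returning
 * CRYPTODEV_PROBE_SOFTWARE gives this driver the lowest priority, so
 * any hardware driver that also supports the session will win.
 */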
static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{
        if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
                return (EINVAL);
        switch (csp->csp_mode) {
        case CSP_MODE_COMPRESS:
                switch (csp->csp_cipher_alg) {
                case CRYPTO_DEFLATE_COMP:
                        break;
                default:
                        return (EINVAL);
                }
                break;
        case CSP_MODE_CIPHER:
                switch (csp->csp_cipher_alg) {
                case CRYPTO_AES_NIST_GCM_16:
                case CRYPTO_AES_CCM_16:
                case CRYPTO_CHACHA20_POLY1305:
                        return (EINVAL);
                default:
                        if (!swcr_cipher_supported(csp))
                                return (EINVAL);
                        break;
                }
                break;
        case CSP_MODE_DIGEST:
                if (!swcr_auth_supported(csp))
                        return (EINVAL);
                break;
        case CSP_MODE_AEAD:
                switch (csp->csp_cipher_alg) {
                case CRYPTO_AES_NIST_GCM_16:
                case CRYPTO_AES_CCM_16:
                case CRYPTO_CHACHA20_POLY1305:
                        break;
                default:
                        return (EINVAL);
                }
                break;
        case CSP_MODE_ETA:
                /* AEAD algorithms cannot be used for EtA. */
                switch (csp->csp_cipher_alg) {
                case CRYPTO_AES_NIST_GCM_16:
                case CRYPTO_AES_CCM_16:
                case CRYPTO_CHACHA20_POLY1305:
                        return (EINVAL);
                }
                switch (csp->csp_auth_alg) {
                case CRYPTO_AES_NIST_GMAC:
                case CRYPTO_AES_CCM_CBC_MAC:
                        return (EINVAL);
                }

                if (!swcr_cipher_supported(csp) ||
                    !swcr_auth_supported(csp))
                        return (EINVAL);
                break;
        default:
                return (EINVAL);
        }

        return (CRYPTODEV_PROBE_SOFTWARE);
}
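
/*
 * For reference, a consumer reaches this driver through the OCF
 * session API rather than by calling it directly.  A rough sketch
 * (not taken from this file; error handling omitted) of creating an
 * AES-256-GCM AEAD session that cryptosoft can back:
 *
 *	struct crypto_session_params csp = {
 *		.csp_mode = CSP_MODE_AEAD,
 *		.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16,
 *		.csp_cipher_key = key,		// 32-byte AES key
 *		.csp_cipher_klen = 32,
 *		.csp_ivlen = AES_GCM_IV_LEN,
 *	};
 *	crypto_session_t cses;
 *	error = crypto_newsession(&cses, &csp, CRYPTOCAP_F_SOFTWARE);
 */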

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
        struct swcr_session *ses;
        struct swcr_encdec *swe;
        struct swcr_auth *swa;
        struct comp_algo *cxf;
        int error;

        ses = crypto_get_driver_session(cses);
        mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

        error = 0;
        swe = &ses->swcr_encdec;
        swa = &ses->swcr_auth;
        switch (csp->csp_mode) {
        case CSP_MODE_COMPRESS:
                switch (csp->csp_cipher_alg) {
                case CRYPTO_DEFLATE_COMP:
                        cxf = &comp_algo_deflate;
                        break;
#ifdef INVARIANTS
                default:
                        panic("bad compression algo");
#endif
                }
                ses->swcr_compdec.sw_cxf = cxf;
                ses->swcr_process = swcr_compdec;
                break;
        case CSP_MODE_CIPHER:
                switch (csp->csp_cipher_alg) {
                case CRYPTO_NULL_CBC:
                        ses->swcr_process = swcr_null;
                        break;
#ifdef INVARIANTS
                case CRYPTO_AES_NIST_GCM_16:
                case CRYPTO_AES_CCM_16:
                case CRYPTO_CHACHA20_POLY1305:
                        panic("bad cipher algo");
#endif
                default:
                        error = swcr_setup_cipher(ses, csp);
                        if (error == 0)
                                ses->swcr_process = swcr_encdec;
                }
                break;
        case CSP_MODE_DIGEST:
                error = swcr_setup_auth(ses, csp);
                break;
        case CSP_MODE_AEAD:
                switch (csp->csp_cipher_alg) {
                case CRYPTO_AES_NIST_GCM_16:
                        error = swcr_setup_gcm(ses, csp);
                        if (error == 0)
                                ses->swcr_process = swcr_gcm;
                        break;
                case CRYPTO_AES_CCM_16:
                        error = swcr_setup_ccm(ses, csp);
                        if (error == 0)
                                ses->swcr_process = swcr_ccm;
                        break;
                case CRYPTO_CHACHA20_POLY1305:
                        error = swcr_setup_chacha20_poly1305(ses, csp);
                        if (error == 0)
                                ses->swcr_process = swcr_chacha20_poly1305;
                        break;
#ifdef INVARIANTS
                default:
                        panic("bad aead algo");
#endif
                }
                break;
        case CSP_MODE_ETA:
#ifdef INVARIANTS
                switch (csp->csp_cipher_alg) {
                case CRYPTO_AES_NIST_GCM_16:
                case CRYPTO_AES_CCM_16:
                case CRYPTO_CHACHA20_POLY1305:
                        panic("bad eta cipher algo");
                }
                switch (csp->csp_auth_alg) {
                case CRYPTO_AES_NIST_GMAC:
                case CRYPTO_AES_CCM_CBC_MAC:
                        panic("bad eta auth algo");
                }
#endif

                error = swcr_setup_auth(ses, csp);
                if (error)
                        break;
                if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
                        /* Effectively degrade to digest mode. */
                        ses->swcr_process = swcr_authcompute;
                        break;
                }

                error = swcr_setup_cipher(ses, csp);
                if (error == 0)
                        ses->swcr_process = swcr_eta;
                break;
        default:
                error = EINVAL;
        }

        if (error)
                swcr_freesession(dev, cses);
        return (error);
}

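/*
 * Tear down a session, zeroing any key material before it is freed.
 */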
static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
        struct swcr_session *ses;

        ses = crypto_get_driver_session(cses);

        mtx_destroy(&ses->swcr_lock);

        zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);
        zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
        zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
        struct swcr_session *ses;

        ses = crypto_get_driver_session(crp->crp_session);
        mtx_lock(&ses->swcr_lock);

        crp->crp_etype = ses->swcr_process(ses, crp);

        mtx_unlock(&ses->swcr_lock);
        crypto_done(crp);
        return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
        /* NB: order 10 is so we get attached after h/w devices */
        if (device_find_child(parent, "cryptosoft", -1) == NULL &&
            BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
                panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
        device_set_desc(dev, "software crypto");
        device_quiet(dev);
        return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

        swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
            CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
        if (swcr_id < 0) {
                device_printf(dev, "cannot initialize!\n");
                return (ENXIO);
        }

        return (0);
}

static int
swcr_detach(device_t dev)
{
        crypto_unregister_all(swcr_id);
        return (0);
}

static device_method_t swcr_methods[] = {
        DEVMETHOD(device_identify,      swcr_identify),
        DEVMETHOD(device_probe,         swcr_probe),
        DEVMETHOD(device_attach,        swcr_attach),
        DEVMETHOD(device_detach,        swcr_detach),

        DEVMETHOD(cryptodev_probesession, swcr_probesession),
        DEVMETHOD(cryptodev_newsession, swcr_newsession),
        DEVMETHOD(cryptodev_freesession, swcr_freesession),
        DEVMETHOD(cryptodev_process,    swcr_process),

        {0, 0},
};

static driver_t swcr_driver = {
        "cryptosoft",
        swcr_methods,
        0,              /* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);