1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/zfs_context.h>
28 #include <sys/modctl.h>
29 #include <sys/crypto/common.h>
30 #include <sys/crypto/icp.h>
31 #include <sys/crypto/spi.h>
32 
33 #include <sha1/sha1.h>
34 #include <sha1/sha1_impl.h>
35 
36 /*
37  * The sha1 module is created with two modlinkages:
38  * - a modlmisc that allows consumers to directly call the entry points
39  *   SHA1Init, SHA1Update, and SHA1Final.
40  * - a modlcrypto that allows the module to register with the Kernel
41  *   Cryptographic Framework (KCF) as a software provider for the SHA1
42  *   mechanisms.
43  */
44 
/* Crypto-module linkage handed to KCF; the string is the provider ident. */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"SHA1 Kernel SW Provider 1.1"
};

/* Loadable-module linkage: this module exports only the crypto linkage. */
static struct modlinkage modlinkage = {
	MODREV_1, { &modlcrypto, NULL }
};
53 
54 
55 /*
56  * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
57  * by KCF to one of the entry points.
58  */
59 
60 #define	PROV_SHA1_CTX(ctx)	((sha1_ctx_t *)(ctx)->cc_provider_private)
61 #define	PROV_SHA1_HMAC_CTX(ctx)	((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
62 
/*
 * Extract the digest length passed as a mechanism parameter (an
 * ulong_t).  The parameter may not be suitably aligned for a direct
 * load, in which case it is copied out with bcopy() first.
 *
 * Fixed: the aligned branch previously dereferenced the caller's
 * variable "mechanism" instead of the macro argument (m), which only
 * worked because every call site happened to pass "mechanism".  The
 * macro is also wrapped in do/while (0) so it expands to a single
 * statement in all contexts.
 */
#define	PROV_SHA1_GET_DIGEST_LEN(m, len) do {				\
	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
		(len) = (uint32_t)*((ulong_t *)(void *)(m)->cm_param);	\
	else {								\
		ulong_t tmp_ulong;					\
		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
		(len) = (uint32_t)tmp_ulong;				\
	}								\
} while (0)
73 
/*
 * Digest a raw key with plain SHA1.  Used to shrink HMAC keys longer
 * than the SHA1 block size, per RFC 2104.  Wrapped in do/while (0) so
 * the multi-statement body behaves as one statement (safe inside an
 * unbraced if/else).
 */
#define	PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) do {	\
	SHA1Init(ctx);						\
	SHA1Update(ctx, key, len);				\
	SHA1Final(digest, ctx);					\
} while (0)
79 
80 /*
81  * Mechanism info structure passed to KCF during registration.
82  */
83 static crypto_mech_info_t sha1_mech_info_tab[] = {
84 	/* SHA1 */
85 	{SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
86 	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
87 	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
88 	/* SHA1-HMAC */
89 	{SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
90 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
91 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
92 	    CRYPTO_KEYSIZE_UNIT_IN_BYTES},
93 	/* SHA1-HMAC GENERAL */
94 	{SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
95 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
96 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
97 	    CRYPTO_KEYSIZE_UNIT_IN_BYTES}
98 };
99 
100 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
101 
/* Control ops: only the mandatory provider_status callback. */
static crypto_control_ops_t sha1_control_ops = {
	sha1_provider_status
};
105 
106 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
107     crypto_req_handle_t);
108 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
109     crypto_req_handle_t);
110 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
111     crypto_req_handle_t);
112 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
113     crypto_req_handle_t);
114 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
115     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
116     crypto_req_handle_t);
117 
/* Digest ops vector; the optional digest_key entry is unimplemented. */
static crypto_digest_ops_t sha1_digest_ops = {
	.digest_init = sha1_digest_init,
	.digest = sha1_digest,
	.digest_update = sha1_digest_update,
	.digest_key = NULL,
	.digest_final = sha1_digest_final,
	.digest_atomic = sha1_digest_atomic
};
126 
127 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
128     crypto_spi_ctx_template_t, crypto_req_handle_t);
129 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
130     crypto_req_handle_t);
131 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
132 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
133     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
134     crypto_spi_ctx_template_t, crypto_req_handle_t);
135 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
136     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
137     crypto_spi_ctx_template_t, crypto_req_handle_t);
138 
/* MAC ops vector; single-part mac is NULL, so KCF uses init/update/final. */
static crypto_mac_ops_t sha1_mac_ops = {
	.mac_init = sha1_mac_init,
	.mac = NULL,
	.mac_update = sha1_mac_update,
	.mac_final = sha1_mac_final,
	.mac_atomic = sha1_mac_atomic,
	.mac_verify_atomic = sha1_mac_verify_atomic
};
147 
148 static int sha1_create_ctx_template(crypto_provider_handle_t,
149     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
150     size_t *, crypto_req_handle_t);
151 static int sha1_free_context(crypto_ctx_t *);
152 
/* Context-management ops: HMAC key-schedule templates and context teardown. */
static crypto_ctx_ops_t sha1_ctx_ops = {
	.create_ctx_template = sha1_create_ctx_template,
	.free_context = sha1_free_context
};
157 
/*
 * Operation vectors handed to KCF.  Only control, digest, mac, and
 * context-management ops are provided; all other slots are NULL.
 */
static crypto_ops_t sha1_crypto_ops = {{{{{
	&sha1_control_ops,
	&sha1_digest_ops,
	NULL,
	&sha1_mac_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	&sha1_ctx_ops,
}}}}};
174 
/* Provider descriptor registered with KCF in sha1_mod_init(). */
static crypto_provider_info_t sha1_prov_info = {{{{
	CRYPTO_SPI_VERSION_1,
	"SHA1 Software Provider",
	CRYPTO_SW_PROVIDER,
	NULL,
	&sha1_crypto_ops,
	sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
	sha1_mech_info_tab
}}}};

/* Handle returned by crypto_register_provider(); 0 while unregistered. */
static crypto_kcf_provider_handle_t sha1_prov_handle = 0;
186 
187 int
sha1_mod_init(void)188 sha1_mod_init(void)
189 {
190 	int ret;
191 
192 	if ((ret = mod_install(&modlinkage)) != 0)
193 		return (ret);
194 
195 	/*
196 	 * Register with KCF. If the registration fails, log an
197 	 * error but do not uninstall the module, since the functionality
198 	 * provided by misc/sha1 should still be available.
199 	 */
200 	if ((ret = crypto_register_provider(&sha1_prov_info,
201 	    &sha1_prov_handle)) != CRYPTO_SUCCESS)
202 		cmn_err(CE_WARN, "sha1 _init: "
203 		    "crypto_register_provider() failed (0x%x)", ret);
204 
205 	return (0);
206 }
207 
208 int
sha1_mod_fini(void)209 sha1_mod_fini(void)
210 {
211 	int ret;
212 
213 	if (sha1_prov_handle != 0) {
214 		if ((ret = crypto_unregister_provider(sha1_prov_handle)) !=
215 		    CRYPTO_SUCCESS) {
216 			cmn_err(CE_WARN,
217 			    "sha1 _fini: crypto_unregister_provider() "
218 			    "failed (0x%x)", ret);
219 			return (EBUSY);
220 		}
221 		sha1_prov_handle = 0;
222 	}
223 
224 	return (mod_remove(&modlinkage));
225 }
226 
227 /*
228  * KCF software provider control entry points.
229  */
/* ARGSUSED */
static void
sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	/* Pure-software provider: no hardware state, always ready. */
	*status = CRYPTO_PROVIDER_READY;
}
236 
237 /*
238  * KCF software provider digest entry points.
239  */
240 
241 static int
sha1_digest_init(crypto_ctx_t * ctx,crypto_mechanism_t * mechanism,crypto_req_handle_t req)242 sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
243     crypto_req_handle_t req)
244 {
245 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
246 		return (CRYPTO_MECHANISM_INVALID);
247 
248 	/*
249 	 * Allocate and initialize SHA1 context.
250 	 */
251 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
252 	    crypto_kmflag(req));
253 	if (ctx->cc_provider_private == NULL)
254 		return (CRYPTO_HOST_MEMORY);
255 
256 	PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
257 	SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
258 
259 	return (CRYPTO_SUCCESS);
260 }
261 
262 /*
263  * Helper SHA1 digest update function for uio data.
264  */
265 static int
sha1_digest_update_uio(SHA1_CTX * sha1_ctx,crypto_data_t * data)266 sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
267 {
268 	off_t offset = data->cd_offset;
269 	size_t length = data->cd_length;
270 	uint_t vec_idx = 0;
271 	size_t cur_len;
272 
273 	/* we support only kernel buffer */
274 	if (uio_segflg(data->cd_uio) != UIO_SYSSPACE)
275 		return (CRYPTO_ARGUMENTS_BAD);
276 
277 	/*
278 	 * Jump to the first iovec containing data to be
279 	 * digested.
280 	 */
281 	offset = uio_index_at_offset(data->cd_uio, offset, &vec_idx);
282 	if (vec_idx == uio_iovcnt(data->cd_uio)) {
283 		/*
284 		 * The caller specified an offset that is larger than the
285 		 * total size of the buffers it provided.
286 		 */
287 		return (CRYPTO_DATA_LEN_RANGE);
288 	}
289 
290 	/*
291 	 * Now do the digesting on the iovecs.
292 	 */
293 	while (vec_idx < uio_iovcnt(data->cd_uio) && length > 0) {
294 		cur_len = MIN(uio_iovlen(data->cd_uio, vec_idx) -
295 		    offset, length);
296 
297 		SHA1Update(sha1_ctx,
298 		    (uint8_t *)uio_iovbase(data->cd_uio, vec_idx) + offset,
299 		    cur_len);
300 
301 		length -= cur_len;
302 		vec_idx++;
303 		offset = 0;
304 	}
305 
306 	if (vec_idx == uio_iovcnt(data->cd_uio) && length > 0) {
307 		/*
308 		 * The end of the specified iovec's was reached but
309 		 * the length requested could not be processed, i.e.
310 		 * The caller requested to digest more data than it provided.
311 		 */
312 		return (CRYPTO_DATA_LEN_RANGE);
313 	}
314 
315 	return (CRYPTO_SUCCESS);
316 }
317 
318 /*
319  * Helper SHA1 digest final function for uio data.
320  * digest_len is the length of the desired digest. If digest_len
321  * is smaller than the default SHA1 digest length, the caller
322  * must pass a scratch buffer, digest_scratch, which must
323  * be at least SHA1_DIGEST_LENGTH bytes.
324  */
325 static int
sha1_digest_final_uio(SHA1_CTX * sha1_ctx,crypto_data_t * digest,ulong_t digest_len,uchar_t * digest_scratch)326 sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
327     ulong_t digest_len, uchar_t *digest_scratch)
328 {
329 	off_t offset = digest->cd_offset;
330 	uint_t vec_idx = 0;
331 
332 	/* we support only kernel buffer */
333 	if (uio_segflg(digest->cd_uio) != UIO_SYSSPACE)
334 		return (CRYPTO_ARGUMENTS_BAD);
335 
336 	/*
337 	 * Jump to the first iovec containing ptr to the digest to
338 	 * be returned.
339 	 */
340 	offset = uio_index_at_offset(digest->cd_uio, offset, &vec_idx);
341 	if (vec_idx == uio_iovcnt(digest->cd_uio)) {
342 		/*
343 		 * The caller specified an offset that is
344 		 * larger than the total size of the buffers
345 		 * it provided.
346 		 */
347 		return (CRYPTO_DATA_LEN_RANGE);
348 	}
349 
350 	if (offset + digest_len <=
351 	    uio_iovlen(digest->cd_uio, vec_idx)) {
352 		/*
353 		 * The computed SHA1 digest will fit in the current
354 		 * iovec.
355 		 */
356 		if (digest_len != SHA1_DIGEST_LENGTH) {
357 			/*
358 			 * The caller requested a short digest. Digest
359 			 * into a scratch buffer and return to
360 			 * the user only what was requested.
361 			 */
362 			SHA1Final(digest_scratch, sha1_ctx);
363 			bcopy(digest_scratch, (uchar_t *)uio_iovbase(digest->
364 			    cd_uio, vec_idx) + offset,
365 			    digest_len);
366 		} else {
367 			SHA1Final((uchar_t *)uio_iovbase(digest->
368 			    cd_uio, vec_idx) + offset,
369 			    sha1_ctx);
370 		}
371 	} else {
372 		/*
373 		 * The computed digest will be crossing one or more iovec's.
374 		 * This is bad performance-wise but we need to support it.
375 		 * Allocate a small scratch buffer on the stack and
376 		 * copy it piece meal to the specified digest iovec's.
377 		 */
378 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
379 		off_t scratch_offset = 0;
380 		size_t length = digest_len;
381 		size_t cur_len;
382 
383 		SHA1Final(digest_tmp, sha1_ctx);
384 
385 		while (vec_idx < uio_iovcnt(digest->cd_uio) && length > 0) {
386 			cur_len = MIN(uio_iovlen(digest->cd_uio, vec_idx) -
387 			    offset, length);
388 			bcopy(digest_tmp + scratch_offset,
389 			    uio_iovbase(digest->cd_uio, vec_idx) + offset,
390 			    cur_len);
391 
392 			length -= cur_len;
393 			vec_idx++;
394 			scratch_offset += cur_len;
395 			offset = 0;
396 		}
397 
398 		if (vec_idx == uio_iovcnt(digest->cd_uio) && length > 0) {
399 			/*
400 			 * The end of the specified iovec's was reached but
401 			 * the length requested could not be processed, i.e.
402 			 * The caller requested to digest more data than it
403 			 * provided.
404 			 */
405 			return (CRYPTO_DATA_LEN_RANGE);
406 		}
407 	}
408 
409 	return (CRYPTO_SUCCESS);
410 }
411 
/*
 * KCF single-part digest entry point: digest "data" in one call and
 * write the result to "digest".  The provider-private context is
 * freed on every exit path except the buffer-too-small length query.
 */
/* ARGSUSED */
static int
sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do the SHA1 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, free context and bail */
		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
		ctx->cc_provider_private = NULL;
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do a SHA1 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
		break;
	case CRYPTO_DATA_UIO:
		/* Full-length digest requested, so no scratch buffer. */
		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
	ctx->cc_provider_private = NULL;
	return (ret);
}
485 
486 /* ARGSUSED */
487 static int
sha1_digest_update(crypto_ctx_t * ctx,crypto_data_t * data,crypto_req_handle_t req)488 sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
489     crypto_req_handle_t req)
490 {
491 	int ret = CRYPTO_SUCCESS;
492 
493 	ASSERT(ctx->cc_provider_private != NULL);
494 
495 	/*
496 	 * Do the SHA1 update on the specified input data.
497 	 */
498 	switch (data->cd_format) {
499 	case CRYPTO_DATA_RAW:
500 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
501 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
502 		    data->cd_length);
503 		break;
504 	case CRYPTO_DATA_UIO:
505 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
506 		    data);
507 		break;
508 	default:
509 		ret = CRYPTO_ARGUMENTS_BAD;
510 	}
511 
512 	return (ret);
513 }
514 
515 /* ARGSUSED */
516 static int
sha1_digest_final(crypto_ctx_t * ctx,crypto_data_t * digest,crypto_req_handle_t req)517 sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
518     crypto_req_handle_t req)
519 {
520 	int ret = CRYPTO_SUCCESS;
521 
522 	ASSERT(ctx->cc_provider_private != NULL);
523 
524 	/*
525 	 * We need to just return the length needed to store the output.
526 	 * We should not destroy the context for the following cases.
527 	 */
528 	if ((digest->cd_length == 0) ||
529 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
530 		digest->cd_length = SHA1_DIGEST_LENGTH;
531 		return (CRYPTO_BUFFER_TOO_SMALL);
532 	}
533 
534 	/*
535 	 * Do a SHA1 final.
536 	 */
537 	switch (digest->cd_format) {
538 	case CRYPTO_DATA_RAW:
539 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
540 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
541 		break;
542 	case CRYPTO_DATA_UIO:
543 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
544 		    digest, SHA1_DIGEST_LENGTH, NULL);
545 		break;
546 	default:
547 		ret = CRYPTO_ARGUMENTS_BAD;
548 	}
549 
550 	/* all done, free context and return */
551 
552 	if (ret == CRYPTO_SUCCESS) {
553 		digest->cd_length = SHA1_DIGEST_LENGTH;
554 	} else {
555 		digest->cd_length = 0;
556 	}
557 
558 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
559 	ctx->cc_provider_private = NULL;
560 
561 	return (ret);
562 }
563 
/*
 * KCF atomic digest entry point: init, update, and final in a single
 * call, using a SHA1 context on the stack (no provider-private state).
 *
 * NOTE(review): unlike sha1_digest()/sha1_digest_final(), there is no
 * buffer-too-small pre-check here; presumably KCF validates the output
 * length before dispatching atomic requests -- confirm.
 */
/* ARGSUSED */
static int
sha1_digest_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	SHA1_CTX sha1_ctx;

	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Do the SHA1 init.
	 */
	SHA1Init(&sha1_ctx);

	/*
	 * Do the SHA1 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Update(&sha1_ctx,
		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_update_uio(&sha1_ctx, data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, bail */
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do a SHA1 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &sha1_ctx);
		break;
	case CRYPTO_DATA_UIO:
		/* Full-length digest requested, so no scratch buffer. */
		ret = sha1_digest_final_uio(&sha1_ctx, digest,
		    SHA1_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	return (ret);
}
629 
630 /*
631  * KCF software provider mac entry points.
632  *
633  * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
634  *
635  * Init:
636  * The initialization routine initializes what we denote
637  * as the inner and outer contexts by doing
638  * - for inner context: SHA1(key XOR ipad)
639  * - for outer context: SHA1(key XOR opad)
640  *
641  * Update:
642  * Each subsequent SHA1 HMAC update will result in an
643  * update of the inner context with the specified data.
644  *
645  * Final:
646  * The SHA1 HMAC final will do a SHA1 final operation on the
647  * inner context, and the resulting digest will be used
648  * as the data for an update on the outer context. Last
649  * but not least, a SHA1 final on the outer context will
650  * be performed to obtain the SHA1 HMAC digest to return
651  * to the user.
652  */
653 
654 /*
655  * Initialize a SHA1-HMAC context.
656  */
657 static void
sha1_mac_init_ctx(sha1_hmac_ctx_t * ctx,void * keyval,uint_t length_in_bytes)658 sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
659 {
660 	uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
661 	uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
662 	uint_t i;
663 
664 	bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
665 	bzero(opad, SHA1_HMAC_BLOCK_SIZE);
666 
667 	bcopy(keyval, ipad, length_in_bytes);
668 	bcopy(keyval, opad, length_in_bytes);
669 
670 	/* XOR key with ipad (0x36) and opad (0x5c) */
671 	for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
672 		ipad[i] ^= 0x36363636;
673 		opad[i] ^= 0x5c5c5c5c;
674 	}
675 
676 	/* perform SHA1 on ipad */
677 	SHA1Init(&ctx->hc_icontext);
678 	SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
679 
680 	/* perform SHA1 on opad */
681 	SHA1Init(&ctx->hc_ocontext);
682 	SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
683 }
684 
685 /*
686  */
687 static int
sha1_mac_init(crypto_ctx_t * ctx,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)688 sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
689     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
690     crypto_req_handle_t req)
691 {
692 	int ret = CRYPTO_SUCCESS;
693 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
694 
695 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
696 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
697 		return (CRYPTO_MECHANISM_INVALID);
698 
699 	/* Add support for key by attributes (RFE 4706552) */
700 	if (key->ck_format != CRYPTO_KEY_RAW)
701 		return (CRYPTO_ARGUMENTS_BAD);
702 
703 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
704 	    crypto_kmflag(req));
705 	if (ctx->cc_provider_private == NULL)
706 		return (CRYPTO_HOST_MEMORY);
707 
708 	if (ctx_template != NULL) {
709 		/* reuse context template */
710 		bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
711 		    sizeof (sha1_hmac_ctx_t));
712 	} else {
713 		/* no context template, compute context */
714 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
715 			uchar_t digested_key[SHA1_DIGEST_LENGTH];
716 			sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
717 
718 			/*
719 			 * Hash the passed-in key to get a smaller key.
720 			 * The inner context is used since it hasn't been
721 			 * initialized yet.
722 			 */
723 			PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
724 			    key->ck_data, keylen_in_bytes, digested_key);
725 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
726 			    digested_key, SHA1_DIGEST_LENGTH);
727 		} else {
728 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
729 			    key->ck_data, keylen_in_bytes);
730 		}
731 	}
732 
733 	/*
734 	 * Get the mechanism parameters, if applicable.
735 	 */
736 	PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
737 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
738 		if (mechanism->cm_param == NULL ||
739 		    mechanism->cm_param_len != sizeof (ulong_t))
740 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
741 		PROV_SHA1_GET_DIGEST_LEN(mechanism,
742 		    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
743 		if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
744 		    SHA1_DIGEST_LENGTH)
745 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
746 	}
747 
748 	if (ret != CRYPTO_SUCCESS) {
749 		bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
750 		kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
751 		ctx->cc_provider_private = NULL;
752 	}
753 
754 	return (ret);
755 }
756 
757 /* ARGSUSED */
758 static int
sha1_mac_update(crypto_ctx_t * ctx,crypto_data_t * data,crypto_req_handle_t req)759 sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
760 {
761 	int ret = CRYPTO_SUCCESS;
762 
763 	ASSERT(ctx->cc_provider_private != NULL);
764 
765 	/*
766 	 * Do a SHA1 update of the inner context using the specified
767 	 * data.
768 	 */
769 	switch (data->cd_format) {
770 	case CRYPTO_DATA_RAW:
771 		SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
772 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
773 		    data->cd_length);
774 		break;
775 	case CRYPTO_DATA_UIO:
776 		ret = sha1_digest_update_uio(
777 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
778 		break;
779 	default:
780 		ret = CRYPTO_ARGUMENTS_BAD;
781 	}
782 
783 	return (ret);
784 }
785 
/*
 * KCF mac_final entry point: finalize the inner context, feed its
 * digest through the outer context, and emit (possibly truncated) the
 * resulting HMAC.  The provider-private context is zeroized and freed
 * on every exit path except the buffer-too-small length query.
 */
/* ARGSUSED */
static int
sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA1_DIGEST_LENGTH];
	uint32_t digest_len = SHA1_DIGEST_LENGTH;

	ASSERT(ctx->cc_provider_private != NULL);

	/* General mechanism: truncated length chosen at mac_init time. */
	if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
	    SHA1_HMAC_GEN_MECH_INFO_TYPE)
		digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
		mac->cd_length = digest_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do a SHA1 final on the inner context.
	 */
	SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);

	/*
	 * Do a SHA1 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
	    SHA1_DIGEST_LENGTH);

	/*
	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest,
			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset,
			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		/* "digest" doubles as the scratch buffer for short MACs. */
		ret = sha1_digest_final_uio(
		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
	} else {
		mac->cd_length = 0;
	}

	/* Zeroize key material before freeing the context. */
	bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
	kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}
864 
/*
 * Update the inner SHA1 context of the HMAC context "ctx" with the
 * contents of crypto_data_t "data"; "ret" receives the status for
 * unsupported formats or uio failures.  Arguments are parenthesized
 * and the body is wrapped in do/while (0) so the macro behaves as a
 * single statement with no multiple-evaluation surprises beyond
 * "data" itself.
 */
#define	SHA1_MAC_UPDATE(data, ctx, ret) do {				\
	switch ((data)->cd_format) {					\
	case CRYPTO_DATA_RAW:						\
		SHA1Update(&(ctx).hc_icontext,				\
		    (uint8_t *)(data)->cd_raw.iov_base +		\
		    (data)->cd_offset, (data)->cd_length);		\
		break;							\
	case CRYPTO_DATA_UIO:						\
		(ret) = sha1_digest_update_uio(&(ctx).hc_icontext,	\
		    (data));						\
		break;							\
	default:							\
		(ret) = CRYPTO_ARGUMENTS_BAD;				\
	}								\
} while (0)
879 
/*
 * KCF atomic mac entry point: key a stack HMAC context (from template
 * or raw key), digest "data", and emit the (possibly truncated) HMAC
 * into "mac".  The stack context is zeroized on all exit paths.
 */
/* ARGSUSED */
static int
sha1_mac_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA1_DIGEST_LENGTH];
	sha1_hmac_ctx_t sha1_hmac_ctx;
	uint32_t digest_len = SHA1_DIGEST_LENGTH;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/* Add support for key by attributes (RFE 4706552) */
	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	if (ctx_template != NULL) {
		/* reuse context template */
		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	} else {
		/* no context template, initialize context */
		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
			    SHA1_DIGEST_LENGTH);
		} else {
			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/* get the mechanism parameters, if applicable */
	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
		/* Validate cm_param before dereferencing it. */
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
		if (digest_len > SHA1_DIGEST_LENGTH) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
	}

	/* do a SHA1 update of the inner context using the specified data */
	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/*
	 * Do a SHA1 final on the inner context.
	 */
	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA1 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);

	/*
	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		/* "digest" doubles as the scratch buffer for short MACs. */
		ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
	} else {
		mac->cd_length = 0;
	}
	/* Extra paranoia: zeroize the context on the stack */
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));

	return (ret);
bail:
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
995 
996 /* ARGSUSED */
997 static int
sha1_mac_verify_atomic(crypto_provider_handle_t provider,crypto_session_id_t session_id,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_data_t * data,crypto_data_t * mac,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)998 sha1_mac_verify_atomic(crypto_provider_handle_t provider,
999     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1000     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1001     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1002 {
1003 	int ret = CRYPTO_SUCCESS;
1004 	uchar_t digest[SHA1_DIGEST_LENGTH];
1005 	sha1_hmac_ctx_t sha1_hmac_ctx;
1006 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1007 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1008 
1009 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1010 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1011 		return (CRYPTO_MECHANISM_INVALID);
1012 
1013 	/* Add support for key by attributes (RFE 4706552) */
1014 	if (key->ck_format != CRYPTO_KEY_RAW)
1015 		return (CRYPTO_ARGUMENTS_BAD);
1016 
1017 	if (ctx_template != NULL) {
1018 		/* reuse context template */
1019 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1020 	} else {
1021 		/* no context template, initialize context */
1022 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1023 			/*
1024 			 * Hash the passed-in key to get a smaller key.
1025 			 * The inner context is used since it hasn't been
1026 			 * initialized yet.
1027 			 */
1028 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1029 			    key->ck_data, keylen_in_bytes, digest);
1030 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1031 			    SHA1_DIGEST_LENGTH);
1032 		} else {
1033 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1034 			    keylen_in_bytes);
1035 		}
1036 	}
1037 
1038 	/* get the mechanism parameters, if applicable */
1039 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1040 		if (mechanism->cm_param == NULL ||
1041 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1042 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1043 			goto bail;
1044 		}
1045 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1046 		if (digest_len > SHA1_DIGEST_LENGTH) {
1047 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1048 			goto bail;
1049 		}
1050 	}
1051 
1052 	if (mac->cd_length != digest_len) {
1053 		ret = CRYPTO_INVALID_MAC;
1054 		goto bail;
1055 	}
1056 
1057 	/* do a SHA1 update of the inner context using the specified data */
1058 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1059 	if (ret != CRYPTO_SUCCESS)
1060 		/* the update failed, free context and bail */
1061 		goto bail;
1062 
1063 	/* do a SHA1 final on the inner context */
1064 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1065 
1066 	/*
1067 	 * Do an SHA1 update on the outer context, feeding the inner
1068 	 * digest as data.
1069 	 */
1070 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1071 
1072 	/*
1073 	 * Do a SHA1 final on the outer context, storing the computed
1074 	 * digest in the users buffer.
1075 	 */
1076 	SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1077 
1078 	/*
1079 	 * Compare the computed digest against the expected digest passed
1080 	 * as argument.
1081 	 */
1082 
1083 	switch (mac->cd_format) {
1084 
1085 	case CRYPTO_DATA_RAW:
1086 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
1087 		    mac->cd_offset, digest_len) != 0)
1088 			ret = CRYPTO_INVALID_MAC;
1089 		break;
1090 
1091 	case CRYPTO_DATA_UIO: {
1092 		off_t offset = mac->cd_offset;
1093 		uint_t vec_idx = 0;
1094 		off_t scratch_offset = 0;
1095 		size_t length = digest_len;
1096 		size_t cur_len;
1097 
1098 		/* we support only kernel buffer */
1099 		if (uio_segflg(mac->cd_uio) != UIO_SYSSPACE)
1100 			return (CRYPTO_ARGUMENTS_BAD);
1101 
1102 		/* jump to the first iovec containing the expected digest */
1103 		offset = uio_index_at_offset(mac->cd_uio, offset, &vec_idx);
1104 		if (vec_idx == uio_iovcnt(mac->cd_uio)) {
1105 			/*
1106 			 * The caller specified an offset that is
1107 			 * larger than the total size of the buffers
1108 			 * it provided.
1109 			 */
1110 			ret = CRYPTO_DATA_LEN_RANGE;
1111 			break;
1112 		}
1113 
1114 		/* do the comparison of computed digest vs specified one */
1115 		while (vec_idx < uio_iovcnt(mac->cd_uio) && length > 0) {
1116 			cur_len = MIN(uio_iovlen(mac->cd_uio, vec_idx) -
1117 			    offset, length);
1118 
1119 			if (bcmp(digest + scratch_offset,
1120 			    uio_iovbase(mac->cd_uio, vec_idx) + offset,
1121 			    cur_len) != 0) {
1122 				ret = CRYPTO_INVALID_MAC;
1123 				break;
1124 			}
1125 
1126 			length -= cur_len;
1127 			vec_idx++;
1128 			scratch_offset += cur_len;
1129 			offset = 0;
1130 		}
1131 		break;
1132 	}
1133 
1134 	default:
1135 		ret = CRYPTO_ARGUMENTS_BAD;
1136 	}
1137 
1138 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1139 	return (ret);
1140 bail:
1141 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1142 	mac->cd_length = 0;
1143 	return (ret);
1144 }
1145 
1146 /*
1147  * KCF software provider context management entry points.
1148  */
1149 
1150 /* ARGSUSED */
1151 static int
sha1_create_ctx_template(crypto_provider_handle_t provider,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_spi_ctx_template_t * ctx_template,size_t * ctx_template_size,crypto_req_handle_t req)1152 sha1_create_ctx_template(crypto_provider_handle_t provider,
1153     crypto_mechanism_t *mechanism, crypto_key_t *key,
1154     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
1155     crypto_req_handle_t req)
1156 {
1157 	sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
1158 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1159 
1160 	if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
1161 	    (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
1162 		return (CRYPTO_MECHANISM_INVALID);
1163 	}
1164 
1165 	/* Add support for key by attributes (RFE 4706552) */
1166 	if (key->ck_format != CRYPTO_KEY_RAW)
1167 		return (CRYPTO_ARGUMENTS_BAD);
1168 
1169 	/*
1170 	 * Allocate and initialize SHA1 context.
1171 	 */
1172 	sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1173 	    crypto_kmflag(req));
1174 	if (sha1_hmac_ctx_tmpl == NULL)
1175 		return (CRYPTO_HOST_MEMORY);
1176 
1177 	if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1178 		uchar_t digested_key[SHA1_DIGEST_LENGTH];
1179 
1180 		/*
1181 		 * Hash the passed-in key to get a smaller key.
1182 		 * The inner context is used since it hasn't been
1183 		 * initialized yet.
1184 		 */
1185 		PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
1186 		    key->ck_data, keylen_in_bytes, digested_key);
1187 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
1188 		    SHA1_DIGEST_LENGTH);
1189 	} else {
1190 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
1191 		    keylen_in_bytes);
1192 	}
1193 
1194 	sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
1195 	*ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
1196 	*ctx_template_size = sizeof (sha1_hmac_ctx_t);
1197 
1198 
1199 	return (CRYPTO_SUCCESS);
1200 }
1201 
1202 static int
sha1_free_context(crypto_ctx_t * ctx)1203 sha1_free_context(crypto_ctx_t *ctx)
1204 {
1205 	uint_t ctx_len;
1206 	sha1_mech_type_t mech_type;
1207 
1208 	if (ctx->cc_provider_private == NULL)
1209 		return (CRYPTO_SUCCESS);
1210 
1211 	/*
1212 	 * We have to free either SHA1 or SHA1-HMAC contexts, which
1213 	 * have different lengths.
1214 	 */
1215 
1216 	mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
1217 	if (mech_type == SHA1_MECH_INFO_TYPE)
1218 		ctx_len = sizeof (sha1_ctx_t);
1219 	else {
1220 		ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
1221 		    mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
1222 		ctx_len = sizeof (sha1_hmac_ctx_t);
1223 	}
1224 
1225 	bzero(ctx->cc_provider_private, ctx_len);
1226 	kmem_free(ctx->cc_provider_private, ctx_len);
1227 	ctx->cc_provider_private = NULL;
1228 
1229 	return (CRYPTO_SUCCESS);
1230 }
1231