/*-
 * Copyright (c) 2006 Pawel Jakub Dawidek <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/libkern.h>
#include <sys/endian.h>
#include <sys/pcpu.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif
#include <machine/pcb.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <crypto/via/padlock.h>
/*
 * Implementation notes.
 *
 * Some VIA CPUs provide SHA1 and SHA256 acceleration.
 * We implement all HMAC algorithms provided by the crypto(9) framework, but
 * we do the crypto work in software unless this is HMAC/SHA1 or HMAC/SHA256
 * and our CPU can accelerate it.
 *
 * The additional CPU instructions, which perform SHA1 and SHA256, are
 * one-shot functions - we have only one chance to supply the data; the CPU
 * itself will add the padding and calculate the hash automatically.
 * This means it is not possible to implement the usual init(), update(),
 * final() methods on top of them directly.
 * The way I've chosen is to keep appending data to a buffer on update()
 * (reallocating the buffer if necessary) and to issue the XSHA{1,256}
 * instruction on final().
 */

struct padlock_sha_ctx {
        uint8_t *psc_buf;
        int     psc_offset;
        int     psc_size;
};
CTASSERT(sizeof(struct padlock_sha_ctx) <= sizeof(union authctx));

static void padlock_sha_init(void *vctx);
static int padlock_sha_update(void *vctx, const void *buf, u_int bufsize);
static void padlock_sha1_final(uint8_t *hash, void *vctx);
static void padlock_sha256_final(uint8_t *hash, void *vctx);

static const struct auth_hash padlock_hmac_sha1 = {
        .type = CRYPTO_SHA1_HMAC,
        .name = "HMAC-SHA1",
        .keysize = SHA1_BLOCK_LEN,
        .hashsize = SHA1_HASH_LEN,
        .ctxsize = sizeof(struct padlock_sha_ctx),
        .blocksize = SHA1_BLOCK_LEN,
        .Init = padlock_sha_init,
        .Update = padlock_sha_update,
        .Final = padlock_sha1_final,
};

static const struct auth_hash padlock_hmac_sha256 = {
        .type = CRYPTO_SHA2_256_HMAC,
        .name = "HMAC-SHA2-256",
        .keysize = SHA2_256_BLOCK_LEN,
        .hashsize = SHA2_256_HASH_LEN,
        .ctxsize = sizeof(struct padlock_sha_ctx),
        .blocksize = SHA2_256_BLOCK_LEN,
        .Init = padlock_sha_init,
        .Update = padlock_sha_update,
        .Final = padlock_sha256_final,
};

MALLOC_DECLARE(M_PADLOCK);

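/*
 * Copy "count" 32-bit words from the PadLock result buffer into the caller's
 * digest buffer, byte-swapping each word so the digest comes out in the
 * usual big-endian byte order.
 */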
static __inline void
padlock_output_block(uint32_t *src, uint32_t *dst, size_t count)
{

        while (count-- > 0)
                *dst++ = bswap32(*src++);
}

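/*
 * One-shot SHA1 over "count" bytes of "in".  The result buffer is aligned
 * with PADLOCK_ALIGN() and made large enough for the PadLock unit, seeded
 * with the standard SHA1 initial state, and then REP XSHA1 is issued via its
 * raw opcode bytes with %eax cleared, so (as noted in the implementation
 * notes above) the CPU adds the padding and computes the final digest itself.
 */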
static void
padlock_do_sha1(const u_char *in, u_char *out, int count)
{
        u_char buf[128 + 16];   /* PadLock needs at least a 128-byte buffer. */
        u_char *result = PADLOCK_ALIGN(buf);

        ((uint32_t *)result)[0] = 0x67452301;
        ((uint32_t *)result)[1] = 0xEFCDAB89;
        ((uint32_t *)result)[2] = 0x98BADCFE;
        ((uint32_t *)result)[3] = 0x10325476;
        ((uint32_t *)result)[4] = 0xC3D2E1F0;

        __asm __volatile(
            ".byte 0xf3, 0x0f, 0xa6, 0xc8"      /* rep xsha1 */
            : "+S"(in), "+D"(result)
            : "c"(count), "a"(0)
        );

        padlock_output_block((uint32_t *)result, (uint32_t *)out,
            SHA1_HASH_LEN / sizeof(uint32_t));
}

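/*
 * One-shot SHA256 counterpart of padlock_do_sha1(): seed the buffer with the
 * SHA256 initial state and issue REP XSHA256.
 */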
static void
padlock_do_sha256(const char *in, char *out, int count)
{
        char buf[128 + 16];     /* PadLock needs at least a 128-byte buffer. */
        char *result = PADLOCK_ALIGN(buf);

        ((uint32_t *)result)[0] = 0x6A09E667;
        ((uint32_t *)result)[1] = 0xBB67AE85;
        ((uint32_t *)result)[2] = 0x3C6EF372;
        ((uint32_t *)result)[3] = 0xA54FF53A;
        ((uint32_t *)result)[4] = 0x510E527F;
        ((uint32_t *)result)[5] = 0x9B05688C;
        ((uint32_t *)result)[6] = 0x1F83D9AB;
        ((uint32_t *)result)[7] = 0x5BE0CD19;

        __asm __volatile(
            ".byte 0xf3, 0x0f, 0xa6, 0xd0"      /* rep xsha256 */
            : "+S"(in), "+D"(result)
            : "c"(count), "a"(0)
        );

        padlock_output_block((uint32_t *)result, (uint32_t *)out,
            SHA2_256_HASH_LEN / sizeof(uint32_t));
}

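/*
 * Start a new hash computation: the context simply tracks an (initially
 * empty) buffer that update() grows as data arrives.
 */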
static void
padlock_sha_init(void *vctx)
{
        struct padlock_sha_ctx *ctx;

        ctx = vctx;
        ctx->psc_buf = NULL;
        ctx->psc_offset = 0;
        ctx->psc_size = 0;
}

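/*
 * Append the new data to the context buffer.  When more room is needed, the
 * buffer is grown to at least twice its previous size (or just enough to
 * hold the new data, whichever is larger); the XSHA instruction itself is
 * only issued later, in final().
 */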
static int
padlock_sha_update(void *vctx, const void *buf, u_int bufsize)
{
        struct padlock_sha_ctx *ctx;
        uint8_t *nbuf;

        ctx = vctx;
        if (ctx->psc_size - ctx->psc_offset < bufsize) {
                ctx->psc_size = MAX(ctx->psc_size * 2, ctx->psc_size + bufsize);
                /* Use a temporary so the old buffer is not leaked on failure. */
                nbuf = realloc(ctx->psc_buf, ctx->psc_size, M_PADLOCK,
                    M_NOWAIT);
                if (nbuf == NULL)
                        return (ENOMEM);
                ctx->psc_buf = nbuf;
        }
        bcopy(buf, ctx->psc_buf + ctx->psc_offset, bufsize);
        ctx->psc_offset += bufsize;
        return (0);
}

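/*
 * Release the buffered data and reset the context so it can be reused.
 */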
static void
padlock_sha_free(void *vctx)
{
        struct padlock_sha_ctx *ctx;

        ctx = vctx;
        if (ctx->psc_buf != NULL) {
                zfree(ctx->psc_buf, M_PADLOCK);
                ctx->psc_buf = NULL;
                ctx->psc_offset = 0;
                ctx->psc_size = 0;
        }
}

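/*
 * Everything has been buffered by now, so hash it with a single XSHA1
 * operation and release the buffer.
 */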
static void
padlock_sha1_final(uint8_t *hash, void *vctx)
{
        struct padlock_sha_ctx *ctx;

        ctx = vctx;
        padlock_do_sha1(ctx->psc_buf, hash, ctx->psc_offset);
        padlock_sha_free(ctx);
}

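/*
 * Same as padlock_sha1_final(), but using XSHA256.
 */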
static void
padlock_sha256_final(uint8_t *hash, void *vctx)
{
        struct padlock_sha_ctx *ctx;

        ctx = vctx;
        padlock_do_sha256(ctx->psc_buf, hash, ctx->psc_offset);
        padlock_sha_free(ctx);
}

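/*
 * Duplicate a hash context.  The PadLock SHA contexts own a data buffer that
 * has to be deep-copied; for the software transforms a plain structure copy
 * is enough.
 */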
static void
padlock_copy_ctx(const struct auth_hash *axf, void *sctx, void *dctx)
{

        if ((via_feature_xcrypt & VIA_HAS_SHA) != 0 &&
            (axf->type == CRYPTO_SHA1_HMAC ||
            axf->type == CRYPTO_SHA2_256_HMAC)) {
                struct padlock_sha_ctx *spctx = sctx, *dpctx = dctx;

                dpctx->psc_offset = spctx->psc_offset;
                dpctx->psc_size = spctx->psc_size;
                dpctx->psc_buf = malloc(dpctx->psc_size, M_PADLOCK, M_WAITOK);
                bcopy(spctx->psc_buf, dpctx->psc_buf, dpctx->psc_size);
        } else {
                bcopy(sctx, dctx, axf->ctxsize);
        }
}

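/*
 * Free per-context resources; only the PadLock SHA contexts hold extra
 * memory.
 */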
static void
padlock_free_ctx(const struct auth_hash *axf, void *ctx)
{

        if ((via_feature_xcrypt & VIA_HAS_SHA) != 0 &&
            (axf->type == CRYPTO_SHA1_HMAC ||
            axf->type == CRYPTO_SHA2_256_HMAC)) {
                padlock_sha_free(ctx);
        }
}

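/*
 * (Re)initialize the HMAC inner and outer contexts from the given key.
 */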
static void
padlock_hash_key_setup(struct padlock_session *ses, const uint8_t *key,
    int klen)
{
        const struct auth_hash *axf;

        axf = ses->ses_axf;

        /*
         * Try to free contexts before using them, because
         * padlock_hash_key_setup() can be called twice - once from
         * padlock_newsession() and again from padlock_process().
         */
        padlock_free_ctx(axf, ses->ses_ictx);
        padlock_free_ctx(axf, ses->ses_octx);

        hmac_init_ipad(axf, key, klen, ses->ses_ictx);
        hmac_init_opad(axf, key, klen, ses->ses_octx);
}

/*
 * Compute the keyed-hash authenticator: run the inner hash over the AAD and
 * payload, hash the inner digest with the outer context, and then either
 * verify the digest carried in the request or copy the result back into it.
 */
static int
padlock_authcompute(struct padlock_session *ses, struct cryptop *crp)
{
        u_char hash[HASH_MAX_LEN], hash2[HASH_MAX_LEN];
        const struct auth_hash *axf;
        union authctx ctx;
        int error;

        axf = ses->ses_axf;

        padlock_copy_ctx(axf, ses->ses_ictx, &ctx);
        error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
            axf->Update, &ctx);
        if (error != 0) {
                padlock_free_ctx(axf, &ctx);
                return (error);
        }
        error = crypto_apply(crp, crp->crp_payload_start,
            crp->crp_payload_length, axf->Update, &ctx);
        if (error != 0) {
                padlock_free_ctx(axf, &ctx);
                return (error);
        }
        axf->Final(hash, &ctx);

        padlock_copy_ctx(axf, ses->ses_octx, &ctx);
        axf->Update(&ctx, hash, axf->hashsize);
        axf->Final(hash, &ctx);

        if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
                crypto_copydata(crp, crp->crp_digest_start, ses->ses_mlen,
                    hash2);
                if (timingsafe_bcmp(hash, hash2, ses->ses_mlen) != 0)
                        return (EBADMSG);
        } else
                crypto_copyback(crp, crp->crp_digest_start, ses->ses_mlen,
                    hash);
        return (0);
}

/* Find the auth_hash descriptor which implements the given HMAC algorithm. */
static const struct auth_hash *
padlock_hash_lookup(int alg)
{
        const struct auth_hash *axf;

        switch (alg) {
        case CRYPTO_NULL_HMAC:
                axf = &auth_hash_null;
                break;
        case CRYPTO_SHA1_HMAC:
                if ((via_feature_xcrypt & VIA_HAS_SHA) != 0)
                        axf = &padlock_hmac_sha1;
                else
                        axf = &auth_hash_hmac_sha1;
                break;
        case CRYPTO_RIPEMD160_HMAC:
                axf = &auth_hash_hmac_ripemd_160;
                break;
        case CRYPTO_SHA2_256_HMAC:
                if ((via_feature_xcrypt & VIA_HAS_SHA) != 0)
                        axf = &padlock_hmac_sha256;
                else
                        axf = &auth_hash_hmac_sha2_256;
                break;
        case CRYPTO_SHA2_384_HMAC:
                axf = &auth_hash_hmac_sha2_384;
                break;
        case CRYPTO_SHA2_512_HMAC:
                axf = &auth_hash_hmac_sha2_512;
                break;
        default:
                axf = NULL;
                break;
        }
        return (axf);
}

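/*
 * Report whether we can handle the requested authentication algorithm.
 */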
bool
padlock_hash_check(const struct crypto_session_params *csp)
{

        return (padlock_hash_lookup(csp->csp_auth_alg) != NULL);
}

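/*
 * Prepare the session for HMAC processing: pick the transform, record the
 * MAC length, allocate the inner and outer contexts and, if the session
 * parameters carry a key, derive the ipad/opad state right away.
 */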
int
padlock_hash_setup(struct padlock_session *ses,
    const struct crypto_session_params *csp)
{

        ses->ses_axf = padlock_hash_lookup(csp->csp_auth_alg);
        if (csp->csp_auth_mlen == 0)
                ses->ses_mlen = ses->ses_axf->hashsize;
        else
                ses->ses_mlen = csp->csp_auth_mlen;

        /* Allocate memory for HMAC inner and outer contexts. */
        ses->ses_ictx = malloc(ses->ses_axf->ctxsize, M_PADLOCK,
            M_ZERO | M_NOWAIT);
        ses->ses_octx = malloc(ses->ses_axf->ctxsize, M_PADLOCK,
            M_ZERO | M_NOWAIT);
        if (ses->ses_ictx == NULL || ses->ses_octx == NULL)
                return (ENOMEM);

        /* Setup key if given. */
        if (csp->csp_auth_key != NULL) {
                padlock_hash_key_setup(ses, csp->csp_auth_key,
                    csp->csp_auth_klen);
        }
        return (0);
}

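/*
 * Process one request: enter kernel FPU context for the duration of the
 * PadLock instructions, rekey if the request carries its own key, and then
 * compute or verify the MAC.
 */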
int
padlock_hash_process(struct padlock_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp)
{
        struct thread *td;
        int error;

        td = curthread;
        fpu_kern_enter(td, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);
        if (crp->crp_auth_key != NULL)
                padlock_hash_key_setup(ses, crp->crp_auth_key,
                    csp->csp_auth_klen);

        error = padlock_authcompute(ses, crp);
        fpu_kern_leave(td, NULL);
        return (error);
}

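/*
 * Tear down the session, zeroing and freeing the HMAC inner and outer
 * contexts.
 */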
void
padlock_hash_free(struct padlock_session *ses)
{

        if (ses->ses_ictx != NULL) {
                padlock_free_ctx(ses->ses_axf, ses->ses_ictx);
                zfree(ses->ses_ictx, M_PADLOCK);
                ses->ses_ictx = NULL;
        }
        if (ses->ses_octx != NULL) {
                padlock_free_ctx(ses->ses_axf, ses->ses_octx);
                zfree(ses->ses_octx, M_PADLOCK);
                ses->ses_octx = NULL;
        }
}