/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2009-2011 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CESA SRAM Memory Map:
 *
 * +------------------------+ <= sc->sc_sram_base_va + CESA_SRAM_SIZE
 * |                        |
 * |          DATA          |
 * |                        |
 * +------------------------+ <= sc->sc_sram_base_va + CESA_DATA(0)
 * |  struct cesa_sa_data   |
 * +------------------------+
 * |  struct cesa_sa_hdesc  |
 * +------------------------+ <= sc->sc_sram_base_va
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/resource.h>
#include <machine/fdt.h>

#include <dev/fdt/simplebus.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <crypto/sha1.h>
#include <crypto/sha2/sha256.h>
#include <crypto/rijndael/rijndael.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>
#include "cryptodev_if.h"

#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>
#include "cesa.h"

static int	cesa_probe(device_t);
static int	cesa_attach(device_t);
static int	cesa_attach_late(device_t);
static int	cesa_detach(device_t);
static void	cesa_intr(void *);
static int	cesa_probesession(device_t,
    const struct crypto_session_params *);
static int	cesa_newsession(device_t, crypto_session_t,
    const struct crypto_session_params *);
static int	cesa_process(device_t, struct cryptop *, int);

static struct resource_spec cesa_res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_MEMORY, 1, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static device_method_t cesa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cesa_probe),
	DEVMETHOD(device_attach,	cesa_attach),
	DEVMETHOD(device_detach,	cesa_detach),

	/* Crypto device methods */
	DEVMETHOD(cryptodev_probesession, cesa_probesession),
	DEVMETHOD(cryptodev_newsession,	cesa_newsession),
	DEVMETHOD(cryptodev_process,	cesa_process),

	DEVMETHOD_END
};

static driver_t cesa_driver = {
	"cesa",
	cesa_methods,
	sizeof (struct cesa_softc)
};

DRIVER_MODULE(cesa, simplebus, cesa_driver, 0, 0);
MODULE_DEPEND(cesa, crypto, 1, 1, 1);

static void
cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd)
{
#ifdef DEBUG
	device_t dev;

	dev = sc->sc_dev;
	device_printf(dev, "CESA SA Hardware Descriptor:\n");
	device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config);
	device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src);
	device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst);
	device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen);
	device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key);
	device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv);
	device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf);
	device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src);
	device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst);
	device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen);
	device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen);
	device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in);
	device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out);
#endif
}

static void
cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct cesa_dma_mem *cdm;

	if (error)
		return;

	KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1."));
	cdm = arg;
	cdm->cdm_paddr = segs->ds_addr;
}

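/*
 * Allocate a physically contiguous, 32-bit addressable DMA buffer and
 * record its bus address via cesa_alloc_dma_mem_cb().  The tag is
 * created with nsegments == 1, so the callback's single-segment
 * assertion always holds.
 */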
static int
cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm,
    bus_size_t size)
{
	int error;

	KASSERT(cdm->cdm_vaddr == NULL,
	    ("%s(): DMA memory descriptor in use.", __func__));

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    size, 1,				/* maxsize, nsegments */
	    size, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &cdm->cdm_tag);			/* dmat */
	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);

		goto err1;
	}

	error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map);
	if (error) {
		device_printf(sc->sc_dev, "failed to allocate DMA safe"
		    " memory, error %i!\n", error);

		goto err2;
	}

	error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr,
	    size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i\n", error);

		goto err3;
	}

	return (0);
err3:
	bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
err2:
	bus_dma_tag_destroy(cdm->cdm_tag);
err1:
	cdm->cdm_vaddr = NULL;
	return (error);
}

static void
cesa_free_dma_mem(struct cesa_dma_mem *cdm)
{

	bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map);
	bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
	bus_dma_tag_destroy(cdm->cdm_tag);
	cdm->cdm_vaddr = NULL;
}

static void
cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op)
{

	/* Sync only if DMA memory is valid */
	if (cdm->cdm_vaddr != NULL)
		bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op);
}

static void
cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op)
{

	cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op);
	cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op);
	cesa_sync_dma_mem(&sc->sc_requests_cdm, op);
}

static struct cesa_request *
cesa_alloc_request(struct cesa_softc *sc)
{
	struct cesa_request *cr;

	CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests);
	if (!cr)
		return (NULL);

	STAILQ_INIT(&cr->cr_tdesc);
	STAILQ_INIT(&cr->cr_sdesc);

	return (cr);
}

static void
cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr)
{

	/* Free TDMA descriptors assigned to this request */
	CESA_LOCK(sc, tdesc);
	STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc);
	CESA_UNLOCK(sc, tdesc);

	/* Free SA descriptors assigned to this request */
	CESA_LOCK(sc, sdesc);
	STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc);
	CESA_UNLOCK(sc, sdesc);

	/* Unload DMA memory associated with request */
	if (cr->cr_dmap_loaded) {
		bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap);
		cr->cr_dmap_loaded = 0;
	}

	CESA_GENERIC_FREE_LOCKED(sc, cr, requests);
}

static void
cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr)
{

	CESA_LOCK(sc, requests);
	STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq);
	CESA_UNLOCK(sc, requests);
}

static struct cesa_tdma_desc *
cesa_alloc_tdesc(struct cesa_softc *sc)
{
	struct cesa_tdma_desc *ctd;

	CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc);

	if (!ctd)
		device_printf(sc->sc_dev, "TDMA descriptor pool exhausted. "
		    "Consider increasing CESA_TDMA_DESCRIPTORS.\n");

	return (ctd);
}

static struct cesa_sa_desc *
cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr)
{
	struct cesa_sa_desc *csd;

	CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc);
	if (!csd) {
		device_printf(sc->sc_dev, "SA descriptor pool exhausted. "
		    "Consider increasing CESA_SA_DESCRIPTORS.\n");
		return (NULL);
	}

	STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq);

	/* Fill-in SA descriptor with default values */
	csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key);
	csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv);
	csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv);
	csd->csd_cshd->cshd_enc_src = 0;
	csd->csd_cshd->cshd_enc_dst = 0;
	csd->csd_cshd->cshd_enc_dlen = 0;
	csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash);
	csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in);
	csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out);
	csd->csd_cshd->cshd_mac_src = 0;
	csd->csd_cshd->cshd_mac_dlen = 0;

	return (csd);
}

static struct cesa_tdma_desc *
cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src,
    bus_size_t size)
{
	struct cesa_tdma_desc *ctd;

	ctd = cesa_alloc_tdesc(sc);
	if (!ctd)
		return (NULL);

	ctd->ctd_cthd->cthd_dst = dst;
	ctd->ctd_cthd->cthd_src = src;
	ctd->ctd_cthd->cthd_byte_count = size;

	/*
	 * A zero-length copy is a special control packet: its ownership
	 * bit is left clear, so TDMA pauses on it and the SA engine can
	 * process the staged packet (see the CESA_SA_CR_WAIT_FOR_TDMA
	 * cooperation mode set up in cesa_attach_late()).
	 */
	if (size != 0)
		ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED;
	else
		ctd->ctd_cthd->cthd_flags = 0;

	return (ctd);
}

static struct cesa_tdma_desc *
cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
{

	return (cesa_tdma_copy(sc, sc->sc_sram_base_pa +
	    sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr,
	    sizeof(struct cesa_sa_data)));
}

static struct cesa_tdma_desc *
cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
{

	return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base_pa +
	    sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data)));
}

static struct cesa_tdma_desc *
cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd)
{

	return (cesa_tdma_copy(sc, sc->sc_sram_base_pa, csd->csd_cshd_paddr,
	    sizeof(struct cesa_sa_hdesc)));
}

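/*
 * Link a TDMA descriptor at the tail of the request chain, updating
 * the hardware next-descriptor pointer of the previous entry.
 */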
static void
cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd)
{
	struct cesa_tdma_desc *ctd_prev;

	if (!STAILQ_EMPTY(&cr->cr_tdesc)) {
		ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq);
		ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr;
	}

	ctd->ctd_cthd->cthd_next = 0;
	STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq);
}

static int
cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr,
    struct cesa_packet *cp, struct cesa_sa_desc *csd)
{
	struct cesa_tdma_desc *ctd, *tmp;

	/* Copy SA descriptor for this packet */
	ctd = cesa_tdma_copy_sdesc(sc, csd);
	if (!ctd)
		return (ENOMEM);

	cesa_append_tdesc(cr, ctd);

	/* Copy data to be processed */
	STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp)
		cesa_append_tdesc(cr, ctd);
	STAILQ_INIT(&cp->cp_copyin);

	/* Insert control descriptor */
	ctd = cesa_tdma_copy(sc, 0, 0, 0);
	if (!ctd)
		return (ENOMEM);

	cesa_append_tdesc(cr, ctd);

	/* Copy back results */
	STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp)
		cesa_append_tdesc(cr, ctd);
	STAILQ_INIT(&cp->cp_copyout);

	return (0);
}

static void
cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen)
{
	union authctx auth_ctx;
	uint32_t *hout;
	uint32_t *hin;
	int i;

	hin = (uint32_t *)cs->cs_hiv_in;
	hout = (uint32_t *)cs->cs_hiv_out;

	switch (alg) {
	case CRYPTO_SHA1_HMAC:
		hmac_init_ipad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx);
		memcpy(hin, auth_ctx.sha1ctx.h.b32,
		    sizeof(auth_ctx.sha1ctx.h.b32));
		hmac_init_opad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx);
		memcpy(hout, auth_ctx.sha1ctx.h.b32,
		    sizeof(auth_ctx.sha1ctx.h.b32));
		break;
	case CRYPTO_SHA2_256_HMAC:
		hmac_init_ipad(&auth_hash_hmac_sha2_256, mkey, mklen,
		    &auth_ctx);
		memcpy(hin, auth_ctx.sha256ctx.state,
		    sizeof(auth_ctx.sha256ctx.state));
		hmac_init_opad(&auth_hash_hmac_sha2_256, mkey, mklen,
		    &auth_ctx);
		memcpy(hout, auth_ctx.sha256ctx.state,
		    sizeof(auth_ctx.sha256ctx.state));
		break;
	default:
		panic("shouldn't get here");
	}

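	/* The engine expects the precomputed ipad/opad state big-endian. */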
	for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) {
		hin[i] = htobe32(hin[i]);
		hout[i] = htobe32(hout[i]);
	}
	explicit_bzero(&auth_ctx, sizeof(auth_ctx));
}

static int
cesa_prep_aes_key(struct cesa_session *cs,
    const struct crypto_session_params *csp)
{
	uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
	uint32_t *dkey;
	int i;

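	/*
	 * Expand the encryption key schedule; the decryption key handed
	 * to the engine (cs_aes_dkey, used in cesa_create_chain()) is
	 * built from the last round keys of this schedule.
	 */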
	rijndaelKeySetupEnc(ek, cs->cs_key, csp->csp_cipher_klen * 8);

	cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK;
	dkey = (uint32_t *)cs->cs_aes_dkey;

	switch (csp->csp_cipher_klen) {
	case 16:
		cs->cs_config |= CESA_CSH_AES_KLEN_128;
		for (i = 0; i < 4; i++)
			*dkey++ = htobe32(ek[4 * 10 + i]);
		break;
	case 24:
		cs->cs_config |= CESA_CSH_AES_KLEN_192;
		for (i = 0; i < 4; i++)
			*dkey++ = htobe32(ek[4 * 12 + i]);
		for (i = 0; i < 2; i++)
			*dkey++ = htobe32(ek[4 * 11 + 2 + i]);
		break;
	case 32:
		cs->cs_config |= CESA_CSH_AES_KLEN_256;
		for (i = 0; i < 4; i++)
			*dkey++ = htobe32(ek[4 * 14 + i]);
		for (i = 0; i < 4; i++)
			*dkey++ = htobe32(ek[4 * 13 + i]);
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

static void
cesa_start_packet(struct cesa_packet *cp, unsigned int size)
{

	cp->cp_size = size;
	cp->cp_offset = 0;
	STAILQ_INIT(&cp->cp_copyin);
	STAILQ_INIT(&cp->cp_copyout);
}

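/*
 * Stage up to cp_size bytes of a DMA segment into the SRAM DATA region
 * and queue the matching copy-out descriptor.  Returns the number of
 * bytes consumed, or a negative errno if descriptor allocation fails.
 */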
static int
cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp,
    bus_dma_segment_t *seg)
{
	struct cesa_tdma_desc *ctd;
	unsigned int bsize;

	/* Calculate size of block copy */
	bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset);

	if (bsize > 0) {
		ctd = cesa_tdma_copy(sc, sc->sc_sram_base_pa +
		    CESA_DATA(cp->cp_offset), seg->ds_addr, bsize);
		if (!ctd)
			return (-ENOMEM);

		STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq);

		ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base_pa +
		    CESA_DATA(cp->cp_offset), bsize);
		if (!ctd)
			return (-ENOMEM);

		STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq);

		seg->ds_len -= bsize;
		seg->ds_addr += bsize;
		cp->cp_offset += bsize;
	}

	return (bsize);
}

static void
cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	unsigned int mpsize, fragmented;
	unsigned int mlen, mskip, tmlen;
	struct cesa_chain_info *cci;
	unsigned int elen, eskip;
	unsigned int skip, len;
	struct cesa_sa_desc *csd;
	struct cesa_request *cr;
	struct cryptop *crp;
	struct cesa_softc *sc;
	struct cesa_packet cp;
	bus_dma_segment_t seg;
	uint32_t config;
	int size;

	cci = arg;
	sc = cci->cci_sc;
	cr = cci->cci_cr;
	crp = cr->cr_crp;

	if (error) {
		cci->cci_error = error;
		return;
	}

	/*
	 * Only do a combined op if the AAD is adjacent to the payload
	 * and the AAD length is a multiple of the IV length.  The
	 * checks against 'config' are to avoid recursing when the
	 * logic below invokes separate operations.
	 */
	config = cci->cci_config;
	if (((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC ||
	    (config & CESA_CSHD_OP_MASK) == CESA_CSHD_ENC_AND_MAC) &&
	    crp->crp_aad_length != 0 &&
	    (crp->crp_aad_length & (cr->cr_cs->cs_ivlen - 1)) != 0) {
		/*
		 * Data alignment in the request does not meet CESA
		 * requirements for combined encryption/decryption and
		 * hashing.  Split the request into separate operations
		 * and process them one by one.
		 */
		if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) {
			config &= ~CESA_CSHD_OP_MASK;

			cci->cci_config = config | CESA_CSHD_MAC;
			cesa_create_chain_cb(cci, segs, nseg, 0);

			cci->cci_config = config | CESA_CSHD_ENC;
			cesa_create_chain_cb(cci, segs, nseg, 0);
		} else {
			config &= ~CESA_CSHD_OP_MASK;

			cci->cci_config = config | CESA_CSHD_ENC;
			cesa_create_chain_cb(cci, segs, nseg, 0);

			cci->cci_config = config | CESA_CSHD_MAC;
			cesa_create_chain_cb(cci, segs, nseg, 0);
		}

		return;
	}

	mskip = mlen = eskip = elen = 0;

	if (crp->crp_aad_length == 0) {
		skip = crp->crp_payload_start;
		len = crp->crp_payload_length;
		switch (config & CESA_CSHD_OP_MASK) {
		case CESA_CSHD_ENC:
			eskip = skip;
			elen = len;
			break;
		case CESA_CSHD_MAC:
			mskip = skip;
			mlen = len;
			break;
		default:
			eskip = skip;
			elen = len;
			mskip = skip;
			mlen = len;
			break;
		}
	} else {
		/*
		 * For an encryption-only separate request, only
		 * process the payload.  For combined requests and
		 * hash-only requests, process the entire region.
		 */
		switch (config & CESA_CSHD_OP_MASK) {
		case CESA_CSHD_ENC:
			skip = crp->crp_payload_start;
			len = crp->crp_payload_length;
			eskip = skip;
			elen = len;
			break;
		case CESA_CSHD_MAC:
			skip = crp->crp_aad_start;
			len = crp->crp_aad_length + crp->crp_payload_length;
			mskip = skip;
			mlen = len;
			break;
		default:
			skip = crp->crp_aad_start;
			len = crp->crp_aad_length + crp->crp_payload_length;
			mskip = skip;
			mlen = len;
			eskip = crp->crp_payload_start;
			elen = crp->crp_payload_length;
			break;
		}
	}

	tmlen = mlen;
	fragmented = 0;
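	/*
	 * cs_ivlen and cs_mblen are powers of two, so this mask rounds
	 * the maximum packet size down to a multiple of both.
	 */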
	mpsize = CESA_MAX_PACKET_SIZE;
	mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1));

	/* Start first packet in chain */
	cesa_start_packet(&cp, MIN(mpsize, len));

	while (nseg-- && len > 0) {
		seg = *(segs++);

		/*
		 * Skip data in buffer on which neither ENC nor MAC operation
		 * is requested.
		 */
		if (skip > 0) {
			size = MIN(skip, seg.ds_len);
			skip -= size;

			seg.ds_addr += size;
			seg.ds_len -= size;

			if (eskip > 0)
				eskip -= size;

			if (mskip > 0)
				mskip -= size;

			if (seg.ds_len == 0)
				continue;
		}

		while (1) {
			/*
			 * Fill in current packet with data. Break if there is
			 * no more data in current DMA segment or an error
			 * occurred.
			 */
			size = cesa_fill_packet(sc, &cp, &seg);
			if (size <= 0) {
				error = -size;
				break;
			}

			len -= size;

			/* If packet is full, append it to the chain */
			if (cp.cp_size == cp.cp_offset) {
				csd = cesa_alloc_sdesc(sc, cr);
				if (!csd) {
					error = ENOMEM;
					break;
				}

				/* Create SA descriptor for this packet */
				csd->csd_cshd->cshd_config = cci->cci_config;
				csd->csd_cshd->cshd_mac_total_dlen = tmlen;

				/*
				 * Enable fragmentation if request will not fit
				 * into one packet.
				 */
				if (len > 0) {
					if (!fragmented) {
						fragmented = 1;
						csd->csd_cshd->cshd_config |=
						    CESA_CSHD_FRAG_FIRST;
					} else
						csd->csd_cshd->cshd_config |=
						    CESA_CSHD_FRAG_MIDDLE;
				} else if (fragmented)
					csd->csd_cshd->cshd_config |=
					    CESA_CSHD_FRAG_LAST;

				if (eskip < cp.cp_size && elen > 0) {
					csd->csd_cshd->cshd_enc_src =
					    CESA_DATA(eskip);
					csd->csd_cshd->cshd_enc_dst =
					    CESA_DATA(eskip);
					csd->csd_cshd->cshd_enc_dlen =
					    MIN(elen, cp.cp_size - eskip);
				}

				if (mskip < cp.cp_size && mlen > 0) {
					csd->csd_cshd->cshd_mac_src =
					    CESA_DATA(mskip);
					csd->csd_cshd->cshd_mac_dlen =
					    MIN(mlen, cp.cp_size - mskip);
				}

				elen -= csd->csd_cshd->cshd_enc_dlen;
				eskip -= MIN(eskip, cp.cp_size);
				mlen -= csd->csd_cshd->cshd_mac_dlen;
				mskip -= MIN(mskip, cp.cp_size);

				cesa_dump_cshd(sc, csd->csd_cshd);

				/* Append packet to the request */
				error = cesa_append_packet(sc, cr, &cp, csd);
				if (error)
					break;

				/* Start a new packet, as current is full */
				cesa_start_packet(&cp, MIN(mpsize, len));
			}
		}

		if (error)
			break;
	}

	if (error) {
		/*
		 * Move all allocated resources to the request. They will be
		 * freed later.
		 */
		STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin);
		STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout);
		cci->cci_error = error;
	}
}

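/*
 * Build the TDMA/SA descriptor chain for a request: copy in the session
 * metadata (keys and IVs), load the data buffer so that
 * cesa_create_chain_cb() can emit the per-packet descriptors, and
 * finally copy the metadata (IV, hash) back out of SRAM.
 */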
static int
cesa_create_chain(struct cesa_softc *sc,
    const struct crypto_session_params *csp, struct cesa_request *cr)
{
	struct cesa_chain_info cci;
	struct cesa_tdma_desc *ctd;
	uint32_t config;
	int error;

	error = 0;
	CESA_LOCK_ASSERT(sc, sessions);

	/* Create request metadata */
	if (csp->csp_cipher_klen != 0) {
		if (csp->csp_cipher_alg == CRYPTO_AES_CBC &&
		    !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op))
			memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey,
			    csp->csp_cipher_klen);
		else
			memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key,
			    csp->csp_cipher_klen);
	}

	if (csp->csp_auth_klen != 0) {
		memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in,
		    CESA_MAX_HASH_LEN);
		memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out,
		    CESA_MAX_HASH_LEN);
	}

	ctd = cesa_tdma_copyin_sa_data(sc, cr);
	if (!ctd)
		return (ENOMEM);

	cesa_append_tdesc(cr, ctd);

	/* Prepare SA configuration */
	config = cr->cr_cs->cs_config;

	if (csp->csp_cipher_alg != 0 &&
	    !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op))
		config |= CESA_CSHD_DECRYPT;
	switch (csp->csp_mode) {
	case CSP_MODE_CIPHER:
		config |= CESA_CSHD_ENC;
		break;
	case CSP_MODE_DIGEST:
		config |= CESA_CSHD_MAC;
		break;
	case CSP_MODE_ETA:
		config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC :
		    CESA_CSHD_ENC_AND_MAC;
		break;
	}

	/* Create data packets */
	cci.cci_sc = sc;
	cci.cci_cr = cr;
	cci.cci_config = config;
	cci.cci_error = 0;

	error = bus_dmamap_load_crp(sc->sc_data_dtag, cr->cr_dmap, cr->cr_crp,
	    cesa_create_chain_cb, &cci, BUS_DMA_NOWAIT);

	if (!error)
		cr->cr_dmap_loaded = 1;

	if (cci.cci_error)
		error = cci.cci_error;

	if (error)
		return (error);

	/* Read back request metadata */
	ctd = cesa_tdma_copyout_sa_data(sc, cr);
	if (!ctd)
		return (ENOMEM);

	cesa_append_tdesc(cr, ctd);

	return (0);
}

static void
cesa_execute(struct cesa_softc *sc)
{
	struct cesa_tdma_desc *prev_ctd, *ctd;
	struct cesa_request *prev_cr, *cr;

	CESA_LOCK(sc, requests);

	/*
	 * If ready list is empty, there is nothing to execute. If queued list
	 * is not empty, the hardware is busy and we cannot start another
	 * execution.
	 */
	if (STAILQ_EMPTY(&sc->sc_ready_requests) ||
	    !STAILQ_EMPTY(&sc->sc_queued_requests)) {
		CESA_UNLOCK(sc, requests);
		return;
	}

	/* Move all ready requests to queued list */
	STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests);
	STAILQ_INIT(&sc->sc_ready_requests);

	/* Create one execution chain from all requests on the list */
	if (STAILQ_FIRST(&sc->sc_queued_requests) !=
	    STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) {
		prev_cr = NULL;
		cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) {
			if (prev_cr) {
				ctd = STAILQ_FIRST(&cr->cr_tdesc);
				prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc,
				    cesa_tdma_desc, ctd_stq);

				prev_ctd->ctd_cthd->cthd_next =
				    ctd->ctd_cthd_paddr;
			}

			prev_cr = cr;
		}

		cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
	}

	/* Start chain execution in hardware */
	cr = STAILQ_FIRST(&sc->sc_queued_requests);
	ctd = STAILQ_FIRST(&cr->cr_tdesc);

	CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr);

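	/*
	 * Armada 38x parts (88F6810/20/28) are the only ones here with
	 * SHA-256 support (see cesa_auth_supported()), so the activation
	 * write also sets the SHA2 bit on them.
	 */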
	if (sc->sc_soc_id == MV_DEV_88F6828 ||
	    sc->sc_soc_id == MV_DEV_88F6820 ||
	    sc->sc_soc_id == MV_DEV_88F6810)
		CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE |
		    CESA_SA_CMD_SHA2);
	else
		CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE);

	CESA_UNLOCK(sc, requests);
}

static int
cesa_setup_sram(struct cesa_softc *sc)
{
	phandle_t sram_node;
	ihandle_t sram_ihandle;
	pcell_t sram_handle, sram_reg[2];
	void *sram_va;
	int rv;

	rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle",
	    (void *)&sram_handle, sizeof(sram_handle));
	if (rv <= 0)
		return (rv);

	sram_ihandle = (ihandle_t)sram_handle;
	sram_node = OF_instance_to_package(sram_ihandle);

	rv = OF_getencprop(sram_node, "reg", (void *)sram_reg,
	    sizeof(sram_reg));
	if (rv <= 0)
		return (rv);

	sc->sc_sram_base_pa = sram_reg[0];
	/* Store SRAM size to be able to unmap in detach() */
	sc->sc_sram_size = sram_reg[1];

	if (sc->sc_soc_id != MV_DEV_88F6828 &&
	    sc->sc_soc_id != MV_DEV_88F6820 &&
	    sc->sc_soc_id != MV_DEV_88F6810)
		return (0);

	/* SRAM memory was not mapped in platform_sram_devmap(), map it now */
	sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size);
	if (sram_va == NULL)
		return (ENOMEM);
	sc->sc_sram_base_va = sram_va;

	return (0);
}

/*
 * Function: device_from_node
 * Returns the device_t corresponding to the given phandle_t.
 * Parameters:
 * root - the device at which to start the search; if NULL, the
 *        "root0" device is used as the root.
 * node - the OFW node to match each device against.
 */
static device_t
device_from_node(device_t root, phandle_t node)
{
	device_t *children, retval;
	int nkid, i;

	/* A node handle of -1 matches nothing */
	if (node == -1)
		return (NULL);

	if (root == NULL)
		/* Get root of device tree */
		if ((root = device_lookup_by_name("root0")) == NULL)
			return (NULL);

	if (device_get_children(root, &children, &nkid) != 0)
		return (NULL);

	retval = NULL;
	for (i = 0; i < nkid; i++) {
		/* Check if device and node match */
		if (OFW_BUS_GET_NODE(root, children[i]) == node) {
			retval = children[i];
			break;
		}
		/* or go deeper */
		if ((retval = device_from_node(children[i], node)) != NULL)
			break;
	}
	free(children, M_TEMP);

	return (retval);
}

static int
cesa_setup_sram_armada(struct cesa_softc *sc)
{
	phandle_t sram_node;
	ihandle_t sram_ihandle;
	pcell_t sram_handle[2];
	void *sram_va;
	int rv, j;
	struct resource_list rl;
	struct resource_list_entry *rle;
	struct simplebus_softc *ssc;
	device_t sdev;

	/* Get refs to SRAMS from CESA node */
	rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "marvell,crypto-srams",
	    (void *)sram_handle, sizeof(sram_handle));
	if (rv <= 0)
		return (rv);

	if (sc->sc_cesa_engine_id >= 2)
		return (ENXIO);

	/* Get SRAM node on the basis of sc_cesa_engine_id */
	sram_ihandle = (ihandle_t)sram_handle[sc->sc_cesa_engine_id];
	sram_node = OF_instance_to_package(sram_ihandle);

	/* Get device_t of simplebus (sram_node parent) */
	sdev = device_from_node(NULL, OF_parent(sram_node));
	if (!sdev)
		return (ENXIO);

	ssc = device_get_softc(sdev);

	resource_list_init(&rl);
	/* Parse reg property to resource list */
	ofw_bus_reg_to_rl(sdev, sram_node, ssc->acells,
	    ssc->scells, &rl);

	/* We expect only one resource */
	rle = resource_list_find(&rl, SYS_RES_MEMORY, 0);
	if (rle == NULL)
		return (ENXIO);

	/* Remap through ranges property */
	for (j = 0; j < ssc->nranges; j++) {
		if (rle->start >= ssc->ranges[j].bus &&
		    rle->end < ssc->ranges[j].bus + ssc->ranges[j].size) {
			rle->start -= ssc->ranges[j].bus;
			rle->start += ssc->ranges[j].host;
			rle->end -= ssc->ranges[j].bus;
			rle->end += ssc->ranges[j].host;
		}
	}

	sc->sc_sram_base_pa = rle->start;
	sc->sc_sram_size = rle->count;

	/* SRAM memory was not mapped in platform_sram_devmap(), map it now */
	sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size);
	if (sram_va == NULL)
		return (ENOMEM);
	sc->sc_sram_base_va = sram_va;

	return (0);
}

struct ofw_compat_data cesa_devices[] = {
	{ "mrvl,cesa", (uintptr_t)true },
	{ "marvell,armada-38x-crypto", (uintptr_t)true },
	{ NULL, 0 }
};

static int
cesa_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, cesa_devices)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "Marvell Cryptographic Engine and Security "
	    "Accelerator");

	return (BUS_PROBE_DEFAULT);
}

static int
cesa_attach(device_t dev)
{
	static int engine_idx = 0;
	struct simplebus_devinfo *ndi;
	struct resource_list *rl;
	struct cesa_softc *sc;

	if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto"))
		return (cesa_attach_late(dev));

	/*
	 * Get the simplebus_devinfo which contains the resource list
	 * filled with addresses and interrupts read from the FDT.
	 * Correct it by splitting the resources between the two engines.
	 */
	if ((ndi = device_get_ivars(dev)) == NULL)
		return (ENXIO);

	rl = &ndi->rl;

	switch (engine_idx) {
	case 0:
		/* Update regs values */
		resource_list_add(rl, SYS_RES_MEMORY, 0, CESA0_TDMA_ADDR,
		    CESA0_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE);
		resource_list_add(rl, SYS_RES_MEMORY, 1, CESA0_CESA_ADDR,
		    CESA0_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE);

		/* Remove unused interrupt */
		resource_list_delete(rl, SYS_RES_IRQ, 1);
		break;

	case 1:
		/* Update regs values */
		resource_list_add(rl, SYS_RES_MEMORY, 0, CESA1_TDMA_ADDR,
		    CESA1_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE);
		resource_list_add(rl, SYS_RES_MEMORY, 1, CESA1_CESA_ADDR,
		    CESA1_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE);

		/* Remove unused interrupt */
		resource_list_delete(rl, SYS_RES_IRQ, 0);
		resource_list_find(rl, SYS_RES_IRQ, 1)->rid = 0;
		break;

	default:
		device_printf(dev, "Bad cesa engine_idx\n");
		return (ENXIO);
	}

	sc = device_get_softc(dev);
	sc->sc_cesa_engine_id = engine_idx;

	/*
	 * Call simplebus_add_device() only once.  It creates a second
	 * cesa driver instance with the same FDT node as the first one.
	 * When the second instance reaches this function, it is
	 * configured to use the second cesa engine.
	 */
	if (engine_idx == 0)
		simplebus_add_device(device_get_parent(dev),
		    ofw_bus_get_node(dev), 0, "cesa", 1, NULL);

	engine_idx++;

	return (cesa_attach_late(dev));
}

static int
cesa_attach_late(device_t dev)
{
	struct cesa_softc *sc;
	uint32_t d, r, val;
	int error;
	int i;

	sc = device_get_softc(dev);
	sc->sc_blocked = 0;
	sc->sc_error = 0;
	sc->sc_dev = dev;

	soc_id(&d, &r);

	switch (d) {
	case MV_DEV_88F6281:
	case MV_DEV_88F6282:
		/* Check if CESA peripheral device has power turned on */
		if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) ==
		    CPU_PM_CTRL_CRYPTO) {
			device_printf(dev, "not powered on\n");
			return (ENXIO);
		}
		sc->sc_tperr = 0;
		break;
	case MV_DEV_88F6828:
	case MV_DEV_88F6820:
	case MV_DEV_88F6810:
		sc->sc_tperr = 0;
		break;
	case MV_DEV_MV78100:
	case MV_DEV_MV78100_Z0:
		/* Check if CESA peripheral device has power turned on */
		if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) !=
		    CPU_PM_CTRL_CRYPTO) {
			device_printf(dev, "not powered on\n");
			return (ENXIO);
		}
		sc->sc_tperr = CESA_ICR_TPERR;
		break;
	default:
		return (ENXIO);
	}

	sc->sc_soc_id = d;

	/* Initialize mutexes */
	mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev),
	    "CESA Shared Data", MTX_DEF);
	mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev),
	    "CESA TDMA Descriptors Pool", MTX_DEF);
	mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev),
	    "CESA SA Descriptors Pool", MTX_DEF);
	mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev),
	    "CESA Requests Pool", MTX_DEF);
	mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
	    "CESA Sessions Pool", MTX_DEF);

	/* Allocate I/O and IRQ resources */
	error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		goto err0;
	}

	/* Acquire SRAM base address */
	if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto"))
		error = cesa_setup_sram(sc);
	else
		error = cesa_setup_sram_armada(sc);

	if (error) {
		device_printf(dev, "could not setup SRAM\n");
		goto err1;
	}

	/* Setup interrupt handler */
	error = bus_setup_intr(dev, sc->sc_res[RES_CESA_IRQ], INTR_TYPE_NET |
	    INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie));
	if (error) {
		device_printf(dev, "could not setup engine completion irq\n");
		goto err2;
	}

	/* Create DMA tag for processed data */
	error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    CESA_MAX_REQUEST_SIZE,		/* maxsize */
	    CESA_MAX_FRAGMENTS,			/* nsegments */
	    CESA_MAX_REQUEST_SIZE, 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->sc_data_dtag);			/* dmat */
	if (error)
		goto err3;

	/* Initialize data structures: TDMA Descriptors Pool */
	error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm,
	    CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc));
	if (error)
		goto err4;

	STAILQ_INIT(&sc->sc_free_tdesc);
	for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) {
		sc->sc_tdesc[i].ctd_cthd =
		    (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i;
		sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr +
		    (i * sizeof(struct cesa_tdma_hdesc));
		STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i],
		    ctd_stq);
	}

	/* Initialize data structures: SA Descriptors Pool */
	error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm,
	    CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc));
	if (error)
		goto err5;

	STAILQ_INIT(&sc->sc_free_sdesc);
	for (i = 0; i < CESA_SA_DESCRIPTORS; i++) {
		sc->sc_sdesc[i].csd_cshd =
		    (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i;
		sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr +
		    (i * sizeof(struct cesa_sa_hdesc));
		STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i],
		    csd_stq);
	}

	/* Initialize data structures: Requests Pool */
	error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm,
	    CESA_REQUESTS * sizeof(struct cesa_sa_data));
	if (error)
		goto err6;

	STAILQ_INIT(&sc->sc_free_requests);
	STAILQ_INIT(&sc->sc_ready_requests);
	STAILQ_INIT(&sc->sc_queued_requests);
	for (i = 0; i < CESA_REQUESTS; i++) {
		sc->sc_requests[i].cr_csd =
		    (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i;
		sc->sc_requests[i].cr_csd_paddr =
		    sc->sc_requests_cdm.cdm_paddr +
		    (i * sizeof(struct cesa_sa_data));

		/* Preallocate DMA maps; on failure destroy the ones made */
		error = bus_dmamap_create(sc->sc_data_dtag, 0,
		    &sc->sc_requests[i].cr_dmap);
		if (error) {
			while (i-- > 0)
				bus_dmamap_destroy(sc->sc_data_dtag,
				    sc->sc_requests[i].cr_dmap);

			goto err7;
		}

		STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i],
		    cr_stq);
	}

	/*
	 * Initialize TDMA:
	 * - Burst limit: 128 bytes,
	 * - Outstanding reads enabled,
	 * - No byte-swap.
	 */
	val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 |
	    CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE;

	if (sc->sc_soc_id == MV_DEV_88F6828 ||
	    sc->sc_soc_id == MV_DEV_88F6820 ||
	    sc->sc_soc_id == MV_DEV_88F6810)
		val |= CESA_TDMA_NUM_OUTSTAND;

	CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val);

	/*
	 * Initialize SA:
	 * - SA descriptor is present at beginning of CESA SRAM,
	 * - Multi-packet chain mode,
	 * - Cooperation with TDMA enabled.
	 */
	CESA_REG_WRITE(sc, CESA_SA_DPR, 0);
	CESA_REG_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA |
	    CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE);

	/* Unmask interrupts */
	CESA_REG_WRITE(sc, CESA_ICR, 0);
	CESA_REG_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr);
	CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0);
	CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS |
	    CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT |
	    CESA_TDMA_EMR_DATA_ERROR);

	/* Register in OCF */
	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct cesa_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto err8;
	}

	return (0);
err8:
	for (i = 0; i < CESA_REQUESTS; i++)
		bus_dmamap_destroy(sc->sc_data_dtag,
		    sc->sc_requests[i].cr_dmap);
err7:
	cesa_free_dma_mem(&sc->sc_requests_cdm);
err6:
	cesa_free_dma_mem(&sc->sc_sdesc_cdm);
err5:
	cesa_free_dma_mem(&sc->sc_tdesc_cdm);
err4:
	bus_dma_tag_destroy(sc->sc_data_dtag);
err3:
	bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie);
err2:
	if (sc->sc_soc_id == MV_DEV_88F6828 ||
	    sc->sc_soc_id == MV_DEV_88F6820 ||
	    sc->sc_soc_id == MV_DEV_88F6810)
		pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);
err1:
	bus_release_resources(dev, cesa_res_spec, sc->sc_res);
err0:
	mtx_destroy(&sc->sc_sessions_lock);
	mtx_destroy(&sc->sc_requests_lock);
	mtx_destroy(&sc->sc_sdesc_lock);
	mtx_destroy(&sc->sc_tdesc_lock);
	mtx_destroy(&sc->sc_sc_lock);
	return (ENXIO);
}

static int
cesa_detach(device_t dev)
{
	struct cesa_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* TODO: Wait for queued requests completion before shutdown. */

	/* Mask interrupts */
	CESA_REG_WRITE(sc, CESA_ICM, 0);
	CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, 0);

	/* Unregister from OCF */
	crypto_unregister_all(sc->sc_cid);

	/* Free DMA Maps */
	for (i = 0; i < CESA_REQUESTS; i++)
		bus_dmamap_destroy(sc->sc_data_dtag,
		    sc->sc_requests[i].cr_dmap);

	/* Free DMA Memory */
	cesa_free_dma_mem(&sc->sc_requests_cdm);
	cesa_free_dma_mem(&sc->sc_sdesc_cdm);
	cesa_free_dma_mem(&sc->sc_tdesc_cdm);

	/* Free DMA Tag */
	bus_dma_tag_destroy(sc->sc_data_dtag);

	/* Stop interrupt */
	bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie);

	/* Release I/O and IRQ resources */
	bus_release_resources(dev, cesa_res_spec, sc->sc_res);

	/* Unmap SRAM memory */
	if (sc->sc_soc_id == MV_DEV_88F6828 ||
	    sc->sc_soc_id == MV_DEV_88F6820 ||
	    sc->sc_soc_id == MV_DEV_88F6810)
		pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);

	/* Destroy mutexes */
	mtx_destroy(&sc->sc_sessions_lock);
	mtx_destroy(&sc->sc_requests_lock);
	mtx_destroy(&sc->sc_sdesc_lock);
	mtx_destroy(&sc->sc_tdesc_lock);
	mtx_destroy(&sc->sc_sc_lock);

	return (0);
}

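/*
 * Interrupt handler: acknowledge TDMA/CESA status, report engine
 * errors, detach the batch of completed requests, kick the next batch,
 * then finish each completed crypto op (copying out or verifying the
 * digest).
 */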
static void
cesa_intr(void *arg)
{
	STAILQ_HEAD(, cesa_request) requests;
	struct cesa_request *cr, *tmp;
	struct cesa_softc *sc;
	uint32_t ecr, icr;
	uint8_t hash[HASH_MAX_LEN];
	int blocked;

	sc = arg;

	/* Ack interrupt */
	ecr = CESA_TDMA_READ(sc, CESA_TDMA_ECR);
	CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0);
	icr = CESA_REG_READ(sc, CESA_ICR);
	CESA_REG_WRITE(sc, CESA_ICR, 0);

	/* Check for TDMA errors */
	if (ecr & CESA_TDMA_ECR_MISS) {
		device_printf(sc->sc_dev, "TDMA Miss error detected!\n");
		sc->sc_error = EIO;
	}

	if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) {
		device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n");
		sc->sc_error = EIO;
	}

	if (ecr & CESA_TDMA_ECR_BOTH_HIT) {
		device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n");
		sc->sc_error = EIO;
	}

	if (ecr & CESA_TDMA_ECR_DATA_ERROR) {
		device_printf(sc->sc_dev, "TDMA Data error detected!\n");
		sc->sc_error = EIO;
	}

	/* Check for CESA errors */
	if (icr & sc->sc_tperr) {
		device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n");
		sc->sc_error = EIO;
	}

	/* If there is nothing more to do, return */
	if ((icr & CESA_ICR_ACCTDMA) == 0)
		return;

	/* Get all finished requests */
	CESA_LOCK(sc, requests);
	STAILQ_INIT(&requests);
	STAILQ_CONCAT(&requests, &sc->sc_queued_requests);
	STAILQ_INIT(&sc->sc_queued_requests);
	CESA_UNLOCK(sc, requests);

	/* Execute all ready requests */
	cesa_execute(sc);

	/* Process completed requests */
	cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) {
		bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cr->cr_crp->crp_etype = sc->sc_error;
		if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0) {
			if (cr->cr_crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
				crypto_copydata(cr->cr_crp,
				    cr->cr_crp->crp_digest_start,
				    cr->cr_cs->cs_hlen, hash);
				if (timingsafe_bcmp(hash, cr->cr_csd->csd_hash,
				    cr->cr_cs->cs_hlen) != 0)
					cr->cr_crp->crp_etype = EBADMSG;
			} else
				crypto_copyback(cr->cr_crp,
				    cr->cr_crp->crp_digest_start,
				    cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash);
		}
		crypto_done(cr->cr_crp);
		cesa_free_request(sc, cr);
	}

	cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	sc->sc_error = 0;

	/* Unblock driver if it ran out of resources */
	CESA_LOCK(sc, sc);
	blocked = sc->sc_blocked;
	sc->sc_blocked = 0;
	CESA_UNLOCK(sc, sc);

	if (blocked)
		crypto_unblock(sc->sc_cid, blocked);
}

static bool
cesa_cipher_supported(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	default:
		return (false);
	}

	if (csp->csp_cipher_klen > CESA_MAX_KEY_LEN)
		return (false);

	return (true);
}

static bool
cesa_auth_supported(struct cesa_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA2_256_HMAC:
		if (!(sc->sc_soc_id == MV_DEV_88F6828 ||
		    sc->sc_soc_id == MV_DEV_88F6820 ||
		    sc->sc_soc_id == MV_DEV_88F6810))
			return (false);
		/* FALLTHROUGH */
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		break;
	default:
		return (false);
	}

	if (csp->csp_auth_klen > CESA_MAX_MKEY_LEN)
		return (false);

	return (true);
}

static int
cesa_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct cesa_softc *sc;

	sc = device_get_softc(dev);
	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!cesa_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!cesa_cipher_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_ETA:
		if (!cesa_auth_supported(sc, csp) ||
		    !cesa_cipher_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (CRYPTODEV_PROBE_HARDWARE);
}

static int
cesa_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct cesa_session *cs;
	int error;

	error = 0;

	/* Allocate session */
	cs = crypto_get_driver_session(cses);

	/* Prepare CESA configuration */
	cs->cs_config = 0;
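	/*
	 * Default the IV and MAC block lengths to 1 so the packet-size
	 * alignment mask in cesa_create_chain_cb() is a no-op when the
	 * session has no cipher or digest.
	 */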
	cs->cs_ivlen = 1;
	cs->cs_mblen = 1;

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC;
		cs->cs_ivlen = AES_BLOCK_LEN;
		break;
	}

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1:
		cs->cs_mblen = 1;
		cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN :
		    csp->csp_auth_mlen;
		cs->cs_config |= CESA_CSHD_SHA1;
		break;
	case CRYPTO_SHA1_HMAC:
		cs->cs_mblen = SHA1_BLOCK_LEN;
		cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN :
		    csp->csp_auth_mlen;
		cs->cs_config |= CESA_CSHD_SHA1_HMAC;
		if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
			cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
		break;
	case CRYPTO_SHA2_256_HMAC:
		cs->cs_mblen = SHA2_256_BLOCK_LEN;
		cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA2_256_HASH_LEN :
		    csp->csp_auth_mlen;
		cs->cs_config |= CESA_CSHD_SHA2_256_HMAC;
		break;
	}

	/* Save cipher key */
	if (csp->csp_cipher_key != NULL) {
		memcpy(cs->cs_key, csp->csp_cipher_key,
		    csp->csp_cipher_klen);
		if (csp->csp_cipher_alg == CRYPTO_AES_CBC)
			error = cesa_prep_aes_key(cs, csp);
	}

	/* Save digest key */
	if (csp->csp_auth_key != NULL)
		cesa_set_mkey(cs, csp->csp_auth_alg, csp->csp_auth_key,
		    csp->csp_auth_klen);

	return (error);
}

static int
cesa_process(device_t dev, struct cryptop *crp, int hint)
{
	const struct crypto_session_params *csp;
	struct cesa_request *cr;
	struct cesa_session *cs;
	struct cesa_softc *sc;
	int error;

	sc = device_get_softc(dev);
	error = 0;

	cs = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	/* Check and parse input */
	if (crypto_buffer_len(&crp->crp_buf) > CESA_MAX_REQUEST_SIZE) {
		crp->crp_etype = E2BIG;
		crypto_done(crp);
		return (0);
	}

	/*
	 * For requests with AAD, only requests where the AAD is
	 * immediately adjacent to the payload are supported.
	 */
	if (crp->crp_aad_length != 0 &&
	    (crp->crp_aad_start + crp->crp_aad_length) !=
	    crp->crp_payload_start) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return (0);
	}

	/*
	 * Get a request descriptor.  Block the driver if there are no
	 * free descriptors in the pool.
	 */
	cr = cesa_alloc_request(sc);
	if (!cr) {
		CESA_LOCK(sc, sc);
		sc->sc_blocked = CRYPTO_SYMQ;
		CESA_UNLOCK(sc, sc);
		return (ERESTART);
	}

	/* Prepare request */
	cr->cr_crp = crp;
	cr->cr_cs = cs;

	CESA_LOCK(sc, sessions);
	cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (csp->csp_cipher_alg != 0)
		crypto_read_iv(crp, cr->cr_csd->csd_iv);

	if (crp->crp_cipher_key != NULL) {
		memcpy(cs->cs_key, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
		if (csp->csp_cipher_alg == CRYPTO_AES_CBC)
			error = cesa_prep_aes_key(cs, csp);
	}

	if (!error && crp->crp_auth_key != NULL)
		cesa_set_mkey(cs, csp->csp_auth_alg, crp->crp_auth_key,
		    csp->csp_auth_klen);

	/* Convert request to chain of TDMA and SA descriptors */
	if (!error)
		error = cesa_create_chain(sc, csp, cr);

	cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CESA_UNLOCK(sc, sessions);

	if (error) {
		cesa_free_request(sc, cr);
		crp->crp_etype = error;
		crypto_done(crp);
		return (0);
	}

	bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* Enqueue request to execution */
	cesa_enqueue_request(sc, cr);

	/* Start execution, if we have no more requests in queue */
	if ((hint & CRYPTO_HINT_MORE) == 0)
		cesa_execute(sc);

	return (0);
}