/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2009-2011 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CESA SRAM Memory Map:
 *
 * +------------------------+ <= sc->sc_sram_base_va + CESA_SRAM_SIZE
 * |                        |
 * |          DATA          |
 * |                        |
 * +------------------------+ <= sc->sc_sram_base_va + CESA_DATA(0)
 * |  struct cesa_sa_data   |
 * +------------------------+
 * |  struct cesa_sa_hdesc  |
 * +------------------------+ <= sc->sc_sram_base_va
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/resource.h>
#include <machine/fdt.h>

#include <dev/fdt/simplebus.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <crypto/sha1.h>
#include <crypto/sha2/sha256.h>
#include <crypto/rijndael/rijndael.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>
#include "cryptodev_if.h"

#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>
#include "cesa.h"

static int cesa_probe(device_t);
static int cesa_attach(device_t);
static int cesa_attach_late(device_t);
static int cesa_detach(device_t);
static void cesa_intr(void *);
static int cesa_probesession(device_t,
    const struct crypto_session_params *);
static int cesa_newsession(device_t, crypto_session_t,
    const struct crypto_session_params *);
static int cesa_process(device_t, struct cryptop *, int);

static struct resource_spec cesa_res_spec[] = {
        { SYS_RES_MEMORY, 0, RF_ACTIVE },
        { SYS_RES_MEMORY, 1, RF_ACTIVE },
        { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
        { -1, 0 }
};

static device_method_t cesa_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, cesa_probe),
        DEVMETHOD(device_attach, cesa_attach),
        DEVMETHOD(device_detach, cesa_detach),

        /* Crypto device methods */
        DEVMETHOD(cryptodev_probesession, cesa_probesession),
        DEVMETHOD(cryptodev_newsession, cesa_newsession),
        DEVMETHOD(cryptodev_process, cesa_process),

        DEVMETHOD_END
};

static driver_t cesa_driver = {
        "cesa",
        cesa_methods,
        sizeof (struct cesa_softc)
};
static devclass_t cesa_devclass;

DRIVER_MODULE(cesa, simplebus, cesa_driver, cesa_devclass, 0, 0);
MODULE_DEPEND(cesa, crypto, 1, 1, 1);

static void
cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd)
{
#ifdef DEBUG
        device_t dev;

        dev = sc->sc_dev;
        device_printf(dev, "CESA SA Hardware Descriptor:\n");
        device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config);
        device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src);
        device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst);
        device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen);
        device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key);
        device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv);
        device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf);
        device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src);
        device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst);
        device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen);
        device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen);
        device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in);
        device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out);
#endif
}

static void
cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct cesa_dma_mem *cdm;

        if (error)
                return;

        KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1."));
        cdm = arg;
        cdm->cdm_paddr = segs->ds_addr;
}

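/*
 * Allocate a contiguous, DMA-safe chunk of memory: create a busdma tag
 * restricted to a single segment below 4 GB, allocate zeroed memory with it,
 * and load the map to learn the physical address (recorded by
 * cesa_alloc_dma_mem_cb()). On failure the partially created state is rolled
 * back and cdm_vaddr stays NULL, which marks the descriptor as unused.
 */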
static int
cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm,
    bus_size_t size)
{
        int error;

        KASSERT(cdm->cdm_vaddr == NULL,
            ("%s(): DMA memory descriptor in use.", __func__));

        error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
            PAGE_SIZE, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filtfunc, filtfuncarg */
            size, 1,                            /* maxsize, nsegments */
            size, 0,                            /* maxsegsz, flags */
            NULL, NULL,                         /* lockfunc, lockfuncarg */
            &cdm->cdm_tag);                     /* dmat */
        if (error) {
                device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
                    " %i!\n", error);

                goto err1;
        }

        error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map);
        if (error) {
                device_printf(sc->sc_dev, "failed to allocate DMA safe"
                    " memory, error %i!\n", error);

                goto err2;
        }

        error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr,
            size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT);
        if (error) {
                device_printf(sc->sc_dev, "cannot get address of the DMA"
                    " memory, error %i\n", error);

                goto err3;
        }

        return (0);
err3:
        bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
err2:
        bus_dma_tag_destroy(cdm->cdm_tag);
err1:
        cdm->cdm_vaddr = NULL;
        return (error);
}

static void
cesa_free_dma_mem(struct cesa_dma_mem *cdm)
{

        bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map);
        bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
        bus_dma_tag_destroy(cdm->cdm_tag);
        cdm->cdm_vaddr = NULL;
}

static void
cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op)
{

        /* Sync only if DMA memory is valid */
        if (cdm->cdm_vaddr != NULL)
                bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op);
}

static void
cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op)
{

        cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op);
        cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op);
        cesa_sync_dma_mem(&sc->sc_requests_cdm, op);
}

static struct cesa_request *
cesa_alloc_request(struct cesa_softc *sc)
{
        struct cesa_request *cr;

        CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests);
        if (!cr)
                return (NULL);

        STAILQ_INIT(&cr->cr_tdesc);
        STAILQ_INIT(&cr->cr_sdesc);

        return (cr);
}

static void
cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr)
{

        /* Free TDMA descriptors assigned to this request */
        CESA_LOCK(sc, tdesc);
        STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc);
        CESA_UNLOCK(sc, tdesc);

        /* Free SA descriptors assigned to this request */
        CESA_LOCK(sc, sdesc);
        STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc);
        CESA_UNLOCK(sc, sdesc);

        /* Unload DMA memory associated with request */
        if (cr->cr_dmap_loaded) {
                bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap);
                cr->cr_dmap_loaded = 0;
        }

        CESA_GENERIC_FREE_LOCKED(sc, cr, requests);
}

static void
cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr)
{

        CESA_LOCK(sc, requests);
        STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq);
        CESA_UNLOCK(sc, requests);
}

static struct cesa_tdma_desc *
cesa_alloc_tdesc(struct cesa_softc *sc)
{
        struct cesa_tdma_desc *ctd;

        CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc);

        if (!ctd)
                device_printf(sc->sc_dev, "TDMA descriptor pool exhausted. "
                    "Consider increasing CESA_TDMA_DESCRIPTORS.\n");

        return (ctd);
}

static struct cesa_sa_desc *
cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr)
{
        struct cesa_sa_desc *csd;

        CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc);
        if (!csd) {
                device_printf(sc->sc_dev, "SA descriptor pool exhausted. "
                    "Consider increasing CESA_SA_DESCRIPTORS.\n");
                return (NULL);
        }

        STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq);

        /* Fill in SA descriptor with default values */
        csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key);
        csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv);
        csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv);
        csd->csd_cshd->cshd_enc_src = 0;
        csd->csd_cshd->cshd_enc_dst = 0;
        csd->csd_cshd->cshd_enc_dlen = 0;
        csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash);
        csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in);
        csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out);
        csd->csd_cshd->cshd_mac_src = 0;
        csd->csd_cshd->cshd_mac_dlen = 0;

        return (csd);
}

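/*
 * Build a TDMA copy descriptor that moves 'size' bytes from 'src' to 'dst'.
 * A zero size produces the special control packet that separates processing
 * stages in a chain; it is not marked as owned by the hardware
 * (cthd_flags == 0).
 */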
static struct cesa_tdma_desc *
cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src,
    bus_size_t size)
{
        struct cesa_tdma_desc *ctd;

        ctd = cesa_alloc_tdesc(sc);
        if (!ctd)
                return (NULL);

        ctd->ctd_cthd->cthd_dst = dst;
        ctd->ctd_cthd->cthd_src = src;
        ctd->ctd_cthd->cthd_byte_count = size;

        /* Handle special control packet */
        if (size != 0)
                ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED;
        else
                ctd->ctd_cthd->cthd_flags = 0;

        return (ctd);
}

static struct cesa_tdma_desc *
cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
{

        return (cesa_tdma_copy(sc, sc->sc_sram_base_pa +
            sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr,
            sizeof(struct cesa_sa_data)));
}

static struct cesa_tdma_desc *
cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
{

        return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base_pa +
            sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data)));
}

static struct cesa_tdma_desc *
cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd)
{

        return (cesa_tdma_copy(sc, sc->sc_sram_base_pa, csd->csd_cshd_paddr,
            sizeof(struct cesa_sa_hdesc)));
}

static void
cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd)
{
        struct cesa_tdma_desc *ctd_prev;

        if (!STAILQ_EMPTY(&cr->cr_tdesc)) {
                ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq);
                ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr;
        }

        ctd->ctd_cthd->cthd_next = 0;
        STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq);
}

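/*
 * Glue one processed packet into the request's TDMA chain: first the copy of
 * the SA descriptor into SRAM, then the copy-in descriptors for the data, a
 * zero-length control descriptor marking the hand-off point to the crypto
 * engine, and finally the copy-out descriptors for the results.
 */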
static int
cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr,
    struct cesa_packet *cp, struct cesa_sa_desc *csd)
{
        struct cesa_tdma_desc *ctd, *tmp;

        /* Copy SA descriptor for this packet */
        ctd = cesa_tdma_copy_sdesc(sc, csd);
        if (!ctd)
                return (ENOMEM);

        cesa_append_tdesc(cr, ctd);

        /* Copy data to be processed */
        STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp)
                cesa_append_tdesc(cr, ctd);
        STAILQ_INIT(&cp->cp_copyin);

        /* Insert control descriptor */
        ctd = cesa_tdma_copy(sc, 0, 0, 0);
        if (!ctd)
                return (ENOMEM);

        cesa_append_tdesc(cr, ctd);

        /* Copy back results */
        STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp)
                cesa_append_tdesc(cr, ctd);
        STAILQ_INIT(&cp->cp_copyout);

        return (0);
}

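/*
 * Precompute the HMAC inner and outer hash state from the MAC key:
 * hmac_init_ipad()/hmac_init_opad() run one compression round over the
 * XOR-padded key, and the resulting state words are stored big-endian in
 * cs_hiv_in/cs_hiv_out for the engine to continue from.
 */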
static void
cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen)
{
        union authctx auth_ctx;
        uint32_t *hout;
        uint32_t *hin;
        int i;

        hin = (uint32_t *)cs->cs_hiv_in;
        hout = (uint32_t *)cs->cs_hiv_out;

        switch (alg) {
        case CRYPTO_SHA1_HMAC:
                hmac_init_ipad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx);
                memcpy(hin, auth_ctx.sha1ctx.h.b32,
                    sizeof(auth_ctx.sha1ctx.h.b32));
                hmac_init_opad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx);
                memcpy(hout, auth_ctx.sha1ctx.h.b32,
                    sizeof(auth_ctx.sha1ctx.h.b32));
                break;
        case CRYPTO_SHA2_256_HMAC:
                hmac_init_ipad(&auth_hash_hmac_sha2_256, mkey, mklen,
                    &auth_ctx);
                memcpy(hin, auth_ctx.sha256ctx.state,
                    sizeof(auth_ctx.sha256ctx.state));
                hmac_init_opad(&auth_hash_hmac_sha2_256, mkey, mklen,
                    &auth_ctx);
                memcpy(hout, auth_ctx.sha256ctx.state,
                    sizeof(auth_ctx.sha256ctx.state));
                break;
        default:
                panic("shouldn't get here");
        }

        for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) {
                hin[i] = htobe32(hin[i]);
                hout[i] = htobe32(hout[i]);
        }
        explicit_bzero(&auth_ctx, sizeof(auth_ctx));
}

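/*
 * Derive the AES decryption key expected by the hardware. The engine runs
 * decryption starting from the end of the expanded encryption schedule, so
 * expand the cipher key with rijndaelKeySetupEnc() and store the trailing
 * words (one key-length's worth, ending at the last round key) big-endian
 * in cs_aes_dkey.
 */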
static int
cesa_prep_aes_key(struct cesa_session *cs,
    const struct crypto_session_params *csp)
{
        uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
        uint32_t *dkey;
        int i;

        rijndaelKeySetupEnc(ek, cs->cs_key, csp->csp_cipher_klen * 8);

        cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK;
        dkey = (uint32_t *)cs->cs_aes_dkey;

        switch (csp->csp_cipher_klen) {
        case 16:
                cs->cs_config |= CESA_CSH_AES_KLEN_128;
                for (i = 0; i < 4; i++)
                        *dkey++ = htobe32(ek[4 * 10 + i]);
                break;
        case 24:
                cs->cs_config |= CESA_CSH_AES_KLEN_192;
                for (i = 0; i < 4; i++)
                        *dkey++ = htobe32(ek[4 * 12 + i]);
                for (i = 0; i < 2; i++)
                        *dkey++ = htobe32(ek[4 * 11 + 2 + i]);
                break;
        case 32:
                cs->cs_config |= CESA_CSH_AES_KLEN_256;
                for (i = 0; i < 4; i++)
                        *dkey++ = htobe32(ek[4 * 14 + i]);
                for (i = 0; i < 4; i++)
                        *dkey++ = htobe32(ek[4 * 13 + i]);
                break;
        default:
                return (EINVAL);
        }

        return (0);
}

static void
cesa_start_packet(struct cesa_packet *cp, unsigned int size)
{

        cp->cp_size = size;
        cp->cp_offset = 0;
        STAILQ_INIT(&cp->cp_copyin);
        STAILQ_INIT(&cp->cp_copyout);
}

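/*
 * Consume as much of the current DMA segment as fits in the packet: queue a
 * copy-in descriptor (segment -> SRAM data area) and a matching copy-out
 * descriptor (SRAM -> segment), then advance the segment and packet offsets.
 * Returns the number of bytes consumed, or -ENOMEM if a TDMA descriptor
 * could not be allocated.
 */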
static int
cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp,
    bus_dma_segment_t *seg)
{
        struct cesa_tdma_desc *ctd;
        unsigned int bsize;

        /* Calculate size of block copy */
        bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset);

        if (bsize > 0) {
                ctd = cesa_tdma_copy(sc, sc->sc_sram_base_pa +
                    CESA_DATA(cp->cp_offset), seg->ds_addr, bsize);
                if (!ctd)
                        return (-ENOMEM);

                STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq);

                ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base_pa +
                    CESA_DATA(cp->cp_offset), bsize);
                if (!ctd)
                        return (-ENOMEM);

                STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq);

                seg->ds_len -= bsize;
                seg->ds_addr += bsize;
                cp->cp_offset += bsize;
        }

        return (bsize);
}

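/*
 * busdma callback that turns the loaded request buffer into a chain of CESA
 * packets. Requests whose AAD layout the engine cannot handle in a single
 * combined pass are recursively re-run as a separate MAC pass and a separate
 * ENC pass. The data is then split into packets of at most
 * CESA_MAX_PACKET_SIZE bytes (rounded down to the IV and MAC block sizes),
 * with fragmentation flags set when a request spans several packets.
 */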
static void
cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        unsigned int mpsize, fragmented;
        unsigned int mlen, mskip, tmlen;
        struct cesa_chain_info *cci;
        unsigned int elen, eskip;
        unsigned int skip, len;
        struct cesa_sa_desc *csd;
        struct cesa_request *cr;
        struct cryptop *crp;
        struct cesa_softc *sc;
        struct cesa_packet cp;
        bus_dma_segment_t seg;
        uint32_t config;
        int size;

        cci = arg;
        sc = cci->cci_sc;
        cr = cci->cci_cr;
        crp = cr->cr_crp;

        if (error) {
                cci->cci_error = error;
                return;
        }

        /*
         * Only do a combined op if the AAD is adjacent to the payload
         * and the AAD length is a multiple of the IV length. The
         * checks against 'config' are to avoid recursing when the
         * logic below invokes separate operations.
         */
        config = cci->cci_config;
        if (((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC ||
            (config & CESA_CSHD_OP_MASK) == CESA_CSHD_ENC_AND_MAC) &&
            crp->crp_aad_length != 0 &&
            (crp->crp_aad_length & (cr->cr_cs->cs_ivlen - 1)) != 0) {
                /*
                 * Data alignment in the request does not meet CESA
                 * requirements for combined encryption/decryption and
                 * hashing. We have to split the request into separate
                 * operations and process them one by one.
                 */
                if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) {
                        config &= ~CESA_CSHD_OP_MASK;

                        cci->cci_config = config | CESA_CSHD_MAC;
                        cesa_create_chain_cb(cci, segs, nseg, 0);

                        cci->cci_config = config | CESA_CSHD_ENC;
                        cesa_create_chain_cb(cci, segs, nseg, 0);
                } else {
                        config &= ~CESA_CSHD_OP_MASK;

                        cci->cci_config = config | CESA_CSHD_ENC;
                        cesa_create_chain_cb(cci, segs, nseg, 0);

                        cci->cci_config = config | CESA_CSHD_MAC;
                        cesa_create_chain_cb(cci, segs, nseg, 0);
                }

                return;
        }

        mskip = mlen = eskip = elen = 0;

        if (crp->crp_aad_length == 0) {
                skip = crp->crp_payload_start;
                len = crp->crp_payload_length;
                switch (config & CESA_CSHD_OP_MASK) {
                case CESA_CSHD_ENC:
                        eskip = skip;
                        elen = len;
                        break;
                case CESA_CSHD_MAC:
                        mskip = skip;
                        mlen = len;
                        break;
                default:
                        eskip = skip;
                        elen = len;
                        mskip = skip;
                        mlen = len;
                        break;
                }
        } else {
                /*
                 * For an encryption-only separate request, only
                 * process the payload. For combined requests and
                 * hash-only requests, process the entire region.
                 */
                switch (config & CESA_CSHD_OP_MASK) {
                case CESA_CSHD_ENC:
                        skip = crp->crp_payload_start;
                        len = crp->crp_payload_length;
                        eskip = skip;
                        elen = len;
                        break;
                case CESA_CSHD_MAC:
                        skip = crp->crp_aad_start;
                        len = crp->crp_aad_length + crp->crp_payload_length;
                        mskip = skip;
                        mlen = len;
                        break;
                default:
                        skip = crp->crp_aad_start;
                        len = crp->crp_aad_length + crp->crp_payload_length;
                        mskip = skip;
                        mlen = len;
                        eskip = crp->crp_payload_start;
                        elen = crp->crp_payload_length;
                        break;
                }
        }

        tmlen = mlen;
        fragmented = 0;
        mpsize = CESA_MAX_PACKET_SIZE;
        mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1));

        /* Start first packet in chain */
        cesa_start_packet(&cp, MIN(mpsize, len));

        while (nseg-- && len > 0) {
                seg = *(segs++);

                /*
                 * Skip data in buffer on which neither ENC nor MAC operation
                 * is requested.
                 */
                if (skip > 0) {
                        size = MIN(skip, seg.ds_len);
                        skip -= size;

                        seg.ds_addr += size;
                        seg.ds_len -= size;

                        if (eskip > 0)
                                eskip -= size;

                        if (mskip > 0)
                                mskip -= size;

                        if (seg.ds_len == 0)
                                continue;
                }

                while (1) {
                        /*
                         * Fill in current packet with data. Break if there is
                         * no more data in current DMA segment or an error
                         * occurred.
                         */
                        size = cesa_fill_packet(sc, &cp, &seg);
                        if (size <= 0) {
                                error = -size;
                                break;
                        }

                        len -= size;

                        /* If packet is full, append it to the chain */
                        if (cp.cp_size == cp.cp_offset) {
                                csd = cesa_alloc_sdesc(sc, cr);
                                if (!csd) {
                                        error = ENOMEM;
                                        break;
                                }

                                /* Create SA descriptor for this packet */
                                csd->csd_cshd->cshd_config = cci->cci_config;
                                csd->csd_cshd->cshd_mac_total_dlen = tmlen;

                                /*
                                 * Enable fragmentation if request will not fit
                                 * into one packet.
                                 */
                                if (len > 0) {
                                        if (!fragmented) {
                                                fragmented = 1;
                                                csd->csd_cshd->cshd_config |=
                                                    CESA_CSHD_FRAG_FIRST;
                                        } else
                                                csd->csd_cshd->cshd_config |=
                                                    CESA_CSHD_FRAG_MIDDLE;
                                } else if (fragmented)
                                        csd->csd_cshd->cshd_config |=
                                            CESA_CSHD_FRAG_LAST;

                                if (eskip < cp.cp_size && elen > 0) {
                                        csd->csd_cshd->cshd_enc_src =
                                            CESA_DATA(eskip);
                                        csd->csd_cshd->cshd_enc_dst =
                                            CESA_DATA(eskip);
                                        csd->csd_cshd->cshd_enc_dlen =
                                            MIN(elen, cp.cp_size - eskip);
                                }

                                if (mskip < cp.cp_size && mlen > 0) {
                                        csd->csd_cshd->cshd_mac_src =
                                            CESA_DATA(mskip);
                                        csd->csd_cshd->cshd_mac_dlen =
                                            MIN(mlen, cp.cp_size - mskip);
                                }

                                elen -= csd->csd_cshd->cshd_enc_dlen;
                                eskip -= MIN(eskip, cp.cp_size);
                                mlen -= csd->csd_cshd->cshd_mac_dlen;
                                mskip -= MIN(mskip, cp.cp_size);

                                cesa_dump_cshd(sc, csd->csd_cshd);

                                /* Append packet to the request */
                                error = cesa_append_packet(sc, cr, &cp, csd);
                                if (error)
                                        break;

                                /* Start a new packet, as current is full */
                                cesa_start_packet(&cp, MIN(mpsize, len));
                        }
                }

                if (error)
                        break;
        }

        if (error) {
                /*
                 * Move all allocated resources to the request. They will be
                 * freed later.
                 */
                STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin);
                STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout);
                cci->cci_error = error;
        }
}

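/*
 * Translate a crypto request into the full TDMA chain: copy the session keys
 * and IVs into the per-request SA data, prepend a descriptor that copies
 * that SA data into SRAM, let cesa_create_chain_cb() emit the per-packet
 * descriptors, and append a descriptor that copies the SA data (with the
 * hash result) back out of SRAM.
 */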
static int
cesa_create_chain(struct cesa_softc *sc,
    const struct crypto_session_params *csp, struct cesa_request *cr)
{
        struct cesa_chain_info cci;
        struct cesa_tdma_desc *ctd;
        uint32_t config;
        int error;

        error = 0;
        CESA_LOCK_ASSERT(sc, sessions);

        /* Create request metadata */
        if (csp->csp_cipher_klen != 0) {
                if (csp->csp_cipher_alg == CRYPTO_AES_CBC &&
                    !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op))
                        memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey,
                            csp->csp_cipher_klen);
                else
                        memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key,
                            csp->csp_cipher_klen);
        }

        if (csp->csp_auth_klen != 0) {
                memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in,
                    CESA_MAX_HASH_LEN);
                memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out,
                    CESA_MAX_HASH_LEN);
        }

        ctd = cesa_tdma_copyin_sa_data(sc, cr);
        if (!ctd)
                return (ENOMEM);

        cesa_append_tdesc(cr, ctd);

        /* Prepare SA configuration */
        config = cr->cr_cs->cs_config;

        if (csp->csp_cipher_alg != 0 &&
            !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op))
                config |= CESA_CSHD_DECRYPT;
        switch (csp->csp_mode) {
        case CSP_MODE_CIPHER:
                config |= CESA_CSHD_ENC;
                break;
        case CSP_MODE_DIGEST:
                config |= CESA_CSHD_MAC;
                break;
        case CSP_MODE_ETA:
                config |= (config & CESA_CSHD_DECRYPT) ?
                    CESA_CSHD_MAC_AND_ENC : CESA_CSHD_ENC_AND_MAC;
                break;
        }

        /* Create data packets */
        cci.cci_sc = sc;
        cci.cci_cr = cr;
        cci.cci_config = config;
        cci.cci_error = 0;

        error = bus_dmamap_load_crp(sc->sc_data_dtag, cr->cr_dmap, cr->cr_crp,
            cesa_create_chain_cb, &cci, BUS_DMA_NOWAIT);

        if (!error)
                cr->cr_dmap_loaded = 1;

        if (cci.cci_error)
                error = cci.cci_error;

        if (error)
                return (error);

        /* Read back request metadata */
        ctd = cesa_tdma_copyout_sa_data(sc, cr);
        if (!ctd)
                return (ENOMEM);

        cesa_append_tdesc(cr, ctd);

        return (0);
}

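/*
 * Kick off hardware processing. All ready requests are moved to the queued
 * list and their TDMA chains are linked into one; execution then starts by
 * pointing the TDMA engine at the first descriptor and activating the SA
 * unit. If the hardware is already busy (queued list not empty), this is a
 * no-op and the interrupt handler calls back in here once the current chain
 * completes.
 */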
static void
cesa_execute(struct cesa_softc *sc)
{
        struct cesa_tdma_desc *prev_ctd, *ctd;
        struct cesa_request *prev_cr, *cr;

        CESA_LOCK(sc, requests);

        /*
         * If ready list is empty, there is nothing to execute. If queued list
         * is not empty, the hardware is busy and we cannot start another
         * execution.
         */
        if (STAILQ_EMPTY(&sc->sc_ready_requests) ||
            !STAILQ_EMPTY(&sc->sc_queued_requests)) {
                CESA_UNLOCK(sc, requests);
                return;
        }

        /* Move all ready requests to queued list */
        STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests);
        STAILQ_INIT(&sc->sc_ready_requests);

        /* Create one execution chain from all requests on the list */
        if (STAILQ_FIRST(&sc->sc_queued_requests) !=
            STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) {
                prev_cr = NULL;
                cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD |
                    BUS_DMASYNC_POSTWRITE);

                STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) {
                        if (prev_cr) {
                                ctd = STAILQ_FIRST(&cr->cr_tdesc);
                                prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc,
                                    cesa_tdma_desc, ctd_stq);

                                prev_ctd->ctd_cthd->cthd_next =
                                    ctd->ctd_cthd_paddr;
                        }

                        prev_cr = cr;
                }

                cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD |
                    BUS_DMASYNC_PREWRITE);
        }

        /* Start chain execution in hardware */
        cr = STAILQ_FIRST(&sc->sc_queued_requests);
        ctd = STAILQ_FIRST(&cr->cr_tdesc);

        CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr);

        if (sc->sc_soc_id == MV_DEV_88F6828 ||
            sc->sc_soc_id == MV_DEV_88F6820 ||
            sc->sc_soc_id == MV_DEV_88F6810)
                CESA_REG_WRITE(sc, CESA_SA_CMD,
                    CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2);
        else
                CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE);

        CESA_UNLOCK(sc, requests);
}

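/*
 * Resolve the security accelerator SRAM from the FDT: follow the
 * "sram-handle" property to the SRAM node and take its physical base and
 * size from the "reg" property. On Armada-38x family SoCs (88F6810/20/28)
 * the SRAM is not covered by platform_sram_devmap(), so it is mapped here
 * with pmap_mapdev().
 */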
static int
cesa_setup_sram(struct cesa_softc *sc)
{
        phandle_t sram_node;
        ihandle_t sram_ihandle;
        pcell_t sram_handle, sram_reg[2];
        void *sram_va;
        int rv;

        rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle",
            (void *)&sram_handle, sizeof(sram_handle));
        if (rv <= 0)
                return (rv);

        sram_ihandle = (ihandle_t)sram_handle;
        sram_node = OF_instance_to_package(sram_ihandle);

        rv = OF_getencprop(sram_node, "reg", (void *)sram_reg,
            sizeof(sram_reg));
        if (rv <= 0)
                return (rv);

        sc->sc_sram_base_pa = sram_reg[0];
        /* Store SRAM size to be able to unmap in detach() */
        sc->sc_sram_size = sram_reg[1];

        if (sc->sc_soc_id != MV_DEV_88F6828 &&
            sc->sc_soc_id != MV_DEV_88F6820 &&
            sc->sc_soc_id != MV_DEV_88F6810)
                return (0);

        /* SRAM memory was not mapped in platform_sram_devmap(), map it now */
        sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size);
        if (sram_va == NULL)
                return (ENOMEM);
        sc->sc_sram_base_va = (vm_offset_t)sram_va;

        return (0);
}

/*
 * Function: device_from_node
 * Returns the device_t that corresponds to the given phandle_t.
 * Parameters:
 * root - the device at which to start the search; if NULL is
 *        provided here, the "root0" device is used as the root.
 * node - the FDT node that every device_t is checked against.
 */
static device_t
device_from_node(device_t root, phandle_t node)
{
        device_t *children, retval;
        int nkid, i;

        /* Nothing matches no node */
        if (node == -1)
                return (NULL);

        if (root == NULL)
                /* Get root of device tree */
                if ((root = device_lookup_by_name("root0")) == NULL)
                        return (NULL);

        if (device_get_children(root, &children, &nkid) != 0)
                return (NULL);

        retval = NULL;
        for (i = 0; i < nkid; i++) {
                /* Check if device and node matches */
                if (OFW_BUS_GET_NODE(root, children[i]) == node) {
                        retval = children[i];
                        break;
                }
                /* or go deeper */
                if ((retval = device_from_node(children[i], node)) != NULL)
                        break;
        }
        free(children, M_TEMP);

        return (retval);
}

static int
cesa_setup_sram_armada(struct cesa_softc *sc)
{
        phandle_t sram_node;
        ihandle_t sram_ihandle;
        pcell_t sram_handle[2];
        void *sram_va;
        int rv, j;
        struct resource_list rl;
        struct resource_list_entry *rle;
        struct simplebus_softc *ssc;
        device_t sdev;

        /* Get refs to SRAMs from CESA node */
        rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev),
            "marvell,crypto-srams", (void *)sram_handle,
            sizeof(sram_handle));
        if (rv <= 0)
                return (rv);

        if (sc->sc_cesa_engine_id >= 2)
                return (ENXIO);

        /* Get SRAM node on the basis of sc_cesa_engine_id */
        sram_ihandle = (ihandle_t)sram_handle[sc->sc_cesa_engine_id];
        sram_node = OF_instance_to_package(sram_ihandle);

        /* Get device_t of simplebus (sram_node parent) */
        sdev = device_from_node(NULL, OF_parent(sram_node));
        if (!sdev)
                return (ENXIO);

        ssc = device_get_softc(sdev);

        resource_list_init(&rl);
        /* Parse reg property to resource list */
        ofw_bus_reg_to_rl(sdev, sram_node, ssc->acells,
            ssc->scells, &rl);

        /* We expect only one resource */
        rle = resource_list_find(&rl, SYS_RES_MEMORY, 0);
        if (rle == NULL)
                return (ENXIO);

        /* Remap through ranges property */
        for (j = 0; j < ssc->nranges; j++) {
                if (rle->start >= ssc->ranges[j].bus &&
                    rle->end < ssc->ranges[j].bus + ssc->ranges[j].size) {
                        rle->start -= ssc->ranges[j].bus;
                        rle->start += ssc->ranges[j].host;
                        rle->end -= ssc->ranges[j].bus;
                        rle->end += ssc->ranges[j].host;
                }
        }

        sc->sc_sram_base_pa = rle->start;
        sc->sc_sram_size = rle->count;

        /* SRAM memory was not mapped in platform_sram_devmap(), map it now */
        sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size);
        if (sram_va == NULL)
                return (ENOMEM);
        sc->sc_sram_base_va = (vm_offset_t)sram_va;

        return (0);
}

struct ofw_compat_data cesa_devices[] = {
        { "mrvl,cesa", (uintptr_t)true },
        { "marvell,armada-38x-crypto", (uintptr_t)true },
        { NULL, 0 }
};

static int
cesa_probe(device_t dev)
{

        if (!ofw_bus_status_okay(dev))
                return (ENXIO);

        if (!ofw_bus_search_compatible(dev, cesa_devices)->ocd_data)
                return (ENXIO);

        device_set_desc(dev, "Marvell Cryptographic Engine and Security "
            "Accelerator");

        return (BUS_PROBE_DEFAULT);
}

static int
cesa_attach(device_t dev)
{
        static int engine_idx = 0;
        struct simplebus_devinfo *ndi;
        struct resource_list *rl;
        struct cesa_softc *sc;

        if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto"))
                return (cesa_attach_late(dev));

        /*
         * Get the simplebus_devinfo, which contains the resource list
         * filled with the addresses and interrupts read from the FDT,
         * and correct it by splitting the resources between the two
         * engines.
         */
        if ((ndi = device_get_ivars(dev)) == NULL)
                return (ENXIO);

        rl = &ndi->rl;

        switch (engine_idx) {
        case 0:
                /* Update regs values */
                resource_list_add(rl, SYS_RES_MEMORY, 0, CESA0_TDMA_ADDR,
                    CESA0_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE);
                resource_list_add(rl, SYS_RES_MEMORY, 1, CESA0_CESA_ADDR,
                    CESA0_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE);

                /* Remove unused interrupt */
                resource_list_delete(rl, SYS_RES_IRQ, 1);
                break;

        case 1:
                /* Update regs values */
                resource_list_add(rl, SYS_RES_MEMORY, 0, CESA1_TDMA_ADDR,
                    CESA1_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE);
                resource_list_add(rl, SYS_RES_MEMORY, 1, CESA1_CESA_ADDR,
                    CESA1_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE);

                /* Remove unused interrupt */
                resource_list_delete(rl, SYS_RES_IRQ, 0);
                resource_list_find(rl, SYS_RES_IRQ, 1)->rid = 0;
                break;

        default:
                device_printf(dev, "Bad cesa engine_idx\n");
                return (ENXIO);
        }

        sc = device_get_softc(dev);
        sc->sc_cesa_engine_id = engine_idx;

        /*
         * Call simplebus_add_device() only once. It creates a second
         * cesa driver instance with the same FDT node as the first one.
         * When the second instance reaches this function, it is
         * configured to use the second cesa engine.
         */
        if (engine_idx == 0)
                simplebus_add_device(device_get_parent(dev),
                    ofw_bus_get_node(dev), 0, "cesa", 1, NULL);

        engine_idx++;

        return (cesa_attach_late(dev));
}

static int
cesa_attach_late(device_t dev)
{
        struct cesa_softc *sc;
        uint32_t d, r, val;
        int error;
        int i;

        sc = device_get_softc(dev);
        sc->sc_blocked = 0;
        sc->sc_error = 0;
        sc->sc_dev = dev;

        soc_id(&d, &r);

        switch (d) {
        case MV_DEV_88F6281:
        case MV_DEV_88F6282:
                /* Check if CESA peripheral device has power turned on */
                if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) ==
                    CPU_PM_CTRL_CRYPTO) {
                        device_printf(dev, "not powered on\n");
                        return (ENXIO);
                }
                sc->sc_tperr = 0;
                break;
        case MV_DEV_88F6828:
        case MV_DEV_88F6820:
        case MV_DEV_88F6810:
                sc->sc_tperr = 0;
                break;
        case MV_DEV_MV78100:
        case MV_DEV_MV78100_Z0:
                /* Check if CESA peripheral device has power turned on */
                if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) !=
                    CPU_PM_CTRL_CRYPTO) {
                        device_printf(dev, "not powered on\n");
                        return (ENXIO);
                }
                sc->sc_tperr = CESA_ICR_TPERR;
                break;
        default:
                return (ENXIO);
        }

        sc->sc_soc_id = d;

        /* Initialize mutexes */
        mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev),
            "CESA Shared Data", MTX_DEF);
        mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev),
            "CESA TDMA Descriptors Pool", MTX_DEF);
        mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev),
            "CESA SA Descriptors Pool", MTX_DEF);
        mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev),
            "CESA Requests Pool", MTX_DEF);
        mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
            "CESA Sessions Pool", MTX_DEF);

        /* Allocate I/O and IRQ resources */
        error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res);
        if (error) {
                device_printf(dev, "could not allocate resources\n");
                goto err0;
        }

        /* Acquire SRAM base address */
        if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto"))
                error = cesa_setup_sram(sc);
        else
                error = cesa_setup_sram_armada(sc);

        if (error) {
                device_printf(dev, "could not setup SRAM\n");
                goto err1;
        }

        /* Setup interrupt handler */
        error = bus_setup_intr(dev, sc->sc_res[RES_CESA_IRQ], INTR_TYPE_NET |
            INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie));
        if (error) {
                device_printf(dev, "could not setup engine completion irq\n");
                goto err2;
        }

        /* Create DMA tag for processed data */
        error = bus_dma_tag_create(bus_get_dma_tag(dev),    /* parent */
            1, 0,                               /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filtfunc, filtfuncarg */
            CESA_MAX_REQUEST_SIZE,              /* maxsize */
            CESA_MAX_FRAGMENTS,                 /* nsegments */
            CESA_MAX_REQUEST_SIZE, 0,           /* maxsegsz, flags */
            NULL, NULL,                         /* lockfunc, lockfuncarg */
            &sc->sc_data_dtag);                 /* dmat */
        if (error)
                goto err3;

        /* Initialize data structures: TDMA Descriptors Pool */
        error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm,
            CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc));
        if (error)
                goto err4;

        STAILQ_INIT(&sc->sc_free_tdesc);
        for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) {
                sc->sc_tdesc[i].ctd_cthd =
                    (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i;
                sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr +
                    (i * sizeof(struct cesa_tdma_hdesc));
                STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i],
                    ctd_stq);
        }

        /* Initialize data structures: SA Descriptors Pool */
        error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm,
            CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc));
        if (error)
                goto err5;

        STAILQ_INIT(&sc->sc_free_sdesc);
        for (i = 0; i < CESA_SA_DESCRIPTORS; i++) {
                sc->sc_sdesc[i].csd_cshd =
                    (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i;
                sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr +
                    (i * sizeof(struct cesa_sa_hdesc));
                STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i],
                    csd_stq);
        }

        /* Initialize data structures: Requests Pool */
        error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm,
            CESA_REQUESTS * sizeof(struct cesa_sa_data));
        if (error)
                goto err6;

        STAILQ_INIT(&sc->sc_free_requests);
        STAILQ_INIT(&sc->sc_ready_requests);
        STAILQ_INIT(&sc->sc_queued_requests);
        for (i = 0; i < CESA_REQUESTS; i++) {
                sc->sc_requests[i].cr_csd =
                    (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i;
                sc->sc_requests[i].cr_csd_paddr =
                    sc->sc_requests_cdm.cdm_paddr +
                    (i * sizeof(struct cesa_sa_data));

                /* Preallocate DMA maps */
                error = bus_dmamap_create(sc->sc_data_dtag, 0,
                    &sc->sc_requests[i].cr_dmap);
                if (error && i > 0) {
                        i--;
                        do {
                                bus_dmamap_destroy(sc->sc_data_dtag,
                                    sc->sc_requests[i].cr_dmap);
                        } while (i--);

                        goto err7;
                }

                STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i],
                    cr_stq);
        }

        /*
         * Initialize TDMA:
         * - Burst limit: 128 bytes,
         * - Outstanding reads enabled,
         * - No byte-swap.
         */
        val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 |
            CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE;

        if (sc->sc_soc_id == MV_DEV_88F6828 ||
            sc->sc_soc_id == MV_DEV_88F6820 ||
            sc->sc_soc_id == MV_DEV_88F6810)
                val |= CESA_TDMA_NUM_OUTSTAND;

        CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val);

        /*
         * Initialize SA:
         * - SA descriptor is present at beginning of CESA SRAM,
         * - Multi-packet chain mode,
         * - Cooperation with TDMA enabled.
         */
        CESA_REG_WRITE(sc, CESA_SA_DPR, 0);
        CESA_REG_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA |
            CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE);

        /* Unmask interrupts */
        CESA_REG_WRITE(sc, CESA_ICR, 0);
        CESA_REG_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr);
        CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0);
        CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS |
            CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT |
            CESA_TDMA_EMR_DATA_ERROR);

        /* Register in OCF */
        sc->sc_cid = crypto_get_driverid(dev, sizeof(struct cesa_session),
            CRYPTOCAP_F_HARDWARE);
        if (sc->sc_cid < 0) {
                device_printf(dev, "could not get crypto driver id\n");
                goto err8;
        }

        return (0);
err8:
        for (i = 0; i < CESA_REQUESTS; i++)
                bus_dmamap_destroy(sc->sc_data_dtag,
                    sc->sc_requests[i].cr_dmap);
err7:
        cesa_free_dma_mem(&sc->sc_requests_cdm);
err6:
        cesa_free_dma_mem(&sc->sc_sdesc_cdm);
err5:
        cesa_free_dma_mem(&sc->sc_tdesc_cdm);
err4:
        bus_dma_tag_destroy(sc->sc_data_dtag);
err3:
        bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie);
err2:
        if (sc->sc_soc_id == MV_DEV_88F6828 ||
            sc->sc_soc_id == MV_DEV_88F6820 ||
            sc->sc_soc_id == MV_DEV_88F6810)
                pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);
err1:
        bus_release_resources(dev, cesa_res_spec, sc->sc_res);
err0:
        mtx_destroy(&sc->sc_sessions_lock);
        mtx_destroy(&sc->sc_requests_lock);
        mtx_destroy(&sc->sc_sdesc_lock);
        mtx_destroy(&sc->sc_tdesc_lock);
        mtx_destroy(&sc->sc_sc_lock);
        return (ENXIO);
}

static int
cesa_detach(device_t dev)
{
        struct cesa_softc *sc;
        int i;

        sc = device_get_softc(dev);

        /* TODO: Wait for completion of queued requests before shutdown. */

        /* Mask interrupts */
        CESA_REG_WRITE(sc, CESA_ICM, 0);
        CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, 0);

        /* Unregister from OCF */
        crypto_unregister_all(sc->sc_cid);

        /* Free DMA Maps */
        for (i = 0; i < CESA_REQUESTS; i++)
                bus_dmamap_destroy(sc->sc_data_dtag,
                    sc->sc_requests[i].cr_dmap);

        /* Free DMA Memory */
        cesa_free_dma_mem(&sc->sc_requests_cdm);
        cesa_free_dma_mem(&sc->sc_sdesc_cdm);
        cesa_free_dma_mem(&sc->sc_tdesc_cdm);

        /* Free DMA Tag */
        bus_dma_tag_destroy(sc->sc_data_dtag);

        /* Stop interrupt */
        bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie);

        /* Release I/O and IRQ resources */
        bus_release_resources(dev, cesa_res_spec, sc->sc_res);

        /* Unmap SRAM memory */
        if (sc->sc_soc_id == MV_DEV_88F6828 ||
            sc->sc_soc_id == MV_DEV_88F6820 ||
            sc->sc_soc_id == MV_DEV_88F6810)
                pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);

        /* Destroy mutexes */
        mtx_destroy(&sc->sc_sessions_lock);
        mtx_destroy(&sc->sc_requests_lock);
        mtx_destroy(&sc->sc_sdesc_lock);
        mtx_destroy(&sc->sc_tdesc_lock);
        mtx_destroy(&sc->sc_sc_lock);

        return (0);
}

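/*
 * Interrupt handler: acknowledge and decode TDMA/CESA error causes, collect
 * the chain of completed requests, restart the engine with any requests that
 * became ready in the meantime, then finish each completed request by
 * copying out or verifying the digest and handing the cryptop back to the
 * framework. Finally, unblock the crypto queue if the driver had stalled
 * for lack of descriptors.
 */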
static void
cesa_intr(void *arg)
{
        STAILQ_HEAD(, cesa_request) requests;
        struct cesa_request *cr, *tmp;
        struct cesa_softc *sc;
        uint32_t ecr, icr;
        uint8_t hash[HASH_MAX_LEN];
        int blocked;

        sc = arg;

        /* Ack interrupt */
        ecr = CESA_TDMA_READ(sc, CESA_TDMA_ECR);
        CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0);
        icr = CESA_REG_READ(sc, CESA_ICR);
        CESA_REG_WRITE(sc, CESA_ICR, 0);

        /* Check for TDMA errors */
        if (ecr & CESA_TDMA_ECR_MISS) {
                device_printf(sc->sc_dev, "TDMA Miss error detected!\n");
                sc->sc_error = EIO;
        }

        if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) {
                device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n");
                sc->sc_error = EIO;
        }

        if (ecr & CESA_TDMA_ECR_BOTH_HIT) {
                device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n");
                sc->sc_error = EIO;
        }

        if (ecr & CESA_TDMA_ECR_DATA_ERROR) {
                device_printf(sc->sc_dev, "TDMA Data error detected!\n");
                sc->sc_error = EIO;
        }

        /* Check for CESA errors */
        if (icr & sc->sc_tperr) {
                device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n");
                sc->sc_error = EIO;
        }

        /* If there is nothing more to do, return */
        if ((icr & CESA_ICR_ACCTDMA) == 0)
                return;

        /* Get all finished requests */
        CESA_LOCK(sc, requests);
        STAILQ_INIT(&requests);
        STAILQ_CONCAT(&requests, &sc->sc_queued_requests);
        STAILQ_INIT(&sc->sc_queued_requests);
        CESA_UNLOCK(sc, requests);

        /* Execute all ready requests */
        cesa_execute(sc);

        /* Process completed requests */
        cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD |
            BUS_DMASYNC_POSTWRITE);

        STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) {
                bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

                cr->cr_crp->crp_etype = sc->sc_error;
                if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0) {
                        if (cr->cr_crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
                                crypto_copydata(cr->cr_crp,
                                    cr->cr_crp->crp_digest_start,
                                    cr->cr_cs->cs_hlen, hash);
                                if (timingsafe_bcmp(hash, cr->cr_csd->csd_hash,
                                    cr->cr_cs->cs_hlen) != 0)
                                        cr->cr_crp->crp_etype = EBADMSG;
                        } else
                                crypto_copyback(cr->cr_crp,
                                    cr->cr_crp->crp_digest_start,
                                    cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash);
                }
                crypto_done(cr->cr_crp);
                cesa_free_request(sc, cr);
        }

        cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD |
            BUS_DMASYNC_PREWRITE);

        sc->sc_error = 0;

        /* Unblock driver if it ran out of resources */
        CESA_LOCK(sc, sc);
        blocked = sc->sc_blocked;
        sc->sc_blocked = 0;
        CESA_UNLOCK(sc, sc);

        if (blocked)
                crypto_unblock(sc->sc_cid, blocked);
}

static bool
cesa_cipher_supported(const struct crypto_session_params *csp)
{

        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_CBC:
                if (csp->csp_ivlen != AES_BLOCK_LEN)
                        return (false);
                break;
        default:
                return (false);
        }

        if (csp->csp_cipher_klen > CESA_MAX_KEY_LEN)
                return (false);

        return (true);
}

static bool
cesa_auth_supported(struct cesa_softc *sc,
    const struct crypto_session_params *csp)
{

        switch (csp->csp_auth_alg) {
        case CRYPTO_SHA2_256_HMAC:
                if (!(sc->sc_soc_id == MV_DEV_88F6828 ||
                    sc->sc_soc_id == MV_DEV_88F6820 ||
                    sc->sc_soc_id == MV_DEV_88F6810))
                        return (false);
                /* FALLTHROUGH */
        case CRYPTO_SHA1:
        case CRYPTO_SHA1_HMAC:
                break;
        default:
                return (false);
        }

        if (csp->csp_auth_klen > CESA_MAX_MKEY_LEN)
                return (false);

        return (true);
}

static int
cesa_probesession(device_t dev, const struct crypto_session_params *csp)
{
        struct cesa_softc *sc;

        sc = device_get_softc(dev);
        if (csp->csp_flags != 0)
                return (EINVAL);
        switch (csp->csp_mode) {
        case CSP_MODE_DIGEST:
                if (!cesa_auth_supported(sc, csp))
                        return (EINVAL);
                break;
        case CSP_MODE_CIPHER:
                if (!cesa_cipher_supported(csp))
                        return (EINVAL);
                break;
        case CSP_MODE_ETA:
                if (!cesa_auth_supported(sc, csp) ||
                    !cesa_cipher_supported(csp))
                        return (EINVAL);
                break;
        default:
                return (EINVAL);
        }
        return (CRYPTODEV_PROBE_HARDWARE);
}

static int
cesa_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
        struct cesa_session *cs;
        struct cesa_softc *sc;
        int error;

        sc = device_get_softc(dev);
        error = 0;

        /* Allocate session */
        cs = crypto_get_driver_session(cses);

        /* Prepare CESA configuration */
        cs->cs_config = 0;
        cs->cs_ivlen = 1;
        cs->cs_mblen = 1;

        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_CBC:
                cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC;
                cs->cs_ivlen = AES_BLOCK_LEN;
                break;
        }

        switch (csp->csp_auth_alg) {
        case CRYPTO_SHA1:
                cs->cs_mblen = 1;
                cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN :
                    csp->csp_auth_mlen;
                cs->cs_config |= CESA_CSHD_SHA1;
                break;
        case CRYPTO_SHA1_HMAC:
                cs->cs_mblen = SHA1_BLOCK_LEN;
                cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN :
                    csp->csp_auth_mlen;
                cs->cs_config |= CESA_CSHD_SHA1_HMAC;
                if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
                        cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
                break;
        case CRYPTO_SHA2_256_HMAC:
                cs->cs_mblen = SHA2_256_BLOCK_LEN;
                cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA2_256_HASH_LEN :
                    csp->csp_auth_mlen;
                cs->cs_config |= CESA_CSHD_SHA2_256_HMAC;
                break;
        }

        /* Save cipher key */
        if (csp->csp_cipher_key != NULL) {
                memcpy(cs->cs_key, csp->csp_cipher_key,
                    csp->csp_cipher_klen);
                if (csp->csp_cipher_alg == CRYPTO_AES_CBC)
                        error = cesa_prep_aes_key(cs, csp);
        }

        /* Save digest key */
        if (csp->csp_auth_key != NULL)
                cesa_set_mkey(cs, csp->csp_auth_alg, csp->csp_auth_key,
                    csp->csp_auth_klen);

        return (error);
}

static int
cesa_process(device_t dev, struct cryptop *crp, int hint)
{
        const struct crypto_session_params *csp;
        struct cesa_request *cr;
        struct cesa_session *cs;
        struct cesa_softc *sc;
        int error;

        sc = device_get_softc(dev);
        error = 0;

        cs = crypto_get_driver_session(crp->crp_session);
        csp = crypto_get_params(crp->crp_session);

        /* Check and parse input */
        if (crypto_buffer_len(&crp->crp_buf) > CESA_MAX_REQUEST_SIZE) {
                crp->crp_etype = E2BIG;
                crypto_done(crp);
                return (0);
        }

        /*
         * For requests with AAD, only requests where the AAD is
         * immediately adjacent to the payload are supported.
         */
        if (crp->crp_aad_length != 0 &&
            (crp->crp_aad_start + crp->crp_aad_length) !=
            crp->crp_payload_start) {
                crp->crp_etype = EINVAL;
                crypto_done(crp);
                return (0);
        }

        /*
         * Get a request descriptor. Block the driver if there are no
         * free descriptors in the pool.
         */
        cr = cesa_alloc_request(sc);
        if (!cr) {
                CESA_LOCK(sc, sc);
                sc->sc_blocked = CRYPTO_SYMQ;
                CESA_UNLOCK(sc, sc);
                return (ERESTART);
        }

        /* Prepare request */
        cr->cr_crp = crp;
        cr->cr_cs = cs;

        CESA_LOCK(sc, sessions);
        cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        if (csp->csp_cipher_alg != 0)
                crypto_read_iv(crp, cr->cr_csd->csd_iv);

        if (crp->crp_cipher_key != NULL) {
                memcpy(cs->cs_key, crp->crp_cipher_key,
                    csp->csp_cipher_klen);
                if (csp->csp_cipher_alg == CRYPTO_AES_CBC)
                        error = cesa_prep_aes_key(cs, csp);
        }

        if (!error && crp->crp_auth_key != NULL)
                cesa_set_mkey(cs, csp->csp_auth_alg, crp->crp_auth_key,
                    csp->csp_auth_klen);

        /* Convert request to chain of TDMA and SA descriptors */
        if (!error)
                error = cesa_create_chain(sc, csp, cr);

        cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        CESA_UNLOCK(sc, sessions);

        if (error) {
                cesa_free_request(sc, cr);
                crp->crp_etype = error;
                crypto_done(crp);
                return (0);
        }

        bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD |
            BUS_DMASYNC_PREWRITE);

        /* Enqueue request to execution */
        cesa_enqueue_request(sc, cr);

        /* Start execution, if we have no more requests in queue */
        if ((hint & CRYPTO_HINT_MORE) == 0)
                cesa_execute(sc);

        return (0);
}