/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2022 NXP
 *
 */

#include <time.h>
#include <net/if.h>
#include <unistd.h>

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>
#include <rte_hexdump.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>
#include <desc/algo.h>

/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID 0x1957
#define FSL_DEVICE_ID 0x410
#define FSL_SUBSYSTEM_SEC 1
#define FSL_MC_DPSECI_DEVID 3

#define NO_PREFETCH 0

#define DRIVER_DUMP_MODE "drv_dump_mode"
#define DRIVER_STRICT_ORDER "drv_strict_order"

/* DPAA2_SEC_DP_DUMP levels */
enum dpaa2_sec_dump_levels {
	DPAA2_SEC_DP_NO_DUMP,
	DPAA2_SEC_DP_ERR_DUMP,
	DPAA2_SEC_DP_FULL_DUMP
};
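
/*
 * The dump level is selected through the DRIVER_DUMP_MODE devarg; a minimal
 * usage sketch (the dpseci object name below is illustrative):
 *
 *   dpdk-app ... -a fslmc:dpseci.1,drv_dump_mode=2
 *
 * 0 disables dumping, 1 (the default) dumps on SEC errors only, and 2
 * additionally dumps session, descriptor and mbuf contents.
 */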

uint8_t cryptodev_driver_id;
uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;

static inline void
free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;

#ifdef RTE_LIB_SECURITY
	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return;
#endif
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
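	/* The FD address points at the output FLE; the bookkeeping FLE that
	 * carries the crypto op pointer sits immediately before it, hence
	 * the (fle - 1) below.
	 */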
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src)))
		rte_mempool_put(qp->fle_pool, (void *)(fle-1));
	else
		rte_free((void *)(fle-1));
}

#ifdef RTE_LIB_SECURITY
static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
			   struct rte_crypto_op *op,
			   struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf;
	uint32_t in_len = 0, out_len = 0;

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;
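
	/*
	 * Layout of the rte_malloc'd area (one qbman_fle slot each):
	 *   fle[0]  - bookkeeping: crypto op pointer + session ctxt
	 *   fle[1]  - output frame-list entry (op_fle)
	 *   fle[2]  - input frame-list entry (ip_fle)
	 *   fle[3+] - scatter/gather entries, output segments first,
	 *             then input segments
	 */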

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	/* o/p segs */
	while (mbuf->next) {
		sge->length = mbuf->data_len;
		out_len += sge->length;
		sge++;
		mbuf = mbuf->next;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	}
	/* using buf_len for last buf - so that extra data can be added */
	sge->length = mbuf->buf_len - mbuf->data_off;
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	sge->length = mbuf->data_len;
	in_len += sge->length;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		in_len += sge->length;
		mbuf = mbuf->next;
	}
	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
					sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, uint16_t bpid,
			struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("Proto: Memory alloc failed");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
					sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}

	return 0;
}

static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid, qp);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;
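	/* The two assignments above park the crypto op pointer in buf_iova
	 * (a simple, non-compound FD has no FLE to stash it in) and keep the
	 * real buffer IOVA in digest.phys_addr; sec_simple_fd_to_mbuf()
	 * swaps them back on dequeue.
	 */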

	return 0;
}
#endif

static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"iv-len=%d data_off: 0x%x\n",
		sym_op->aead.data.offset,
		sym_op->aead.data.length,
		sess->digest_length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid,
		     struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we go back one FLE from the FD
	 * address to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("GCM: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		"iv-len=%d data_off: 0x%x\n",
		sym_op->aead.data.offset,
		sym_op->aead.data.length,
		sess->digest_length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
	sge->length = sym_op->aead.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	DPAA2_SET_FD_LEN(fd, fle->length);
	return 0;
}

static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		    struct rte_crypto_op *op,
		    struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
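	/* auth_only_len packs the authenticate-only regions surrounding the
	 * cipher window: tail length in the high 16 bits, header length in
	 * the low 16 bits.
	 */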
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("AUTHENC: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->digest_length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}
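
	/* SNOW 3G UIA2 and ZUC EIA3 express auth offset/length in bits at
	 * the cryptodev API level, while SEC operates on bytes - hence the
	 * byte-alignment check and the >>3 conversions above.
	 */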

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
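
	/* For SNOW 3G f9 / ZUC EIA3 the generic IV was repacked above by
	 * conv_to_snow_f9_iv()/conv_to_zuc_eia_iv() into the 12- and 8-byte
	 * layouts the SEC descriptor consumes.
	 */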
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);

	if (data_len <= (mbuf->data_len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("AUTH: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}

static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		   struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);

	sge->length = data_len;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				op->sym->session, cryptodev_driver_id);
#ifdef RTE_LIB_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
#endif
	else {
		DPAA2_SEC_DP_ERR("Session type invalid\n");
		return -ENOTSUP;
	}

	if (!sess) {
		DPAA2_SEC_DP_ERR("Session not available\n");
		return -EINVAL;
	}

	/* Any of the buffers is segmented */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
			((op->sym->m_dst != NULL) &&
			 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIB_SECURITY
		case DPAA2_SEC_IPSEC:
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid, qp);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid, qp);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid, qp);
			break;
#ifdef RTE_LIB_SECURITY
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid, qp);
			break;
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_fd(sess, op, fd, bpid, qp);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
			ret = -ENOTSUP;
		}
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
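			/*
			 * A non-zero mbuf seqn means this op was dequeued
			 * from an ordered/atomic event queue: pass it through
			 * as the enqueue flag so QBMAN consumes the held DQRR
			 * entry (DCA) on this enqueue, then invalidate it.
			 */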
			if (*dpaa2_seqn((*ops)->sym->m_src)) {
				if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
					DPAA2_PER_LCORE_DQRR_SIZE--;
					DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
						*dpaa2_seqn((*ops)->sym->m_src) &
						QBMAN_EQCR_DCA_IDXMASK);
				}
				flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
				*dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
			}

			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
			if (ret) {
				DPAA2_SEC_DP_DEBUG("FD build failed\n");
				goto skip_tx;
			}
			ops++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_ops -= loop;
					DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
					/* freeing the fle buffers */
					while (loop < frames_to_send) {
						free_fle(&fd_arr[loop],
							 dpaa2_qp);
						loop++;
					}
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_ops -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
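
/*
 * Usage sketch (illustrative, not part of the driver): applications reach
 * this handler through the standard cryptodev burst API, e.g.
 *
 *	uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 */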

#ifdef RTE_LIB_SECURITY
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv __rte_unused;

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
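	/* Undo the pointer stash done in build_proto_fd(): buf_iova was
	 * holding the crypto op, digest.phys_addr the real buffer IOVA.
	 */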
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
			op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	if (unlikely(fd->simple.frc)) {
		DPAA2_SEC_ERR("SEC returned Error - %x",
			      fd->simple.frc);
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	} else {
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	}

	return op;
}
#endif

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct rte_mbuf *dst, *src;

#ifdef RTE_LIB_SECURITY
	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);
#endif
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */

	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

#ifdef RTE_LIB_SECURITY
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		uint16_t len = DPAA2_GET_FD_LEN(fd);
		dst->pkt_len = len;
		while (dst->next != NULL) {
			len -= dst->data_len;
			dst = dst->next;
		}
		dst->data_len = len;
	}
#endif
	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		rte_mempool_put(qp->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}

static void
dpaa2_sec_dump(struct rte_crypto_op *op)
{
	int i;
	dpaa2_sec_session *sess = NULL;
	struct ctxt_priv *priv;
	uint8_t bufsize;
	struct rte_crypto_sym_op *sym_op;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
			op->sym->session, cryptodev_driver_id);
#ifdef RTE_LIB_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
			op->sym->sec_session);
#endif

	if (sess == NULL)
		goto mbuf_dump;

	priv = (struct ctxt_priv *)sess->ctxt;
	printf("\n****************************************\n"
		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
		"\tCipher key len:\t%zd\n", sess->ctxt_type,
		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
		sess->cipher_key.length);
	rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
			sess->cipher_key.length);
	rte_hexdump(stdout, "auth key", sess->auth_key.data,
			sess->auth_key.length);
	printf("\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
		"\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
		" len:\t%d\n\taead cipher text:\t%d\n",
		sess->auth_key.length, sess->iv.length, sess->iv.offset,
		sess->digest_length, sess->status,
		sess->ext_params.aead_ctxt.auth_only_len,
		sess->ext_params.aead_ctxt.auth_cipher_text);
#ifdef RTE_LIB_SECURITY
	printf("PDCP session params:\n"
		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
		"\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
		sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
		sess->pdcp.hfn_threshold);

#endif
	bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
	printf("Descriptor Dump:\n");
	for (i = 0; i < bufsize; i++)
		printf("\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);

	printf("\n");
mbuf_dump:
	sym_op = op->sym;
	if (sym_op->m_src) {
		printf("Source mbuf:\n");
		rte_pktmbuf_dump(stdout, sym_op->m_src, sym_op->m_src->data_len);
	}
	if (sym_op->m_dst) {
		printf("Destination mbuf:\n");
		rte_pktmbuf_dump(stdout, sym_op->m_dst, sym_op->m_dst->data_len);
	}

	printf("Session address = %p\ncipher offset: %d, length: %d\n"
		"auth offset: %d, length: %d\n aead offset: %d, length: %d\n",
		sym_op->session,
		sym_op->cipher.data.offset, sym_op->cipher.data.length,
		sym_op->auth.data.offset, sym_op->auth.data.length,
		sym_op->aead.data.offset, sym_op->aead.data.length);
	printf("\n");
}

static void
dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci,
			  struct dpaa2_queue *dpaa2_q)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct rte_crypto_op *op;
	struct qbman_fd *fd;
	struct dpaa2_sec_qp *dpaa2_qp;

	dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq);
	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
	op = sec_fd_to_mbuf(fd, dpaa2_qp);
	/* Instead of freeing, it could be enqueued back to the sec tx queue
	 * (sec->core) after setting an error in the FD, but this would have
	 * a performance impact.
	 */
	rte_pktmbuf_free(op->sym->m_src);
}

static void
dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
				 struct rte_mbuf *m,
				 struct qbman_eq_desc *eqdesc)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

1777 if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1778 orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1779 DPAA2_EQCR_OPRID_SHIFT;
1780 seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1781 DPAA2_EQCR_SEQNUM_SHIFT;
1782
1784 if (!priv->en_loose_ordered) {
1785 qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1786 qbman_eq_desc_set_response(eqdesc, (uint64_t)
1787 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1788 dpio_dev->eqresp_pi]), 1);
1789 qbman_eq_desc_set_token(eqdesc, 1);
1790
1791 eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
1792 eqresp_meta->dpaa2_q = dpaa2_q;
1793 eqresp_meta->mp = m->pool;
1794
1795 dpio_dev->eqresp_pi = (dpio_dev->eqresp_pi + 1 <
1796 MAX_EQ_RESP_ENTRIES) ? dpio_dev->eqresp_pi + 1 : 0;
1797 } else {
1798 qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1799 }
1800 } else {
1801 dq_idx = *dpaa2_seqn(m) - 1;
1802 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1803 DPAA2_PER_LCORE_DQRR_SIZE--;
1804 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1805 }
1806 *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1807 }
1808
1809
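/* Enqueue burst for queues operating in ordered mode. When loose ordering
 * is disabled, each ORP-tracked frame also requests an enqueue response so
 * that the result can be tracked and the associated buffers reclaimed
 * later via dpaa2_sec_free_eqresp_buf().
 */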
1810 static uint16_t
1811 dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
1812 uint16_t nb_ops)
1813 {
1814 /* Function to transmit the frames to the given device and VQ */
1815 uint32_t loop;
1816 int32_t ret;
1817 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1818 uint32_t frames_to_send, num_free_eq_desc, retry_count;
1819 struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1820 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1821 struct qbman_swp *swp;
1822 uint16_t num_tx = 0;
1823 uint16_t bpid;
1824 struct rte_mempool *mb_pool;
1825 struct dpaa2_sec_dev_private *priv =
1826 dpaa2_qp->tx_vq.crypto_data->dev_private;
1827
1828 if (unlikely(nb_ops == 0))
1829 return 0;
1830
1831 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1832 DPAA2_SEC_ERR("sessionless crypto op not supported");
1833 return 0;
1834 }
1835
1836 if (!DPAA2_PER_LCORE_DPIO) {
1837 ret = dpaa2_affine_qbman_swp();
1838 if (ret) {
1839 DPAA2_SEC_ERR("Failure in affining portal");
1840 return 0;
1841 }
1842 }
1843 swp = DPAA2_PER_LCORE_PORTAL;
1844
1845 while (nb_ops) {
1846 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1847 dpaa2_eqcr_size : nb_ops;
1848
1849 if (!priv->en_loose_ordered) {
1850 if (*dpaa2_seqn((*ops)->sym->m_src)) {
1851 num_free_eq_desc = dpaa2_free_eq_descriptors();
1852 if (num_free_eq_desc < frames_to_send)
1853 frames_to_send = num_free_eq_desc;
1854 }
1855 }
1856
1857 for (loop = 0; loop < frames_to_send; loop++) {
1858 /* Prepare enqueue descriptor */
1859 qbman_eq_desc_clear(&eqdesc[loop]);
1860 qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);
1861
1862 if (*dpaa2_seqn((*ops)->sym->m_src))
1863 dpaa2_sec_set_enqueue_descriptor(
1864 &dpaa2_qp->tx_vq,
1865 (*ops)->sym->m_src,
1866 &eqdesc[loop]);
1867 else
1868 qbman_eq_desc_set_no_orp(&eqdesc[loop],
1869 DPAA2_EQ_RESP_ERR_FQ);
1870
1871 /* Clear the unused FD fields before sending */
1872 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1873 mb_pool = (*ops)->sym->m_src->pool;
1874 bpid = mempool_to_bpid(mb_pool);
1875 ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
1876 if (ret) {
1877 DPAA2_SEC_DP_DEBUG("FD build failed\n");
1878 goto skip_tx;
1879 }
1880 ops++;
1881 }
1882
1883 loop = 0;
1884 retry_count = 0;
1885 while (loop < frames_to_send) {
1886 ret = qbman_swp_enqueue_multiple_desc(swp,
1887 &eqdesc[loop], &fd_arr[loop],
1888 frames_to_send - loop);
1889 if (unlikely(ret < 0)) {
1890 retry_count++;
1891 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1892 num_tx += loop;
1893 nb_ops -= loop;
1894 DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
1895 /* freeing the fle buffers */
1896 while (loop < frames_to_send) {
1897 free_fle(&fd_arr[loop],
1898 dpaa2_qp);
1899 loop++;
1900 }
1901 goto skip_tx;
1902 }
1903 } else {
1904 loop += ret;
1905 retry_count = 0;
1906 }
1907 }
1908
1909 num_tx += loop;
1910 nb_ops -= loop;
1911 }
1912
1913 skip_tx:
1914 dpaa2_qp->tx_vq.tx_pkts += num_tx;
1915 dpaa2_qp->tx_vq.err_pkts += nb_ops;
1916 return num_tx;
1917 }
1918
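/* Dequeue burst: issue a single volatile (pull) dequeue command for up to
 * dpaa2_dqrr_size frames, then poll dq_storage until QBMAN signals pull
 * completion, converting each valid FD back into its crypto op.
 */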
1919 static uint16_t
1920 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1921 uint16_t nb_ops)
1922 {
1923 /* Function responsible for receiving frames for a given device and VQ */
1924 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1925 struct qbman_result *dq_storage;
1926 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1927 int ret, num_rx = 0;
1928 uint8_t is_last = 0, status;
1929 struct qbman_swp *swp;
1930 const struct qbman_fd *fd;
1931 struct qbman_pull_desc pulldesc;
1932
1933 if (!DPAA2_PER_LCORE_DPIO) {
1934 ret = dpaa2_affine_qbman_swp();
1935 if (ret) {
1936 DPAA2_SEC_ERR(
1937 "Failed to allocate IO portal, tid: %d\n",
1938 rte_gettid());
1939 return 0;
1940 }
1941 }
1942 swp = DPAA2_PER_LCORE_PORTAL;
1943 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1944
1945 qbman_pull_desc_clear(&pulldesc);
1946 qbman_pull_desc_set_numframes(&pulldesc,
1947 (nb_ops > dpaa2_dqrr_size) ?
1948 dpaa2_dqrr_size : nb_ops);
1949 qbman_pull_desc_set_fq(&pulldesc, fqid);
1950 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1951 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1952 1);
1953
1954 /* Issue a volatile dequeue command. */
1955 while (1) {
1956 if (qbman_swp_pull(swp, &pulldesc)) {
1957 DPAA2_SEC_WARN(
1958 "SEC VDQ command is not issued : QBMAN busy");
1959 /* Portal was busy, try again */
1960 continue;
1961 }
1962 break;
1963 }
1964
1965 /* Receive the packets till the last dequeue entry is found with
1966 * respect to the PULL command issued above.
1967 */
1968 while (!is_last) {
1969 /* Check if the previously issued command has completed.
1970 * Note that the SWP may be shared between the Ethernet driver
1971 * and the SEC driver.
1972 */
1973 while (!qbman_check_command_complete(dq_storage))
1974 ;
1975
1976 /* Loop until the dq_storage is updated with
1977 * a new token by QBMAN
1978 */
1979 while (!qbman_check_new_result(dq_storage))
1980 ;
1981 /* Check whether the last pull command has expired and
1982 * set the condition for loop termination
1983 */
1984 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1985 is_last = 1;
1986 /* Check for valid frame. */
1987 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1988 if (unlikely(
1989 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1990 DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1991 continue;
1992 }
1993 }
1994
1995 fd = qbman_result_DQ_fd(dq_storage);
1996 ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp);
1997
1998 if (unlikely(fd->simple.frc)) {
1999 /* TODO Parse SEC errors */
2000 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
2001 DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
2002 fd->simple.frc);
2003 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
2004 dpaa2_sec_dump(ops[num_rx]);
2005 }
2006
2007 dpaa2_qp->rx_vq.err_pkts += 1;
2008 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
2009 } else {
2010 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2011 }
2012
2013 num_rx++;
2014 dq_storage++;
2015 } /* End of Packet Rx loop */
2016
2017 dpaa2_qp->rx_vq.rx_pkts += num_rx;
2018
2019 DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
2020 dpaa2_qp->rx_vq.err_pkts);
2021 /* Return the total number of packets received to the DPAA2 app */
2022 return num_rx;
2023 }
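
/*
 * Illustrative sketch (not part of the driver): the burst handlers above
 * are reached through the generic cryptodev API; dev_id, qp_id, ops and
 * nb are hypothetical application variables.
 *
 *	uint16_t enq, deq = 0;
 *
 *	enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb);
 *	while (deq < enq)
 *		deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						   &ops[deq], enq - deq);
 */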
2024
2025 /** Release queue pair */
2026 static int
2027 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
2028 {
2029 struct dpaa2_sec_qp *qp =
2030 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
2031
2032 PMD_INIT_FUNC_TRACE();
2033
2034 if (qp->rx_vq.q_storage) {
2035 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
2036 rte_free(qp->rx_vq.q_storage);
2037 }
2038 rte_mempool_free(qp->fle_pool);
2039 rte_free(qp);
2040
2041 dev->data->queue_pairs[queue_pair_id] = NULL;
2042
2043 return 0;
2044 }
2045
2046 /** Setup a queue pair */
2047 static int
2048 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2049 const struct rte_cryptodev_qp_conf *qp_conf,
2050 __rte_unused int socket_id)
2051 {
2052 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2053 struct dpaa2_sec_qp *qp;
2054 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2055 struct dpseci_rx_queue_cfg cfg;
2056 int32_t retcode;
2057 char str[30];
2058
2059 PMD_INIT_FUNC_TRACE();
2060
2061 /* If the qp is already set up, reuse it without reinitializing. */
2062 if (dev->data->queue_pairs[qp_id] != NULL) {
2063 DPAA2_SEC_INFO("QP already setup");
2064 return 0;
2065 }
2066
2067 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
2068 dev, qp_id, qp_conf);
2069
2070 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
2071
2072 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
2073 RTE_CACHE_LINE_SIZE);
2074 if (!qp) {
2075 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
2076 return -ENOMEM;
2077 }
2078
2079 qp->rx_vq.crypto_data = dev->data;
2080 qp->tx_vq.crypto_data = dev->data;
2081 qp->rx_vq.q_storage = rte_malloc("sec dq storage",
2082 sizeof(struct queue_storage_info_t),
2083 RTE_CACHE_LINE_SIZE);
2084 if (!qp->rx_vq.q_storage) {
2085 DPAA2_SEC_ERR("malloc failed for q_storage");
rte_free(qp);
2086 return -ENOMEM;
2087 }
2088 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
2089
2090 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
2091 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
rte_free(qp->rx_vq.q_storage);
rte_free(qp);
2092 return -ENOMEM;
2093 }
2094
2095 dev->data->queue_pairs[qp_id] = qp;
2096
2097 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d_%d",
2098 getpid(), dev->data->dev_id, qp_id);
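/* Per-QP FLE pool: the SP_PUT/SC_GET flags reflect the cryptodev
 * convention that a queue pair is driven by a single thread at a time,
 * so pool puts and gets never race.
 */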
2099 qp->fle_pool = rte_mempool_create((const char *)str,
2100 qp_conf->nb_descriptors,
2101 FLE_POOL_BUF_SIZE,
2102 FLE_POOL_CACHE_SIZE, 0,
2103 NULL, NULL, NULL, NULL,
2104 SOCKET_ID_ANY, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
2105 if (!qp->fle_pool) {
2106 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
2107 return -ENOMEM;
2108 }
2109
2110 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
2111 cfg.user_ctx = (size_t)(&qp->rx_vq);
2112 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
2113 qp_id, &cfg);
2114 return retcode;
2115 }
2116
2117 /** Returns the size of the DPAA2 SEC session structure */
2118 static unsigned int
2119 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2120 {
2121 PMD_INIT_FUNC_TRACE();
2122
2123 return sizeof(dpaa2_sec_session);
2124 }
2125
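/* A minimal sketch (hypothetical names) of the cipher-only transform that
 * reaches this init path via dpaa2_sec_set_session_parameters():
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = NULL,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = aes_key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 */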
2126 static int
2127 dpaa2_sec_cipher_init(struct rte_crypto_sym_xform *xform,
2128 dpaa2_sec_session *session)
2129 {
2130 struct alginfo cipherdata;
2131 int bufsize, ret = 0;
2132 struct ctxt_priv *priv;
2133 struct sec_flow_context *flc;
2134
2135 PMD_INIT_FUNC_TRACE();
2136
2137 /* For SEC CIPHER only one descriptor is required. */
2138 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2139 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2140 RTE_CACHE_LINE_SIZE);
2141 if (priv == NULL) {
2142 DPAA2_SEC_ERR("No Memory for priv CTXT");
2143 return -ENOMEM;
2144 }
2145
2146 flc = &priv->flc_desc[0].flc;
2147
2148 session->ctxt_type = DPAA2_SEC_CIPHER;
2149 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2150 RTE_CACHE_LINE_SIZE);
2151 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2152 DPAA2_SEC_ERR("No Memory for cipher key");
2153 rte_free(priv);
2154 return -ENOMEM;
2155 }
2156 session->cipher_key.length = xform->cipher.key.length;
2157
2158 memcpy(session->cipher_key.data, xform->cipher.key.data,
2159 xform->cipher.key.length);
2160 cipherdata.key = (size_t)session->cipher_key.data;
2161 cipherdata.keylen = session->cipher_key.length;
2162 cipherdata.key_enc_flags = 0;
2163 cipherdata.key_type = RTA_DATA_IMM;
2164
2165 /* Set IV parameters */
2166 session->iv.offset = xform->cipher.iv.offset;
2167 session->iv.length = xform->cipher.iv.length;
2168 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2169 DIR_ENC : DIR_DEC;
2170
2171 switch (xform->cipher.algo) {
2172 case RTE_CRYPTO_CIPHER_AES_CBC:
2173 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2174 cipherdata.algmode = OP_ALG_AAI_CBC;
2175 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2176 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2177 SHR_NEVER, &cipherdata,
2178 session->iv.length,
2179 session->dir);
2180 break;
2181 case RTE_CRYPTO_CIPHER_3DES_CBC:
2182 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2183 cipherdata.algmode = OP_ALG_AAI_CBC;
2184 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2185 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2186 SHR_NEVER, &cipherdata,
2187 session->iv.length,
2188 session->dir);
2189 break;
2190 case RTE_CRYPTO_CIPHER_DES_CBC:
2191 cipherdata.algtype = OP_ALG_ALGSEL_DES;
2192 cipherdata.algmode = OP_ALG_AAI_CBC;
2193 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2194 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2195 SHR_NEVER, &cipherdata,
2196 session->iv.length,
2197 session->dir);
2198 break;
2199 case RTE_CRYPTO_CIPHER_AES_CTR:
2200 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2201 cipherdata.algmode = OP_ALG_AAI_CTR;
2202 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2203 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2204 SHR_NEVER, &cipherdata,
2205 session->iv.length,
2206 session->dir);
2207 break;
2208 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2209 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
2210 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
2211 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
2212 &cipherdata,
2213 session->dir);
2214 break;
2215 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2216 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
2217 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
2218 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
2219 &cipherdata,
2220 session->dir);
2221 break;
2222 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2223 case RTE_CRYPTO_CIPHER_AES_F8:
2224 case RTE_CRYPTO_CIPHER_AES_ECB:
2225 case RTE_CRYPTO_CIPHER_3DES_ECB:
2226 case RTE_CRYPTO_CIPHER_3DES_CTR:
2227 case RTE_CRYPTO_CIPHER_AES_XTS:
2228 case RTE_CRYPTO_CIPHER_ARC4:
2229 case RTE_CRYPTO_CIPHER_NULL:
2230 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2231 xform->cipher.algo);
2232 ret = -ENOTSUP;
2233 goto error_out;
2234 default:
2235 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2236 xform->cipher.algo);
2237 ret = -ENOTSUP;
2238 goto error_out;
2239 }
2240
2241 if (bufsize < 0) {
2242 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2243 ret = -EINVAL;
2244 goto error_out;
2245 }
2246
2247 flc->word1_sdl = (uint8_t)bufsize;
2248 session->ctxt = priv;
2249
2250 #ifdef CAAM_DESC_DEBUG
2251 int i;
2252 for (i = 0; i < bufsize; i++)
2253 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
2254 #endif
2255 return ret;
2256
2257 error_out:
2258 rte_free(session->cipher_key.data);
2259 rte_free(priv);
2260 return ret;
2261 }
2262
2263 static int
2264 dpaa2_sec_auth_init(struct rte_crypto_sym_xform *xform,
2265 dpaa2_sec_session *session)
2266 {
2267 struct alginfo authdata;
2268 int bufsize, ret = 0;
2269 struct ctxt_priv *priv;
2270 struct sec_flow_context *flc;
2271
2272 PMD_INIT_FUNC_TRACE();
2273
2274 /* For SEC AUTH three descriptors are required for various stages */
2275 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2276 sizeof(struct ctxt_priv) + 3 *
2277 sizeof(struct sec_flc_desc),
2278 RTE_CACHE_LINE_SIZE);
2279 if (priv == NULL) {
2280 DPAA2_SEC_ERR("No Memory for priv CTXT");
2281 return -ENOMEM;
2282 }
2283
2284 flc = &priv->flc_desc[DESC_INITFINAL].flc;
2285
2286 session->ctxt_type = DPAA2_SEC_AUTH;
2287 session->auth_key.length = xform->auth.key.length;
2288 if (xform->auth.key.length) {
2289 session->auth_key.data = rte_zmalloc(NULL,
2290 xform->auth.key.length,
2291 RTE_CACHE_LINE_SIZE);
2292 if (session->auth_key.data == NULL) {
2293 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
2294 rte_free(priv);
2295 return -ENOMEM;
2296 }
2297 memcpy(session->auth_key.data, xform->auth.key.data,
2298 xform->auth.key.length);
2299 authdata.key = (size_t)session->auth_key.data;
2300 authdata.key_enc_flags = 0;
2301 authdata.key_type = RTA_DATA_IMM;
2302 }
2303 authdata.keylen = session->auth_key.length;
2304
2305 session->digest_length = xform->auth.digest_length;
2306 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2307 DIR_ENC : DIR_DEC;
2308
2309 switch (xform->auth.algo) {
2310 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2311 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2312 authdata.algmode = OP_ALG_AAI_HMAC;
2313 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2314 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2315 1, 0, SHR_NEVER, &authdata,
2316 !session->dir,
2317 session->digest_length);
2318 break;
2319 case RTE_CRYPTO_AUTH_MD5_HMAC:
2320 authdata.algtype = OP_ALG_ALGSEL_MD5;
2321 authdata.algmode = OP_ALG_AAI_HMAC;
2322 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2323 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2324 1, 0, SHR_NEVER, &authdata,
2325 !session->dir,
2326 session->digest_length);
2327 break;
2328 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2329 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2330 authdata.algmode = OP_ALG_AAI_HMAC;
2331 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2332 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2333 1, 0, SHR_NEVER, &authdata,
2334 !session->dir,
2335 session->digest_length);
2336 break;
2337 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2338 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2339 authdata.algmode = OP_ALG_AAI_HMAC;
2340 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2341 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2342 1, 0, SHR_NEVER, &authdata,
2343 !session->dir,
2344 session->digest_length);
2345 break;
2346 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2347 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2348 authdata.algmode = OP_ALG_AAI_HMAC;
2349 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2350 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2351 1, 0, SHR_NEVER, &authdata,
2352 !session->dir,
2353 session->digest_length);
2354 break;
2355 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2356 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2357 authdata.algmode = OP_ALG_AAI_HMAC;
2358 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2359 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2360 1, 0, SHR_NEVER, &authdata,
2361 !session->dir,
2362 session->digest_length);
2363 break;
2364 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2365 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2366 authdata.algmode = OP_ALG_AAI_F9;
2367 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2368 session->iv.offset = xform->auth.iv.offset;
2369 session->iv.length = xform->auth.iv.length;
2370 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2371 1, 0, &authdata,
2372 !session->dir,
2373 session->digest_length);
2374 break;
2375 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2376 authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2377 authdata.algmode = OP_ALG_AAI_F9;
2378 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2379 session->iv.offset = xform->auth.iv.offset;
2380 session->iv.length = xform->auth.iv.length;
2381 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2382 1, 0, &authdata,
2383 !session->dir,
2384 session->digest_length);
2385 break;
2386 case RTE_CRYPTO_AUTH_SHA1:
2387 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2388 authdata.algmode = OP_ALG_AAI_HASH;
2389 session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2390 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2391 1, 0, SHR_NEVER, &authdata,
2392 !session->dir,
2393 session->digest_length);
2394 break;
2395 case RTE_CRYPTO_AUTH_MD5:
2396 authdata.algtype = OP_ALG_ALGSEL_MD5;
2397 authdata.algmode = OP_ALG_AAI_HASH;
2398 session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2399 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2400 1, 0, SHR_NEVER, &authdata,
2401 !session->dir,
2402 session->digest_length);
2403 break;
2404 case RTE_CRYPTO_AUTH_SHA256:
2405 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2406 authdata.algmode = OP_ALG_AAI_HASH;
2407 session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2408 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2409 1, 0, SHR_NEVER, &authdata,
2410 !session->dir,
2411 session->digest_length);
2412 break;
2413 case RTE_CRYPTO_AUTH_SHA384:
2414 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2415 authdata.algmode = OP_ALG_AAI_HASH;
2416 session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2417 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2418 1, 0, SHR_NEVER, &authdata,
2419 !session->dir,
2420 session->digest_length);
2421 break;
2422 case RTE_CRYPTO_AUTH_SHA512:
2423 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2424 authdata.algmode = OP_ALG_AAI_HASH;
2425 session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2426 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2427 1, 0, SHR_NEVER, &authdata,
2428 !session->dir,
2429 session->digest_length);
2430 break;
2431 case RTE_CRYPTO_AUTH_SHA224:
2432 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2433 authdata.algmode = OP_ALG_AAI_HASH;
2434 session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2435 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2436 1, 0, SHR_NEVER, &authdata,
2437 !session->dir,
2438 session->digest_length);
2439 break;
2440 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2441 authdata.algtype = OP_ALG_ALGSEL_AES;
2442 authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2443 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2444 bufsize = cnstr_shdsc_aes_mac(
2445 priv->flc_desc[DESC_INITFINAL].desc,
2446 1, 0, SHR_NEVER, &authdata,
2447 !session->dir,
2448 session->digest_length);
2449 break;
2450 case RTE_CRYPTO_AUTH_AES_CMAC:
2451 authdata.algtype = OP_ALG_ALGSEL_AES;
2452 authdata.algmode = OP_ALG_AAI_CMAC;
2453 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2454 bufsize = cnstr_shdsc_aes_mac(
2455 priv->flc_desc[DESC_INITFINAL].desc,
2456 1, 0, SHR_NEVER, &authdata,
2457 !session->dir,
2458 session->digest_length);
2459 break;
2460 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2461 case RTE_CRYPTO_AUTH_AES_GMAC:
2462 case RTE_CRYPTO_AUTH_KASUMI_F9:
2463 case RTE_CRYPTO_AUTH_NULL:
2464 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
2465 xform->auth.algo);
2466 ret = -ENOTSUP;
2467 goto error_out;
2468 default:
2469 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2470 xform->auth.algo);
2471 ret = -ENOTSUP;
2472 goto error_out;
2473 }
2474
2475 if (bufsize < 0) {
2476 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2477 ret = -EINVAL;
2478 goto error_out;
2479 }
2480
2481 flc->word1_sdl = (uint8_t)bufsize;
2482 session->ctxt = priv;
2483 #ifdef CAAM_DESC_DEBUG
2484 int i;
2485 for (i = 0; i < bufsize; i++)
2486 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2487 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2488 #endif
2489
2490 return ret;
2491
2492 error_out:
2493 rte_free(session->auth_key.data);
2494 rte_free(priv);
2495 return ret;
2496 }
2497
2498 static int
2499 dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform,
2500 dpaa2_sec_session *session)
2501 {
2502 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2503 struct alginfo aeaddata;
2504 int bufsize;
2505 struct ctxt_priv *priv;
2506 struct sec_flow_context *flc;
2507 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2508 int err, ret = 0;
2509
2510 PMD_INIT_FUNC_TRACE();
2511
2512 /* Set IV parameters */
2513 session->iv.offset = aead_xform->iv.offset;
2514 session->iv.length = aead_xform->iv.length;
2515 session->ctxt_type = DPAA2_SEC_AEAD;
2516
2517 /* For SEC AEAD only one descriptor is required */
2518 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2519 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2520 RTE_CACHE_LINE_SIZE);
2521 if (priv == NULL) {
2522 DPAA2_SEC_ERR("No Memory for priv CTXT");
2523 return -ENOMEM;
2524 }
2525
2526 flc = &priv->flc_desc[0].flc;
2527
2528 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2529 RTE_CACHE_LINE_SIZE);
2530 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2531 DPAA2_SEC_ERR("No Memory for aead key");
2532 rte_free(priv);
2533 return -ENOMEM;
2534 }
2535 memcpy(session->aead_key.data, aead_xform->key.data,
2536 aead_xform->key.length);
2537
2538 session->digest_length = aead_xform->digest_length;
2539 session->aead_key.length = aead_xform->key.length;
2540 ctxt->auth_only_len = aead_xform->aad_length;
2541
2542 aeaddata.key = (size_t)session->aead_key.data;
2543 aeaddata.keylen = session->aead_key.length;
2544 aeaddata.key_enc_flags = 0;
2545 aeaddata.key_type = RTA_DATA_IMM;
2546
2547 switch (aead_xform->algo) {
2548 case RTE_CRYPTO_AEAD_AES_GCM:
2549 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2550 aeaddata.algmode = OP_ALG_AAI_GCM;
2551 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2552 break;
2553 case RTE_CRYPTO_AEAD_AES_CCM:
2554 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2555 aead_xform->algo);
2556 ret = -ENOTSUP;
2557 goto error_out;
2558 default:
2559 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2560 aead_xform->algo);
2561 ret = -ENOTSUP;
2562 goto error_out;
2563 }
2564 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2565 DIR_ENC : DIR_DEC;
2566
2567 priv->flc_desc[0].desc[0] = aeaddata.keylen;
2568 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2569 DESC_JOB_IO_LEN,
2570 (unsigned int *)priv->flc_desc[0].desc,
2571 &priv->flc_desc[0].desc[1], 1);
2572
2573 if (err < 0) {
2574 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2575 ret = -EINVAL;
2576 goto error_out;
2577 }
2578 if (priv->flc_desc[0].desc[1] & 1) {
2579 aeaddata.key_type = RTA_DATA_IMM;
2580 } else {
2581 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2582 aeaddata.key_type = RTA_DATA_PTR;
2583 }
2584 priv->flc_desc[0].desc[0] = 0;
2585 priv->flc_desc[0].desc[1] = 0;
2586
2587 if (session->dir == DIR_ENC)
2588 bufsize = cnstr_shdsc_gcm_encap(
2589 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2590 &aeaddata, session->iv.length,
2591 session->digest_length);
2592 else
2593 bufsize = cnstr_shdsc_gcm_decap(
2594 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2595 &aeaddata, session->iv.length,
2596 session->digest_length);
2597 if (bufsize < 0) {
2598 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2599 ret = -EINVAL;
2600 goto error_out;
2601 }
2602
2603 flc->word1_sdl = (uint8_t)bufsize;
2604 session->ctxt = priv;
2605 #ifdef CAAM_DESC_DEBUG
2606 int i;
2607 for (i = 0; i < bufsize; i++)
2608 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2609 i, priv->flc_desc[0].desc[i]);
2610 #endif
2611 return ret;
2612
2613 error_out:
2614 rte_free(session->aead_key.data);
2615 rte_free(priv);
2616 return ret;
2617 }
2618
2619
2620 static int
2621 dpaa2_sec_aead_chain_init(struct rte_crypto_sym_xform *xform,
2622 dpaa2_sec_session *session)
2623 {
2624 struct alginfo authdata, cipherdata;
2625 int bufsize;
2626 struct ctxt_priv *priv;
2627 struct sec_flow_context *flc;
2628 struct rte_crypto_cipher_xform *cipher_xform;
2629 struct rte_crypto_auth_xform *auth_xform;
2630 int err, ret = 0;
2631
2632 PMD_INIT_FUNC_TRACE();
2633
2634 if (session->ext_params.aead_ctxt.auth_cipher_text) {
2635 cipher_xform = &xform->cipher;
2636 auth_xform = &xform->next->auth;
2637 session->ctxt_type =
2638 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2639 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2640 } else {
2641 cipher_xform = &xform->next->cipher;
2642 auth_xform = &xform->auth;
2643 session->ctxt_type =
2644 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2645 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2646 }
2647
2648 /* Set IV parameters */
2649 session->iv.offset = cipher_xform->iv.offset;
2650 session->iv.length = cipher_xform->iv.length;
2651
2652 /* For SEC cipher-auth chaining only one descriptor is required */
2653 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2654 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2655 RTE_CACHE_LINE_SIZE);
2656 if (priv == NULL) {
2657 DPAA2_SEC_ERR("No Memory for priv CTXT");
2658 return -ENOMEM;
2659 }
2660
2661 flc = &priv->flc_desc[0].flc;
2662
2663 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2664 RTE_CACHE_LINE_SIZE);
2665 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2666 DPAA2_SEC_ERR("No Memory for cipher key");
2667 rte_free(priv);
2668 return -ENOMEM;
2669 }
2670 session->cipher_key.length = cipher_xform->key.length;
2671 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2672 RTE_CACHE_LINE_SIZE);
2673 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2674 DPAA2_SEC_ERR("No Memory for auth key");
2675 rte_free(session->cipher_key.data);
2676 rte_free(priv);
2677 return -ENOMEM;
2678 }
2679 session->auth_key.length = auth_xform->key.length;
2680 memcpy(session->cipher_key.data, cipher_xform->key.data,
2681 cipher_xform->key.length);
2682 memcpy(session->auth_key.data, auth_xform->key.data,
2683 auth_xform->key.length);
2684
2685 authdata.key = (size_t)session->auth_key.data;
2686 authdata.keylen = session->auth_key.length;
2687 authdata.key_enc_flags = 0;
2688 authdata.key_type = RTA_DATA_IMM;
2689
2690 session->digest_length = auth_xform->digest_length;
2691
2692 switch (auth_xform->algo) {
2693 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2694 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2695 authdata.algmode = OP_ALG_AAI_HMAC;
2696 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2697 break;
2698 case RTE_CRYPTO_AUTH_MD5_HMAC:
2699 authdata.algtype = OP_ALG_ALGSEL_MD5;
2700 authdata.algmode = OP_ALG_AAI_HMAC;
2701 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2702 break;
2703 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2704 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2705 authdata.algmode = OP_ALG_AAI_HMAC;
2706 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2707 break;
2708 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2709 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2710 authdata.algmode = OP_ALG_AAI_HMAC;
2711 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2712 break;
2713 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2714 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2715 authdata.algmode = OP_ALG_AAI_HMAC;
2716 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2717 break;
2718 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2719 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2720 authdata.algmode = OP_ALG_AAI_HMAC;
2721 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2722 break;
2723 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2724 authdata.algtype = OP_ALG_ALGSEL_AES;
2725 authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2726 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2727 break;
2728 case RTE_CRYPTO_AUTH_AES_CMAC:
2729 authdata.algtype = OP_ALG_ALGSEL_AES;
2730 authdata.algmode = OP_ALG_AAI_CMAC;
2731 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2732 break;
2733 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2734 case RTE_CRYPTO_AUTH_AES_GMAC:
2735 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2736 case RTE_CRYPTO_AUTH_NULL:
2737 case RTE_CRYPTO_AUTH_SHA1:
2738 case RTE_CRYPTO_AUTH_SHA256:
2739 case RTE_CRYPTO_AUTH_SHA512:
2740 case RTE_CRYPTO_AUTH_SHA224:
2741 case RTE_CRYPTO_AUTH_SHA384:
2742 case RTE_CRYPTO_AUTH_MD5:
2743 case RTE_CRYPTO_AUTH_KASUMI_F9:
2744 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2745 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2746 auth_xform->algo);
2747 ret = -ENOTSUP;
2748 goto error_out;
2749 default:
2750 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2751 auth_xform->algo);
2752 ret = -ENOTSUP;
2753 goto error_out;
2754 }
2755 cipherdata.key = (size_t)session->cipher_key.data;
2756 cipherdata.keylen = session->cipher_key.length;
2757 cipherdata.key_enc_flags = 0;
2758 cipherdata.key_type = RTA_DATA_IMM;
2759
2760 switch (cipher_xform->algo) {
2761 case RTE_CRYPTO_CIPHER_AES_CBC:
2762 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2763 cipherdata.algmode = OP_ALG_AAI_CBC;
2764 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2765 break;
2766 case RTE_CRYPTO_CIPHER_3DES_CBC:
2767 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2768 cipherdata.algmode = OP_ALG_AAI_CBC;
2769 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2770 break;
2771 case RTE_CRYPTO_CIPHER_DES_CBC:
2772 cipherdata.algtype = OP_ALG_ALGSEL_DES;
2773 cipherdata.algmode = OP_ALG_AAI_CBC;
2774 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2775 break;
2776 case RTE_CRYPTO_CIPHER_AES_CTR:
2777 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2778 cipherdata.algmode = OP_ALG_AAI_CTR;
2779 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2780 break;
2781 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2782 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2783 case RTE_CRYPTO_CIPHER_NULL:
2784 case RTE_CRYPTO_CIPHER_3DES_ECB:
2785 case RTE_CRYPTO_CIPHER_3DES_CTR:
2786 case RTE_CRYPTO_CIPHER_AES_ECB:
2787 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2788 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2789 cipher_xform->algo);
2790 ret = -ENOTSUP;
2791 goto error_out;
2792 default:
2793 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2794 cipher_xform->algo);
2795 ret = -ENOTSUP;
2796 goto error_out;
2797 }
2798 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2799 DIR_ENC : DIR_DEC;
2800
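/* Stash the key lengths in the first descriptor words so that
 * rta_inline_query() can report, as a bitmask written to desc[2], which
 * keys fit inline in the shared descriptor; keys that do not fit are
 * passed by pointer instead. The scratch words are cleared before the
 * descriptor is actually built.
 */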
2801 priv->flc_desc[0].desc[0] = cipherdata.keylen;
2802 priv->flc_desc[0].desc[1] = authdata.keylen;
2803 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2804 DESC_JOB_IO_LEN,
2805 (unsigned int *)priv->flc_desc[0].desc,
2806 &priv->flc_desc[0].desc[2], 2);
2807
2808 if (err < 0) {
2809 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2810 ret = -EINVAL;
2811 goto error_out;
2812 }
2813 if (priv->flc_desc[0].desc[2] & 1) {
2814 cipherdata.key_type = RTA_DATA_IMM;
2815 } else {
2816 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2817 cipherdata.key_type = RTA_DATA_PTR;
2818 }
2819 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2820 authdata.key_type = RTA_DATA_IMM;
2821 } else {
2822 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2823 authdata.key_type = RTA_DATA_PTR;
2824 }
2825 priv->flc_desc[0].desc[0] = 0;
2826 priv->flc_desc[0].desc[1] = 0;
2827 priv->flc_desc[0].desc[2] = 0;
2828
2829 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2830 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2831 0, SHR_SERIAL,
2832 &cipherdata, &authdata,
2833 session->iv.length,
2834 session->digest_length,
2835 session->dir);
2836 if (bufsize < 0) {
2837 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2838 ret = -EINVAL;
2839 goto error_out;
2840 }
2841 } else {
2842 DPAA2_SEC_ERR("Hash before cipher not supported");
2843 ret = -ENOTSUP;
2844 goto error_out;
2845 }
2846
2847 flc->word1_sdl = (uint8_t)bufsize;
2848 session->ctxt = priv;
2849 #ifdef CAAM_DESC_DEBUG
2850 int i;
2851 for (i = 0; i < bufsize; i++)
2852 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2853 i, priv->flc_desc[0].desc[i]);
2854 #endif
2855
2856 return ret;
2857
2858 error_out:
2859 rte_free(session->cipher_key.data);
2860 rte_free(session->auth_key.data);
2861 rte_free(priv);
2862 return ret;
2863 }
2864
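/* A minimal sketch (hypothetical names) of a chained cipher-then-auth
 * transform as dispatched below:
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = hmac_key, .length = 20 },
 *			.digest_length = 12,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xf,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = aes_key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 */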
2865 static int
2866 dpaa2_sec_set_session_parameters(struct rte_crypto_sym_xform *xform, void *sess)
2867 {
2868 dpaa2_sec_session *session = sess;
2869 int ret;
2870
2871 PMD_INIT_FUNC_TRACE();
2872
2873 if (unlikely(sess == NULL)) {
2874 DPAA2_SEC_ERR("Invalid session struct");
2875 return -EINVAL;
2876 }
2877
2878 memset(session, 0, sizeof(dpaa2_sec_session));
2879 /* Default IV length = 0 */
2880 session->iv.length = 0;
2881
2882 /* Cipher Only */
2883 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2884 ret = dpaa2_sec_cipher_init(xform, session);
2885
2886 /* Authentication Only */
2887 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2888 xform->next == NULL) {
2889 ret = dpaa2_sec_auth_init(xform, session);
2890
2891 /* Cipher then Authenticate */
2892 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2893 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2894 session->ext_params.aead_ctxt.auth_cipher_text = true;
2895 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2896 ret = dpaa2_sec_auth_init(xform, session);
2897 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2898 ret = dpaa2_sec_cipher_init(xform, session);
2899 else
2900 ret = dpaa2_sec_aead_chain_init(xform, session);
2901 /* Authenticate then Cipher */
2902 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2903 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2904 session->ext_params.aead_ctxt.auth_cipher_text = false;
2905 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2906 ret = dpaa2_sec_cipher_init(xform, session);
2907 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2908 ret = dpaa2_sec_auth_init(xform, session);
2909 else
2910 ret = dpaa2_sec_aead_chain_init(xform, session);
2911 /* AEAD operation for AES-GCM kind of Algorithms */
2912 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2913 xform->next == NULL) {
2914 ret = dpaa2_sec_aead_init(xform, session);
2915
2916 } else {
2917 DPAA2_SEC_ERR("Invalid crypto type");
2918 return -EINVAL;
2919 }
2920
2921 return ret;
2922 }
2923
2924 #ifdef RTE_LIB_SECURITY
2925 static int
2926 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2927 dpaa2_sec_session *session,
2928 struct alginfo *aeaddata)
2929 {
2930 PMD_INIT_FUNC_TRACE();
2931
2932 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2933 RTE_CACHE_LINE_SIZE);
2934 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2935 DPAA2_SEC_ERR("No Memory for aead key");
2936 return -ENOMEM;
2937 }
2938 memcpy(session->aead_key.data, aead_xform->key.data,
2939 aead_xform->key.length);
2940
2941 session->digest_length = aead_xform->digest_length;
2942 session->aead_key.length = aead_xform->key.length;
2943
2944 aeaddata->key = (size_t)session->aead_key.data;
2945 aeaddata->keylen = session->aead_key.length;
2946 aeaddata->key_enc_flags = 0;
2947 aeaddata->key_type = RTA_DATA_IMM;
2948
2949 switch (aead_xform->algo) {
2950 case RTE_CRYPTO_AEAD_AES_GCM:
2951 switch (session->digest_length) {
2952 case 8:
2953 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2954 break;
2955 case 12:
2956 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2957 break;
2958 case 16:
2959 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2960 break;
2961 default:
2962 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2963 session->digest_length);
2964 return -EINVAL;
2965 }
2966 aeaddata->algmode = OP_ALG_AAI_GCM;
2967 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2968 break;
2969 case RTE_CRYPTO_AEAD_AES_CCM:
2970 switch (session->digest_length) {
2971 case 8:
2972 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2973 break;
2974 case 12:
2975 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2976 break;
2977 case 16:
2978 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2979 break;
2980 default:
2981 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2982 session->digest_length);
2983 return -EINVAL;
2984 }
2985 aeaddata->algmode = OP_ALG_AAI_CCM;
2986 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2987 break;
2988 default:
2989 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2990 aead_xform->algo);
2991 return -ENOTSUP;
2992 }
2993 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2994 DIR_ENC : DIR_DEC;
2995
2996 return 0;
2997 }
2998
2999 static int
3000 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
3001 struct rte_crypto_auth_xform *auth_xform,
3002 dpaa2_sec_session *session,
3003 struct alginfo *cipherdata,
3004 struct alginfo *authdata)
3005 {
3006 if (cipher_xform) {
3007 session->cipher_key.data = rte_zmalloc(NULL,
3008 cipher_xform->key.length,
3009 RTE_CACHE_LINE_SIZE);
3010 if (session->cipher_key.data == NULL &&
3011 cipher_xform->key.length > 0) {
3012 DPAA2_SEC_ERR("No Memory for cipher key");
3013 return -ENOMEM;
3014 }
3015
3016 session->cipher_key.length = cipher_xform->key.length;
3017 memcpy(session->cipher_key.data, cipher_xform->key.data,
3018 cipher_xform->key.length);
3019 session->cipher_alg = cipher_xform->algo;
3020 } else {
3021 session->cipher_key.data = NULL;
3022 session->cipher_key.length = 0;
3023 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3024 }
3025
3026 if (auth_xform) {
3027 session->auth_key.data = rte_zmalloc(NULL,
3028 auth_xform->key.length,
3029 RTE_CACHE_LINE_SIZE);
3030 if (session->auth_key.data == NULL &&
3031 auth_xform->key.length > 0) {
3032 DPAA2_SEC_ERR("No Memory for auth key");
3033 return -ENOMEM;
3034 }
3035 session->auth_key.length = auth_xform->key.length;
3036 memcpy(session->auth_key.data, auth_xform->key.data,
3037 auth_xform->key.length);
3038 session->auth_alg = auth_xform->algo;
3039 session->digest_length = auth_xform->digest_length;
3040 } else {
3041 session->auth_key.data = NULL;
3042 session->auth_key.length = 0;
3043 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
3044 }
3045
3046 authdata->key = (size_t)session->auth_key.data;
3047 authdata->keylen = session->auth_key.length;
3048 authdata->key_enc_flags = 0;
3049 authdata->key_type = RTA_DATA_IMM;
3050 switch (session->auth_alg) {
3051 case RTE_CRYPTO_AUTH_SHA1_HMAC:
3052 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
3053 authdata->algmode = OP_ALG_AAI_HMAC;
3054 break;
3055 case RTE_CRYPTO_AUTH_MD5_HMAC:
3056 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
3057 authdata->algmode = OP_ALG_AAI_HMAC;
3058 break;
3059 case RTE_CRYPTO_AUTH_SHA256_HMAC:
3060 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
3061 authdata->algmode = OP_ALG_AAI_HMAC;
3062 if (session->digest_length != 16)
3063 DPAA2_SEC_WARN(
3064 "+++Using sha256-hmac truncated len is non-standard,"
3065 "it will not work with lookaside proto");
3066 break;
3067 case RTE_CRYPTO_AUTH_SHA384_HMAC:
3068 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
3069 authdata->algmode = OP_ALG_AAI_HMAC;
3070 break;
3071 case RTE_CRYPTO_AUTH_SHA512_HMAC:
3072 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
3073 authdata->algmode = OP_ALG_AAI_HMAC;
3074 break;
3075 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
3076 authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
3077 authdata->algmode = OP_ALG_AAI_XCBC_MAC;
3078 break;
3079 case RTE_CRYPTO_AUTH_AES_CMAC:
3080 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
3081 authdata->algmode = OP_ALG_AAI_CMAC;
3082 break;
3083 case RTE_CRYPTO_AUTH_NULL:
3084 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
3085 break;
3086 case RTE_CRYPTO_AUTH_SHA224_HMAC:
3087 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3088 case RTE_CRYPTO_AUTH_SHA1:
3089 case RTE_CRYPTO_AUTH_SHA256:
3090 case RTE_CRYPTO_AUTH_SHA512:
3091 case RTE_CRYPTO_AUTH_SHA224:
3092 case RTE_CRYPTO_AUTH_SHA384:
3093 case RTE_CRYPTO_AUTH_MD5:
3094 case RTE_CRYPTO_AUTH_AES_GMAC:
3095 case RTE_CRYPTO_AUTH_KASUMI_F9:
3096 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
3097 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3098 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3099 session->auth_alg);
3100 return -ENOTSUP;
3101 default:
3102 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
3103 session->auth_alg);
3104 return -ENOTSUP;
3105 }
3106 cipherdata->key = (size_t)session->cipher_key.data;
3107 cipherdata->keylen = session->cipher_key.length;
3108 cipherdata->key_enc_flags = 0;
3109 cipherdata->key_type = RTA_DATA_IMM;
3110
3111 switch (session->cipher_alg) {
3112 case RTE_CRYPTO_CIPHER_AES_CBC:
3113 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
3114 cipherdata->algmode = OP_ALG_AAI_CBC;
3115 break;
3116 case RTE_CRYPTO_CIPHER_3DES_CBC:
3117 cipherdata->algtype = OP_PCL_IPSEC_3DES;
3118 cipherdata->algmode = OP_ALG_AAI_CBC;
3119 break;
3120 case RTE_CRYPTO_CIPHER_DES_CBC:
3121 cipherdata->algtype = OP_PCL_IPSEC_DES;
3122 cipherdata->algmode = OP_ALG_AAI_CBC;
3123 break;
3124 case RTE_CRYPTO_CIPHER_AES_CTR:
3125 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
3126 cipherdata->algmode = OP_ALG_AAI_CTR;
3127 break;
3128 case RTE_CRYPTO_CIPHER_NULL:
3129 cipherdata->algtype = OP_PCL_IPSEC_NULL;
3130 break;
3131 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3132 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3133 case RTE_CRYPTO_CIPHER_3DES_ECB:
3134 case RTE_CRYPTO_CIPHER_3DES_CTR:
3135 case RTE_CRYPTO_CIPHER_AES_ECB:
3136 case RTE_CRYPTO_CIPHER_KASUMI_F8:
3137 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
3138 session->cipher_alg);
3139 return -ENOTSUP;
3140 default:
3141 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3142 session->cipher_alg);
3143 return -ENOTSUP;
3144 }
3145
3146 return 0;
3147 }
3148
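/* Build a lookaside-protocol IPsec session: translate the rte_security
 * IPsec xform into a SEC encap PDB with a prebuilt tunnel header (egress)
 * or a decap PDB with anti-replay configuration (ingress), then construct
 * the matching shared descriptor.
 */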
3149 static int
3150 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
3151 struct rte_security_session_conf *conf,
3152 void *sess)
3153 {
3154 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
3155 struct rte_crypto_cipher_xform *cipher_xform = NULL;
3156 struct rte_crypto_auth_xform *auth_xform = NULL;
3157 struct rte_crypto_aead_xform *aead_xform = NULL;
3158 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3159 struct ctxt_priv *priv;
3160 struct alginfo authdata, cipherdata;
3161 int bufsize;
3162 struct sec_flow_context *flc;
3163 int ret = -1;
3164
3165 PMD_INIT_FUNC_TRACE();
3166
3167 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3168 sizeof(struct ctxt_priv) +
3169 sizeof(struct sec_flc_desc),
3170 RTE_CACHE_LINE_SIZE);
3171
3172 if (priv == NULL) {
3173 DPAA2_SEC_ERR("No memory for priv CTXT");
3174 return -ENOMEM;
3175 }
3176
3177 flc = &priv->flc_desc[0].flc;
3178
3179 if (ipsec_xform->life.bytes_hard_limit != 0 ||
3180 ipsec_xform->life.bytes_soft_limit != 0 ||
3181 ipsec_xform->life.packets_hard_limit != 0 ||
3182 ipsec_xform->life.packets_soft_limit != 0) {
rte_free(priv);
3183 return -ENOTSUP;
}
3184
3185 memset(session, 0, sizeof(dpaa2_sec_session));
3186
3187 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3188 cipher_xform = &conf->crypto_xform->cipher;
3189 if (conf->crypto_xform->next)
3190 auth_xform = &conf->crypto_xform->next->auth;
3191 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3192 session, &cipherdata, &authdata);
3193 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3194 auth_xform = &conf->crypto_xform->auth;
3195 if (conf->crypto_xform->next)
3196 cipher_xform = &conf->crypto_xform->next->cipher;
3197 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3198 session, &cipherdata, &authdata);
3199 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
3200 aead_xform = &conf->crypto_xform->aead;
3201 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
3202 session, &cipherdata);
3203 authdata.keylen = 0;
3204 authdata.algtype = 0;
3205 } else {
3206 DPAA2_SEC_ERR("XFORM not specified");
3207 ret = -EINVAL;
3208 goto out;
3209 }
3210 if (ret) {
3211 DPAA2_SEC_ERR("Failed to process xform");
3212 goto out;
3213 }
3214
3215 session->ctxt_type = DPAA2_SEC_IPSEC;
3216 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3217 uint8_t *hdr = NULL;
3218 struct ip ip4_hdr;
3219 struct rte_ipv6_hdr ip6_hdr;
3220 struct ipsec_encap_pdb encap_pdb;
3221
3222 flc->dhr = SEC_FLC_DHR_OUTBOUND;
3223 /* For Sec Proto only one descriptor is required. */
3224 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
3225
3226 /* copy algo specific data to PDB */
3227 switch (cipherdata.algtype) {
3228 case OP_PCL_IPSEC_AES_CTR:
3229 encap_pdb.ctr.ctr_initial = 0x00000001;
3230 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3231 break;
3232 case OP_PCL_IPSEC_AES_GCM8:
3233 case OP_PCL_IPSEC_AES_GCM12:
3234 case OP_PCL_IPSEC_AES_GCM16:
3235 memcpy(encap_pdb.gcm.salt,
3236 (uint8_t *)&(ipsec_xform->salt), 4);
3237 break;
3238 }
3239
3240 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3241 PDBOPTS_ESP_OIHI_PDB_INL |
3242 PDBOPTS_ESP_IVSRC |
3243 PDBHMO_ESP_SNR;
3244 if (ipsec_xform->options.dec_ttl)
3245 encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3246 if (ipsec_xform->options.esn)
3247 encap_pdb.options |= PDBOPTS_ESP_ESN;
3248 encap_pdb.spi = ipsec_xform->spi;
3249 session->dir = DIR_ENC;
3250 if (ipsec_xform->tunnel.type ==
3251 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3252 encap_pdb.ip_hdr_len = sizeof(struct ip);
3253 ip4_hdr.ip_v = IPVERSION;
3254 ip4_hdr.ip_hl = 5;
3255 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
3256 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
3257 ip4_hdr.ip_id = 0;
3258 ip4_hdr.ip_off = 0;
3259 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
3260 ip4_hdr.ip_p = IPPROTO_ESP;
3261 ip4_hdr.ip_sum = 0;
3262 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
3263 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
3264 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
3265 &ip4_hdr, sizeof(struct ip));
3266 hdr = (uint8_t *)&ip4_hdr;
3267 } else if (ipsec_xform->tunnel.type ==
3268 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3269 ip6_hdr.vtc_flow = rte_cpu_to_be_32(
3270 DPAA2_IPv6_DEFAULT_VTC_FLOW |
3271 ((ipsec_xform->tunnel.ipv6.dscp <<
3272 RTE_IPV6_HDR_TC_SHIFT) &
3273 RTE_IPV6_HDR_TC_MASK) |
3274 ((ipsec_xform->tunnel.ipv6.flabel <<
3275 RTE_IPV6_HDR_FL_SHIFT) &
3276 RTE_IPV6_HDR_FL_MASK));
3277 /* Payload length will be updated by HW */
3278 ip6_hdr.payload_len = 0;
3279 ip6_hdr.hop_limits =
3280 ipsec_xform->tunnel.ipv6.hlimit;
3281 ip6_hdr.proto = (ipsec_xform->proto ==
3282 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3283 IPPROTO_ESP : IPPROTO_AH;
3284 memcpy(&ip6_hdr.src_addr,
3285 &ipsec_xform->tunnel.ipv6.src_addr, 16);
3286 memcpy(&ip6_hdr.dst_addr,
3287 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
3288 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
3289 hdr = (uint8_t *)&ip6_hdr;
3290 }
3291
3292 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
3293 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3294 SHR_WAIT : SHR_SERIAL, &encap_pdb,
3295 hdr, &cipherdata, &authdata);
3296 } else if (ipsec_xform->direction ==
3297 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3298 struct ipsec_decap_pdb decap_pdb;
3299
3300 flc->dhr = SEC_FLC_DHR_INBOUND;
3301 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
3302 /* copy algo specific data to PDB */
3303 switch (cipherdata.algtype) {
3304 case OP_PCL_IPSEC_AES_CTR:
3305 decap_pdb.ctr.ctr_initial = 0x00000001;
3306 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3307 break;
3308 case OP_PCL_IPSEC_AES_GCM8:
3309 case OP_PCL_IPSEC_AES_GCM12:
3310 case OP_PCL_IPSEC_AES_GCM16:
3311 memcpy(decap_pdb.gcm.salt,
3312 (uint8_t *)&(ipsec_xform->salt), 4);
3313 break;
3314 }
3315
3316 decap_pdb.options = (ipsec_xform->tunnel.type ==
3317 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
3318 sizeof(struct ip) << 16 :
3319 sizeof(struct rte_ipv6_hdr) << 16;
3320 if (ipsec_xform->options.esn)
3321 decap_pdb.options |= PDBOPTS_ESP_ESN;
3322
3323 if (ipsec_xform->replay_win_sz) {
3324 uint32_t win_sz;
3325 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3326
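/* SEC supports only a fixed set of anti-replay window sizes: round the
 * requested size up to a power of two and map it to the nearest
 * supported ARS option (capped at 128 before SEC era 10).
 */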
3327 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3328 DPAA2_SEC_INFO("Max Anti replay Win sz = 128");
3329 win_sz = 128;
3330 }
3331 switch (win_sz) {
3332 case 1:
3333 case 2:
3334 case 4:
3335 case 8:
3336 case 16:
3337 case 32:
3338 decap_pdb.options |= PDBOPTS_ESP_ARS32;
3339 break;
3340 case 64:
3341 decap_pdb.options |= PDBOPTS_ESP_ARS64;
3342 break;
3343 case 256:
3344 decap_pdb.options |= PDBOPTS_ESP_ARS256;
3345 break;
3346 case 512:
3347 decap_pdb.options |= PDBOPTS_ESP_ARS512;
3348 break;
3349 case 1024:
3350 decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3351 break;
3352 case 128:
3353 default:
3354 decap_pdb.options |= PDBOPTS_ESP_ARS128;
3355 }
3356 }
3357 session->dir = DIR_DEC;
3358 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3359 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3360 SHR_WAIT : SHR_SERIAL,
3361 &decap_pdb, &cipherdata, &authdata);
3362 } else
3363 goto out;
3364
3365 if (bufsize < 0) {
3366 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
ret = -EINVAL;
3367 goto out;
3368 }
3369
3370 flc->word1_sdl = (uint8_t)bufsize;
3371
3372 /* Enable the stashing control bit */
3373 DPAA2_SET_FLC_RSC(flc);
3374 flc->word2_rflc_31_0 = lower_32_bits(
3375 (size_t)&(((struct dpaa2_sec_qp *)
3376 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3377 flc->word3_rflc_63_32 = upper_32_bits(
3378 (size_t)&(((struct dpaa2_sec_qp *)
3379 dev->data->queue_pairs[0])->rx_vq));
3380
3381 /* Set EWS bit i.e. enable write-safe */
3382 DPAA2_SET_FLC_EWS(flc);
3383 /* Set BS = 1 i.e reuse input buffers as output buffers */
3384 DPAA2_SET_FLC_REUSE_BS(flc);
3385 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3386 DPAA2_SET_FLC_REUSE_FF(flc);
3387
3388 session->ctxt = priv;
3389
3390 return 0;
3391 out:
3392 rte_free(session->auth_key.data);
3393 rte_free(session->cipher_key.data);
3394 rte_free(priv);
3395 return ret;
3396 }
3397
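/* Build a PDCP session: capture bearer, direction and HFN parameters from
 * the rte_security PDCP xform, map the cipher/auth algorithms onto the
 * PDCP algorithm types, move keys out of line when they exceed the
 * inlining limits, and construct the c-plane or u-plane descriptor.
 */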
3398 static int
3399 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3400 struct rte_security_session_conf *conf,
3401 void *sess)
3402 {
3403 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3404 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3405 struct rte_crypto_auth_xform *auth_xform = NULL;
3406 struct rte_crypto_cipher_xform *cipher_xform = NULL;
3407 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3408 struct ctxt_priv *priv;
3409 struct alginfo authdata, cipherdata;
3410 struct alginfo *p_authdata = NULL;
3411 int bufsize = -1;
3412 struct sec_flow_context *flc;
3413 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3414 int swap = true;
3415 #else
3416 int swap = false;
3417 #endif
3418
3419 PMD_INIT_FUNC_TRACE();
3420
3421 memset(session, 0, sizeof(dpaa2_sec_session));
3422
3423 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3424 sizeof(struct ctxt_priv) +
3425 sizeof(struct sec_flc_desc),
3426 RTE_CACHE_LINE_SIZE);
3427
3428 if (priv == NULL) {
3429 DPAA2_SEC_ERR("No memory for priv CTXT");
3430 return -ENOMEM;
3431 }
3432
3433 flc = &priv->flc_desc[0].flc;
3434
3435 /* find xform types */
3436 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3437 cipher_xform = &xform->cipher;
3438 if (xform->next != NULL &&
3439 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3440 session->ext_params.aead_ctxt.auth_cipher_text = true;
3441 auth_xform = &xform->next->auth;
3442 }
3443 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3444 auth_xform = &xform->auth;
3445 if (xform->next != NULL &&
3446 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3447 session->ext_params.aead_ctxt.auth_cipher_text = false;
3448 cipher_xform = &xform->next->cipher;
3449 }
3450 } else {
3451 DPAA2_SEC_ERR("Invalid crypto type");
3452 return -EINVAL;
3453 }
3454
3455 session->ctxt_type = DPAA2_SEC_PDCP;
3456 if (cipher_xform) {
3457 session->cipher_key.data = rte_zmalloc(NULL,
3458 cipher_xform->key.length,
3459 RTE_CACHE_LINE_SIZE);
3460 if (session->cipher_key.data == NULL &&
3461 cipher_xform->key.length > 0) {
3462 DPAA2_SEC_ERR("No Memory for cipher key");
3463 rte_free(priv);
3464 return -ENOMEM;
3465 }
3466 session->cipher_key.length = cipher_xform->key.length;
3467 memcpy(session->cipher_key.data, cipher_xform->key.data,
3468 cipher_xform->key.length);
3469 session->dir =
3470 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3471 DIR_ENC : DIR_DEC;
3472 session->cipher_alg = cipher_xform->algo;
3473 } else {
3474 session->cipher_key.data = NULL;
3475 session->cipher_key.length = 0;
3476 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3477 session->dir = DIR_ENC;
3478 }
3479
3480 session->pdcp.domain = pdcp_xform->domain;
3481 session->pdcp.bearer = pdcp_xform->bearer;
3482 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3483 session->pdcp.sn_size = pdcp_xform->sn_size;
3484 session->pdcp.hfn = pdcp_xform->hfn;
3485 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3486 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3487 /* hfn ovd offset location is stored in the iv.offset value */
3488 if (cipher_xform)
3489 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3490
3491 cipherdata.key = (size_t)session->cipher_key.data;
3492 cipherdata.keylen = session->cipher_key.length;
3493 cipherdata.key_enc_flags = 0;
3494 cipherdata.key_type = RTA_DATA_IMM;
3495
3496 switch (session->cipher_alg) {
3497 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3498 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3499 break;
3500 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3501 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3502 break;
3503 case RTE_CRYPTO_CIPHER_AES_CTR:
3504 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3505 break;
3506 case RTE_CRYPTO_CIPHER_NULL:
3507 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3508 break;
3509 default:
3510 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3511 session->cipher_alg);
3512 goto out;
3513 }
3514
3515 if (auth_xform) {
3516 session->auth_key.data = rte_zmalloc(NULL,
3517 auth_xform->key.length,
3518 RTE_CACHE_LINE_SIZE);
3519 if (!session->auth_key.data &&
3520 auth_xform->key.length > 0) {
3521 DPAA2_SEC_ERR("No Memory for auth key");
3522 rte_free(session->cipher_key.data);
3523 rte_free(priv);
3524 return -ENOMEM;
3525 }
3526 session->auth_key.length = auth_xform->key.length;
3527 memcpy(session->auth_key.data, auth_xform->key.data,
3528 auth_xform->key.length);
3529 session->auth_alg = auth_xform->algo;
3530 } else {
3531 session->auth_key.data = NULL;
3532 session->auth_key.length = 0;
3533 session->auth_alg = 0;
3534 }
3535 authdata.key = (size_t)session->auth_key.data;
3536 authdata.keylen = session->auth_key.length;
3537 authdata.key_enc_flags = 0;
3538 authdata.key_type = RTA_DATA_IMM;
3539
3540 if (session->auth_alg) {
3541 switch (session->auth_alg) {
3542 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3543 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3544 break;
3545 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3546 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3547 break;
3548 case RTE_CRYPTO_AUTH_AES_CMAC:
3549 authdata.algtype = PDCP_AUTH_TYPE_AES;
3550 break;
3551 case RTE_CRYPTO_AUTH_NULL:
3552 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3553 break;
3554 default:
3555 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3556 session->auth_alg);
3557 goto out;
3558 }
3559
3560 p_authdata = &authdata;
3561 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3562 DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3563 goto out;
3564 }
3565
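/* Keys are embedded immediately (RTA_DATA_IMM) in the shared descriptor
 * when they fit; the rta_inline_*_query() helpers below report how many
 * keys must instead be referenced by IOVA pointer (RTA_DATA_PTR) so the
 * descriptor stays within the CAAM descriptor size limit.
 */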
3566 if (pdcp_xform->sdap_enabled) {
3567 int nb_keys_to_inline =
3568 rta_inline_pdcp_sdap_query(authdata.algtype,
3569 cipherdata.algtype,
3570 session->pdcp.sn_size,
3571 session->pdcp.hfn_ovd);
3572 if (nb_keys_to_inline >= 1) {
3573 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3574 cipherdata.key_type = RTA_DATA_PTR;
3575 }
3576 if (nb_keys_to_inline >= 2) {
3577 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
3578 authdata.key_type = RTA_DATA_PTR;
3579 }
3580 } else {
3581 if (rta_inline_pdcp_query(authdata.algtype,
3582 cipherdata.algtype,
3583 session->pdcp.sn_size,
3584 session->pdcp.hfn_ovd)) {
3585 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3586 cipherdata.key_type = RTA_DATA_PTR;
3587 }
3588 }
3589
3590 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3591 if (session->dir == DIR_ENC)
3592 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3593 priv->flc_desc[0].desc, 1, swap,
3594 pdcp_xform->hfn,
3595 session->pdcp.sn_size,
3596 pdcp_xform->bearer,
3597 pdcp_xform->pkt_dir,
3598 pdcp_xform->hfn_threshold,
3599 &cipherdata, &authdata);
3600 else if (session->dir == DIR_DEC)
3601 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3602 priv->flc_desc[0].desc, 1, swap,
3603 pdcp_xform->hfn,
3604 session->pdcp.sn_size,
3605 pdcp_xform->bearer,
3606 pdcp_xform->pkt_dir,
3607 pdcp_xform->hfn_threshold,
3608 &cipherdata, &authdata);
3609
3610 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
3611 bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
3612 1, swap, &authdata);
3613 } else {
3614 if (session->dir == DIR_ENC) {
3615 if (pdcp_xform->sdap_enabled)
3616 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3617 priv->flc_desc[0].desc, 1, swap,
3618 session->pdcp.sn_size,
3619 pdcp_xform->hfn,
3620 pdcp_xform->bearer,
3621 pdcp_xform->pkt_dir,
3622 pdcp_xform->hfn_threshold,
3623 &cipherdata, p_authdata);
3624 else
3625 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3626 priv->flc_desc[0].desc, 1, swap,
3627 session->pdcp.sn_size,
3628 pdcp_xform->hfn,
3629 pdcp_xform->bearer,
3630 pdcp_xform->pkt_dir,
3631 pdcp_xform->hfn_threshold,
3632 &cipherdata, p_authdata);
3633 } else if (session->dir == DIR_DEC) {
3634 if (pdcp_xform->sdap_enabled)
3635 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3636 priv->flc_desc[0].desc, 1, swap,
3637 session->pdcp.sn_size,
3638 pdcp_xform->hfn,
3639 pdcp_xform->bearer,
3640 pdcp_xform->pkt_dir,
3641 pdcp_xform->hfn_threshold,
3642 &cipherdata, p_authdata);
3643 else
3644 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3645 priv->flc_desc[0].desc, 1, swap,
3646 session->pdcp.sn_size,
3647 pdcp_xform->hfn,
3648 pdcp_xform->bearer,
3649 pdcp_xform->pkt_dir,
3650 pdcp_xform->hfn_threshold,
3651 &cipherdata, p_authdata);
3652 }
3653 }
3654
3655 if (bufsize < 0) {
3656 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3657 goto out;
3658 }
3659
3660 /* Enable the stashing control bit */
3661 DPAA2_SET_FLC_RSC(flc);
3662 flc->word2_rflc_31_0 = lower_32_bits(
3663 (size_t)&(((struct dpaa2_sec_qp *)
3664 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3665 flc->word3_rflc_63_32 = upper_32_bits(
3666 (size_t)&(((struct dpaa2_sec_qp *)
3667 dev->data->queue_pairs[0])->rx_vq));
3668
3669 flc->word1_sdl = (uint8_t)bufsize;
3670
3671 /* TODO - check the performance impact or
3672 * align as per the descriptor type.
3673 * Set EWS bit i.e. enable write-safe:
3674 * DPAA2_SET_FLC_EWS(flc);
3675 */
3676
3677 /* Set BS = 1 i.e. reuse input buffers as output buffers */
3678 DPAA2_SET_FLC_REUSE_BS(flc);
3679 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3680 DPAA2_SET_FLC_REUSE_FF(flc);
3681
3682 session->ctxt = priv;
3683
3684 return 0;
3685 out:
3686 rte_free(session->auth_key.data);
3687 rte_free(session->cipher_key.data);
3688 rte_free(priv);
3689 return -EINVAL;
3690 }
3691
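/** Create a security session: allocate the driver-private session object
 * from the mempool and configure it for IPsec or PDCP; MACsec is not
 * supported by this PMD.
 */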
3692 static int
3693 dpaa2_sec_security_session_create(void *dev,
3694 struct rte_security_session_conf *conf,
3695 struct rte_security_session *sess,
3696 struct rte_mempool *mempool)
3697 {
3698 void *sess_private_data;
3699 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3700 int ret;
3701
3702 if (rte_mempool_get(mempool, &sess_private_data)) {
3703 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3704 return -ENOMEM;
3705 }
3706
3707 switch (conf->protocol) {
3708 case RTE_SECURITY_PROTOCOL_IPSEC:
3709 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3710 sess_private_data);
3711 break;
3712 case RTE_SECURITY_PROTOCOL_MACSEC:
3713 return -ENOTSUP;
3714 case RTE_SECURITY_PROTOCOL_PDCP:
3715 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3716 sess_private_data);
3717 break;
3718 default:
3719 return -EINVAL;
3720 }
3721 if (ret != 0) {
3722 DPAA2_SEC_ERR("Failed to configure session parameters");
3723 /* Return session to mempool */
3724 rte_mempool_put(mempool, sess_private_data);
3725 return ret;
3726 }
3727
3728 set_sec_session_private_data(sess, sess_private_data);
3729
3730 return ret;
3731 }
3732
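/* Hedged usage sketch (application side, not driver code): a PDCP
 * data-plane session might be requested roughly as follows; all field
 * values are examples only.
 *
 *   struct rte_security_session_conf conf = {
 *       .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *       .protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *       .pdcp = {
 *           .domain = RTE_SECURITY_PDCP_MODE_DATA,
 *           .sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *           .bearer = 0x1,
 *           .pkt_dir = 0,
 *           .hfn = 0,
 *           .hfn_threshold = 0xfffff,
 *       },
 *       .crypto_xform = &cipher_xform,
 *   };
 *   sess = rte_security_session_create(sec_ctx, &conf, sess_mp, priv_mp);
 *
 * (The rte_security_session_create() signature varies across DPDK
 * releases; treat this as a sketch, not a reference.)
 */
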
3733 /** Clear the memory of session so it doesn't leave key material behind */
3734 static int
3735 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3736 struct rte_security_session *sess)
3737 {
3738 PMD_INIT_FUNC_TRACE();
3739 void *sess_priv = get_sec_session_private_data(sess);
3740
3741 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3742
3743 if (sess_priv) {
3744 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3745
3746 rte_free(s->ctxt);
3747 rte_free(s->cipher_key.data);
3748 rte_free(s->auth_key.data);
3749 memset(s, 0, sizeof(dpaa2_sec_session));
3750 set_sec_session_private_data(sess, NULL);
3751 rte_mempool_put(sess_mp, sess_priv);
3752 }
3753 return 0;
3754 }
3755 #endif
3756 static int
3757 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3758 struct rte_crypto_sym_xform *xform,
3759 struct rte_cryptodev_sym_session *sess,
3760 struct rte_mempool *mempool)
3761 {
3762 void *sess_private_data;
3763 int ret;
3764
3765 if (rte_mempool_get(mempool, &sess_private_data)) {
3766 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3767 return -ENOMEM;
3768 }
3769
3770 ret = dpaa2_sec_set_session_parameters(xform, sess_private_data);
3771 if (ret != 0) {
3772 DPAA2_SEC_ERR("Failed to configure session parameters");
3773 /* Return session to mempool */
3774 rte_mempool_put(mempool, sess_private_data);
3775 return ret;
3776 }
3777
3778 set_sym_session_private_data(sess, dev->driver_id,
3779 sess_private_data);
3780
3781 return 0;
3782 }
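
/* Hedged usage sketch (application side): a symmetric session typically
 * reaches the op above via
 *   sess = rte_cryptodev_sym_session_create(sess_mp);
 *   rte_cryptodev_sym_session_init(dev_id, sess, xforms, priv_mp);
 * The exact session API differs across DPDK releases; example only.
 */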
3783
3784 /** Clear the memory of session so it doesn't leave key material behind */
3785 static void
3786 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3787 struct rte_cryptodev_sym_session *sess)
3788 {
3789 PMD_INIT_FUNC_TRACE();
3790 uint8_t index = dev->driver_id;
3791 void *sess_priv = get_sym_session_private_data(sess, index);
3792 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3793
3794 if (sess_priv) {
3795 rte_free(s->ctxt);
3796 rte_free(s->cipher_key.data);
3797 rte_free(s->auth_key.data);
3798 memset(s, 0, sizeof(dpaa2_sec_session));
3799 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3800 set_sym_session_private_data(sess, index, NULL);
3801 rte_mempool_put(sess_mp, sess_priv);
3802 }
3803 }
3804
3805 static int
3806 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3807 struct rte_cryptodev_config *config __rte_unused)
3808 {
3809 PMD_INIT_FUNC_TRACE();
3810
3811 return 0;
3812 }
3813
3814 static int
3815 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3816 {
3817 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3818 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3819 struct dpseci_attr attr;
3820 struct dpaa2_queue *dpaa2_q;
3821 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3822 dev->data->queue_pairs;
3823 struct dpseci_rx_queue_attr rx_attr;
3824 struct dpseci_tx_queue_attr tx_attr;
3825 int ret, i;
3826
3827 PMD_INIT_FUNC_TRACE();
3828
3829 /* Change the tx burst function if ordered queues are used */
3830 if (priv->en_ordered)
3831 dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;
3832
3833 memset(&attr, 0, sizeof(struct dpseci_attr));
3834
3835 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3836 if (ret) {
3837 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3838 priv->hw_id);
3839 goto get_attr_failure;
3840 }
3841 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3842 if (ret) {
3843 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3844 goto get_attr_failure;
3845 }
3846 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3847 dpaa2_q = &qp[i]->rx_vq;
3848 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3849 &rx_attr);
3850 dpaa2_q->fqid = rx_attr.fqid;
3851 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3852 }
3853 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3854 dpaa2_q = &qp[i]->tx_vq;
3855 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3856 &tx_attr);
3857 dpaa2_q->fqid = tx_attr.fqid;
3858 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3859 }
3860
3861 return 0;
3862 get_attr_failure:
3863 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3864 return -1;
3865 }
3866
3867 static void
3868 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3869 {
3870 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3871 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3872 int ret;
3873
3874 PMD_INIT_FUNC_TRACE();
3875
3876 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3877 if (ret) {
3878 DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
3879 priv->hw_id);
3880 return;
3881 }
3882
3883 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3884 if (ret < 0) {
3885 DPAA2_SEC_ERR("SEC Device cannot be reset: Error = %x", ret);
3886 return;
3887 }
3888 }
3889
3890 static int
3891 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
3892 {
3893 PMD_INIT_FUNC_TRACE();
3894
3895 return 0;
3896 }
3897
3898 static void
3899 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3900 struct rte_cryptodev_info *info)
3901 {
3902 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3903
3904 PMD_INIT_FUNC_TRACE();
3905 if (info != NULL) {
3906 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3907 info->feature_flags = dev->feature_flags;
3908 info->capabilities = dpaa2_sec_capabilities;
3909 /* No limit on the number of sessions */
3910 info->sym.max_nb_sessions = 0;
3911 info->driver_id = cryptodev_driver_id;
3912 }
3913 }
3914
3915 static
3916 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3917 struct rte_cryptodev_stats *stats)
3918 {
3919 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3920 struct fsl_mc_io dpseci;
3921 struct dpseci_sec_counters counters = {0};
3922 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3923 dev->data->queue_pairs;
3924 int ret, i;
3925
3926 PMD_INIT_FUNC_TRACE();
3927 if (stats == NULL) {
3928 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3929 return;
3930 }
3931 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3932 if (qp == NULL || qp[i] == NULL) {
3933 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3934 continue;
3935 }
3936
3937 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3938 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3939 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3940 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3941 }
3942
3943 /* If a secondary process accesses the stats, the MCP portal in
3944 * priv->hw may hold the primary process address. Use the secondary
3945 * process based MCP portal address for this object instead.
3946 */
3947 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3948 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3949 &counters);
3950 if (ret) {
3951 DPAA2_SEC_ERR("SEC counters failed");
3952 } else {
3953 DPAA2_SEC_INFO("dpseci hardware stats:"
3954 "\n\tNum of Requests Dequeued = %" PRIu64
3955 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3956 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3957 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3958 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3959 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3960 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3961 counters.dequeued_requests,
3962 counters.ob_enc_requests,
3963 counters.ib_dec_requests,
3964 counters.ob_enc_bytes,
3965 counters.ob_prot_bytes,
3966 counters.ib_dec_bytes,
3967 counters.ib_valid_bytes);
3968 }
3969 }
3970
3971 static
3972 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3973 {
3974 int i;
3975 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3976 (dev->data->queue_pairs);
3977
3978 PMD_INIT_FUNC_TRACE();
3979
3980 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3981 if (qp[i] == NULL) {
3982 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3983 continue;
3984 }
3985 qp[i]->tx_vq.rx_pkts = 0;
3986 qp[i]->tx_vq.tx_pkts = 0;
3987 qp[i]->tx_vq.err_pkts = 0;
3988 qp[i]->rx_vq.rx_pkts = 0;
3989 qp[i]->rx_vq.tx_pkts = 0;
3990 qp[i]->rx_vq.err_pkts = 0;
3991 }
3992 }
3993
3994 static void __rte_hot
3995 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3996 const struct qbman_fd *fd,
3997 const struct qbman_result *dq,
3998 struct dpaa2_queue *rxq,
3999 struct rte_event *ev)
4000 {
4001 struct dpaa2_sec_qp *qp;
4002 /* Prefetching mbuf */
4003 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
4004 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
4005
4006 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
4007 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
4008
4009 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
4010 ev->flow_id = rxq->ev.flow_id;
4011 ev->sub_event_type = rxq->ev.sub_event_type;
4012 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4013 ev->op = RTE_EVENT_OP_NEW;
4014 ev->sched_type = rxq->ev.sched_type;
4015 ev->queue_id = rxq->ev.queue_id;
4016 ev->priority = rxq->ev.priority;
4017 ev->event_ptr = sec_fd_to_mbuf(fd, qp);
4018
4019 qbman_swp_dqrr_consume(swp, dq);
4020 }
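
/* For atomic scheduling the DQRR entry is deliberately NOT consumed here:
 * its index is recorded in the mbuf seqn and held per-lcore, so hardware
 * releases it via DCA only when the event is enqueued downstream,
 * preserving atomicity of the flow.
 */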
4021 static void __rte_hot
4022 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
4023 const struct qbman_fd *fd,
4024 const struct qbman_result *dq,
4025 struct dpaa2_queue *rxq,
4026 struct rte_event *ev)
4027 {
4028 uint8_t dqrr_index;
4029 struct dpaa2_sec_qp *qp;
4030 struct rte_crypto_op *crypto_op;
4031 /* Prefetching mbuf */
4032 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
4033 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
4034
4035 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
4036 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
4037
4038 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
4039 ev->flow_id = rxq->ev.flow_id;
4040 ev->sub_event_type = rxq->ev.sub_event_type;
4041 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4042 ev->op = RTE_EVENT_OP_NEW;
4043 ev->sched_type = rxq->ev.sched_type;
4044 ev->queue_id = rxq->ev.queue_id;
4045 ev->priority = rxq->ev.priority;
4046
4047 crypto_op = sec_fd_to_mbuf(fd, qp);
4048 dqrr_index = qbman_get_dqrr_idx(dq);
4049 *dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
4050 DPAA2_PER_LCORE_DQRR_SIZE++;
4051 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
4052 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
4053 ev->event_ptr = crypto_op;
4054 }
4055
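/* For ordered scheduling, the ORP id (odpid) and the frame's sequence
 * number from the dequeue response are stashed in the mbuf seqn; the
 * ordered enqueue path uses them to restore the original frame order.
 */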
4056 static void __rte_hot
4057 dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
4058 const struct qbman_fd *fd,
4059 const struct qbman_result *dq,
4060 struct dpaa2_queue *rxq,
4061 struct rte_event *ev)
4062 {
4063 struct rte_crypto_op *crypto_op;
4064 struct dpaa2_sec_qp *qp;
4065
4066 /* Prefetching mbuf */
4067 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
4068 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
4069
4070 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
4071 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
4072
4073 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
4074 ev->flow_id = rxq->ev.flow_id;
4075 ev->sub_event_type = rxq->ev.sub_event_type;
4076 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4077 ev->op = RTE_EVENT_OP_NEW;
4078 ev->sched_type = rxq->ev.sched_type;
4079 ev->queue_id = rxq->ev.queue_id;
4080 ev->priority = rxq->ev.priority;
4081 crypto_op = sec_fd_to_mbuf(fd, qp);
4082
4083 *dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
4084 *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
4085 DPAA2_EQCR_OPRID_SHIFT;
4086 *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
4087 DPAA2_EQCR_SEQNUM_SHIFT;
4088
4089 qbman_swp_dqrr_consume(swp, dq);
4090 ev->event_ptr = crypto_op;
4091 }
4092
4093 int
4094 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
4095 int qp_id,
4096 struct dpaa2_dpcon_dev *dpcon,
4097 const struct rte_event *event)
4098 {
4099 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4100 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4101 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
4102 struct dpseci_rx_queue_cfg cfg;
4103 uint8_t priority;
4104 int ret;
4105
4106 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
4107 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
4108 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
4109 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
4110 else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
4111 qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
4112 else
4113 return -EINVAL;
4114
4115 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
4116 (dpcon->num_priorities - 1);
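	/* e.g. with RTE_EVENT_DEV_PRIORITY_LOWEST (255) and two dpcon
	 * priorities, an event priority of 255 maps to dpcon priority
	 * 1 (255/255 * (2 - 1)); an event priority of 0 would divide
	 * by zero and is not expected here.
	 */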
4117
4118 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4119 cfg.options = DPSECI_QUEUE_OPT_DEST;
4120 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
4121 cfg.dest_cfg.dest_id = dpcon->dpcon_id;
4122 cfg.dest_cfg.priority = priority;
4123
4124 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
4125 cfg.user_ctx = (size_t)(qp);
4126 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
4127 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
4128 cfg.order_preservation_en = 1;
4129 }
4130
4131 if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
4132 struct opr_cfg ocfg;
4133
4134 /* Restoration window size = 256 frames */
4135 ocfg.oprrws = 3;
4136 /* Restoration window size = 512 frames for LX2 */
4137 if (dpaa2_svr_family == SVR_LX2160A)
4138 ocfg.oprrws = 4;
4139 /* Auto advance NESN window enabled */
4140 ocfg.oa = 1;
4141 /* Late arrival window size disabled */
4142 ocfg.olws = 0;
4143 /* ORL resource exhaustion advance NESN disabled */
4144 ocfg.oeane = 0;
4145
4146 if (priv->en_loose_ordered)
4147 ocfg.oloe = 1;
4148 else
4149 ocfg.oloe = 0;
4150
4151 ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
4152 qp_id, OPR_OPT_CREATE, &ocfg);
4153 if (ret) {
4154 RTE_LOG(ERR, PMD, "Error setting opr: ret: %d\n", ret);
4155 return ret;
4156 }
4157 qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
4158 priv->en_ordered = 1;
4159 }
4160
4161 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4162 qp_id, &cfg);
4163 if (ret) {
4164 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
4165 return ret;
4166 }
4167
4168 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
4169
4170 return 0;
4171 }
4172
4173 int
4174 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
4175 int qp_id)
4176 {
4177 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4178 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4179 struct dpseci_rx_queue_cfg cfg;
4180 int ret;
4181
4182 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4183 cfg.options = DPSECI_QUEUE_OPT_DEST;
4184 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
4185
4186 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4187 qp_id, &cfg);
4188 if (ret)
4189 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
4190
4191 return ret;
4192 }
4193
4194 static struct rte_cryptodev_ops crypto_ops = {
4195 .dev_configure = dpaa2_sec_dev_configure,
4196 .dev_start = dpaa2_sec_dev_start,
4197 .dev_stop = dpaa2_sec_dev_stop,
4198 .dev_close = dpaa2_sec_dev_close,
4199 .dev_infos_get = dpaa2_sec_dev_infos_get,
4200 .stats_get = dpaa2_sec_stats_get,
4201 .stats_reset = dpaa2_sec_stats_reset,
4202 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
4203 .queue_pair_release = dpaa2_sec_queue_pair_release,
4204 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
4205 .sym_session_configure = dpaa2_sec_sym_session_configure,
4206 .sym_session_clear = dpaa2_sec_sym_session_clear,
4207 /* Raw data-path API related operations */
4208 .sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
4209 .sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
4210 };
4211
4212 #ifdef RTE_LIB_SECURITY
4213 static const struct rte_security_capability *
4214 dpaa2_sec_capabilities_get(void *device __rte_unused)
4215 {
4216 return dpaa2_sec_security_cap;
4217 }
4218
4219 static const struct rte_security_ops dpaa2_sec_security_ops = {
4220 .session_create = dpaa2_sec_security_session_create,
4221 .session_update = NULL,
4222 .session_stats_get = NULL,
4223 .session_destroy = dpaa2_sec_security_session_destroy,
4224 .set_pkt_metadata = NULL,
4225 .capabilities_get = dpaa2_sec_capabilities_get
4226 };
4227 #endif
4228
4229 static int
4230 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
4231 {
4232 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4233 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4234 int ret;
4235
4236 PMD_INIT_FUNC_TRACE();
4237
4238 /* Function is reverse of dpaa2_sec_dev_init.
4239 * It does the following:
4240 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
4241 * 2. Close the DPSECI device
4242 * 3. Free the allocated resources.
4243 */
4244
4245 /* Close the device at the underlying layer */
4246 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
4247 if (ret) {
4248 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
4249 return -1;
4250 }
4251
4252 /* Free the allocated memory for the device private data and dpseci */
4253 priv->hw = NULL;
4254 rte_free(dpseci);
4255 rte_free(dev->security_ctx);
4256
4257 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
4258 dev->data->name, rte_socket_id());
4259
4260 return 0;
4261 }
4262
4263 static int
4264 check_devargs_handler(const char *key, const char *value,
4265 void *opaque)
4266 {
4267 struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque;
4268 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4269
4270 if (!strcmp(key, "drv_strict_order")) {
4271 priv->en_loose_ordered = false;
4272 } else if (!strcmp(key, "drv_dump_mode")) {
4273 dpaa2_sec_dp_dump = atoi(value);
4274 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
4275 DPAA2_SEC_WARN("Unsupported DPAA2_SEC_DP_DUMP "
4276 "level, defaulting to full "
4277 "dump\n");
4278 dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
4279 }
4280 } else
4281 return -1;
4282
4283 return 0;
4284 }
4285
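/* Illustrative devargs usage (the exact bus/device naming may differ per
 * platform); both knobs can be passed on the EAL command line, e.g.:
 *   -a fslmc:dpseci.1,drv_strict_order=1,drv_dump_mode=2
 */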
4286 static void
4287 dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key)
4288 {
4289 struct rte_kvargs *kvlist;
4290 struct rte_devargs *devargs;
4291
4292 devargs = cryptodev->device->devargs;
4293 if (!devargs)
4294 return;
4295
4296 kvlist = rte_kvargs_parse(devargs->args, NULL);
4297 if (!kvlist)
4298 return;
4299
4300 if (!rte_kvargs_count(kvlist, key)) {
4301 rte_kvargs_free(kvlist);
4302 return;
4303 }
4304
4305 rte_kvargs_process(kvlist, key,
4306 check_devargs_handler, (void *)cryptodev);
4307 rte_kvargs_free(kvlist);
4308 }
4309
4310 static int
4311 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
4312 {
4313 struct dpaa2_sec_dev_private *internals;
4314 struct rte_device *dev = cryptodev->device;
4315 struct rte_dpaa2_device *dpaa2_dev;
4316 #ifdef RTE_LIB_SECURITY
4317 struct rte_security_ctx *security_instance;
4318 #endif
4319 struct fsl_mc_io *dpseci;
4320 uint16_t token;
4321 struct dpseci_attr attr;
4322 int retcode, hw_id;
4323
4324 PMD_INIT_FUNC_TRACE();
4325 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
4326 hw_id = dpaa2_dev->object_id;
4327
4328 cryptodev->driver_id = cryptodev_driver_id;
4329 cryptodev->dev_ops = &crypto_ops;
4330
4331 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
4332 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
4333 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
4334 RTE_CRYPTODEV_FF_HW_ACCELERATED |
4335 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
4336 RTE_CRYPTODEV_FF_SECURITY |
4337 RTE_CRYPTODEV_FF_SYM_RAW_DP |
4338 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
4339 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
4340 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
4341 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
4342 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
4343
4344 internals = cryptodev->data->dev_private;
4345
4346 /*
4347 * For secondary processes, we don't initialise any further as primary
4348 * has already done this work. Only check we don't need a different
4349 * RX function
4350 */
4351 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
4352 DPAA2_SEC_DEBUG("Device already init by primary process");
4353 return 0;
4354 }
4355 #ifdef RTE_LIB_SECURITY
4356 /* Initialize security_ctx only for primary process*/
4357 security_instance = rte_malloc("rte_security_instances_ops",
4358 sizeof(struct rte_security_ctx), 0);
4359 if (security_instance == NULL)
4360 return -ENOMEM;
4361 security_instance->device = (void *)cryptodev;
4362 security_instance->ops = &dpaa2_sec_security_ops;
4363 security_instance->sess_cnt = 0;
4364 cryptodev->security_ctx = security_instance;
4365 #endif
4366 /* Open the rte device via MC and save the handle for further use */
4367 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
4368 sizeof(struct fsl_mc_io), 0);
4369 if (!dpseci) {
4370 DPAA2_SEC_ERR(
4371 "Error in allocating the memory for dpsec object");
4372 return -ENOMEM;
4373 }
4374 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
4375
4376 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
4377 if (retcode != 0) {
4378 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
4379 retcode);
4380 goto init_error;
4381 }
4382 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
4383 if (retcode != 0) {
4384 DPAA2_SEC_ERR(
4385 "Cannot get dpsec device attributed: Error = %x",
4386 retcode);
4387 goto init_error;
4388 }
4389 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
4390 "dpsec-%u", hw_id);
4391
4392 internals->max_nb_queue_pairs = attr.num_tx_queues;
4393 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
4394 internals->hw = dpseci;
4395 internals->token = token;
4396 internals->en_loose_ordered = true;
4397
4398 dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
4399 dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
4400 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
4401 return 0;
4402
4403 init_error:
4404 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
4405
4406 /* dpaa2_sec_uninit(crypto_dev_name); */
4407 return -EFAULT;
4408 }
4409
4410 static int
4411 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
4412 struct rte_dpaa2_device *dpaa2_dev)
4413 {
4414 struct rte_cryptodev *cryptodev;
4415 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
4416
4417 int retval;
4418
4419 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
4420 dpaa2_dev->object_id);
4421
4422 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
4423 if (cryptodev == NULL)
4424 return -ENOMEM;
4425
4426 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4427 cryptodev->data->dev_private = rte_zmalloc_socket(
4428 "cryptodev private structure",
4429 sizeof(struct dpaa2_sec_dev_private),
4430 RTE_CACHE_LINE_SIZE,
4431 rte_socket_id());
4432
4433 if (cryptodev->data->dev_private == NULL)
4434 rte_panic("Cannot allocate memzone for private "
4435 "device data");
4436 }
4437
4438 dpaa2_dev->cryptodev = cryptodev;
4439 cryptodev->device = &dpaa2_dev->device;
4440
4441 /* init user callbacks */
4442 TAILQ_INIT(&(cryptodev->link_intr_cbs));
4443
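/* Select the RTA SEC era for descriptor construction:
 * LX2160A integrates SEC Era 10; other DPAA2 SoCs use Era 8.
 */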
4444 if (dpaa2_svr_family == SVR_LX2160A)
4445 rta_set_sec_era(RTA_SEC_ERA_10);
4446 else
4447 rta_set_sec_era(RTA_SEC_ERA_8);
4448
4449 DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());
4450
4451 /* Invoke PMD device initialization function */
4452 retval = dpaa2_sec_dev_init(cryptodev);
4453 if (retval == 0) {
4454 rte_cryptodev_pmd_probing_finish(cryptodev);
4455 return 0;
4456 }
4457
4458 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4459 rte_free(cryptodev->data->dev_private);
4460
4461 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4462
4463 return -ENXIO;
4464 }
4465
4466 static int
4467 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4468 {
4469 struct rte_cryptodev *cryptodev;
4470 int ret;
4471
4472 cryptodev = dpaa2_dev->cryptodev;
4473 if (cryptodev == NULL)
4474 return -ENODEV;
4475
4476 ret = dpaa2_sec_uninit(cryptodev);
4477 if (ret)
4478 return ret;
4479
4480 return rte_cryptodev_pmd_destroy(cryptodev);
4481 }
4482
4483 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4484 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4485 .drv_type = DPAA2_CRYPTO,
4486 .driver = {
4487 .name = "DPAA2 SEC PMD"
4488 },
4489 .probe = cryptodev_dpaa2_sec_probe,
4490 .remove = cryptodev_dpaa2_sec_remove,
4491 };
4492
4493 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4494
4495 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4496 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4497 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4498 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
4499 DRIVER_STRICT_ORDER "=<int>"
4500 DRIVER_DUMP_MODE "=<int>");
4501 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
4502