1 /* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2022 NXP
5 *
6 */
7
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
20 #endif
21 #include <rte_cycles.h>
22 #include <rte_dev.h>
23 #include <rte_io.h>
24 #include <rte_ip.h>
25 #include <rte_kvargs.h>
26 #include <rte_malloc.h>
27 #include <rte_mbuf.h>
28 #include <rte_memcpy.h>
29 #include <rte_string_fns.h>
30 #include <rte_spinlock.h>
31 #include <rte_hexdump.h>
32
33 #include <fsl_usd.h>
34 #include <fsl_qman.h>
35 #include <dpaa_of.h>
36
37 /* RTA header files */
38 #include <desc/common.h>
39 #include <desc/algo.h>
40 #include <desc/ipsec.h>
41 #include <desc/pdcp.h>
42 #include <desc/sdap.h>
43
44 #include <rte_dpaa_bus.h>
45 #include <dpaa_sec.h>
46 #include <dpaa_sec_event.h>
47 #include <dpaa_sec_log.h>
48 #include <dpaax_iova_table.h>
49
50 #define DRIVER_DUMP_MODE "drv_dump_mode"
51
52 /* DPAA_SEC_DP_DUMP levels */
53 enum dpaa_sec_dump_levels {
54 DPAA_SEC_DP_NO_DUMP,
55 DPAA_SEC_DP_ERR_DUMP,
56 DPAA_SEC_DP_FULL_DUMP
57 };
58
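/* Datapath dump level, intended to be selected via the DRIVER_DUMP_MODE
 * devarg: DPAA_SEC_DP_NO_DUMP disables dumping, DPAA_SEC_DP_ERR_DUMP logs
 * the SEC error status, DPAA_SEC_DP_FULL_DUMP also dumps the full
 * session/job context (see dpaa_sec_deq()).
 */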
59 uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;
60
61 uint8_t dpaa_cryptodev_driver_id;
62
63 static inline void
64 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
65 {
66 if (!ctx->fd_status) {
67 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
68 } else {
69 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
70 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
71 }
72 }
73
74 static inline struct dpaa_sec_op_ctx *
75 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
76 {
77 struct dpaa_sec_op_ctx *ctx;
78 int i, retval;
79
80 retval = rte_mempool_get(
81 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
82 (void **)(&ctx));
83 if (!ctx || retval) {
84 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
85 return NULL;
86 }
87 /*
88 * Clear SG memory. There are 16 SG entries of 16 bytes each.
89 * One call to dcbz_64() clears 64 bytes, so it is called once per
90 * four entries to clear all the SG entries. Since dpaa_sec_alloc_ctx()
91 * is called for each packet, memset() would be costlier than dcbz_64().
92 */
93 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
94 dcbz_64(&ctx->job.sg[i]);
95
96 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
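/* Cache the virtual-to-IOVA offset of this ctx so that later address
 * translations within the context only need a subtraction.
 */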
97 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
98
99 return ctx;
100 }
101
102 static void
103 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
104 struct qman_fq *fq,
105 const struct qm_mr_entry *msg)
106 {
107 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
108 fq->fqid, msg->ern.rc, msg->ern.seqnum);
109 }
110
111 /* Initialize the queue with the destination channel set to the CAAM
112 * channel so that all packets on this queue are dispatched to CAAM.
113 */
114 static int
115 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
116 uint32_t fqid_out)
117 {
118 struct qm_mcc_initfq fq_opts;
119 uint32_t flags;
120 int ret = -1;
121
122 /* Clear FQ options */
123 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
124
125 flags = QMAN_INITFQ_FLAG_SCHED;
126 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
127 QM_INITFQ_WE_CONTEXTB;
128
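/* Context A carries the address of the session's shared descriptor
 * (preheader), context B the FQID on which SEC enqueues the result.
 */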
129 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
130 fq_opts.fqd.context_b = fqid_out;
131 fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
132 fq_opts.fqd.dest.wq = 0;
133
134 fq_in->cb.ern = ern_sec_fq_handler;
135
136 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
137
138 ret = qman_init_fq(fq_in, flags, &fq_opts);
139 if (unlikely(ret != 0))
140 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
141
142 return ret;
143 }
144
145 /* frames enqueued on in_fq are processed by CAAM, which puts the crypto result on out_fq */
146 static enum qman_cb_dqrr_result
147 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
148 struct qman_fq *fq __always_unused,
149 const struct qm_dqrr_entry *dqrr)
150 {
151 const struct qm_fd *fd;
152 struct dpaa_sec_job *job;
153 struct dpaa_sec_op_ctx *ctx;
154
155 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
156 return qman_cb_dqrr_consume;
157
158 fd = &dqrr->fd;
159 /* sg is embedded in an op ctx,
160 * sg[0] is for output
161 * sg[1] is for input
162 */
163 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
164
165 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
166 ctx->fd_status = fd->status;
167 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
168 struct qm_sg_entry *sg_out;
169 uint32_t len;
170 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
171 ctx->op->sym->m_src : ctx->op->sym->m_dst;
172
173 sg_out = &job->sg[0];
174 hw_sg_to_cpu(sg_out);
175 len = sg_out->length;
176 mbuf->pkt_len = len;
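/* For protocol offload the output length can differ from the input;
 * walk the chain and fix up the last segment's data_len accordingly.
 */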
177 while (mbuf->next != NULL) {
178 len -= mbuf->data_len;
179 mbuf = mbuf->next;
180 }
181 mbuf->data_len = len;
182 }
183 dpaa_sec_op_ending(ctx);
184
185 return qman_cb_dqrr_consume;
186 }
187
188 /* CAAM puts its result into this queue */
189 static int
190 dpaa_sec_init_tx(struct qman_fq *fq)
191 {
192 int ret;
193 struct qm_mcc_initfq opts;
194 uint32_t flags;
195
196 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
197 QMAN_FQ_FLAG_DYNAMIC_FQID;
198
199 ret = qman_create_fq(0, flags, fq);
200 if (unlikely(ret)) {
201 DPAA_SEC_ERR("qman_create_fq failed");
202 return ret;
203 }
204
205 memset(&opts, 0, sizeof(opts));
206 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
207 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
208
209 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
210
211 fq->cb.dqrr = dqrr_out_fq_cb_rx;
212 fq->cb.ern = ern_sec_fq_handler;
213
214 ret = qman_init_fq(fq, 0, &opts);
215 if (unlikely(ret)) {
216 DPAA_SEC_ERR("unable to init caam source fq!");
217 return ret;
218 }
219
220 return ret;
221 }
222
223 static inline int is_aead(dpaa_sec_session *ses)
224 {
225 return ((ses->cipher_alg == 0) &&
226 (ses->auth_alg == 0) &&
227 (ses->aead_alg != 0));
228 }
229
230 static inline int is_encode(dpaa_sec_session *ses)
231 {
232 return ses->dir == DIR_ENC;
233 }
234
235 static inline int is_decode(dpaa_sec_session *ses)
236 {
237 return ses->dir == DIR_DEC;
238 }
239
240 #ifdef RTE_LIB_SECURITY
241 static int
242 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
243 {
244 struct alginfo authdata = {0}, cipherdata = {0};
245 struct sec_cdb *cdb = &ses->cdb;
246 struct alginfo *p_authdata = NULL;
247 int32_t shared_desc_len = 0;
248 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
249 int swap = false;
250 #else
251 int swap = true;
252 #endif
253
254 cipherdata.key = (size_t)ses->cipher_key.data;
255 cipherdata.keylen = ses->cipher_key.length;
256 cipherdata.key_enc_flags = 0;
257 cipherdata.key_type = RTA_DATA_IMM;
258 cipherdata.algtype = ses->cipher_key.alg;
259 cipherdata.algmode = ses->cipher_key.algmode;
260
261 if (ses->auth_alg) {
262 authdata.key = (size_t)ses->auth_key.data;
263 authdata.keylen = ses->auth_key.length;
264 authdata.key_enc_flags = 0;
265 authdata.key_type = RTA_DATA_IMM;
266 authdata.algtype = ses->auth_key.alg;
267 authdata.algmode = ses->auth_key.algmode;
268
269 p_authdata = &authdata;
270 }
271
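/* Decide, per algorithm combination and SN size, whether the cipher/auth
 * keys can stay inline in the shared descriptor or must be referenced
 * by physical pointer.
 */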
272 if (ses->pdcp.sdap_enabled) {
273 int nb_keys_to_inline =
274 rta_inline_pdcp_sdap_query(authdata.algtype,
275 cipherdata.algtype,
276 ses->pdcp.sn_size,
277 ses->pdcp.hfn_ovd);
278 if (nb_keys_to_inline >= 1) {
279 cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
280 (size_t)cipherdata.key);
281 cipherdata.key_type = RTA_DATA_PTR;
282 }
283 if (nb_keys_to_inline >= 2) {
284 authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
285 (size_t)authdata.key);
286 authdata.key_type = RTA_DATA_PTR;
287 }
288 } else {
289 if (rta_inline_pdcp_query(authdata.algtype,
290 cipherdata.algtype,
291 ses->pdcp.sn_size,
292 ses->pdcp.hfn_ovd)) {
293 cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
294 (size_t)cipherdata.key);
295 cipherdata.key_type = RTA_DATA_PTR;
296 }
297 }
298
299 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
300 if (ses->dir == DIR_ENC)
301 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
302 cdb->sh_desc, 1, swap,
303 ses->pdcp.hfn,
304 ses->pdcp.sn_size,
305 ses->pdcp.bearer,
306 ses->pdcp.pkt_dir,
307 ses->pdcp.hfn_threshold,
308 &cipherdata, &authdata);
309 else if (ses->dir == DIR_DEC)
310 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
311 cdb->sh_desc, 1, swap,
312 ses->pdcp.hfn,
313 ses->pdcp.sn_size,
314 ses->pdcp.bearer,
315 ses->pdcp.pkt_dir,
316 ses->pdcp.hfn_threshold,
317 &cipherdata, &authdata);
318 } else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
319 shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
320 1, swap, &authdata);
321 } else {
322 if (ses->dir == DIR_ENC) {
323 if (ses->pdcp.sdap_enabled)
324 shared_desc_len =
325 cnstr_shdsc_pdcp_sdap_u_plane_encap(
326 cdb->sh_desc, 1, swap,
327 ses->pdcp.sn_size,
328 ses->pdcp.hfn,
329 ses->pdcp.bearer,
330 ses->pdcp.pkt_dir,
331 ses->pdcp.hfn_threshold,
332 &cipherdata, p_authdata);
333 else
334 shared_desc_len =
335 cnstr_shdsc_pdcp_u_plane_encap(
336 cdb->sh_desc, 1, swap,
337 ses->pdcp.sn_size,
338 ses->pdcp.hfn,
339 ses->pdcp.bearer,
340 ses->pdcp.pkt_dir,
341 ses->pdcp.hfn_threshold,
342 &cipherdata, p_authdata);
343 } else if (ses->dir == DIR_DEC) {
344 if (ses->pdcp.sdap_enabled)
345 shared_desc_len =
346 cnstr_shdsc_pdcp_sdap_u_plane_decap(
347 cdb->sh_desc, 1, swap,
348 ses->pdcp.sn_size,
349 ses->pdcp.hfn,
350 ses->pdcp.bearer,
351 ses->pdcp.pkt_dir,
352 ses->pdcp.hfn_threshold,
353 &cipherdata, p_authdata);
354 else
355 shared_desc_len =
356 cnstr_shdsc_pdcp_u_plane_decap(
357 cdb->sh_desc, 1, swap,
358 ses->pdcp.sn_size,
359 ses->pdcp.hfn,
360 ses->pdcp.bearer,
361 ses->pdcp.pkt_dir,
362 ses->pdcp.hfn_threshold,
363 &cipherdata, p_authdata);
364 }
365 }
366 return shared_desc_len;
367 }
368
369 /* Prepare the IPsec protocol command block of the session */
370 static int
371 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
372 {
373 struct alginfo cipherdata = {0}, authdata = {0};
374 struct sec_cdb *cdb = &ses->cdb;
375 int32_t shared_desc_len = 0;
376 int err;
377 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
378 int swap = false;
379 #else
380 int swap = true;
381 #endif
382
383 cipherdata.key = (size_t)ses->cipher_key.data;
384 cipherdata.keylen = ses->cipher_key.length;
385 cipherdata.key_enc_flags = 0;
386 cipherdata.key_type = RTA_DATA_IMM;
387 cipherdata.algtype = ses->cipher_key.alg;
388 cipherdata.algmode = ses->cipher_key.algmode;
389
390 if (ses->auth_key.length) {
391 authdata.key = (size_t)ses->auth_key.data;
392 authdata.keylen = ses->auth_key.length;
393 authdata.key_enc_flags = 0;
394 authdata.key_type = RTA_DATA_IMM;
395 authdata.algtype = ses->auth_key.alg;
396 authdata.algmode = ses->auth_key.algmode;
397 }
398
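/* rta_inline_query() reports in sh_desc[2] (as a bit mask) which of the
 * two keys fit inline in the shared descriptor; keys that do not fit
 * are converted to physical-address references below.
 */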
399 cdb->sh_desc[0] = cipherdata.keylen;
400 cdb->sh_desc[1] = authdata.keylen;
401 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
402 DESC_JOB_IO_LEN,
403 (unsigned int *)cdb->sh_desc,
404 &cdb->sh_desc[2], 2);
405
406 if (err < 0) {
407 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
408 return err;
409 }
410 if (cdb->sh_desc[2] & 1)
411 cipherdata.key_type = RTA_DATA_IMM;
412 else {
413 cipherdata.key = (size_t)rte_dpaa_mem_vtop(
414 (void *)(size_t)cipherdata.key);
415 cipherdata.key_type = RTA_DATA_PTR;
416 }
417 if (cdb->sh_desc[2] & (1<<1))
418 authdata.key_type = RTA_DATA_IMM;
419 else {
420 authdata.key = (size_t)rte_dpaa_mem_vtop(
421 (void *)(size_t)authdata.key);
422 authdata.key_type = RTA_DATA_PTR;
423 }
424
425 cdb->sh_desc[0] = 0;
426 cdb->sh_desc[1] = 0;
427 cdb->sh_desc[2] = 0;
428 if (ses->dir == DIR_ENC) {
429 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
430 cdb->sh_desc,
431 true, swap, SHR_SERIAL,
432 &ses->encap_pdb,
433 (uint8_t *)&ses->ip4_hdr,
434 &cipherdata, &authdata);
435 } else if (ses->dir == DIR_DEC) {
436 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
437 cdb->sh_desc,
438 true, swap, SHR_SERIAL,
439 &ses->decap_pdb,
440 &cipherdata, &authdata);
441 }
442 return shared_desc_len;
443 }
444 #endif
445 /* prepare command block of the session */
446 static int
447 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
448 {
449 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
450 int32_t shared_desc_len = 0;
451 struct sec_cdb *cdb = &ses->cdb;
452 int err;
453 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
454 int swap = false;
455 #else
456 int swap = true;
457 #endif
458
459 memset(cdb, 0, sizeof(struct sec_cdb));
460
461 switch (ses->ctxt) {
462 #ifdef RTE_LIB_SECURITY
463 case DPAA_SEC_IPSEC:
464 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
465 break;
466 case DPAA_SEC_PDCP:
467 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
468 break;
469 #endif
470 case DPAA_SEC_CIPHER:
471 alginfo_c.key = (size_t)ses->cipher_key.data;
472 alginfo_c.keylen = ses->cipher_key.length;
473 alginfo_c.key_enc_flags = 0;
474 alginfo_c.key_type = RTA_DATA_IMM;
475 alginfo_c.algtype = ses->cipher_key.alg;
476 alginfo_c.algmode = ses->cipher_key.algmode;
477
478 switch (ses->cipher_alg) {
479 case RTE_CRYPTO_CIPHER_AES_CBC:
480 case RTE_CRYPTO_CIPHER_3DES_CBC:
481 case RTE_CRYPTO_CIPHER_DES_CBC:
482 case RTE_CRYPTO_CIPHER_AES_CTR:
483 case RTE_CRYPTO_CIPHER_3DES_CTR:
484 shared_desc_len = cnstr_shdsc_blkcipher(
485 cdb->sh_desc, true,
486 swap, SHR_NEVER, &alginfo_c,
487 ses->iv.length,
488 ses->dir);
489 break;
490 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
491 shared_desc_len = cnstr_shdsc_snow_f8(
492 cdb->sh_desc, true, swap,
493 &alginfo_c,
494 ses->dir);
495 break;
496 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
497 shared_desc_len = cnstr_shdsc_zuce(
498 cdb->sh_desc, true, swap,
499 &alginfo_c,
500 ses->dir);
501 break;
502 default:
503 DPAA_SEC_ERR("unsupported cipher alg %d",
504 ses->cipher_alg);
505 return -ENOTSUP;
506 }
507 break;
508 case DPAA_SEC_AUTH:
509 alginfo_a.key = (size_t)ses->auth_key.data;
510 alginfo_a.keylen = ses->auth_key.length;
511 alginfo_a.key_enc_flags = 0;
512 alginfo_a.key_type = RTA_DATA_IMM;
513 alginfo_a.algtype = ses->auth_key.alg;
514 alginfo_a.algmode = ses->auth_key.algmode;
515 switch (ses->auth_alg) {
516 case RTE_CRYPTO_AUTH_MD5:
517 case RTE_CRYPTO_AUTH_SHA1:
518 case RTE_CRYPTO_AUTH_SHA224:
519 case RTE_CRYPTO_AUTH_SHA256:
520 case RTE_CRYPTO_AUTH_SHA384:
521 case RTE_CRYPTO_AUTH_SHA512:
522 shared_desc_len = cnstr_shdsc_hash(
523 cdb->sh_desc, true,
524 swap, SHR_NEVER, &alginfo_a,
525 !ses->dir,
526 ses->digest_length);
527 break;
528 case RTE_CRYPTO_AUTH_MD5_HMAC:
529 case RTE_CRYPTO_AUTH_SHA1_HMAC:
530 case RTE_CRYPTO_AUTH_SHA224_HMAC:
531 case RTE_CRYPTO_AUTH_SHA256_HMAC:
532 case RTE_CRYPTO_AUTH_SHA384_HMAC:
533 case RTE_CRYPTO_AUTH_SHA512_HMAC:
534 shared_desc_len = cnstr_shdsc_hmac(
535 cdb->sh_desc, true,
536 swap, SHR_NEVER, &alginfo_a,
537 !ses->dir,
538 ses->digest_length);
539 break;
540 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
541 shared_desc_len = cnstr_shdsc_snow_f9(
542 cdb->sh_desc, true, swap,
543 &alginfo_a,
544 !ses->dir,
545 ses->digest_length);
546 break;
547 case RTE_CRYPTO_AUTH_ZUC_EIA3:
548 shared_desc_len = cnstr_shdsc_zuca(
549 cdb->sh_desc, true, swap,
550 &alginfo_a,
551 !ses->dir,
552 ses->digest_length);
553 break;
554 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
555 case RTE_CRYPTO_AUTH_AES_CMAC:
556 shared_desc_len = cnstr_shdsc_aes_mac(
557 cdb->sh_desc,
558 true, swap, SHR_NEVER,
559 &alginfo_a,
560 !ses->dir,
561 ses->digest_length);
562 break;
563 default:
564 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
565 }
566 break;
567 case DPAA_SEC_AEAD:
568 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
569 DPAA_SEC_ERR("not supported aead alg");
570 return -ENOTSUP;
571 }
572 alginfo.key = (size_t)ses->aead_key.data;
573 alginfo.keylen = ses->aead_key.length;
574 alginfo.key_enc_flags = 0;
575 alginfo.key_type = RTA_DATA_IMM;
576 alginfo.algtype = ses->aead_key.alg;
577 alginfo.algmode = ses->aead_key.algmode;
578
579 if (ses->dir == DIR_ENC)
580 shared_desc_len = cnstr_shdsc_gcm_encap(
581 cdb->sh_desc, true, swap, SHR_NEVER,
582 &alginfo,
583 ses->iv.length,
584 ses->digest_length);
585 else
586 shared_desc_len = cnstr_shdsc_gcm_decap(
587 cdb->sh_desc, true, swap, SHR_NEVER,
588 &alginfo,
589 ses->iv.length,
590 ses->digest_length);
591 break;
592 case DPAA_SEC_CIPHER_HASH:
593 alginfo_c.key = (size_t)ses->cipher_key.data;
594 alginfo_c.keylen = ses->cipher_key.length;
595 alginfo_c.key_enc_flags = 0;
596 alginfo_c.key_type = RTA_DATA_IMM;
597 alginfo_c.algtype = ses->cipher_key.alg;
598 alginfo_c.algmode = ses->cipher_key.algmode;
599
600 alginfo_a.key = (size_t)ses->auth_key.data;
601 alginfo_a.keylen = ses->auth_key.length;
602 alginfo_a.key_enc_flags = 0;
603 alginfo_a.key_type = RTA_DATA_IMM;
604 alginfo_a.algtype = ses->auth_key.alg;
605 alginfo_a.algmode = ses->auth_key.algmode;
606
607 cdb->sh_desc[0] = alginfo_c.keylen;
608 cdb->sh_desc[1] = alginfo_a.keylen;
609 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
610 DESC_JOB_IO_LEN,
611 (unsigned int *)cdb->sh_desc,
612 &cdb->sh_desc[2], 2);
613
614 if (err < 0) {
615 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
616 return err;
617 }
618 if (cdb->sh_desc[2] & 1)
619 alginfo_c.key_type = RTA_DATA_IMM;
620 else {
621 alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
622 (void *)(size_t)alginfo_c.key);
623 alginfo_c.key_type = RTA_DATA_PTR;
624 }
625 if (cdb->sh_desc[2] & (1<<1))
626 alginfo_a.key_type = RTA_DATA_IMM;
627 else {
628 alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
629 (void *)(size_t)alginfo_a.key);
630 alginfo_a.key_type = RTA_DATA_PTR;
631 }
632 cdb->sh_desc[0] = 0;
633 cdb->sh_desc[1] = 0;
634 cdb->sh_desc[2] = 0;
635 /* auth_only_len is set to 0 here; it will be
636 * overwritten in the FD for each packet.
637 */
638 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
639 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
640 ses->iv.length,
641 ses->digest_length, ses->dir);
642 break;
643 case DPAA_SEC_HASH_CIPHER:
644 default:
645 DPAA_SEC_ERR("error: Unsupported session");
646 return -ENOTSUP;
647 }
648
649 if (shared_desc_len < 0) {
650 DPAA_SEC_ERR("error in preparing command block");
651 return shared_desc_len;
652 }
653
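/* Record the shared-descriptor length in the preheader and convert the
 * header words to the big-endian format expected by SEC.
 */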
654 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
655 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
656 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
657
658 return 0;
659 }
660
661 static void
662 dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
663 {
664 struct dpaa_sec_job *job = &ctx->job;
665 struct rte_crypto_op *op = ctx->op;
666 dpaa_sec_session *sess = NULL;
667 struct sec_cdb c_cdb, *cdb;
668 uint8_t bufsize;
669 struct rte_crypto_sym_op *sym_op;
670 struct qm_sg_entry sg[2];
671
672 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
673 sess = (dpaa_sec_session *)
674 get_sym_session_private_data(
675 op->sym->session,
676 dpaa_cryptodev_driver_id);
677 #ifdef RTE_LIBRTE_SECURITY
678 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
679 sess = (dpaa_sec_session *)
680 get_sec_session_private_data(
681 op->sym->sec_session);
682 #endif
683 if (sess == NULL) {
684 printf("session is NULL\n");
685 goto mbuf_dump;
686 }
687
688 cdb = &sess->cdb;
689 rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
690 #ifdef RTE_LIBRTE_SECURITY
691 printf("\nsession protocol type = %d\n", sess->proto_alg);
692 #endif
693 printf("\n****************************************\n"
694 "session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
695 "\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
696 "\tCipher key len:\t%"PRIu64"\n\tCipher alg:\t%d\n"
697 "\tCipher algmode:\t%d\n", sess->ctxt,
698 (sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
699 sess->cipher_alg, sess->auth_alg, sess->aead_alg,
700 (uint64_t)sess->cipher_key.length, sess->cipher_key.alg,
701 sess->cipher_key.algmode);
702 rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
703 sess->cipher_key.length);
704 rte_hexdump(stdout, "auth key", sess->auth_key.data,
705 sess->auth_key.length);
706 printf("\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n"
707 "\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
708 "\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
709 "\taead cipher text:\t%d\n",
710 (uint64_t)sess->auth_key.length, sess->auth_key.alg,
711 sess->auth_key.algmode,
712 sess->iv.length, sess->iv.offset,
713 sess->digest_length, sess->auth_only_len,
714 sess->auth_cipher_text);
715 #ifdef RTE_LIBRTE_SECURITY
716 printf("PDCP session params:\n"
717 "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
718 "\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
719 "\t%d\n\thfn:\t\t%d\n"
720 "\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
721 sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
722 sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
723 sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
724 sess->pdcp.hfn_threshold);
725 #endif
726 c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
727 c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
728 bufsize = c_cdb.sh_hdr.hi.field.idlen;
729
730 printf("cdb = %p\n\n", cdb);
731 printf("Descriptor size = %d\n", bufsize);
732 int m;
733 for (m = 0; m < bufsize; m++)
734 printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));
735
736 printf("\n");
737 mbuf_dump:
738 sym_op = op->sym;
739 if (sym_op->m_src) {
740 printf("Source mbuf:\n");
741 rte_pktmbuf_dump(stdout, sym_op->m_src,
742 sym_op->m_src->data_len);
743 }
744 if (sym_op->m_dst) {
745 printf("Destination mbuf:\n");
746 rte_pktmbuf_dump(stdout, sym_op->m_dst,
747 sym_op->m_dst->data_len);
748 }
749
750 printf("Session address = %p\ncipher offset: %d, length: %d\n"
751 "auth offset: %d, length: %d\n aead offset: %d, length: %d\n",
752 sym_op->session, sym_op->cipher.data.offset,
753 sym_op->cipher.data.length,
754 sym_op->auth.data.offset, sym_op->auth.data.length,
755 sym_op->aead.data.offset, sym_op->aead.data.length);
756 printf("\n");
757
758 printf("******************************************************\n");
759 printf("ctx info:\n");
760 printf("job->sg[0] output info:\n");
761 memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
762 printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
763 "\n\tbpid = %d\n\toffset = %d\n",
764 (uint64_t)sg[0].addr, sg[0].length, sg[0].final,
765 sg[0].extension, sg[0].bpid, sg[0].offset);
766 printf("\njob->sg[1] input info:\n");
767 memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
768 hw_sg_to_cpu(&sg[1]);
769 printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
770 "\n\tbpid = %d\n\toffset = %d\n",
771 (uint64_t)sg[1].addr, sg[1].length, sg[1].final,
772 sg[1].extension, sg[1].bpid, sg[1].offset);
773
774 printf("\nctx pool addr = %p\n", ctx->ctx_pool);
775 if (ctx->ctx_pool)
776 printf("ctx pool available counts = %d\n",
777 rte_mempool_avail_count(ctx->ctx_pool));
778
779 printf("\nop pool addr = %p\n", op->mempool);
780 if (op->mempool)
781 printf("op pool available counts = %d\n",
782 rte_mempool_avail_count(op->mempool));
783
784 printf("********************************************************\n");
785 printf("Queue data:\n");
786 printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
787 "\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts"
788 "= %d\n\trx_errs = %d\n\ttx_errs = %d\n\n",
789 qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
790 qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
791 qp->rx_errs, qp->tx_errs);
792 }
793
794 /* qp is lockless, should be accessed by only one thread */
795 static int
796 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
797 {
798 struct qman_fq *fq;
799 unsigned int pkts = 0;
800 int num_rx_bufs, ret;
801 struct qm_dqrr_entry *dq;
802 uint32_t vdqcr_flags = 0;
803
804 fq = &qp->outq;
805 /*
806 * For requests of fewer than four buffers we set QM_VDQCR_EXACT and
807 * provide exactly the requested number of buffers.
808 * Otherwise we do not set QM_VDQCR_EXACT; without it the portal may
809 * return up to two more buffers than requested, so we request two fewer.
810 */
811 if (nb_ops < 4) {
812 vdqcr_flags = QM_VDQCR_EXACT;
813 num_rx_bufs = nb_ops;
814 } else {
815 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
816 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
817 }
818 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
819 if (ret)
820 return 0;
821
822 do {
823 const struct qm_fd *fd;
824 struct dpaa_sec_job *job;
825 struct dpaa_sec_op_ctx *ctx;
826 struct rte_crypto_op *op;
827
828 dq = qman_dequeue(fq);
829 if (!dq)
830 continue;
831
832 fd = &dq->fd;
833 /* sg is embedded in an op ctx,
834 * sg[0] is for output
835 * sg[1] is for input
836 */
837 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
838
839 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
840 ctx->fd_status = fd->status;
841 op = ctx->op;
842 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
843 struct qm_sg_entry *sg_out;
844 uint32_t len;
845 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
846 op->sym->m_src : op->sym->m_dst;
847
848 sg_out = &job->sg[0];
849 hw_sg_to_cpu(sg_out);
850 len = sg_out->length;
851 mbuf->pkt_len = len;
852 while (mbuf->next != NULL) {
853 len -= mbuf->data_len;
854 mbuf = mbuf->next;
855 }
856 mbuf->data_len = len;
857 }
858 if (!ctx->fd_status) {
859 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
860 } else {
861 if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
862 DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
863 ctx->fd_status);
864 if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
865 dpaa_sec_dump(ctx, qp);
866 }
867 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
868 }
869 ops[pkts++] = op;
870
871 /* op status has been reported; return the ctx memory to its pool */
872 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
873
874 qman_dqrr_consume(fq, dq);
875 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
876
877 return pkts;
878 }
879
880 static inline struct dpaa_sec_job *
881 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
882 {
883 struct rte_crypto_sym_op *sym = op->sym;
884 struct rte_mbuf *mbuf = sym->m_src;
885 struct dpaa_sec_job *cf;
886 struct dpaa_sec_op_ctx *ctx;
887 struct qm_sg_entry *sg, *out_sg, *in_sg;
888 phys_addr_t start_addr;
889 uint8_t *old_digest, extra_segs;
890 int data_len, data_offset;
891
892 data_len = sym->auth.data.length;
893 data_offset = sym->auth.data.offset;
894
895 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
896 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
897 if ((data_len & 7) || (data_offset & 7)) {
898 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
899 return NULL;
900 }
901
902 data_len = data_len >> 3;
903 data_offset = data_offset >> 3;
904 }
905
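/* Digest verification (decode) needs one extra input SG entry carrying a
 * copy of the received digest for SEC to compare against.
 */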
906 if (is_decode(ses))
907 extra_segs = 3;
908 else
909 extra_segs = 2;
910
911 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
912 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
913 MAX_SG_ENTRIES);
914 return NULL;
915 }
916 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
917 if (!ctx)
918 return NULL;
919
920 cf = &ctx->job;
921 ctx->op = op;
922 old_digest = ctx->digest;
923
924 /* output */
925 out_sg = &cf->sg[0];
926 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
927 out_sg->length = ses->digest_length;
928 cpu_to_hw_sg(out_sg);
929
930 /* input */
931 in_sg = &cf->sg[1];
932 /* need to extend the input to a compound frame */
933 in_sg->extension = 1;
934 in_sg->final = 1;
935 in_sg->length = data_len;
936 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
937
938 /* 1st seg */
939 sg = in_sg + 1;
940
941 if (ses->iv.length) {
942 uint8_t *iv_ptr;
943
944 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
945 ses->iv.offset);
946
947 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
948 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
949 sg->length = 12;
950 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
951 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
952 sg->length = 8;
953 } else {
954 sg->length = ses->iv.length;
955 }
956 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
957 in_sg->length += sg->length;
958 cpu_to_hw_sg(sg);
959 sg++;
960 }
961
962 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
963 sg->offset = data_offset;
964
965 if (data_len <= (mbuf->data_len - data_offset)) {
966 sg->length = data_len;
967 } else {
968 sg->length = mbuf->data_len - data_offset;
969
970 /* remaining i/p segs */
971 while ((data_len = data_len - sg->length) &&
972 (mbuf = mbuf->next)) {
973 cpu_to_hw_sg(sg);
974 sg++;
975 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
976 if (data_len > mbuf->data_len)
977 sg->length = mbuf->data_len;
978 else
979 sg->length = data_len;
980 }
981 }
982
983 if (is_decode(ses)) {
984 /* Digest verification case */
985 cpu_to_hw_sg(sg);
986 sg++;
987 rte_memcpy(old_digest, sym->auth.digest.data,
988 ses->digest_length);
989 start_addr = rte_dpaa_mem_vtop(old_digest);
990 qm_sg_entry_set64(sg, start_addr);
991 sg->length = ses->digest_length;
992 in_sg->length += ses->digest_length;
993 }
994 sg->final = 1;
995 cpu_to_hw_sg(sg);
996 cpu_to_hw_sg(in_sg);
997
998 return cf;
999 }
1000
1001 /**
1002 * packet looks like:
1003 * |<----data_len------->|
1004 * |ip_header|ah_header|icv|payload|
1005 * ^
1006 * |
1007 * mbuf->pkt.data
1008 */
1009 static inline struct dpaa_sec_job *
1010 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1011 {
1012 struct rte_crypto_sym_op *sym = op->sym;
1013 struct rte_mbuf *mbuf = sym->m_src;
1014 struct dpaa_sec_job *cf;
1015 struct dpaa_sec_op_ctx *ctx;
1016 struct qm_sg_entry *sg, *in_sg;
1017 rte_iova_t start_addr;
1018 uint8_t *old_digest;
1019 int data_len, data_offset;
1020
1021 data_len = sym->auth.data.length;
1022 data_offset = sym->auth.data.offset;
1023
1024 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1025 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1026 if ((data_len & 7) || (data_offset & 7)) {
1027 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
1028 return NULL;
1029 }
1030
1031 data_len = data_len >> 3;
1032 data_offset = data_offset >> 3;
1033 }
1034
1035 ctx = dpaa_sec_alloc_ctx(ses, 4);
1036 if (!ctx)
1037 return NULL;
1038
1039 cf = &ctx->job;
1040 ctx->op = op;
1041 old_digest = ctx->digest;
1042
1043 start_addr = rte_pktmbuf_iova(mbuf);
1044 /* output */
1045 sg = &cf->sg[0];
1046 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1047 sg->length = ses->digest_length;
1048 cpu_to_hw_sg(sg);
1049
1050 /* input */
1051 in_sg = &cf->sg[1];
1052 /* need to extend the input to a compound frame */
1053 in_sg->extension = 1;
1054 in_sg->final = 1;
1055 in_sg->length = data_len;
1056 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1057 sg = &cf->sg[2];
1058
1059 if (ses->iv.length) {
1060 uint8_t *iv_ptr;
1061
1062 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1063 ses->iv.offset);
1064
1065 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1066 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1067 sg->length = 12;
1068 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1069 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1070 sg->length = 8;
1071 } else {
1072 sg->length = ses->iv.length;
1073 }
1074 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
1075 in_sg->length += sg->length;
1076 cpu_to_hw_sg(sg);
1077 sg++;
1078 }
1079
1080 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1081 sg->offset = data_offset;
1082 sg->length = data_len;
1083
1084 if (is_decode(ses)) {
1085 /* Digest verification case */
1086 cpu_to_hw_sg(sg);
1087 /* hash result or digest, save digest first */
1088 rte_memcpy(old_digest, sym->auth.digest.data,
1089 ses->digest_length);
1090 /* let's check digest by hw */
1091 start_addr = rte_dpaa_mem_vtop(old_digest);
1092 sg++;
1093 qm_sg_entry_set64(sg, start_addr);
1094 sg->length = ses->digest_length;
1095 in_sg->length += ses->digest_length;
1096 }
1097 sg->final = 1;
1098 cpu_to_hw_sg(sg);
1099 cpu_to_hw_sg(in_sg);
1100
1101 return cf;
1102 }
1103
1104 static inline struct dpaa_sec_job *
1105 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1106 {
1107 struct rte_crypto_sym_op *sym = op->sym;
1108 struct dpaa_sec_job *cf;
1109 struct dpaa_sec_op_ctx *ctx;
1110 struct qm_sg_entry *sg, *out_sg, *in_sg;
1111 struct rte_mbuf *mbuf;
1112 uint8_t req_segs;
1113 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1114 ses->iv.offset);
1115 int data_len, data_offset;
1116
1117 data_len = sym->cipher.data.length;
1118 data_offset = sym->cipher.data.offset;
1119
1120 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1121 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1122 if ((data_len & 7) || (data_offset & 7)) {
1123 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1124 return NULL;
1125 }
1126
1127 data_len = data_len >> 3;
1128 data_offset = data_offset >> 3;
1129 }
1130
1131 if (sym->m_dst) {
1132 mbuf = sym->m_dst;
1133 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1134 } else {
1135 mbuf = sym->m_src;
1136 req_segs = mbuf->nb_segs * 2 + 3;
1137 }
1138 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1139 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
1140 MAX_SG_ENTRIES);
1141 return NULL;
1142 }
1143
1144 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1145 if (!ctx)
1146 return NULL;
1147
1148 cf = &ctx->job;
1149 ctx->op = op;
1150
1151 /* output */
1152 out_sg = &cf->sg[0];
1153 out_sg->extension = 1;
1154 out_sg->length = data_len;
1155 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1156 cpu_to_hw_sg(out_sg);
1157
1158 /* 1st seg */
1159 sg = &cf->sg[2];
1160 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1161 sg->length = mbuf->data_len - data_offset;
1162 sg->offset = data_offset;
1163
1164 /* Successive segs */
1165 mbuf = mbuf->next;
1166 while (mbuf) {
1167 cpu_to_hw_sg(sg);
1168 sg++;
1169 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1170 sg->length = mbuf->data_len;
1171 mbuf = mbuf->next;
1172 }
1173 sg->final = 1;
1174 cpu_to_hw_sg(sg);
1175
1176 /* input */
1177 mbuf = sym->m_src;
1178 in_sg = &cf->sg[1];
1179 in_sg->extension = 1;
1180 in_sg->final = 1;
1181 in_sg->length = data_len + ses->iv.length;
1182
1183 sg++;
1184 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1185 cpu_to_hw_sg(in_sg);
1186
1187 /* IV */
1188 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1189 sg->length = ses->iv.length;
1190 cpu_to_hw_sg(sg);
1191
1192 /* 1st seg */
1193 sg++;
1194 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1195 sg->length = mbuf->data_len - data_offset;
1196 sg->offset = data_offset;
1197
1198 /* Successive segs */
1199 mbuf = mbuf->next;
1200 while (mbuf) {
1201 cpu_to_hw_sg(sg);
1202 sg++;
1203 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1204 sg->length = mbuf->data_len;
1205 mbuf = mbuf->next;
1206 }
1207 sg->final = 1;
1208 cpu_to_hw_sg(sg);
1209
1210 return cf;
1211 }
1212
1213 static inline struct dpaa_sec_job *
1214 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1215 {
1216 struct rte_crypto_sym_op *sym = op->sym;
1217 struct dpaa_sec_job *cf;
1218 struct dpaa_sec_op_ctx *ctx;
1219 struct qm_sg_entry *sg;
1220 rte_iova_t src_start_addr, dst_start_addr;
1221 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1222 ses->iv.offset);
1223 int data_len, data_offset;
1224
1225 data_len = sym->cipher.data.length;
1226 data_offset = sym->cipher.data.offset;
1227
1228 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1229 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1230 if ((data_len & 7) || (data_offset & 7)) {
1231 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1232 return NULL;
1233 }
1234
1235 data_len = data_len >> 3;
1236 data_offset = data_offset >> 3;
1237 }
1238
1239 ctx = dpaa_sec_alloc_ctx(ses, 4);
1240 if (!ctx)
1241 return NULL;
1242
1243 cf = &ctx->job;
1244 ctx->op = op;
1245
1246 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1247
1248 if (sym->m_dst)
1249 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1250 else
1251 dst_start_addr = src_start_addr;
1252
1253 /* output */
1254 sg = &cf->sg[0];
1255 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1256 sg->length = data_len + ses->iv.length;
1257 cpu_to_hw_sg(sg);
1258
1259 /* input */
1260 sg = &cf->sg[1];
1261
1262 /* need to extend the input to a compound frame */
1263 sg->extension = 1;
1264 sg->final = 1;
1265 sg->length = data_len + ses->iv.length;
1266 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1267 cpu_to_hw_sg(sg);
1268
1269 sg = &cf->sg[2];
1270 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1271 sg->length = ses->iv.length;
1272 cpu_to_hw_sg(sg);
1273
1274 sg++;
1275 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1276 sg->length = data_len;
1277 sg->final = 1;
1278 cpu_to_hw_sg(sg);
1279
1280 return cf;
1281 }
1282
1283 static inline struct dpaa_sec_job *
1284 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1285 {
1286 struct rte_crypto_sym_op *sym = op->sym;
1287 struct dpaa_sec_job *cf;
1288 struct dpaa_sec_op_ctx *ctx;
1289 struct qm_sg_entry *sg, *out_sg, *in_sg;
1290 struct rte_mbuf *mbuf;
1291 uint8_t req_segs;
1292 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1293 ses->iv.offset);
1294
1295 if (sym->m_dst) {
1296 mbuf = sym->m_dst;
1297 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1298 } else {
1299 mbuf = sym->m_src;
1300 req_segs = mbuf->nb_segs * 2 + 4;
1301 }
1302
1303 if (ses->auth_only_len)
1304 req_segs++;
1305
1306 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1307 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1308 MAX_SG_ENTRIES);
1309 return NULL;
1310 }
1311
1312 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1313 if (!ctx)
1314 return NULL;
1315
1316 cf = &ctx->job;
1317 ctx->op = op;
1318
1319 rte_prefetch0(cf->sg);
1320
1321 /* output */
1322 out_sg = &cf->sg[0];
1323 out_sg->extension = 1;
1324 if (is_encode(ses))
1325 out_sg->length = sym->aead.data.length + ses->digest_length;
1326 else
1327 out_sg->length = sym->aead.data.length;
1328
1329 /* output sg entries */
1330 sg = &cf->sg[2];
1331 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1332 cpu_to_hw_sg(out_sg);
1333
1334 /* 1st seg */
1335 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1336 sg->length = mbuf->data_len - sym->aead.data.offset;
1337 sg->offset = sym->aead.data.offset;
1338
1339 /* Successive segs */
1340 mbuf = mbuf->next;
1341 while (mbuf) {
1342 cpu_to_hw_sg(sg);
1343 sg++;
1344 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1345 sg->length = mbuf->data_len;
1346 mbuf = mbuf->next;
1347 }
1348 sg->length -= ses->digest_length;
1349
1350 if (is_encode(ses)) {
1351 cpu_to_hw_sg(sg);
1352 /* set auth output */
1353 sg++;
1354 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1355 sg->length = ses->digest_length;
1356 }
1357 sg->final = 1;
1358 cpu_to_hw_sg(sg);
1359
1360 /* input */
1361 mbuf = sym->m_src;
1362 in_sg = &cf->sg[1];
1363 in_sg->extension = 1;
1364 in_sg->final = 1;
1365 if (is_encode(ses))
1366 in_sg->length = ses->iv.length + sym->aead.data.length
1367 + ses->auth_only_len;
1368 else
1369 in_sg->length = ses->iv.length + sym->aead.data.length
1370 + ses->auth_only_len + ses->digest_length;
1371
1372 /* input sg entries */
1373 sg++;
1374 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1375 cpu_to_hw_sg(in_sg);
1376
1377 /* 1st seg IV */
1378 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1379 sg->length = ses->iv.length;
1380 cpu_to_hw_sg(sg);
1381
1382 /* 2nd seg auth only */
1383 if (ses->auth_only_len) {
1384 sg++;
1385 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1386 sg->length = ses->auth_only_len;
1387 cpu_to_hw_sg(sg);
1388 }
1389
1390 /* 3rd seg */
1391 sg++;
1392 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1393 sg->length = mbuf->data_len - sym->aead.data.offset;
1394 sg->offset = sym->aead.data.offset;
1395
1396 /* Successive segs */
1397 mbuf = mbuf->next;
1398 while (mbuf) {
1399 cpu_to_hw_sg(sg);
1400 sg++;
1401 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1402 sg->length = mbuf->data_len;
1403 mbuf = mbuf->next;
1404 }
1405
1406 if (is_decode(ses)) {
1407 cpu_to_hw_sg(sg);
1408 sg++;
1409 memcpy(ctx->digest, sym->aead.digest.data,
1410 ses->digest_length);
1411 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1412 sg->length = ses->digest_length;
1413 }
1414 sg->final = 1;
1415 cpu_to_hw_sg(sg);
1416
1417 return cf;
1418 }
1419
1420 static inline struct dpaa_sec_job *
1421 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1422 {
1423 struct rte_crypto_sym_op *sym = op->sym;
1424 struct dpaa_sec_job *cf;
1425 struct dpaa_sec_op_ctx *ctx;
1426 struct qm_sg_entry *sg;
1427 uint32_t length = 0;
1428 rte_iova_t src_start_addr, dst_start_addr;
1429 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1430 ses->iv.offset);
1431
1432 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1433
1434 if (sym->m_dst)
1435 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1436 else
1437 dst_start_addr = src_start_addr;
1438
1439 ctx = dpaa_sec_alloc_ctx(ses, 7);
1440 if (!ctx)
1441 return NULL;
1442
1443 cf = &ctx->job;
1444 ctx->op = op;
1445
1446 /* input */
1447 rte_prefetch0(cf->sg);
1448 sg = &cf->sg[2];
1449 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1450 if (is_encode(ses)) {
1451 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1452 sg->length = ses->iv.length;
1453 length += sg->length;
1454 cpu_to_hw_sg(sg);
1455
1456 sg++;
1457 if (ses->auth_only_len) {
1458 qm_sg_entry_set64(sg,
1459 rte_dpaa_mem_vtop(sym->aead.aad.data));
1460 sg->length = ses->auth_only_len;
1461 length += sg->length;
1462 cpu_to_hw_sg(sg);
1463 sg++;
1464 }
1465 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1466 sg->length = sym->aead.data.length;
1467 length += sg->length;
1468 sg->final = 1;
1469 cpu_to_hw_sg(sg);
1470 } else {
1471 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1472 sg->length = ses->iv.length;
1473 length += sg->length;
1474 cpu_to_hw_sg(sg);
1475
1476 sg++;
1477 if (ses->auth_only_len) {
1478 qm_sg_entry_set64(sg,
1479 rte_dpaa_mem_vtop(sym->aead.aad.data));
1480 sg->length = ses->auth_only_len;
1481 length += sg->length;
1482 cpu_to_hw_sg(sg);
1483 sg++;
1484 }
1485 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1486 sg->length = sym->aead.data.length;
1487 length += sg->length;
1488 cpu_to_hw_sg(sg);
1489
1490 memcpy(ctx->digest, sym->aead.digest.data,
1491 ses->digest_length);
1492 sg++;
1493
1494 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1495 sg->length = ses->digest_length;
1496 length += sg->length;
1497 sg->final = 1;
1498 cpu_to_hw_sg(sg);
1499 }
1500 /* input compound frame */
1501 cf->sg[1].length = length;
1502 cf->sg[1].extension = 1;
1503 cf->sg[1].final = 1;
1504 cpu_to_hw_sg(&cf->sg[1]);
1505
1506 /* output */
1507 sg++;
1508 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1509 qm_sg_entry_set64(sg,
1510 dst_start_addr + sym->aead.data.offset);
1511 sg->length = sym->aead.data.length;
1512 length = sg->length;
1513 if (is_encode(ses)) {
1514 cpu_to_hw_sg(sg);
1515 /* set auth output */
1516 sg++;
1517 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1518 sg->length = ses->digest_length;
1519 length += sg->length;
1520 }
1521 sg->final = 1;
1522 cpu_to_hw_sg(sg);
1523
1524 /* output compound frame */
1525 cf->sg[0].length = length;
1526 cf->sg[0].extension = 1;
1527 cpu_to_hw_sg(&cf->sg[0]);
1528
1529 return cf;
1530 }
1531
1532 static inline struct dpaa_sec_job *
1533 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1534 {
1535 struct rte_crypto_sym_op *sym = op->sym;
1536 struct dpaa_sec_job *cf;
1537 struct dpaa_sec_op_ctx *ctx;
1538 struct qm_sg_entry *sg, *out_sg, *in_sg;
1539 struct rte_mbuf *mbuf;
1540 uint8_t req_segs;
1541 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1542 ses->iv.offset);
1543
1544 if (sym->m_dst) {
1545 mbuf = sym->m_dst;
1546 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1547 } else {
1548 mbuf = sym->m_src;
1549 req_segs = mbuf->nb_segs * 2 + 4;
1550 }
1551
1552 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1553 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1554 MAX_SG_ENTRIES);
1555 return NULL;
1556 }
1557
1558 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1559 if (!ctx)
1560 return NULL;
1561
1562 cf = &ctx->job;
1563 ctx->op = op;
1564
1565 rte_prefetch0(cf->sg);
1566
1567 /* output */
1568 out_sg = &cf->sg[0];
1569 out_sg->extension = 1;
1570 if (is_encode(ses))
1571 out_sg->length = sym->auth.data.length + ses->digest_length;
1572 else
1573 out_sg->length = sym->auth.data.length;
1574
1575 /* output sg entries */
1576 sg = &cf->sg[2];
1577 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1578 cpu_to_hw_sg(out_sg);
1579
1580 /* 1st seg */
1581 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1582 sg->length = mbuf->data_len - sym->auth.data.offset;
1583 sg->offset = sym->auth.data.offset;
1584
1585 /* Successive segs */
1586 mbuf = mbuf->next;
1587 while (mbuf) {
1588 cpu_to_hw_sg(sg);
1589 sg++;
1590 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1591 sg->length = mbuf->data_len;
1592 mbuf = mbuf->next;
1593 }
1594 sg->length -= ses->digest_length;
1595
1596 if (is_encode(ses)) {
1597 cpu_to_hw_sg(sg);
1598 /* set auth output */
1599 sg++;
1600 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1601 sg->length = ses->digest_length;
1602 }
1603 sg->final = 1;
1604 cpu_to_hw_sg(sg);
1605
1606 /* input */
1607 mbuf = sym->m_src;
1608 in_sg = &cf->sg[1];
1609 in_sg->extension = 1;
1610 in_sg->final = 1;
1611 if (is_encode(ses))
1612 in_sg->length = ses->iv.length + sym->auth.data.length;
1613 else
1614 in_sg->length = ses->iv.length + sym->auth.data.length
1615 + ses->digest_length;
1616
1617 /* input sg entries */
1618 sg++;
1619 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1620 cpu_to_hw_sg(in_sg);
1621
1622 /* 1st seg IV */
1623 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1624 sg->length = ses->iv.length;
1625 cpu_to_hw_sg(sg);
1626
1627 /* 2nd seg */
1628 sg++;
1629 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1630 sg->length = mbuf->data_len - sym->auth.data.offset;
1631 sg->offset = sym->auth.data.offset;
1632
1633 /* Successive segs */
1634 mbuf = mbuf->next;
1635 while (mbuf) {
1636 cpu_to_hw_sg(sg);
1637 sg++;
1638 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1639 sg->length = mbuf->data_len;
1640 mbuf = mbuf->next;
1641 }
1642
1643 sg->length -= ses->digest_length;
1644 if (is_decode(ses)) {
1645 cpu_to_hw_sg(sg);
1646 sg++;
1647 memcpy(ctx->digest, sym->auth.digest.data,
1648 ses->digest_length);
1649 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1650 sg->length = ses->digest_length;
1651 }
1652 sg->final = 1;
1653 cpu_to_hw_sg(sg);
1654
1655 return cf;
1656 }
1657
1658 static inline struct dpaa_sec_job *
1659 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1660 {
1661 struct rte_crypto_sym_op *sym = op->sym;
1662 struct dpaa_sec_job *cf;
1663 struct dpaa_sec_op_ctx *ctx;
1664 struct qm_sg_entry *sg;
1665 rte_iova_t src_start_addr, dst_start_addr;
1666 uint32_t length = 0;
1667 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1668 ses->iv.offset);
1669
1670 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1671 if (sym->m_dst)
1672 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1673 else
1674 dst_start_addr = src_start_addr;
1675
1676 ctx = dpaa_sec_alloc_ctx(ses, 7);
1677 if (!ctx)
1678 return NULL;
1679
1680 cf = &ctx->job;
1681 ctx->op = op;
1682
1683 /* input */
1684 rte_prefetch0(cf->sg);
1685 sg = &cf->sg[2];
1686 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1687 if (is_encode(ses)) {
1688 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1689 sg->length = ses->iv.length;
1690 length += sg->length;
1691 cpu_to_hw_sg(sg);
1692
1693 sg++;
1694 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1695 sg->length = sym->auth.data.length;
1696 length += sg->length;
1697 sg->final = 1;
1698 cpu_to_hw_sg(sg);
1699 } else {
1700 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1701 sg->length = ses->iv.length;
1702 length += sg->length;
1703 cpu_to_hw_sg(sg);
1704
1705 sg++;
1706
1707 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1708 sg->length = sym->auth.data.length;
1709 length += sg->length;
1710 cpu_to_hw_sg(sg);
1711
1712 memcpy(ctx->digest, sym->auth.digest.data,
1713 ses->digest_length);
1714 sg++;
1715
1716 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1717 sg->length = ses->digest_length;
1718 length += sg->length;
1719 sg->final = 1;
1720 cpu_to_hw_sg(sg);
1721 }
1722 /* input compound frame */
1723 cf->sg[1].length = length;
1724 cf->sg[1].extension = 1;
1725 cf->sg[1].final = 1;
1726 cpu_to_hw_sg(&cf->sg[1]);
1727
1728 /* output */
1729 sg++;
1730 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1731 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1732 sg->length = sym->cipher.data.length;
1733 length = sg->length;
1734 if (is_encode(ses)) {
1735 cpu_to_hw_sg(sg);
1736 /* set auth output */
1737 sg++;
1738 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1739 sg->length = ses->digest_length;
1740 length += sg->length;
1741 }
1742 sg->final = 1;
1743 cpu_to_hw_sg(sg);
1744
1745 /* output compound frame */
1746 cf->sg[0].length = length;
1747 cf->sg[0].extension = 1;
1748 cpu_to_hw_sg(&cf->sg[0]);
1749
1750 return cf;
1751 }
1752
1753 #ifdef RTE_LIB_SECURITY
1754 static inline struct dpaa_sec_job *
1755 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1756 {
1757 struct rte_crypto_sym_op *sym = op->sym;
1758 struct dpaa_sec_job *cf;
1759 struct dpaa_sec_op_ctx *ctx;
1760 struct qm_sg_entry *sg;
1761 phys_addr_t src_start_addr, dst_start_addr;
1762
1763 ctx = dpaa_sec_alloc_ctx(ses, 2);
1764 if (!ctx)
1765 return NULL;
1766 cf = &ctx->job;
1767 ctx->op = op;
1768
1769 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1770
1771 if (sym->m_dst)
1772 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1773 else
1774 dst_start_addr = src_start_addr;
1775
1776 /* input */
1777 sg = &cf->sg[1];
1778 qm_sg_entry_set64(sg, src_start_addr);
1779 sg->length = sym->m_src->pkt_len;
1780 sg->final = 1;
1781 cpu_to_hw_sg(sg);
1782
1783 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1784 /* output */
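/* Give SEC the whole remaining buffer: protocol offload (e.g. IPsec
 * encap) may legitimately produce more bytes than the input packet.
 */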
1785 sg = &cf->sg[0];
1786 qm_sg_entry_set64(sg, dst_start_addr);
1787 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1788 cpu_to_hw_sg(sg);
1789
1790 return cf;
1791 }
1792
1793 static inline struct dpaa_sec_job *
1794 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1795 {
1796 struct rte_crypto_sym_op *sym = op->sym;
1797 struct dpaa_sec_job *cf;
1798 struct dpaa_sec_op_ctx *ctx;
1799 struct qm_sg_entry *sg, *out_sg, *in_sg;
1800 struct rte_mbuf *mbuf;
1801 uint8_t req_segs;
1802 uint32_t in_len = 0, out_len = 0;
1803
1804 if (sym->m_dst)
1805 mbuf = sym->m_dst;
1806 else
1807 mbuf = sym->m_src;
1808
1809 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1810 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1811 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1812 MAX_SG_ENTRIES);
1813 return NULL;
1814 }
1815
1816 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1817 if (!ctx)
1818 return NULL;
1819 cf = &ctx->job;
1820 ctx->op = op;
1821 /* output */
1822 out_sg = &cf->sg[0];
1823 out_sg->extension = 1;
1824 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1825
1826 /* 1st seg */
1827 sg = &cf->sg[2];
1828 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1829 sg->offset = 0;
1830
1831 /* Successive segs */
1832 while (mbuf->next) {
1833 sg->length = mbuf->data_len;
1834 out_len += sg->length;
1835 mbuf = mbuf->next;
1836 cpu_to_hw_sg(sg);
1837 sg++;
1838 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1839 sg->offset = 0;
1840 }
1841 sg->length = mbuf->buf_len - mbuf->data_off;
1842 out_len += sg->length;
1843 sg->final = 1;
1844 cpu_to_hw_sg(sg);
1845
1846 out_sg->length = out_len;
1847 cpu_to_hw_sg(out_sg);
1848
1849 /* input */
1850 mbuf = sym->m_src;
1851 in_sg = &cf->sg[1];
1852 in_sg->extension = 1;
1853 in_sg->final = 1;
1854 in_len = mbuf->data_len;
1855
1856 sg++;
1857 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1858
1859 /* 1st seg */
1860 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1861 sg->length = mbuf->data_len;
1862 sg->offset = 0;
1863
1864 /* Successive segs */
1865 mbuf = mbuf->next;
1866 while (mbuf) {
1867 cpu_to_hw_sg(sg);
1868 sg++;
1869 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1870 sg->length = mbuf->data_len;
1871 sg->offset = 0;
1872 in_len += sg->length;
1873 mbuf = mbuf->next;
1874 }
1875 sg->final = 1;
1876 cpu_to_hw_sg(sg);
1877
1878 in_sg->length = in_len;
1879 cpu_to_hw_sg(in_sg);
1880
1881 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1882
1883 return cf;
1884 }
1885 #endif
1886
1887 static uint16_t
1888 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1889 uint16_t nb_ops)
1890 {
1891 /* Transmit the frames to the given device and queue pair */
1892 uint32_t loop;
1893 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1894 uint16_t num_tx = 0;
1895 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1896 uint32_t frames_to_send;
1897 struct rte_crypto_op *op;
1898 struct dpaa_sec_job *cf;
1899 dpaa_sec_session *ses;
1900 uint16_t auth_hdr_len, auth_tail_len;
1901 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1902 struct qman_fq *inq[DPAA_SEC_BURST];
1903
1904 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1905 if (rte_dpaa_portal_init((void *)0)) {
1906 DPAA_SEC_ERR("Failure in affining portal");
1907 return 0;
1908 }
1909 }
1910
1911 while (nb_ops) {
1912 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1913 DPAA_SEC_BURST : nb_ops;
1914 for (loop = 0; loop < frames_to_send; loop++) {
1915 op = *(ops++);
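/* If the mbuf carries a held DQRR index (atomic/event dequeue mode),
 * enqueue with DCA so the enqueue also consumes the DQRR entry and
 * releases the held flow.
 */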
1916 if (*dpaa_seqn(op->sym->m_src) != 0) {
1917 index = *dpaa_seqn(op->sym->m_src) - 1;
1918 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1919 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1920 flags[loop] = ((index & 0x0f) << 8);
1921 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1922 DPAA_PER_LCORE_DQRR_SIZE--;
1923 DPAA_PER_LCORE_DQRR_HELD &=
1924 ~(1 << index);
1925 }
1926 }
1927
1928 switch (op->sess_type) {
1929 case RTE_CRYPTO_OP_WITH_SESSION:
1930 ses = (dpaa_sec_session *)
1931 get_sym_session_private_data(
1932 op->sym->session,
1933 dpaa_cryptodev_driver_id);
1934 break;
1935 #ifdef RTE_LIB_SECURITY
1936 case RTE_CRYPTO_OP_SECURITY_SESSION:
1937 ses = (dpaa_sec_session *)
1938 get_sec_session_private_data(
1939 op->sym->sec_session);
1940 break;
1941 #endif
1942 default:
1943 DPAA_SEC_DP_ERR(
1944 "sessionless crypto op not supported");
1945 frames_to_send = loop;
1946 nb_ops = loop;
1947 goto send_pkts;
1948 }
1949
1950 if (!ses) {
1951 DPAA_SEC_DP_ERR("session not available");
1952 frames_to_send = loop;
1953 nb_ops = loop;
1954 goto send_pkts;
1955 }
1956
1957 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1958 if (dpaa_sec_attach_sess_q(qp, ses)) {
1959 frames_to_send = loop;
1960 nb_ops = loop;
1961 goto send_pkts;
1962 }
1963 } else if (unlikely(ses->qp[rte_lcore_id() %
1964 MAX_DPAA_CORES] != qp)) {
1965 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1966 " New qp = %p\n",
1967 ses->qp[rte_lcore_id() %
1968 MAX_DPAA_CORES], qp);
1969 frames_to_send = loop;
1970 nb_ops = loop;
1971 goto send_pkts;
1972 }
1973
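/* auth_hdr_len/auth_tail_len describe the authenticate-only bytes
 * before/after the ciphered region; they are refined per session type
 * below and passed to SEC through fd->cmd (DPOVRD).
 */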
1974 auth_hdr_len = op->sym->auth.data.length -
1975 op->sym->cipher.data.length;
1976 auth_tail_len = 0;
1977
1978 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1979 ((op->sym->m_dst == NULL) ||
1980 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1981 switch (ses->ctxt) {
1982 #ifdef RTE_LIB_SECURITY
1983 case DPAA_SEC_PDCP:
1984 case DPAA_SEC_IPSEC:
1985 cf = build_proto(op, ses);
1986 break;
1987 #endif
1988 case DPAA_SEC_AUTH:
1989 cf = build_auth_only(op, ses);
1990 break;
1991 case DPAA_SEC_CIPHER:
1992 cf = build_cipher_only(op, ses);
1993 break;
1994 case DPAA_SEC_AEAD:
1995 cf = build_cipher_auth_gcm(op, ses);
1996 auth_hdr_len = ses->auth_only_len;
1997 break;
1998 case DPAA_SEC_CIPHER_HASH:
1999 auth_hdr_len =
2000 op->sym->cipher.data.offset
2001 - op->sym->auth.data.offset;
2002 auth_tail_len =
2003 op->sym->auth.data.length
2004 - op->sym->cipher.data.length
2005 - auth_hdr_len;
2006 cf = build_cipher_auth(op, ses);
2007 break;
2008 default:
2009 				DPAA_SEC_DP_ERR("unsupported op");
2010 frames_to_send = loop;
2011 nb_ops = loop;
2012 goto send_pkts;
2013 }
2014 } else {
2015 switch (ses->ctxt) {
2016 #ifdef RTE_LIB_SECURITY
2017 case DPAA_SEC_PDCP:
2018 case DPAA_SEC_IPSEC:
2019 cf = build_proto_sg(op, ses);
2020 break;
2021 #endif
2022 case DPAA_SEC_AUTH:
2023 cf = build_auth_only_sg(op, ses);
2024 break;
2025 case DPAA_SEC_CIPHER:
2026 cf = build_cipher_only_sg(op, ses);
2027 break;
2028 case DPAA_SEC_AEAD:
2029 cf = build_cipher_auth_gcm_sg(op, ses);
2030 auth_hdr_len = ses->auth_only_len;
2031 break;
2032 case DPAA_SEC_CIPHER_HASH:
2033 auth_hdr_len =
2034 op->sym->cipher.data.offset
2035 - op->sym->auth.data.offset;
2036 auth_tail_len =
2037 op->sym->auth.data.length
2038 - op->sym->cipher.data.length
2039 - auth_hdr_len;
2040 cf = build_cipher_auth_sg(op, ses);
2041 break;
2042 default:
2043 				DPAA_SEC_DP_ERR("unsupported op");
2044 frames_to_send = loop;
2045 nb_ops = loop;
2046 goto send_pkts;
2047 }
2048 }
2049 if (unlikely(!cf)) {
2050 frames_to_send = loop;
2051 nb_ops = loop;
2052 goto send_pkts;
2053 }
2054
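			/*
			 * Build a compound frame descriptor: the FD points at
			 * the job's SG table, where sg[0] describes the output
			 * buffer and sg[1] the input, and length29 covers the
			 * two table entries.
			 */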
2055 fd = &fds[loop];
2056 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
2057 fd->opaque_addr = 0;
2058 fd->cmd = 0;
2059 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
2060 fd->_format1 = qm_fd_compound;
2061 fd->length29 = 2 * sizeof(struct qm_sg_entry);
2062
2063 /* Auth_only_len is set as 0 in descriptor and it is
2064 * overwritten here in the fd.cmd which will update
2065 * the DPOVRD reg.
2066 */
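			/*
			 * Value written below: bit 31 is the override-enable
			 * flag, the upper half carries auth_tail_len and the
			 * lower 16 bits carry auth_hdr_len.
			 */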
2067 if (auth_hdr_len || auth_tail_len) {
2068 fd->cmd = 0x80000000;
2069 fd->cmd |=
2070 ((auth_tail_len << 16) | auth_hdr_len);
2071 }
2072
2073 #ifdef RTE_LIB_SECURITY
2074 /* In case of PDCP, per packet HFN is stored in
2075 * mbuf priv after sym_op.
2076 */
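			/*
			 * Illustrative application-side usage (assuming
			 * hfn_ovrd was enabled in the PDCP xform, so that
			 * hfn_ovd_offset was set to the cipher IV offset at
			 * session creation):
			 *   *rte_crypto_op_ctod_offset(op, uint32_t *,
			 *		ses->pdcp.hfn_ovd_offset) = per_pkt_hfn;
			 */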
2077 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
2078 fd->cmd = 0x80000000 |
2079 *((uint32_t *)((uint8_t *)op +
2080 ses->pdcp.hfn_ovd_offset));
2081 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
2082 *((uint32_t *)((uint8_t *)op +
2083 ses->pdcp.hfn_ovd_offset)),
2084 ses->pdcp.hfn_ovd);
2085 }
2086 #endif
2087 }
2088 send_pkts:
2089 loop = 0;
2090 while (loop < frames_to_send) {
2091 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
2092 &flags[loop], frames_to_send - loop);
2093 }
2094 nb_ops -= frames_to_send;
2095 num_tx += frames_to_send;
2096 }
2097
2098 dpaa_qp->tx_pkts += num_tx;
2099 	dpaa_qp->tx_errs += nb_ops_in - num_tx;
2100
2101 return num_tx;
2102 }
2103
2104 static uint16_t
2105 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
2106 uint16_t nb_ops)
2107 {
2108 uint16_t num_rx;
2109 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
2110
2111 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2112 if (rte_dpaa_portal_init((void *)0)) {
2113 DPAA_SEC_ERR("Failure in affining portal");
2114 return 0;
2115 }
2116 }
2117
2118 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
2119
2120 dpaa_qp->rx_pkts += num_rx;
2121 dpaa_qp->rx_errs += nb_ops - num_rx;
2122
2123 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
2124
2125 return num_rx;
2126 }
2127
2128 /** Release queue pair */
2129 static int
2130 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
2131 uint16_t qp_id)
2132 {
2133 struct dpaa_sec_dev_private *internals;
2134 struct dpaa_sec_qp *qp = NULL;
2135
2136 PMD_INIT_FUNC_TRACE();
2137
2138 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
2139
2140 internals = dev->data->dev_private;
2141 if (qp_id >= internals->max_nb_queue_pairs) {
2142 DPAA_SEC_ERR("Max supported qpid %d",
2143 internals->max_nb_queue_pairs);
2144 return -EINVAL;
2145 }
2146
2147 qp = &internals->qps[qp_id];
2148 rte_mempool_free(qp->ctx_pool);
2149 qp->internals = NULL;
2150 dev->data->queue_pairs[qp_id] = NULL;
2151
2152 return 0;
2153 }
2154
2155 /** Setup a queue pair */
2156 static int
2157 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2158 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2159 __rte_unused int socket_id)
2160 {
2161 struct dpaa_sec_dev_private *internals;
2162 struct dpaa_sec_qp *qp = NULL;
2163 char str[20];
2164
2165 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2166
2167 internals = dev->data->dev_private;
2168 if (qp_id >= internals->max_nb_queue_pairs) {
2169 DPAA_SEC_ERR("Max supported qpid %d",
2170 internals->max_nb_queue_pairs);
2171 return -EINVAL;
2172 }
2173
2174 qp = &internals->qps[qp_id];
2175 qp->internals = internals;
2176 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2177 dev->data->dev_id, qp_id);
2178 if (!qp->ctx_pool) {
2179 qp->ctx_pool = rte_mempool_create((const char *)str,
2180 CTX_POOL_NUM_BUFS,
2181 CTX_POOL_BUF_SIZE,
2182 CTX_POOL_CACHE_SIZE, 0,
2183 NULL, NULL, NULL, NULL,
2184 SOCKET_ID_ANY, 0);
2185 if (!qp->ctx_pool) {
2186 DPAA_SEC_ERR("%s create failed\n", str);
2187 return -ENOMEM;
2188 }
2189 } else
2190 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2191 dev->data->dev_id, qp_id);
2192 dev->data->queue_pairs[qp_id] = qp;
2193
2194 return 0;
2195 }
2196
2197 /** Returns the size of session structure */
2198 static unsigned int
2199 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2200 {
2201 PMD_INIT_FUNC_TRACE();
2202
2203 return sizeof(dpaa_sec_session);
2204 }
2205
2206 static int
2207 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2208 struct rte_crypto_sym_xform *xform,
2209 dpaa_sec_session *session)
2210 {
2211 session->ctxt = DPAA_SEC_CIPHER;
2212 session->cipher_alg = xform->cipher.algo;
2213 session->iv.length = xform->cipher.iv.length;
2214 session->iv.offset = xform->cipher.iv.offset;
2215 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2216 RTE_CACHE_LINE_SIZE);
2217 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2218 DPAA_SEC_ERR("No Memory for cipher key");
2219 return -ENOMEM;
2220 }
2221 session->cipher_key.length = xform->cipher.key.length;
2222
2223 memcpy(session->cipher_key.data, xform->cipher.key.data,
2224 xform->cipher.key.length);
2225 switch (xform->cipher.algo) {
2226 case RTE_CRYPTO_CIPHER_AES_CBC:
2227 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2228 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2229 break;
2230 case RTE_CRYPTO_CIPHER_DES_CBC:
2231 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2232 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2233 break;
2234 case RTE_CRYPTO_CIPHER_3DES_CBC:
2235 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2236 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2237 break;
2238 case RTE_CRYPTO_CIPHER_AES_CTR:
2239 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2240 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2241 break;
2242 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2243 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2244 break;
2245 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2246 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2247 break;
2248 default:
2249 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2250 xform->cipher.algo);
2251 return -ENOTSUP;
2252 }
2253 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2254 DIR_ENC : DIR_DEC;
2255
2256 return 0;
2257 }
2258
2259 static int
2260 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2261 struct rte_crypto_sym_xform *xform,
2262 dpaa_sec_session *session)
2263 {
2264 session->ctxt = DPAA_SEC_AUTH;
2265 session->auth_alg = xform->auth.algo;
2266 session->auth_key.length = xform->auth.key.length;
2267 if (xform->auth.key.length) {
2268 session->auth_key.data =
2269 rte_zmalloc(NULL, xform->auth.key.length,
2270 RTE_CACHE_LINE_SIZE);
2271 if (session->auth_key.data == NULL) {
2272 DPAA_SEC_ERR("No Memory for auth key");
2273 return -ENOMEM;
2274 }
2275 memcpy(session->auth_key.data, xform->auth.key.data,
2276 xform->auth.key.length);
2277
2278 }
2279 session->digest_length = xform->auth.digest_length;
2280 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2281 session->iv.offset = xform->auth.iv.offset;
2282 session->iv.length = xform->auth.iv.length;
2283 }
2284
2285 switch (xform->auth.algo) {
2286 case RTE_CRYPTO_AUTH_SHA1:
2287 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2288 session->auth_key.algmode = OP_ALG_AAI_HASH;
2289 break;
2290 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2291 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2292 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2293 break;
2294 case RTE_CRYPTO_AUTH_MD5:
2295 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2296 session->auth_key.algmode = OP_ALG_AAI_HASH;
2297 break;
2298 case RTE_CRYPTO_AUTH_MD5_HMAC:
2299 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2300 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2301 break;
2302 case RTE_CRYPTO_AUTH_SHA224:
2303 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2304 session->auth_key.algmode = OP_ALG_AAI_HASH;
2305 break;
2306 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2307 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2308 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2309 break;
2310 case RTE_CRYPTO_AUTH_SHA256:
2311 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2312 session->auth_key.algmode = OP_ALG_AAI_HASH;
2313 break;
2314 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2315 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2316 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2317 break;
2318 case RTE_CRYPTO_AUTH_SHA384:
2319 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2320 session->auth_key.algmode = OP_ALG_AAI_HASH;
2321 break;
2322 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2323 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2324 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2325 break;
2326 case RTE_CRYPTO_AUTH_SHA512:
2327 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2328 session->auth_key.algmode = OP_ALG_AAI_HASH;
2329 break;
2330 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2331 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2332 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2333 break;
2334 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2335 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2336 session->auth_key.algmode = OP_ALG_AAI_F9;
2337 break;
2338 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2339 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2340 session->auth_key.algmode = OP_ALG_AAI_F9;
2341 break;
2342 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2343 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2344 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2345 break;
2346 case RTE_CRYPTO_AUTH_AES_CMAC:
2347 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2348 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2349 break;
2350 default:
2351 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2352 xform->auth.algo);
2353 return -ENOTSUP;
2354 }
2355
2356 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2357 DIR_ENC : DIR_DEC;
2358
2359 return 0;
2360 }
2361
2362 static int
2363 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2364 struct rte_crypto_sym_xform *xform,
2365 dpaa_sec_session *session)
2366 {
2367
2368 struct rte_crypto_cipher_xform *cipher_xform;
2369 struct rte_crypto_auth_xform *auth_xform;
2370
2371 session->ctxt = DPAA_SEC_CIPHER_HASH;
2372 if (session->auth_cipher_text) {
2373 cipher_xform = &xform->cipher;
2374 auth_xform = &xform->next->auth;
2375 } else {
2376 cipher_xform = &xform->next->cipher;
2377 auth_xform = &xform->auth;
2378 }
2379
2380 /* Set IV parameters */
2381 session->iv.offset = cipher_xform->iv.offset;
2382 session->iv.length = cipher_xform->iv.length;
2383
2384 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2385 RTE_CACHE_LINE_SIZE);
2386 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2387 DPAA_SEC_ERR("No Memory for cipher key");
2388 return -ENOMEM;
2389 }
2390 session->cipher_key.length = cipher_xform->key.length;
2391 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2392 RTE_CACHE_LINE_SIZE);
2393 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2394 DPAA_SEC_ERR("No Memory for auth key");
2395 return -ENOMEM;
2396 }
2397 session->auth_key.length = auth_xform->key.length;
2398 memcpy(session->cipher_key.data, cipher_xform->key.data,
2399 cipher_xform->key.length);
2400 memcpy(session->auth_key.data, auth_xform->key.data,
2401 auth_xform->key.length);
2402
2403 session->digest_length = auth_xform->digest_length;
2404 session->auth_alg = auth_xform->algo;
2405
2406 switch (auth_xform->algo) {
2407 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2408 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2409 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2410 break;
2411 case RTE_CRYPTO_AUTH_MD5_HMAC:
2412 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2413 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2414 break;
2415 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2416 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2417 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2418 break;
2419 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2420 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2421 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2422 break;
2423 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2424 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2425 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2426 break;
2427 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2428 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2429 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2430 break;
2431 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2432 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2433 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2434 break;
2435 case RTE_CRYPTO_AUTH_AES_CMAC:
2436 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2437 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2438 break;
2439 default:
2440 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2441 auth_xform->algo);
2442 return -ENOTSUP;
2443 }
2444
2445 session->cipher_alg = cipher_xform->algo;
2446
2447 switch (cipher_xform->algo) {
2448 case RTE_CRYPTO_CIPHER_AES_CBC:
2449 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2450 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2451 break;
2452 case RTE_CRYPTO_CIPHER_DES_CBC:
2453 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2454 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2455 break;
2456 case RTE_CRYPTO_CIPHER_3DES_CBC:
2457 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2458 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2459 break;
2460 case RTE_CRYPTO_CIPHER_AES_CTR:
2461 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2462 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2463 break;
2464 default:
2465 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2466 cipher_xform->algo);
2467 return -ENOTSUP;
2468 }
2469 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2470 DIR_ENC : DIR_DEC;
2471 return 0;
2472 }
2473
2474 static int
2475 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2476 struct rte_crypto_sym_xform *xform,
2477 dpaa_sec_session *session)
2478 {
2479 session->aead_alg = xform->aead.algo;
2480 session->ctxt = DPAA_SEC_AEAD;
2481 session->iv.length = xform->aead.iv.length;
2482 session->iv.offset = xform->aead.iv.offset;
2483 session->auth_only_len = xform->aead.aad_length;
2484 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2485 RTE_CACHE_LINE_SIZE);
2486 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2487 DPAA_SEC_ERR("No Memory for aead key\n");
2488 return -ENOMEM;
2489 }
2490 session->aead_key.length = xform->aead.key.length;
2491 session->digest_length = xform->aead.digest_length;
2492
2493 memcpy(session->aead_key.data, xform->aead.key.data,
2494 xform->aead.key.length);
2495
2496 switch (session->aead_alg) {
2497 case RTE_CRYPTO_AEAD_AES_GCM:
2498 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2499 session->aead_key.algmode = OP_ALG_AAI_GCM;
2500 break;
2501 default:
2502 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2503 return -ENOTSUP;
2504 }
2505
2506 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2507 DIR_ENC : DIR_DEC;
2508
2509 return 0;
2510 }
2511
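/* Reserve one of the pre-created SEC input (Rx) frame queues for a session.
 * Each session claims MAX_DPAA_CORES of them, so the number of concurrent
 * sessions is bounded by RTE_DPAA_MAX_RX_QUEUE / MAX_DPAA_CORES.
 */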
2512 static struct qman_fq *
2513 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2514 {
2515 unsigned int i;
2516
2517 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2518 if (qi->inq_attach[i] == 0) {
2519 qi->inq_attach[i] = 1;
2520 return &qi->inq[i];
2521 }
2522 }
2523 	DPAA_SEC_WARN("All sessions in use (%u)", qi->max_nb_sessions);
2524
2525 return NULL;
2526 }
2527
2528 static int
2529 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2530 {
2531 unsigned int i;
2532
2533 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2534 if (&qi->inq[i] == fq) {
2535 if (qman_retire_fq(fq, NULL) != 0)
2536 DPAA_SEC_DEBUG("Queue is not retired\n");
2537 qman_oos_fq(fq);
2538 qi->inq_attach[i] = 0;
2539 return 0;
2540 }
2541 }
2542 return -1;
2543 }
2544
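/* Bind the session's per-core input queue to a queue pair: the FQ context is
 * pointed at the session's CDB (shared descriptor) and processed frames are
 * delivered to the queue pair's output FQ.
 */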
2545 int
2546 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2547 {
2548 int ret;
2549
2550 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2551 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2552 ret = rte_dpaa_portal_init((void *)0);
2553 if (ret) {
2554 DPAA_SEC_ERR("Failure in affining portal");
2555 return ret;
2556 }
2557 }
2558 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2559 rte_dpaa_mem_vtop(&sess->cdb),
2560 qman_fq_fqid(&qp->outq));
2561 if (ret)
2562 DPAA_SEC_ERR("Unable to init sec queue");
2563
2564 return ret;
2565 }
2566
2567 static inline void
2568 free_session_data(dpaa_sec_session *s)
2569 {
2570 if (is_aead(s))
2571 rte_free(s->aead_key.data);
2572 else {
2573 rte_free(s->auth_key.data);
2574 rte_free(s->cipher_key.data);
2575 }
2576 memset(s, 0, sizeof(dpaa_sec_session));
2577 }
2578
2579 static int
2580 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2581 struct rte_crypto_sym_xform *xform, void *sess)
2582 {
2583 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2584 dpaa_sec_session *session = sess;
2585 uint32_t i;
2586 int ret;
2587
2588 PMD_INIT_FUNC_TRACE();
2589
2590 if (unlikely(sess == NULL)) {
2591 DPAA_SEC_ERR("invalid session struct");
2592 return -EINVAL;
2593 }
2594 memset(session, 0, sizeof(dpaa_sec_session));
2595
2596 /* Default IV length = 0 */
2597 session->iv.length = 0;
2598
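	/*
	 * Supported xform chains handled below:
	 *   CIPHER only, AUTH only, CIPHER(ENCRYPT) -> AUTH,
	 *   AUTH -> CIPHER(DECRYPT), and a single AEAD (e.g. AES-GCM).
	 * A NULL cipher or auth inside a chain degrades to the remaining
	 * single transform.
	 */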
2599 /* Cipher Only */
2600 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2601 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2602 ret = dpaa_sec_cipher_init(dev, xform, session);
2603
2604 /* Authentication Only */
2605 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2606 xform->next == NULL) {
2607 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2608 session->ctxt = DPAA_SEC_AUTH;
2609 ret = dpaa_sec_auth_init(dev, xform, session);
2610
2611 /* Cipher then Authenticate */
2612 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2613 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2614 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2615 session->auth_cipher_text = 1;
2616 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2617 ret = dpaa_sec_auth_init(dev, xform, session);
2618 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2619 ret = dpaa_sec_cipher_init(dev, xform, session);
2620 else
2621 ret = dpaa_sec_chain_init(dev, xform, session);
2622 } else {
2623 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2624 return -ENOTSUP;
2625 }
2626 /* Authenticate then Cipher */
2627 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2628 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2629 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2630 session->auth_cipher_text = 0;
2631 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2632 ret = dpaa_sec_cipher_init(dev, xform, session);
2633 else if (xform->next->cipher.algo
2634 == RTE_CRYPTO_CIPHER_NULL)
2635 ret = dpaa_sec_auth_init(dev, xform, session);
2636 else
2637 ret = dpaa_sec_chain_init(dev, xform, session);
2638 } else {
2639 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2640 return -ENOTSUP;
2641 }
2642
2643 /* AEAD operation for AES-GCM kind of Algorithms */
2644 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2645 xform->next == NULL) {
2646 ret = dpaa_sec_aead_init(dev, xform, session);
2647
2648 } else {
2649 DPAA_SEC_ERR("Invalid crypto type");
2650 return -EINVAL;
2651 }
2652 if (ret) {
2653 DPAA_SEC_ERR("unable to init session");
2654 goto err1;
2655 }
2656
2657 rte_spinlock_lock(&internals->lock);
2658 for (i = 0; i < MAX_DPAA_CORES; i++) {
2659 session->inq[i] = dpaa_sec_attach_rxq(internals);
2660 if (session->inq[i] == NULL) {
2661 DPAA_SEC_ERR("unable to attach sec queue");
2662 rte_spinlock_unlock(&internals->lock);
2663 ret = -EBUSY;
2664 goto err1;
2665 }
2666 }
2667 rte_spinlock_unlock(&internals->lock);
2668
2669 return 0;
2670
2671 err1:
2672 free_session_data(session);
2673 return ret;
2674 }
2675
2676 static int
2677 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2678 struct rte_crypto_sym_xform *xform,
2679 struct rte_cryptodev_sym_session *sess,
2680 struct rte_mempool *mempool)
2681 {
2682 void *sess_private_data;
2683 int ret;
2684
2685 PMD_INIT_FUNC_TRACE();
2686
2687 if (rte_mempool_get(mempool, &sess_private_data)) {
2688 DPAA_SEC_ERR("Couldn't get object from session mempool");
2689 return -ENOMEM;
2690 }
2691
2692 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2693 if (ret != 0) {
2694 DPAA_SEC_ERR("failed to configure session parameters");
2695
2696 /* Return session to mempool */
2697 rte_mempool_put(mempool, sess_private_data);
2698 return ret;
2699 }
2700
2701 set_sym_session_private_data(sess, dev->driver_id,
2702 sess_private_data);
2703
2704 ret = dpaa_sec_prep_cdb(sess_private_data);
2705 if (ret) {
2706 DPAA_SEC_ERR("Unable to prepare sec cdb");
2707 return ret;
2708 }
2709
2710 return 0;
2711 }
2712
2713 static inline void
2714 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2715 {
2716 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2717 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2718 uint8_t i;
2719
2720 for (i = 0; i < MAX_DPAA_CORES; i++) {
2721 if (s->inq[i])
2722 dpaa_sec_detach_rxq(qi, s->inq[i]);
2723 s->inq[i] = NULL;
2724 s->qp[i] = NULL;
2725 }
2726 free_session_data(s);
2727 rte_mempool_put(sess_mp, (void *)s);
2728 }
2729
2730 /** Clear the memory of session so it doesn't leave key material behind */
2731 static void
2732 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2733 struct rte_cryptodev_sym_session *sess)
2734 {
2735 PMD_INIT_FUNC_TRACE();
2736 uint8_t index = dev->driver_id;
2737 void *sess_priv = get_sym_session_private_data(sess, index);
2738 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2739
2740 if (sess_priv) {
2741 free_session_memory(dev, s);
2742 set_sym_session_private_data(sess, index, NULL);
2743 }
2744 }
2745
2746 #ifdef RTE_LIB_SECURITY
2747 static int
2748 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2749 struct rte_security_ipsec_xform *ipsec_xform,
2750 dpaa_sec_session *session)
2751 {
2752 PMD_INIT_FUNC_TRACE();
2753
2754 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2755 RTE_CACHE_LINE_SIZE);
2756 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2757 DPAA_SEC_ERR("No Memory for aead key");
2758 return -ENOMEM;
2759 }
2760 memcpy(session->aead_key.data, aead_xform->key.data,
2761 aead_xform->key.length);
2762
2763 session->digest_length = aead_xform->digest_length;
2764 session->aead_key.length = aead_xform->key.length;
2765
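	/*
	 * For AES-GCM the IPsec protocol descriptor variant is selected by
	 * the ICV (digest) length, and the 4-byte salt from the IPsec xform
	 * is copied into the encap/decap PDB.
	 */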
2766 switch (aead_xform->algo) {
2767 case RTE_CRYPTO_AEAD_AES_GCM:
2768 switch (session->digest_length) {
2769 case 8:
2770 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2771 break;
2772 case 12:
2773 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2774 break;
2775 case 16:
2776 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2777 break;
2778 default:
2779 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2780 session->digest_length);
2781 return -EINVAL;
2782 }
2783 if (session->dir == DIR_ENC) {
2784 memcpy(session->encap_pdb.gcm.salt,
2785 (uint8_t *)&(ipsec_xform->salt), 4);
2786 } else {
2787 memcpy(session->decap_pdb.gcm.salt,
2788 (uint8_t *)&(ipsec_xform->salt), 4);
2789 }
2790 session->aead_key.algmode = OP_ALG_AAI_GCM;
2791 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2792 break;
2793 default:
2794 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2795 aead_xform->algo);
2796 return -ENOTSUP;
2797 }
2798 return 0;
2799 }
2800
2801 static int
2802 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2803 struct rte_crypto_auth_xform *auth_xform,
2804 struct rte_security_ipsec_xform *ipsec_xform,
2805 dpaa_sec_session *session)
2806 {
2807 if (cipher_xform) {
2808 session->cipher_key.data = rte_zmalloc(NULL,
2809 cipher_xform->key.length,
2810 RTE_CACHE_LINE_SIZE);
2811 if (session->cipher_key.data == NULL &&
2812 cipher_xform->key.length > 0) {
2813 DPAA_SEC_ERR("No Memory for cipher key");
2814 return -ENOMEM;
2815 }
2816
2817 session->cipher_key.length = cipher_xform->key.length;
2818 memcpy(session->cipher_key.data, cipher_xform->key.data,
2819 cipher_xform->key.length);
2820 session->cipher_alg = cipher_xform->algo;
2821 } else {
2822 session->cipher_key.data = NULL;
2823 session->cipher_key.length = 0;
2824 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2825 }
2826
2827 if (auth_xform) {
2828 session->auth_key.data = rte_zmalloc(NULL,
2829 auth_xform->key.length,
2830 RTE_CACHE_LINE_SIZE);
2831 if (session->auth_key.data == NULL &&
2832 auth_xform->key.length > 0) {
2833 DPAA_SEC_ERR("No Memory for auth key");
2834 return -ENOMEM;
2835 }
2836 session->auth_key.length = auth_xform->key.length;
2837 memcpy(session->auth_key.data, auth_xform->key.data,
2838 auth_xform->key.length);
2839 session->auth_alg = auth_xform->algo;
2840 session->digest_length = auth_xform->digest_length;
2841 } else {
2842 session->auth_key.data = NULL;
2843 session->auth_key.length = 0;
2844 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2845 }
2846
2847 switch (session->auth_alg) {
2848 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2849 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2850 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2851 break;
2852 case RTE_CRYPTO_AUTH_MD5_HMAC:
2853 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2854 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2855 break;
2856 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2857 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2858 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2859 if (session->digest_length != 16)
2860 			DPAA_SEC_WARN(
2861 				"Using a non-standard truncated length with sha256-hmac; "
2862 				"it will not work with lookaside protocol offload");
2863 break;
2864 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2865 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2866 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2867 break;
2868 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2869 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2870 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2871 break;
2872 case RTE_CRYPTO_AUTH_AES_CMAC:
2873 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2874 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2875 break;
2876 case RTE_CRYPTO_AUTH_NULL:
2877 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2878 break;
2879 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2880 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2881 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2882 break;
2883 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2884 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2885 case RTE_CRYPTO_AUTH_SHA1:
2886 case RTE_CRYPTO_AUTH_SHA256:
2887 case RTE_CRYPTO_AUTH_SHA512:
2888 case RTE_CRYPTO_AUTH_SHA224:
2889 case RTE_CRYPTO_AUTH_SHA384:
2890 case RTE_CRYPTO_AUTH_MD5:
2891 case RTE_CRYPTO_AUTH_AES_GMAC:
2892 case RTE_CRYPTO_AUTH_KASUMI_F9:
2893 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2894 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2895 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2896 session->auth_alg);
2897 return -ENOTSUP;
2898 default:
2899 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2900 session->auth_alg);
2901 return -ENOTSUP;
2902 }
2903
2904 switch (session->cipher_alg) {
2905 case RTE_CRYPTO_CIPHER_AES_CBC:
2906 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2907 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2908 break;
2909 case RTE_CRYPTO_CIPHER_DES_CBC:
2910 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2911 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2912 break;
2913 case RTE_CRYPTO_CIPHER_3DES_CBC:
2914 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2915 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2916 break;
2917 case RTE_CRYPTO_CIPHER_AES_CTR:
2918 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2919 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2920 if (session->dir == DIR_ENC) {
2921 session->encap_pdb.ctr.ctr_initial = 0x00000001;
2922 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2923 } else {
2924 session->decap_pdb.ctr.ctr_initial = 0x00000001;
2925 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2926 }
2927 break;
2928 case RTE_CRYPTO_CIPHER_NULL:
2929 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2930 break;
2931 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2932 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2933 case RTE_CRYPTO_CIPHER_3DES_ECB:
2934 case RTE_CRYPTO_CIPHER_AES_ECB:
2935 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2936 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2937 session->cipher_alg);
2938 return -ENOTSUP;
2939 default:
2940 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2941 session->cipher_alg);
2942 return -ENOTSUP;
2943 }
2944
2945 return 0;
2946 }
2947
2948 static int
2949 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2950 struct rte_security_session_conf *conf,
2951 void *sess)
2952 {
2953 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2954 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2955 struct rte_crypto_auth_xform *auth_xform = NULL;
2956 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2957 struct rte_crypto_aead_xform *aead_xform = NULL;
2958 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2959 uint32_t i;
2960 int ret;
2961
2962 PMD_INIT_FUNC_TRACE();
2963
2964 memset(session, 0, sizeof(dpaa_sec_session));
2965 session->proto_alg = conf->protocol;
2966 session->ctxt = DPAA_SEC_IPSEC;
2967
2968 if (ipsec_xform->life.bytes_hard_limit != 0 ||
2969 ipsec_xform->life.bytes_soft_limit != 0 ||
2970 ipsec_xform->life.packets_hard_limit != 0 ||
2971 ipsec_xform->life.packets_soft_limit != 0)
2972 return -ENOTSUP;
2973
2974 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2975 session->dir = DIR_ENC;
2976 else
2977 session->dir = DIR_DEC;
2978
2979 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2980 cipher_xform = &conf->crypto_xform->cipher;
2981 if (conf->crypto_xform->next)
2982 auth_xform = &conf->crypto_xform->next->auth;
2983 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2984 ipsec_xform, session);
2985 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2986 auth_xform = &conf->crypto_xform->auth;
2987 if (conf->crypto_xform->next)
2988 cipher_xform = &conf->crypto_xform->next->cipher;
2989 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2990 ipsec_xform, session);
2991 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2992 aead_xform = &conf->crypto_xform->aead;
2993 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2994 ipsec_xform, session);
2995 } else {
2996 DPAA_SEC_ERR("XFORM not specified");
2997 ret = -EINVAL;
2998 goto out;
2999 }
3000 if (ret) {
3001 DPAA_SEC_ERR("Failed to process xform");
3002 goto out;
3003 }
3004
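	/*
	 * For egress tunnel mode the outer IP header is prebuilt here and
	 * kept in the session; PDBOPTS_ESP_OIHI_PDB_INL below tells SEC to
	 * take that header inline from the PDB during encapsulation.
	 */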
3005 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3006 if (ipsec_xform->tunnel.type ==
3007 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3008 session->ip4_hdr.ip_v = IPVERSION;
3009 session->ip4_hdr.ip_hl = 5;
3010 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
3011 sizeof(session->ip4_hdr));
3012 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
3013 session->ip4_hdr.ip_id = 0;
3014 session->ip4_hdr.ip_off = 0;
3015 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
3016 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
3017 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3018 IPPROTO_ESP : IPPROTO_AH;
3019 session->ip4_hdr.ip_sum = 0;
3020 session->ip4_hdr.ip_src =
3021 ipsec_xform->tunnel.ipv4.src_ip;
3022 session->ip4_hdr.ip_dst =
3023 ipsec_xform->tunnel.ipv4.dst_ip;
3024 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
3025 (void *)&session->ip4_hdr,
3026 sizeof(struct ip));
3027 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
3028 } else if (ipsec_xform->tunnel.type ==
3029 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3030 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
3031 DPAA_IPv6_DEFAULT_VTC_FLOW |
3032 ((ipsec_xform->tunnel.ipv6.dscp <<
3033 RTE_IPV6_HDR_TC_SHIFT) &
3034 RTE_IPV6_HDR_TC_MASK) |
3035 ((ipsec_xform->tunnel.ipv6.flabel <<
3036 RTE_IPV6_HDR_FL_SHIFT) &
3037 RTE_IPV6_HDR_FL_MASK));
3038 /* Payload length will be updated by HW */
3039 session->ip6_hdr.payload_len = 0;
3040 session->ip6_hdr.hop_limits =
3041 ipsec_xform->tunnel.ipv6.hlimit;
3042 session->ip6_hdr.proto = (ipsec_xform->proto ==
3043 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3044 IPPROTO_ESP : IPPROTO_AH;
3045 memcpy(&session->ip6_hdr.src_addr,
3046 &ipsec_xform->tunnel.ipv6.src_addr, 16);
3047 memcpy(&session->ip6_hdr.dst_addr,
3048 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
3049 session->encap_pdb.ip_hdr_len =
3050 sizeof(struct rte_ipv6_hdr);
3051 }
3052
3053 session->encap_pdb.options =
3054 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3055 PDBOPTS_ESP_OIHI_PDB_INL |
3056 PDBOPTS_ESP_IVSRC |
3057 PDBHMO_ESP_SNR;
3058 if (ipsec_xform->options.dec_ttl)
3059 session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3060 if (ipsec_xform->options.esn)
3061 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
3062 session->encap_pdb.spi = ipsec_xform->spi;
3063
3064 } else if (ipsec_xform->direction ==
3065 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3066 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
3067 session->decap_pdb.options = sizeof(struct ip) << 16;
3068 else
3069 session->decap_pdb.options =
3070 sizeof(struct rte_ipv6_hdr) << 16;
3071 if (ipsec_xform->options.esn)
3072 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
3073 if (ipsec_xform->replay_win_sz) {
3074 uint32_t win_sz;
3075 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3076
3077 switch (win_sz) {
3078 case 1:
3079 case 2:
3080 case 4:
3081 case 8:
3082 case 16:
3083 case 32:
3084 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
3085 break;
3086 case 64:
3087 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
3088 break;
3089 default:
3090 session->decap_pdb.options |=
3091 PDBOPTS_ESP_ARS128;
3092 }
3093 }
3094 } else
3095 goto out;
3096 rte_spinlock_lock(&internals->lock);
3097 for (i = 0; i < MAX_DPAA_CORES; i++) {
3098 session->inq[i] = dpaa_sec_attach_rxq(internals);
3099 if (session->inq[i] == NULL) {
3100 DPAA_SEC_ERR("unable to attach sec queue");
3101 rte_spinlock_unlock(&internals->lock);
3102 goto out;
3103 }
3104 }
3105 rte_spinlock_unlock(&internals->lock);
3106
3107 return 0;
3108 out:
3109 free_session_data(session);
3110 return -1;
3111 }
3112
3113 static int
3114 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
3115 struct rte_security_session_conf *conf,
3116 void *sess)
3117 {
3118 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3119 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3120 struct rte_crypto_auth_xform *auth_xform = NULL;
3121 struct rte_crypto_cipher_xform *cipher_xform = NULL;
3122 dpaa_sec_session *session = (dpaa_sec_session *)sess;
3123 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
3124 uint32_t i;
3125 int ret;
3126
3127 PMD_INIT_FUNC_TRACE();
3128
3129 memset(session, 0, sizeof(dpaa_sec_session));
3130
3131 /* find xfrm types */
3132 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3133 cipher_xform = &xform->cipher;
3134 if (xform->next != NULL &&
3135 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
3136 auth_xform = &xform->next->auth;
3137 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3138 auth_xform = &xform->auth;
3139 if (xform->next != NULL &&
3140 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
3141 cipher_xform = &xform->next->cipher;
3142 } else {
3143 DPAA_SEC_ERR("Invalid crypto type");
3144 return -EINVAL;
3145 }
3146
3147 session->proto_alg = conf->protocol;
3148 session->ctxt = DPAA_SEC_PDCP;
3149
3150 if (cipher_xform) {
3151 switch (cipher_xform->algo) {
3152 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3153 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3154 break;
3155 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3156 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3157 break;
3158 case RTE_CRYPTO_CIPHER_AES_CTR:
3159 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3160 break;
3161 case RTE_CRYPTO_CIPHER_NULL:
3162 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3163 break;
3164 default:
3165 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3166 session->cipher_alg);
3167 return -EINVAL;
3168 }
3169
3170 session->cipher_key.data = rte_zmalloc(NULL,
3171 cipher_xform->key.length,
3172 RTE_CACHE_LINE_SIZE);
3173 if (session->cipher_key.data == NULL &&
3174 cipher_xform->key.length > 0) {
3175 DPAA_SEC_ERR("No Memory for cipher key");
3176 return -ENOMEM;
3177 }
3178 session->cipher_key.length = cipher_xform->key.length;
3179 memcpy(session->cipher_key.data, cipher_xform->key.data,
3180 cipher_xform->key.length);
3181 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3182 DIR_ENC : DIR_DEC;
3183 session->cipher_alg = cipher_xform->algo;
3184 } else {
3185 session->cipher_key.data = NULL;
3186 session->cipher_key.length = 0;
3187 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3188 session->dir = DIR_ENC;
3189 }
3190
3191 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3192 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3193 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3194 DPAA_SEC_ERR(
3195 "PDCP Seq Num size should be 5/12 bits for cmode");
3196 ret = -EINVAL;
3197 goto out;
3198 }
3199 }
3200
3201 if (auth_xform) {
3202 switch (auth_xform->algo) {
3203 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3204 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3205 break;
3206 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3207 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3208 break;
3209 case RTE_CRYPTO_AUTH_AES_CMAC:
3210 session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3211 break;
3212 case RTE_CRYPTO_AUTH_NULL:
3213 session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3214 break;
3215 default:
3216 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3217 session->auth_alg);
3218 rte_free(session->cipher_key.data);
3219 return -EINVAL;
3220 }
3221 session->auth_key.data = rte_zmalloc(NULL,
3222 auth_xform->key.length,
3223 RTE_CACHE_LINE_SIZE);
3224 if (!session->auth_key.data &&
3225 auth_xform->key.length > 0) {
3226 DPAA_SEC_ERR("No Memory for auth key");
3227 rte_free(session->cipher_key.data);
3228 return -ENOMEM;
3229 }
3230 session->auth_key.length = auth_xform->key.length;
3231 memcpy(session->auth_key.data, auth_xform->key.data,
3232 auth_xform->key.length);
3233 session->auth_alg = auth_xform->algo;
3234 } else {
3235 session->auth_key.data = NULL;
3236 session->auth_key.length = 0;
3237 session->auth_alg = 0;
3238 }
3239 session->pdcp.domain = pdcp_xform->domain;
3240 session->pdcp.bearer = pdcp_xform->bearer;
3241 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3242 session->pdcp.sn_size = pdcp_xform->sn_size;
3243 session->pdcp.hfn = pdcp_xform->hfn;
3244 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3245 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3246 session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
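	/*
	 * When HFN override is enabled, the enqueue path reads the
	 * per-packet HFN from the crypto op at this offset (the cipher IV
	 * offset); see dpaa_sec_enqueue_burst().
	 */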
3247 if (cipher_xform)
3248 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3249
3250 rte_spinlock_lock(&dev_priv->lock);
3251 for (i = 0; i < MAX_DPAA_CORES; i++) {
3252 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3253 if (session->inq[i] == NULL) {
3254 DPAA_SEC_ERR("unable to attach sec queue");
3255 rte_spinlock_unlock(&dev_priv->lock);
3256 ret = -EBUSY;
3257 goto out;
3258 }
3259 }
3260 rte_spinlock_unlock(&dev_priv->lock);
3261 return 0;
3262 out:
3263 rte_free(session->auth_key.data);
3264 rte_free(session->cipher_key.data);
3265 memset(session, 0, sizeof(dpaa_sec_session));
3266 return ret;
3267 }
3268
3269 static int
3270 dpaa_sec_security_session_create(void *dev,
3271 struct rte_security_session_conf *conf,
3272 struct rte_security_session *sess,
3273 struct rte_mempool *mempool)
3274 {
3275 void *sess_private_data;
3276 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3277 int ret;
3278
3279 if (rte_mempool_get(mempool, &sess_private_data)) {
3280 DPAA_SEC_ERR("Couldn't get object from session mempool");
3281 return -ENOMEM;
3282 }
3283
3284 switch (conf->protocol) {
3285 case RTE_SECURITY_PROTOCOL_IPSEC:
3286 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3287 sess_private_data);
3288 break;
3289 case RTE_SECURITY_PROTOCOL_PDCP:
3290 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3291 sess_private_data);
3292 break;
3293 	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* Return the unused session object back to the mempool */
		rte_mempool_put(mempool, sess_private_data);
3294 		return -ENOTSUP;
3295 	default:
		rte_mempool_put(mempool, sess_private_data);
3296 		return -EINVAL;
3297 }
3298 if (ret != 0) {
3299 DPAA_SEC_ERR("failed to configure session parameters");
3300 /* Return session to mempool */
3301 rte_mempool_put(mempool, sess_private_data);
3302 return ret;
3303 }
3304
3305 set_sec_session_private_data(sess, sess_private_data);
3306
3307 ret = dpaa_sec_prep_cdb(sess_private_data);
3308 if (ret) {
3309 DPAA_SEC_ERR("Unable to prepare sec cdb");
3310 return ret;
3311 }
3312
3313 return ret;
3314 }
3315
3316 /** Clear the memory of session so it doesn't leave key material behind */
3317 static int
3318 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3319 struct rte_security_session *sess)
3320 {
3321 PMD_INIT_FUNC_TRACE();
3322 void *sess_priv = get_sec_session_private_data(sess);
3323 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3324
3325 if (sess_priv) {
3326 free_session_memory((struct rte_cryptodev *)dev, s);
3327 set_sec_session_private_data(sess, NULL);
3328 }
3329 return 0;
3330 }
3331 #endif
3332 static int
3333 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3334 struct rte_cryptodev_config *config __rte_unused)
3335 {
3336 PMD_INIT_FUNC_TRACE();
3337
3338 return 0;
3339 }
3340
3341 static int
3342 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3343 {
3344 PMD_INIT_FUNC_TRACE();
3345 return 0;
3346 }
3347
3348 static void
3349 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3350 {
3351 PMD_INIT_FUNC_TRACE();
3352 }
3353
3354 static int
3355 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3356 {
3357 PMD_INIT_FUNC_TRACE();
3358
3359 if (dev == NULL)
3360 		return -ENODEV;
3361
3362 return 0;
3363 }
3364
3365 static void
3366 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3367 struct rte_cryptodev_info *info)
3368 {
3369 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3370
3371 PMD_INIT_FUNC_TRACE();
3372 if (info != NULL) {
3373 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3374 info->feature_flags = dev->feature_flags;
3375 info->capabilities = dpaa_sec_capabilities;
3376 info->sym.max_nb_sessions = internals->max_nb_sessions;
3377 info->driver_id = dpaa_cryptodev_driver_id;
3378 }
3379 }
3380
3381 static enum qman_cb_dqrr_result
3382 dpaa_sec_process_parallel_event(void *event,
3383 struct qman_portal *qm __always_unused,
3384 struct qman_fq *outq,
3385 const struct qm_dqrr_entry *dqrr,
3386 void **bufs)
3387 {
3388 const struct qm_fd *fd;
3389 struct dpaa_sec_job *job;
3390 struct dpaa_sec_op_ctx *ctx;
3391 struct rte_event *ev = (struct rte_event *)event;
3392
3393 fd = &dqrr->fd;
3394
3395 /* sg is embedded in an op ctx,
3396 * sg[0] is for output
3397 * sg[1] for input
3398 */
3399 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3400
3401 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3402 ctx->fd_status = fd->status;
3403 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3404 struct qm_sg_entry *sg_out;
3405 uint32_t len;
3406
3407 sg_out = &job->sg[0];
3408 hw_sg_to_cpu(sg_out);
3409 len = sg_out->length;
3410 ctx->op->sym->m_src->pkt_len = len;
3411 ctx->op->sym->m_src->data_len = len;
3412 }
3413 if (!ctx->fd_status) {
3414 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3415 } else {
3416 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3417 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3418 }
3419 ev->event_ptr = (void *)ctx->op;
3420
3421 ev->flow_id = outq->ev.flow_id;
3422 ev->sub_event_type = outq->ev.sub_event_type;
3423 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3424 ev->op = RTE_EVENT_OP_NEW;
3425 ev->sched_type = outq->ev.sched_type;
3426 ev->queue_id = outq->ev.queue_id;
3427 ev->priority = outq->ev.priority;
3428 *bufs = (void *)ctx->op;
3429
3430 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3431
3432 return qman_cb_dqrr_consume;
3433 }
3434
3435 static enum qman_cb_dqrr_result
3436 dpaa_sec_process_atomic_event(void *event,
3437 struct qman_portal *qm __rte_unused,
3438 struct qman_fq *outq,
3439 const struct qm_dqrr_entry *dqrr,
3440 void **bufs)
3441 {
3442 u8 index;
3443 const struct qm_fd *fd;
3444 struct dpaa_sec_job *job;
3445 struct dpaa_sec_op_ctx *ctx;
3446 struct rte_event *ev = (struct rte_event *)event;
3447
3448 fd = &dqrr->fd;
3449
3450 /* sg is embedded in an op ctx,
3451 * sg[0] is for output
3452 * sg[1] for input
3453 */
3454 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3455
3456 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3457 ctx->fd_status = fd->status;
3458 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3459 struct qm_sg_entry *sg_out;
3460 uint32_t len;
3461
3462 sg_out = &job->sg[0];
3463 hw_sg_to_cpu(sg_out);
3464 len = sg_out->length;
3465 ctx->op->sym->m_src->pkt_len = len;
3466 ctx->op->sym->m_src->data_len = len;
3467 }
3468 if (!ctx->fd_status) {
3469 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3470 } else {
3471 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3472 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3473 }
3474 ev->event_ptr = (void *)ctx->op;
3475 ev->flow_id = outq->ev.flow_id;
3476 ev->sub_event_type = outq->ev.sub_event_type;
3477 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3478 ev->op = RTE_EVENT_OP_NEW;
3479 ev->sched_type = outq->ev.sched_type;
3480 ev->queue_id = outq->ev.queue_id;
3481 ev->priority = outq->ev.priority;
3482
3483 /* Save active dqrr entries */
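	/* DQRR entries are 64 B apart, so (dqrr >> 6) masked with the ring
	 * size gives the entry index. It is stored as index + 1 in
	 * dpaa_seqn() so that 0 keeps meaning "no held entry"; the enqueue
	 * path turns it into a DCA enqueue (see dpaa_sec_enqueue_burst).
	 */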
3484 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3485 DPAA_PER_LCORE_DQRR_SIZE++;
3486 DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3487 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3488 ev->impl_opaque = index + 1;
3489 *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3490 *bufs = (void *)ctx->op;
3491
3492 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3493
3494 return qman_cb_dqrr_defer;
3495 }
3496
3497 int
3498 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3499 int qp_id,
3500 uint16_t ch_id,
3501 const struct rte_event *event)
3502 {
3503 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3504 struct qm_mcc_initfq opts = {0};
3505
3506 int ret;
3507
3508 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3509 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3510 opts.fqd.dest.channel = ch_id;
3511
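	/*
	 * ATOMIC scheduling maps to QMan HOLD_ACTIVE: the FQ stays bound to
	 * the dequeuing portal until the held DQRR entry is consumed, which
	 * preserves per-flow ordering. PARALLEL (the default case) uses
	 * AVOIDBLOCK instead.
	 */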
3512 switch (event->sched_type) {
3513 case RTE_SCHED_TYPE_ATOMIC:
3514 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3515 		/* Reset the FQCTRL_AVOIDBLOCK bit, as it is an unnecessary
3516 		 * configuration when combined with the HOLD_ACTIVE setting.
3517 		 */
3518 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3519 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3520 break;
3521 case RTE_SCHED_TYPE_ORDERED:
3522 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
3523 return -ENOTSUP;
3524 default:
3525 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3526 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3527 break;
3528 }
3529
3530 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3531 if (unlikely(ret)) {
3532 DPAA_SEC_ERR("unable to init caam source fq!");
3533 return ret;
3534 }
3535
3536 memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3537
3538 return 0;
3539 }
3540
3541 int
3542 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3543 int qp_id)
3544 {
3545 struct qm_mcc_initfq opts = {0};
3546 int ret;
3547 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3548
3549 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3550 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3551 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3552 qp->outq.cb.ern = ern_sec_fq_handler;
3553 qman_retire_fq(&qp->outq, NULL);
3554 qman_oos_fq(&qp->outq);
3555 ret = qman_init_fq(&qp->outq, 0, &opts);
3556 if (ret)
3557 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3558 qp->outq.cb.dqrr = NULL;
3559
3560 return ret;
3561 }
3562
3563 static struct rte_cryptodev_ops crypto_ops = {
3564 .dev_configure = dpaa_sec_dev_configure,
3565 .dev_start = dpaa_sec_dev_start,
3566 .dev_stop = dpaa_sec_dev_stop,
3567 .dev_close = dpaa_sec_dev_close,
3568 .dev_infos_get = dpaa_sec_dev_infos_get,
3569 .queue_pair_setup = dpaa_sec_queue_pair_setup,
3570 .queue_pair_release = dpaa_sec_queue_pair_release,
3571 .sym_session_get_size = dpaa_sec_sym_session_get_size,
3572 .sym_session_configure = dpaa_sec_sym_session_configure,
3573 .sym_session_clear = dpaa_sec_sym_session_clear,
3574 /* Raw data-path API related operations */
3575 .sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
3576 .sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
3577 };
3578
3579 #ifdef RTE_LIB_SECURITY
3580 static const struct rte_security_capability *
3581 dpaa_sec_capabilities_get(void *device __rte_unused)
3582 {
3583 return dpaa_sec_security_cap;
3584 }
3585
3586 static const struct rte_security_ops dpaa_sec_security_ops = {
3587 .session_create = dpaa_sec_security_session_create,
3588 .session_update = NULL,
3589 .session_stats_get = NULL,
3590 .session_destroy = dpaa_sec_security_session_destroy,
3591 .set_pkt_metadata = NULL,
3592 .capabilities_get = dpaa_sec_capabilities_get
3593 };
3594 #endif
3595 static int
3596 dpaa_sec_uninit(struct rte_cryptodev *dev)
3597 {
3598 struct dpaa_sec_dev_private *internals;
3599
3600 if (dev == NULL)
3601 return -ENODEV;
3602
3603 internals = dev->data->dev_private;
3604 rte_free(dev->security_ctx);
3605
3606 rte_free(internals);
3607
3608 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3609 dev->data->name, rte_socket_id());
3610
3611 return 0;
3612 }
3613
3614 static int
3615 check_devargs_handler(__rte_unused const char *key, const char *value,
3616 __rte_unused void *opaque)
3617 {
3618 dpaa_sec_dp_dump = atoi(value);
3619 if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
3620 		DPAA_SEC_WARN("Unsupported DPAA_SEC_DP_DUMP level, "
3621 			      "defaulting to full dump\n");
3622 dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
3623 }
3624
3625 return 0;
3626 }
3627
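/*
 * Illustrative devargs usage (exact device naming depends on how the DPAA
 * bus exposes the device), e.g.:
 *	... ,drv_dump_mode=<level>
 * where <level> is one of enum dpaa_sec_dump_levels; anything above
 * DPAA_SEC_DP_FULL_DUMP is clamped to a full dump by check_devargs_handler().
 */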
3628 static void
3629 dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key)
3630 {
3631 struct rte_kvargs *kvlist;
3632
3633 if (!devargs)
3634 return;
3635
3636 kvlist = rte_kvargs_parse(devargs->args, NULL);
3637 if (!kvlist)
3638 return;
3639
3640 if (!rte_kvargs_count(kvlist, key)) {
3641 rte_kvargs_free(kvlist);
3642 return;
3643 }
3644
3645 rte_kvargs_process(kvlist, key,
3646 check_devargs_handler, NULL);
3647 rte_kvargs_free(kvlist);
3648 }
3649
3650 static int
3651 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3652 {
3653 struct dpaa_sec_dev_private *internals;
3654 #ifdef RTE_LIB_SECURITY
3655 struct rte_security_ctx *security_instance;
3656 #endif
3657 struct dpaa_sec_qp *qp;
3658 uint32_t i, flags;
3659 int ret;
3660 void *cmd_map;
3661 int map_fd = -1;
3662
3663 PMD_INIT_FUNC_TRACE();
3664
3665 internals = cryptodev->data->dev_private;
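	/*
	 * Map the SEC controller register block through /dev/mem just long
	 * enough to check that the queue interface (QI) is enabled (and
	 * enable it if not), so jobs can be fed through QMan frame queues;
	 * the mapping is released once this is done.
	 */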
3666 map_fd = open("/dev/mem", O_RDWR);
3667 if (unlikely(map_fd < 0)) {
3668 DPAA_SEC_ERR("Unable to open (/dev/mem)");
3669 return map_fd;
3670 }
3671 internals->sec_hw = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
3672 MAP_SHARED, map_fd, SEC_BASE_ADDR);
3673 if (internals->sec_hw == MAP_FAILED) {
3674 DPAA_SEC_ERR("Memory map failed");
3675 close(map_fd);
3676 return -EINVAL;
3677 }
3678 cmd_map = (uint8_t *)internals->sec_hw +
3679 (BLOCK_OFFSET * QI_BLOCK_NUMBER) + CMD_REG;
3680 if (!(be32_to_cpu(rte_read32(cmd_map)) & QICTL_DQEN))
3681 /* enable QI interface */
3682 rte_write32(cpu_to_be32(QICTL_DQEN), cmd_map);
3683
3684 ret = munmap(internals->sec_hw, MAP_SIZE);
3685 if (ret)
3686 DPAA_SEC_WARN("munmap failed\n");
3687
3688 close(map_fd);
3689 cryptodev->driver_id = dpaa_cryptodev_driver_id;
3690 cryptodev->dev_ops = &crypto_ops;
3691
3692 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3693 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3694 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3695 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3696 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3697 RTE_CRYPTODEV_FF_SECURITY |
3698 RTE_CRYPTODEV_FF_SYM_RAW_DP |
3699 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3700 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3701 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3702 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3703 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3704
3705 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3706 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already initialized by primary process");
		return 0;
	}
#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}
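/*
 * Illustrative application-side sketch (not part of this driver). It shows
 * the standard cryptodev calls that consume the limits and feature flags
 * published by dpaa_sec_dev_init() above; "dev_id" and "session_pool" are
 * assumed to exist, and field names follow the generic cryptodev API rather
 * than anything specific to this PMD:
 *
 *     struct rte_cryptodev_config conf = {
 *             .socket_id = rte_socket_id(),
 *             .nb_queue_pairs = 1,    // must not exceed max_nb_queue_pairs
 *     };
 *     struct rte_cryptodev_qp_conf qp_conf = {
 *             .nb_descriptors = 2048,
 *             .mp_session = session_pool,
 *     };
 *     rte_cryptodev_configure(dev_id, &conf);
 *     rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 *     rte_cryptodev_start(dev_id);
 */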

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	cryptodev->data->dev_private = rte_zmalloc_socket(
				"cryptodev private structure",
				sizeof(struct dpaa_sec_dev_private),
				RTE_CACHE_LINE_SIZE,
				rte_socket_id());

	if (cryptodev->data->dev_private == NULL)
		rte_panic("Cannot allocate memzone for private "
				"device data");

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0) {
		rte_cryptodev_pmd_probing_finish(cryptodev);
		return 0;
	}

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		dpaa_cryptodev_driver_id);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
		DRIVER_DUMP_MODE "=<int>");
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
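
/*
 * Illustrative note (not part of the driver): the log type registered above
 * can be raised at runtime through the standard EAL option, e.g.
 *
 *     dpdk-app --log-level=pmd.crypto.dpaa,debug ...
 *
 * The application name is a placeholder; only the "pmd.crypto.dpaa" log
 * type name comes from this file.
 */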