/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2022 NXP
 */

#include <cryptodev_pmd.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

#include <desc/algo.h>

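/*
 * Per-queue-pair driver context for the raw data-path API, stored in
 * rte_crypto_raw_dp_ctx->drv_ctx_data. Only the session pointer is used in
 * this file; the head/tail and cached counters are currently unused here.
 */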
struct dpaa2_sec_raw_dp_ctx {
	dpaa2_sec_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

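/*
 * Build a compound frame descriptor for a chained cipher + auth operation.
 * The FLE table allocated here is laid out as:
 *   fle[0]        - stores the userdata pointer and the session ctxt
 *                   (recovered and freed on dequeue in sec_fd_to_userdata())
 *   fle[1]        - output frame list entry (op_fle)
 *   fle[2]        - input frame list entry (ip_fle)
 *   fle[3] onward - scatter/gather entries for output and input
 * auth_only_len packs the auth-only tail length in the upper 16 bits and the
 * auth-only header length in the lower 16 bits before being written into the
 * internal job descriptor fields of the FLEs and the FD.
 */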
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int data_len = 0, auth_len = 0, cipher_len = 0;
	unsigned int i = 0;
	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
				ofs.ofs.auth.head;

	uint16_t auth_tail_len;
	uint32_t auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *iv_ptr = iv->va;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	auth_tail_len = auth_len - cipher_len - auth_hdr_len;
	auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	/* first FLE entry used to store session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(cipher_len + icv_len) :
			cipher_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
		sge->length -= ofs.ofs.cipher.tail;
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
		sge->length -= ofs.ofs.cipher.tail;
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	ip_fle->length = (sess->dir == DIR_ENC) ?
			(auth_len + sess->iv.length) :
			(auth_len + sess->iv.length + icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

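/*
 * Build a compound frame descriptor for an AEAD (e.g. GCM) operation. The
 * FLE table layout is the same as in build_raw_dp_chain_fd(). When the
 * session carries additional authenticated data (auth_only_len != 0), the
 * AAD is taken from the auth_iv pointer and gathered ahead of the payload on
 * the input side.
 */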
static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *IV_ptr = iv->va;
	unsigned int i = 0;
	int data_len = 0, aead_len = 0;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(aead_len + icv_len) :
			aead_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(aead_len + sess->iv.length + auth_only_len) :
			(aead_len + sess->iv.length + auth_only_len +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

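/*
 * Build a compound frame descriptor for an auth-only operation. The output
 * FLE points directly at the digest buffer; the input FLE gathers an
 * optional IV (converted for SNOW3G F9 / ZUC EIA3), the data to be
 * authenticated and, for verification (DIR_DEC), a copy of the received
 * digest placed right after the SG table.
 */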
static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(dest_sgl);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int total_len = 0, data_len = 0, data_offset;
	uint8_t *old_digest;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	data_offset = ofs.ofs.auth.head;

	/* For SNOW3G and ZUC, only lengths in bits are supported */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(digest->va));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
						sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);

	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = sgl->vec[0].len - data_offset;
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}
	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, digest->va,
			sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

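/*
 * Build a frame descriptor for protocol-offload (IPsec/PDCP) sessions. The
 * whole source (and, for out-of-place, destination) buffers are handed to
 * SEC without cipher/auth offsets. For PDCP sessions with HFN override
 * enabled, the per-packet HFN read at hfn_ovd_offset past the userdata
 * pointer is written into the internal job descriptor fields of the FD and
 * both FLEs.
 */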
static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t in_len = 0, out_len = 0, i;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	DPAA2_SET_FD_IVP(fd);
	DPAA2_SET_FLE_IVP(op_fle);
	DPAA2_SET_FLE_IVP(ip_fle);

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = dest_sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = dest_sgl->vec[i - 1].tot_len;

	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = sgl->vec[i - 1].tot_len;
	}
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sgl->vec[0].len;
	in_len += sge->length;
	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
		in_len += sge->length;
	}

	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, the per-packet HFN is stored in the mbuf
	 * private area after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

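/*
 * Build a compound frame descriptor for a cipher-only operation. The output
 * FLE describes the (possibly out-of-place) destination at the cipher
 * offset; the input FLE gathers the IV followed by the source data.
 */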
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int total_len = 0, data_len = 0, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;

	/* For SNOW3G and ZUC, only lengths in bits are supported */
	/* first FLE entry used to store userdata and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
		data_offset,
		data_len,
		sess->iv.length);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* OOP */
	if (dest_sgl) {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, data_offset);
		sge->length = dest_sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, data_offset);
		sge->length = sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, iv->iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);
	sge->length = sgl->vec[0].len - data_offset;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

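/*
 * Raw data-path enqueue: build one FD per rte_crypto_sym_vec element using
 * the session's build_raw_dp_fd callback and push the FDs to the SEC TX
 * frame queue through the per-lcore QBMAN software portal, retrying up to
 * DPAA2_MAX_TX_RETRY_COUNT times when the enqueue ring is busy.
 */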
static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	RTE_SET_USED(user_data);
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(vec->num == 0))
		return 0;

	if (sess == NULL) {
		DPAA2_SEC_ERR("sessionless raw crypto not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (vec->num) {
		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : vec->num;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			ret = sess->build_raw_dp_fd(drv_ctx,
						    &vec->src_sgl[loop],
						    &vec->dest_sgl[loop],
						    &vec->iv[loop],
						    &vec->digest[loop],
						    &vec->auth_iv[loop],
						    ofs,
						    user_data[loop],
						    &fd_arr[loop]);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			status[loop] = 1;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					vec->num -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		vec->num -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += vec->num;

	return num_tx;
}

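/*
 * Single-operation enqueue is not implemented for this PMD; the burst
 * variant above is the supported submission path.
 */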
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(data_vec);
	RTE_SET_USED(n_data_vecs);
	RTE_SET_USED(ofs);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(aad_or_auth_iv);
	RTE_SET_USED(user_data);

	return 0;
}

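/*
 * Recover the userdata pointer stored in the first FLE of the table that
 * was allocated by the build_raw_dp_*_fd() helpers, then free that table.
 */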
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	void *userdata;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	rte_free((void *)(fle - 1));

	return userdata;
}

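/*
 * Raw data-path dequeue: issue a volatile pull on the SEC RX frame queue and
 * walk the returned DQRR entries, translating each FD back to its userdata,
 * reporting per-frame success from the FD frc field and invoking the
 * caller's post_dequeue callback.
 */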
static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(get_dequeue_count);

	/* This function receives frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status, is_success = 0;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	void *user_data;
	uint32_t nb_ops = max_nb_to_dequeue;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets until the last dequeue entry is found for
	 * the PULL command issued above.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * Note that the SWP is shared between the Ethernet driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until dq_storage is updated with
		 * a new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and
		 * set the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		user_data = sec_fd_to_userdata(fd);
		if (is_user_data_array)
			out_user_data[num_rx] = user_data;
		else
			out_user_data[0] = user_data;
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			is_success = false;
		} else {
			is_success = true;
		}
		post_dequeue(user_data, num_rx, is_success);

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;
	*dequeue_status = 1;
	*n_success = num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to the DPAA2 app */
	return num_rx;
}

static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
	enum rte_crypto_op_status *op_status)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(dequeue_status);
	RTE_SET_USED(op_status);

	return NULL;
}

static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

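/*
 * Driver hook behind the cryptodev raw data-path configuration API
 * (rte_cryptodev_configure_raw_dp_ctx()): fill in the raw data-path function
 * pointers and select the FD builder matching the session context type
 * (cipher, auth, chained cipher+auth, AEAD or protocol offload).
 */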
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	dpaa2_sec_session *sess;
	struct dpaa2_sec_raw_dp_ctx *dp_ctx;
	RTE_SET_USED(qp_id);

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
	}

	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				session_ctx.sec_sess);
	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, cryptodev_driver_id);
	else
		return -ENOTSUP;
	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;

	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
		 sess->ctxt_type == DPAA2_SEC_PDCP)
		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
	else
		return -ENOTSUP;
	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
	dp_ctx->session = sess;

	return 0;
}

int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct dpaa2_sec_raw_dp_ctx);
}