/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <cryptodev_pmd.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

#include <desc/algo.h>

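/*
 * Per queue-pair driver context for the raw datapath API. It is stored in
 * rte_crypto_raw_dp_ctx::drv_ctx_data and carries the session pointer
 * together with head/tail and cached enqueue/dequeue bookkeeping.
 */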
struct dpaa2_sec_raw_dp_ctx {
	dpaa2_sec_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

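/*
 * Build a compound frame descriptor for a chained cipher + auth operation.
 * The output FLE points to an SG list covering the cipher region of the
 * destination (plus the ICV for encryption); the input FLE points to an SG
 * list of IV, auth region and, for decryption, a copy of the received ICV
 * used by SEC for verification.
 */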
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int data_len = 0, auth_len = 0, cipher_len = 0;
	unsigned int i = 0;
	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
				ofs.ofs.auth.head;

	uint16_t auth_tail_len = ofs.ofs.auth.tail;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *iv_ptr = iv->va;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	/* first FLE entry used to store userdata and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(cipher_len + icv_len) :
			cipher_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	ip_fle->length = (sess->dir == DIR_ENC) ?
			(auth_len + sess->iv.length) :
			(auth_len + sess->iv.length +
			icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

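/*
 * Build a compound frame descriptor for an AEAD (e.g. GCM) operation. The
 * input SG list carries the IV, optional auth-only data (referenced through
 * the auth_iv pointer when auth_only_len is set) and the payload; the output
 * SG list carries the payload region plus the ICV on encryption, while on
 * decryption the received ICV is appended to the input for in-SEC
 * verification.
 */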
static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *IV_ptr = iv->va;
	unsigned int i = 0;
	int data_len = 0, aead_len = 0;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

	/* first FLE entry used to store userdata and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(aead_len + icv_len) :
			aead_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(aead_len + sess->iv.length + auth_only_len) :
		(aead_len + sess->iv.length + auth_only_len +
		icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

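/*
 * Build a compound frame descriptor for an authentication-only operation.
 * The output FLE points directly at the digest buffer; the input FLE is an
 * SG list of the (optional) auth IV followed by the data to authenticate.
 * For verification (DIR_DEC) the received digest is copied after the SG
 * table and appended to the input so that SEC can compare it.
 */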
static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(dest_sgl);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int total_len = 0, data_len = 0, data_offset;
	uint8_t *old_digest;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	data_offset = ofs.ofs.auth.head;

	/* For SNOW3G and ZUC, data lengths are supported in bits only */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store userdata and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(digest->va));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
						sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);

	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = sgl->vec[0].len - data_offset;
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}
	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, digest->va, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

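/*
 * Build a compound frame descriptor for protocol offload (IPsec/PDCP)
 * operations. Whole-buffer input and output SG lists are constructed and,
 * for PDCP sessions with HFN override enabled, the per-packet HFN taken
 * from the userdata area is programmed into the FLEs and the FD.
 */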
static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t in_len = 0, out_len = 0, i;

	/* first FLE entry used to store userdata and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	DPAA2_SET_FD_IVP(fd);
	DPAA2_SET_FLE_IVP(op_fle);
	DPAA2_SET_FLE_IVP(ip_fle);

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = dest_sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = dest_sgl->vec[i - 1].tot_len;

	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = sgl->vec[i - 1].tot_len;
	}
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sgl->vec[0].len;
	in_len += sge->length;
	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
		in_len += sge->length;
	}

	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, the per-packet HFN is read from the userdata
	 * area (mbuf priv, after sym_op) at the configured offset.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

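/*
 * Build a compound frame descriptor for a cipher-only operation. The input
 * FLE is an SG list of the IV followed by the data to be ciphered; the
 * output FLE is an SG list covering the corresponding destination region.
 */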
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int total_len = 0, data_len = 0, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;

	/* For SNOW3G and ZUC, data lengths are supported in bits only */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store userdata and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
		data_offset,
		data_len,
		sess->iv.length);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* OOP */
	if (dest_sgl) {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, data_offset);
		sge->length = dest_sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, data_offset);
		sge->length = sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, iv->iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);
	sge->length = sgl->vec[0].len - data_offset;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

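/*
 * Enqueue a burst of raw symmetric crypto vectors. For each element a frame
 * descriptor is built via the session's build_raw_dp_fd callback and the
 * resulting FDs are pushed to the queue pair's Tx FQ through the per-lcore
 * QBMAN software portal, retrying a limited number of times when the portal
 * is busy.
 */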
static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(vec->num == 0))
		return 0;

	if (sess == NULL) {
		DPAA2_SEC_ERR("sessionless raw crypto not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (vec->num) {
		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : vec->num;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			ret = sess->build_raw_dp_fd(drv_ctx,
						    &vec->src_sgl[loop],
						    &vec->dest_sgl[loop],
						    &vec->iv[loop],
						    &vec->digest[loop],
						    &vec->auth_iv[loop],
						    ofs,
						    user_data[loop],
						    &fd_arr[loop]);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			status[loop] = 1;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					vec->num -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		vec->num -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += vec->num;

	return num_tx;
}

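/*
 * Single-operation raw enqueue is not implemented for this PMD; only the
 * burst variant above is supported.
 */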
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(data_vec);
	RTE_SET_USED(n_data_vecs);
	RTE_SET_USED(ofs);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(aad_or_auth_iv);
	RTE_SET_USED(user_data);

	return 0;
}

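/*
 * Recover the caller's userdata pointer from a returned FD. The first FLE of
 * the compound frame stores the userdata address; the FLE memory allocated
 * at enqueue time is freed here.
 */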
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	void *userdata;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	rte_free((void *)(fle - 1));

	return userdata;
}

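/*
 * Dequeue a burst of completed operations from the queue pair's Rx FQ using
 * a volatile pull command on the QBMAN portal. For every received FD the
 * userdata pointer is recovered, the frame status (FRC) is checked and the
 * post_dequeue callback is invoked with the success/failure indication.
 */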
static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(get_dequeue_count);

	/* This function receives frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status, is_success = 0;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	void *user_data;
	uint32_t nb_ops = max_nb_to_dequeue;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * Note that the SWP is shared between the Ethernet and
		 * SEC drivers.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * a new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and
		 * set the loop termination condition accordingly
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		user_data = sec_fd_to_userdata(fd);
		if (is_user_data_array)
			out_user_data[num_rx] = user_data;
		else
			out_user_data[0] = user_data;
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			is_success = false;
		} else {
			is_success = true;
		}
		post_dequeue(user_data, num_rx, is_success);

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;
	*dequeue_status = 1;
	*n_success = num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

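/*
 * Single-operation raw dequeue is not implemented for this PMD; only the
 * burst variant above is supported.
 */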
static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(dequeue_status);
	RTE_SET_USED(op_status);

	return NULL;
}

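/*
 * Enqueue/dequeue completion notifications are no-ops here: the burst
 * routines above already commit their work to hardware, so there is nothing
 * left to flush.
 */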
static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

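/*
 * Configure the raw datapath context for a queue pair: install the
 * enqueue/dequeue callbacks and select the FD builder matching the session
 * type (cipher, auth, chain, AEAD or protocol offload). Roughly, an
 * application drives this as sketched below (names abridged; see
 * rte_cryptodev.h for the exact prototypes, and treat this as an
 * illustrative outline rather than a verified example):
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 0);
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data, &st);
 *	rte_cryptodev_raw_dequeue_burst(ctx, NULL, max_nb, post_cb,
 *			out_user_data, 1, &n_success, &dq_status);
 */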
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	dpaa2_sec_session *sess;
	struct dpaa2_sec_raw_dp_ctx *dp_ctx;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
	}

	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				session_ctx.sec_sess);
	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, cryptodev_driver_id);
	else
		return -ENOTSUP;

	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;

	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
		sess->ctxt_type == DPAA2_SEC_PDCP)
		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
	else
		return -ENOTSUP;

	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
	dp_ctx->session = sess;

	return 0;
}

int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct dpaa2_sec_raw_dp_ctx);
}