/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_errno.h>

#include "roc_cpt.h"

#include "cnxk_ae.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_cryptodev_capabilities.h"
#include "cnxk_se.h"

#define CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS 5
#define CNXK_CPT_MAX_ASYM_OP_MOD_LEN	1024

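/*
 * Worst-case meta buffer length needed by a symmetric op: room for the
 * MAC, the offset-control word plus IV, and an 8-byte aligned
 * scatter-gather list (the >> 2 reflects that one SG entry apparently
 * describes up to four buffers).
 */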
static int
cnxk_cpt_get_mlen(void)
{
	uint32_t len;

	/* For MAC */
	len = 2 * sizeof(uint64_t);
	len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);

	len += ROC_SE_OFF_CTRL_LEN + ROC_CPT_AES_CBC_IV_LEN;
	len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
			       (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
				2) * ROC_SE_SG_ENTRY_SIZE),
			      8);

	return len;
}

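/*
 * Worst-case meta buffer length for an asymmetric op: a 64-bit RPTR
 * plus the maximum number of operation parameters, each at the maximum
 * supported modulus length.
 */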
static int
cnxk_cpt_asym_get_mlen(void)
{
	uint32_t len;

	/* To hold RPTR */
	len = sizeof(uint64_t);

	/* Get meta len for asymmetric operations */
	len += CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS * CNXK_CPT_MAX_ASYM_OP_MOD_LEN;

	return len;
}

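/*
 * Device configure: the requested queue pair count must not exceed the
 * LFs available to this VF; asymmetric support additionally needs the
 * shared FPM and EC group tables to be set up.
 */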
int
cnxk_cpt_dev_config(struct rte_cryptodev *dev,
		    struct rte_cryptodev_config *conf)
{
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	uint16_t nb_lf_avail, nb_lf;
	int ret;

	dev->feature_flags = cnxk_cpt_default_ff_get() & ~conf->ff_disable;

	nb_lf_avail = roc_cpt->nb_lf_avail;
	nb_lf = conf->nb_queue_pairs;

	if (nb_lf > nb_lf_avail)
		return -ENOTSUP;

	ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
	if (ret) {
		plt_err("Could not configure device");
		return ret;
	}

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
		/* Initialize shared FPM table */
		ret = roc_ae_fpm_get(vf->cnxk_fpm_iova);
		if (ret) {
			plt_err("Could not get FPM table");
			return ret;
		}

		/* Init EC grp table */
		ret = roc_ae_ec_grp_get(vf->ec_grp);
		if (ret) {
			plt_err("Could not get EC grp table");
			roc_ae_fpm_put();
			return ret;
		}
	}

	return 0;
}

int
cnxk_cpt_dev_start(struct rte_cryptodev *dev)
{
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	uint16_t nb_lf = roc_cpt->nb_lf;
	uint16_t qp_id;

	for (qp_id = 0; qp_id < nb_lf; qp_id++) {
		/* The application may not have set up all queue pairs */
		if (roc_cpt->lf[qp_id] == NULL)
			continue;

		roc_cpt_iq_enable(roc_cpt->lf[qp_id]);
	}

	return 0;
}

void
cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
{
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	uint16_t nb_lf = roc_cpt->nb_lf;
	uint16_t qp_id;

	for (qp_id = 0; qp_id < nb_lf; qp_id++) {
		if (roc_cpt->lf[qp_id] == NULL)
			continue;

		roc_cpt_iq_disable(roc_cpt->lf[qp_id]);
	}
}

int
cnxk_cpt_dev_close(struct rte_cryptodev *dev)
{
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	uint16_t i;
	int ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = cnxk_cpt_queue_pair_release(dev, i);
		if (ret < 0) {
			plt_err("Could not release queue pair %u", i);
			return ret;
		}
	}

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
		roc_ae_fpm_put();
		roc_ae_ec_grp_put();
	}

	roc_cpt_dev_clear(&vf->cpt);

	return 0;
}

void
cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_info *info)
{
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;

	info->max_nb_queue_pairs =
		RTE_MIN(roc_cpt->nb_lf_avail, vf->max_qps_limit);
	plt_cpt_dbg("max_nb_queue_pairs %u", info->max_nb_queue_pairs);

	info->feature_flags = cnxk_cpt_default_ff_get();
	info->capabilities = cnxk_crypto_capabilities_get(vf);
	info->sym.max_nb_sessions = 0;
	info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
	info->min_mbuf_tailroom_req = CNXK_CPT_MIN_TAILROOM_REQ;
}

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	snprintf(name, size, "cnxk_cpt_pq_mem_%u:%u", dev_id, qp_id);
}

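/*
 * Create the per-QP meta buffer mempool. Element size is the worst case
 * across the enabled symmetric/asymmetric ops; the pool is oversized by
 * per-core cache headroom since enqueue and dequeue may run on
 * different lcores.
 */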
static int
cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
				struct cnxk_cpt_qp *qp, uint8_t qp_id,
				uint32_t nb_elements)
{
	char mempool_name[RTE_MEMPOOL_NAMESIZE];
	struct cpt_qp_meta_info *meta_info;
	int lcore_cnt = rte_lcore_count();
	struct rte_mempool *pool;
	int mb_pool_sz, mlen = 8;
	uint32_t cache_sz;

	if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
		/* Get meta len */
		mlen = cnxk_cpt_get_mlen();
	}

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
		/* Get meta len required for asymmetric operations */
		mlen = RTE_MAX(mlen, cnxk_cpt_asym_get_mlen());
	}

	mb_pool_sz = nb_elements;
	cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);

	/* In poll mode, the core that enqueues and the core that dequeues
	 * can differ. In event mode, all cores may use the same crypto
	 * queue pair.
	 */
	mb_pool_sz += (RTE_MAX(2, lcore_cnt) * cache_sz);

	/* Allocate mempool */
	snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
		 dev->data->dev_id, qp_id);

	pool = rte_mempool_create(mempool_name, mb_pool_sz, mlen, cache_sz, 0,
				  NULL, NULL, NULL, NULL, rte_socket_id(), 0);
	if (pool == NULL) {
		plt_err("Could not create mempool for metabuf");
		return rte_errno;
	}

	meta_info = &qp->meta_info;

	meta_info->pool = pool;
	meta_info->mlen = mlen;

	return 0;
}

static void
cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
{
	struct cpt_qp_meta_info *meta_info = &qp->meta_info;

	rte_mempool_free(meta_info->pool);

	meta_info->pool = NULL;
	meta_info->mlen = 0;
}

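/*
 * Allocate a queue pair: the QP structure itself, a memzone-backed
 * pending queue with one slot per instruction queue entry, and the meta
 * buffer mempool.
 */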
static struct cnxk_cpt_qp *
cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
		   uint32_t iq_len)
{
	const struct rte_memzone *pq_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	struct cnxk_cpt_qp *qp;
	uint32_t len;
	uint8_t *va;
	int ret;

	/* Allocate queue pair */
	qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
				ROC_ALIGN, 0);
	if (qp == NULL) {
		plt_err("Could not allocate queue pair");
		return NULL;
	}

	/* For pending queue */
	len = iq_len * sizeof(struct cpt_inflight_req);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp_id);

	pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
					     RTE_MEMZONE_SIZE_HINT_ONLY |
						     RTE_MEMZONE_256MB,
					     RTE_CACHE_LINE_SIZE);
	if (pq_mem == NULL) {
		plt_err("Could not allocate reserved memzone");
		goto qp_free;
	}

	va = pq_mem->addr;

	memset(va, 0, len);

	ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
	if (ret) {
		plt_err("Could not create mempool for metabuf");
		goto pq_mem_free;
	}

	/* Initialize pending queue */
	qp->pend_q.req_queue = pq_mem->addr;
	qp->pend_q.head = 0;
	qp->pend_q.tail = 0;

	return qp;

pq_mem_free:
	rte_memzone_free(pq_mem);
qp_free:
	rte_free(qp);
	return NULL;
}

static int
cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
{
	const struct rte_memzone *pq_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

	cnxk_cpt_metabuf_mempool_destroy(qp);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp->lf.lf_id);

	pq_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(pq_mem);
	if (ret)
		return ret;

	rte_free(qp);

	return 0;
}

int
cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	struct roc_cpt_lf *lf;
	int ret;

	if (qp == NULL)
		return -EINVAL;

	lf = roc_cpt->lf[qp_id];
	if (lf == NULL)
		return -ENOTSUP;

	roc_cpt_lf_fini(lf);

	ret = cnxk_cpt_qp_destroy(dev, qp);
	if (ret) {
		plt_err("Could not destroy queue pair %d", qp_id);
		return ret;
	}

	roc_cpt->lf[qp_id] = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

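/*
 * Queue pair setup: releases any previous QP at this index, rounds the
 * descriptor count up to a power of two (so the pending queue can use a
 * mask instead of modulo), then initializes the LF and its LMT line.
 */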
int
cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			  const struct rte_cryptodev_qp_conf *conf,
			  int socket_id __rte_unused)
{
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	struct rte_pci_device *pci_dev;
	struct cnxk_cpt_qp *qp;
	uint32_t nb_desc;
	int ret;

	if (dev->data->queue_pairs[qp_id] != NULL)
		cnxk_cpt_queue_pair_release(dev, qp_id);

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[2].addr == NULL) {
		plt_err("Invalid PCI mem address");
		return -EIO;
	}

	/* Update nb_desc to next power of 2 to aid in pending queue checks */
	nb_desc = plt_align32pow2(conf->nb_descriptors);

	qp = cnxk_cpt_qp_create(dev, qp_id, nb_desc);
	if (qp == NULL) {
		plt_err("Could not create queue pair %d", qp_id);
		return -ENOMEM;
	}

	qp->lf.lf_id = qp_id;
	qp->lf.nb_desc = nb_desc;

	ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
	if (ret < 0) {
		plt_err("Could not initialize queue pair %d", qp_id);
		ret = -EINVAL;
		goto exit;
	}

	qp->pend_q.pq_mask = qp->lf.nb_desc - 1;

	roc_cpt->lf[qp_id] = &qp->lf;

	ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
	if (ret < 0) {
		roc_cpt->lf[qp_id] = NULL;
		plt_err("Could not init lmtline for queue pair %d", qp_id);
		goto exit;
	}

	qp->sess_mp = conf->mp_session;
	qp->sess_mp_priv = conf->mp_session_private;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;

exit:
	cnxk_cpt_qp_destroy(dev, qp);
	return ret;
}

unsigned int
cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cnxk_se_sess);
}

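/*
 * Parse the symmetric xform chain and populate the session: handles
 * cipher-only, auth-only, AEAD and chained cipher/auth (in either
 * order), rejecting combinations the hardware does not support.
 */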
static int
cnxk_sess_fill(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_sym_xform *aead_xfrm = NULL;
	struct rte_crypto_sym_xform *c_xfrm = NULL;
	struct rte_crypto_sym_xform *a_xfrm = NULL;
	bool ciph_then_auth = false;

	if (xform == NULL)
		return -EINVAL;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		c_xfrm = xform;
		a_xfrm = xform->next;
		ciph_then_auth = true;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		c_xfrm = xform->next;
		a_xfrm = xform;
		ciph_then_auth = false;
	} else {
		aead_xfrm = xform;
	}

	if (c_xfrm != NULL && c_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		plt_dp_err("Invalid type in cipher xform");
		return -EINVAL;
	}

	if (a_xfrm != NULL && a_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		plt_dp_err("Invalid type in auth xform");
		return -EINVAL;
	}

	if (aead_xfrm != NULL && aead_xfrm->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
		plt_dp_err("Invalid type in AEAD xform");
		return -EINVAL;
	}

	if ((c_xfrm == NULL || c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL) &&
	    a_xfrm != NULL && a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL &&
	    a_xfrm->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
		plt_dp_err("Null cipher + null auth verify is not supported");
		return -ENOTSUP;
	}

	/* Cipher only */
	if (c_xfrm != NULL &&
	    (a_xfrm == NULL || a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)) {
		if (fill_sess_cipher(c_xfrm, sess))
			return -ENOTSUP;
		else
			return 0;
	}

	/* Auth only */
	if (a_xfrm != NULL &&
	    (c_xfrm == NULL || c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL)) {
		if (fill_sess_auth(a_xfrm, sess))
			return -ENOTSUP;
		else
			return 0;
	}

	/* AEAD */
	if (aead_xfrm != NULL) {
		if (fill_sess_aead(aead_xfrm, sess))
			return -ENOTSUP;
		else
			return 0;
	}

	/* Chained ops */
	if (c_xfrm == NULL || a_xfrm == NULL) {
		plt_dp_err("Invalid xforms");
		return -EINVAL;
	}

	if (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
	    a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SHA1) {
		plt_dp_err("3DES-CBC + SHA1 is not supported");
		return -ENOTSUP;
	}

	/* Cipher then auth */
	if (ciph_then_auth) {
		if (fill_sess_cipher(c_xfrm, sess))
			return -ENOTSUP;
		if (fill_sess_auth(a_xfrm, sess))
			return -ENOTSUP;
		else
			return 0;
	}

	/* Auth then cipher */
	if (c_xfrm->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		switch (a_xfrm->auth.algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			switch (c_xfrm->cipher.algo) {
			case RTE_CRYPTO_CIPHER_AES_CBC:
				break;
			default:
				return -ENOTSUP;
			}
			break;
		default:
			return -ENOTSUP;
		}
	}

	if (fill_sess_auth(a_xfrm, sess))
		return -ENOTSUP;
	if (fill_sess_cipher(c_xfrm, sess))
		return -ENOTSUP;
	else
		return 0;
}

static uint64_t
cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
{
	union cpt_inst_w7 inst_w7;

	inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;

	/* Set the engine group */
	if (sess->zsk_flag || sess->chacha_poly)
		inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
	else
		inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];

	return inst_w7.u64;
}

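/*
 * Fill a symmetric session: grab private data from the session mempool,
 * parse the xforms, validate the HMAC MAC length and precompute
 * instruction word 7 (context pointer and engine group).
 */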
int
sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
		      struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
{
	struct cnxk_se_sess *sess_priv;
	void *priv;
	int ret;

	if (unlikely(rte_mempool_get(pool, &priv))) {
		plt_dp_err("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cnxk_se_sess));

	sess_priv = priv;

	ret = cnxk_sess_fill(xform, sess_priv);
	if (ret)
		goto priv_put;

	if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
	    cpt_mac_len_verify(&xform->auth)) {
		plt_dp_err("MAC length is not supported");
		if (sess_priv->roc_se_ctx.auth_key != NULL) {
			plt_free(sess_priv->roc_se_ctx.auth_key);
			sess_priv->roc_se_ctx.auth_key = NULL;
		}

		ret = -ENOTSUP;
		goto priv_put;
	}

	sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);

	set_sym_session_private_data(sess, driver_id, sess_priv);

	return 0;

priv_put:
	rte_mempool_put(pool, priv);

	return ret;
}

int
cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
			       struct rte_crypto_sym_xform *xform,
			       struct rte_cryptodev_sym_session *sess,
			       struct rte_mempool *pool)
{
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	uint8_t driver_id;

	driver_id = dev->driver_id;

	return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
}

void
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
	void *priv = get_sym_session_private_data(sess, driver_id);
	struct cnxk_se_sess *sess_priv;
	struct rte_mempool *pool;

	if (priv == NULL)
		return;

	sess_priv = priv;

	if (sess_priv->roc_se_ctx.auth_key != NULL)
		plt_free(sess_priv->roc_se_ctx.auth_key);

	memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));

	pool = rte_mempool_from_obj(priv);

	set_sym_session_private_data(sess, driver_id, NULL);

	rte_mempool_put(pool, priv);
}

void
cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_sym_session *sess)
{
	sym_session_clear(dev->driver_id, sess);
}

unsigned int
cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cnxk_ae_sess);
}

void
cnxk_ae_session_clear(struct rte_cryptodev *dev,
		      struct rte_cryptodev_asym_session *sess)
{
	struct cnxk_ae_sess *priv;

	priv = (struct cnxk_ae_sess *)sess->sess_private_data;
	if (priv == NULL)
		return;

	/* Free resources allocated in session_cfg */
	cnxk_ae_free_session_parameters(priv);

	/* Reset the session data */
	memset(priv, 0, cnxk_ae_session_size_get(dev));
}

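/*
 * Fill an asymmetric session from the xform and precompute instruction
 * word 7 with the AE engine group; the FPM and EC group tables set up
 * at configure time are cached in the session.
 */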
int
cnxk_ae_session_cfg(struct rte_cryptodev *dev,
		    struct rte_crypto_asym_xform *xform,
		    struct rte_cryptodev_asym_session *sess)
{
	struct cnxk_ae_sess *priv =
			(struct cnxk_ae_sess *)sess->sess_private_data;
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	union cpt_inst_w7 w7;
	int ret;

	ret = cnxk_ae_fill_session_parameters(priv, xform);
	if (ret)
		return ret;

	w7.u64 = 0;
	w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_AE];
	priv->cpt_inst_w7 = w7.u64;
	priv->cnxk_fpm_iova = vf->cnxk_fpm_iova;
	priv->ec_grp = vf->ec_grp;

	return 0;
}

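/*
 * Debug dump on error/timeout: software pending-queue indices alongside
 * the hardware LF in-progress and instruction pointer registers.
 */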
void
cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp)
{
	struct pending_queue *pend_q = &qp->pend_q;
	uint64_t inflight, enq_ptr, deq_ptr, insts;
	union cpt_lf_q_inst_ptr inst_ptr;
	union cpt_lf_inprog lf_inprog;

	plt_print("Lcore ID: %d, LF/QP ID: %d", rte_lcore_id(), qp->lf.lf_id);
	plt_print("");
	plt_print("S/w pending queue:");
	plt_print("\tHead: %"PRIu64"", pend_q->head);
	plt_print("\tTail: %"PRIu64"", pend_q->tail);
	plt_print("\tMask: 0x%"PRIx64"", pend_q->pq_mask);
	plt_print("\tInflight count: %"PRIu64"",
		  pending_queue_infl_cnt(pend_q->head, pend_q->tail,
					 pend_q->pq_mask));

	plt_print("");
	plt_print("H/w pending queue:");

	lf_inprog.u = plt_read64(qp->lf.rbase + CPT_LF_INPROG);
	inflight = lf_inprog.s.inflight;
	plt_print("\tInflight in engines: %"PRIu64"", inflight);

	inst_ptr.u = plt_read64(qp->lf.rbase + CPT_LF_Q_INST_PTR);

	enq_ptr = inst_ptr.s.nq_ptr;
	deq_ptr = inst_ptr.s.dq_ptr;

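	/* When NQ is behind DQ the queue has wrapped; the 320 + 40 extra
	 * slots are assumed to be hardware-reserved instruction queue
	 * entries beyond the nb_desc ring.
	 */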
	if (enq_ptr >= deq_ptr)
		insts = enq_ptr - deq_ptr;
	else
		insts = (enq_ptr + pend_q->pq_mask + 1 + 320 + 40) - deq_ptr;

	plt_print("\tNQ ptr: 0x%"PRIx64"", enq_ptr);
	plt_print("\tDQ ptr: 0x%"PRIx64"", deq_ptr);
	plt_print("Insts waiting in CPT: %"PRIu64"", insts);

	plt_print("");
	roc_cpt_afs_print(qp->lf.roc_cpt);
}