/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>
#include <rte_ip.h>
#include <rte_vect.h>

#include "cn9k_cryptodev.h"
#include "cn9k_cryptodev_ops.h"
#include "cn9k_ipsec.h"
#include "cn9k_ipsec_la_ops.h"
#include "cnxk_ae.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_se.h"

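/*
 * Fill the CPT instruction for a symmetric crypto op: ops with a cipher
 * component take the fill_fc_params() path, auth-only ops take the
 * fill_digest_params() path.
 */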
static __rte_always_inline int __rte_hot
cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
		       struct cnxk_se_sess *sess,
		       struct cpt_inflight_req *infl_req,
		       struct cpt_inst_s *inst)
{
	uint64_t cpt_op;
	int ret;

	cpt_op = sess->cpt_op;

	if (cpt_op & ROC_SE_OP_CIPHER_MASK)
		ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
	else
		ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
					 inst);

	return ret;
}

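/*
 * Fill the CPT instruction for a lookaside IPsec op. Only in-place,
 * contiguous mbufs are supported; the SA direction selects outbound or
 * inbound processing.
 */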
static __rte_always_inline int __rte_hot
cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
		       struct cpt_inflight_req *infl_req,
		       struct cpt_inst_s *inst)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct cn9k_sec_session *priv;
	struct cn9k_ipsec_sa *sa;

	if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
		plt_dp_err("Out of place is not supported");
		return -ENOTSUP;
	}

	if (unlikely(!rte_pktmbuf_is_contiguous(sym_op->m_src))) {
		plt_dp_err("Scatter Gather mode is not supported");
		return -ENOTSUP;
	}

	priv = get_sec_session_private_data(op->sym->sec_session);
	sa = &priv->sa;

	if (sa->dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		return process_outb_sa(op, sa, inst);

	infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;

	return process_inb_sa(op, sa, inst);
}

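/*
 * Create a temporary session for a sessionless symmetric op. The session
 * is taken from the queue pair's session mempool and is released once the
 * op is dequeued and post-processed.
 */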
static inline struct cnxk_se_sess *
cn9k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
{
	const int driver_id = cn9k_cryptodev_driver_id;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_cryptodev_sym_session *sess;
	struct cnxk_se_sess *priv;
	int ret;

	/* Create temporary session */
	sess = rte_cryptodev_sym_session_create(qp->sess_mp);
	if (sess == NULL)
		return NULL;

	ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
				    sess, qp->sess_mp_priv);
	if (ret)
		goto sess_put;

	priv = get_sym_session_private_data(sess, driver_id);

	sym_op->session = sess;

	return priv;

sess_put:
	rte_mempool_put(qp->sess_mp, sess);
	return NULL;
}

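/*
 * Prepare the CPT instruction for an op, dispatching on op type
 * (symmetric, security, asymmetric) and session type. Returns 0 on
 * success, non-zero on failure.
 */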
static inline int
cn9k_cpt_inst_prep(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
		   struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	int ret;

	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		struct rte_crypto_sym_op *sym_op;
		struct cnxk_se_sess *sess;

		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			sym_op = op->sym;
			sess = get_sym_session_private_data(
				sym_op->session, cn9k_cryptodev_driver_id);
			ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
						     inst);
			inst->w7.u64 = sess->cpt_inst_w7;
		} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
			ret = cn9k_cpt_sec_inst_fill(op, infl_req, inst);
		else {
			sess = cn9k_cpt_sym_temp_sess_create(qp, op);
			if (unlikely(sess == NULL)) {
				plt_dp_err("Could not create temp session");
				return -1;
			}

			ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
						     inst);
			if (unlikely(ret)) {
				sym_session_clear(cn9k_cryptodev_driver_id,
						  op->sym->session);
				rte_mempool_put(qp->sess_mp, op->sym->session);
			}
			inst->w7.u64 = sess->cpt_inst_w7;
		}
	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		struct rte_crypto_asym_op *asym_op;
		struct cnxk_ae_sess *sess;

		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			asym_op = op->asym;
			sess = (struct cnxk_ae_sess *)
					asym_op->session->sess_private_data;
			ret = cnxk_ae_enqueue(qp, op, infl_req, inst, sess);
			inst->w7.u64 = sess->cpt_inst_w7;
		} else {
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
		plt_dp_err("Unsupported op type");
	}

	return ret;
}

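/*
 * Submit a single instruction: copy it to the LMTLINE and issue LDEOR,
 * retrying until the submission is accepted (lmt_status != 0).
 */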
static inline void
cn9k_cpt_inst_submit(struct cpt_inst_s *inst, uint64_t lmtline,
		     uint64_t io_addr)
{
	uint64_t lmt_status;

	do {
		/* Copy CPT command to LMTLINE */
		roc_lmt_mov64((void *)lmtline, inst);

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}

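/*
 * Submit two instructions back to back. On arm64 the 128-byte copy to the
 * LMTLINE is done with NEON 16-byte loads/stores; other builds use
 * roc_lmt_mov_seg().
 */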
static __plt_always_inline void
cn9k_cpt_inst_submit_dual(struct cpt_inst_s *inst, uint64_t lmtline,
			  uint64_t io_addr)
{
	uint64_t lmt_status;

	do {
		/* Copy 2 CPT inst_s to LMTLINE */
#if defined(RTE_ARCH_ARM64)
		uint64_t *s = (uint64_t *)inst;
		uint64_t *d = (uint64_t *)lmtline;

		vst1q_u64(&d[0], vld1q_u64(&s[0]));
		vst1q_u64(&d[2], vld1q_u64(&s[2]));
		vst1q_u64(&d[4], vld1q_u64(&s[4]));
		vst1q_u64(&d[6], vld1q_u64(&s[6]));
		vst1q_u64(&d[8], vld1q_u64(&s[8]));
		vst1q_u64(&d[10], vld1q_u64(&s[10]));
		vst1q_u64(&d[12], vld1q_u64(&s[12]));
		vst1q_u64(&d[14], vld1q_u64(&s[14]));
#else
		roc_lmt_mov_seg((void *)lmtline, inst, 8);
#endif

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}

static uint16_t
cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_inflight_req *infl_req_1, *infl_req_2;
	struct cpt_inst_s inst[2] __rte_cache_aligned;
	struct rte_crypto_op *op_1, *op_2;
	uint16_t nb_allowed, count = 0;
	struct cnxk_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	uint64_t head;
	int ret;

	const union cpt_res_s res = {
		.cn10k.compcode = CPT_COMP_NOT_DONE,
	};

	pend_q = &qp->pend_q;

	const uint64_t lmt_base = qp->lf.lmt_base;
	const uint64_t io_addr = qp->lf.io_addr;
	const uint64_t pq_mask = pend_q->pq_mask;

	/* Clear w0, w2, w3 of both inst */

	inst[0].w0.u64 = 0;
	inst[0].w2.u64 = 0;
	inst[0].w3.u64 = 0;
	inst[1].w0.u64 = 0;
	inst[1].w2.u64 = 0;
	inst[1].w3.u64 = 0;

	head = pend_q->head;
	nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
	nb_ops = RTE_MIN(nb_ops, nb_allowed);

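	/*
	 * If the burst size is odd, submit the first op on its own so that
	 * the remaining ops can be issued in pairs via dual LMTST.
	 */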
	if (unlikely(nb_ops & 1)) {
		op_1 = ops[0];
		infl_req_1 = &pend_q->req_queue[head];
		infl_req_1->op_flags = 0;

		ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
		if (unlikely(ret)) {
			plt_dp_err("Could not process op: %p", op_1);
			return 0;
		}

		infl_req_1->cop = op_1;
		infl_req_1->res.cn9k.compcode = CPT_COMP_NOT_DONE;
		inst[0].res_addr = (uint64_t)&infl_req_1->res;

		cn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);
		pending_queue_advance(&head, pq_mask);
		count++;
	}

	while (count < nb_ops) {
		op_1 = ops[count];
		op_2 = ops[count + 1];

		infl_req_1 = &pend_q->req_queue[head];
		pending_queue_advance(&head, pq_mask);
		infl_req_2 = &pend_q->req_queue[head];
		pending_queue_advance(&head, pq_mask);

		infl_req_1->cop = op_1;
		infl_req_2->cop = op_2;
		infl_req_1->op_flags = 0;
		infl_req_2->op_flags = 0;

		__atomic_store_n(&infl_req_1->res.u64[0], res.u64[0],
				 __ATOMIC_RELAXED);
		inst[0].res_addr = (uint64_t)&infl_req_1->res;

		__atomic_store_n(&infl_req_2->res.u64[0], res.u64[0],
				 __ATOMIC_RELAXED);
		inst[1].res_addr = (uint64_t)&infl_req_2->res;

		ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
		if (unlikely(ret)) {
			plt_dp_err("Could not process op: %p", op_1);
			pending_queue_retreat(&head, pq_mask, 2);
			break;
		}

		ret = cn9k_cpt_inst_prep(qp, op_2, infl_req_2, &inst[1]);
		if (unlikely(ret)) {
			plt_dp_err("Could not process op: %p", op_2);
			pending_queue_retreat(&head, pq_mask, 1);
			cn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);
			count++;
			break;
		}

		cn9k_cpt_inst_submit_dual(&inst[0], lmt_base, io_addr);

		count += 2;
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	pend_q->head = head;
	pend_q->time_out = rte_get_timer_cycles() +
			   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	return count;
}

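/*
 * Event crypto adapter enqueue: look up the queue pair from the op's event
 * crypto metadata, encode the response event info into instruction word 2
 * and submit. Returns 1 on success, 0 on failure.
 */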
uint16_t
cn9k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
{
	union rte_event_crypto_metadata *ec_mdata;
	struct cpt_inflight_req *infl_req;
	struct rte_event *rsp_info;
	struct cnxk_cpt_qp *qp;
	struct cpt_inst_s inst;
	uint8_t cdev_id;
	uint16_t qp_id;
	int ret;

	ec_mdata = cnxk_event_crypto_mdata_get(op);
	if (!ec_mdata) {
		rte_errno = EINVAL;
		return 0;
	}

	cdev_id = ec_mdata->request_info.cdev_id;
	qp_id = ec_mdata->request_info.queue_pair_id;
	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
	rsp_info = &ec_mdata->response_info;

	if (unlikely(!qp->ca.enabled)) {
		rte_errno = EINVAL;
		return 0;
	}

	if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
		rte_errno = ENOMEM;
		return 0;
	}
	infl_req->op_flags = 0;

	ret = cn9k_cpt_inst_prep(qp, op, infl_req, &inst);
	if (unlikely(ret)) {
		plt_dp_err("Could not process op: %p", op);
		rte_mempool_put(qp->ca.req_mp, infl_req);
		return 0;
	}

	infl_req->cop = op;
	infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
	infl_req->qp = qp;
	inst.w0.u64 = 0;
	inst.res_addr = (uint64_t)&infl_req->res;
	inst.w2.u64 = CNXK_CPT_INST_W2(
		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
		rsp_info->sched_type, rsp_info->queue_id, 0);
	inst.w3.u64 = CNXK_CPT_INST_W3(1, infl_req);

	if (roc_cpt_is_iq_full(&qp->lf)) {
		rte_mempool_put(qp->ca.req_mp, infl_req);
		rte_errno = EAGAIN;
		return 0;
	}

	if (!rsp_info->sched_type)
		roc_sso_hws_head_wait(base);

	cn9k_cpt_inst_submit(&inst, qp->lmtline.lmt_base, qp->lmtline.io_addr);

	return 1;
}

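/*
 * Post-process a lookaside IPsec op. For inbound packets, the IP header at
 * the response pointer offset is parsed (IPv4 or IPv6) to restore the mbuf
 * packet/data length, and data_off is advanced past the response header.
 */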
static inline void
cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
			  struct cpt_inflight_req *infl_req)
{
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct rte_mbuf *m = sym_op->m_src;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip;
	uint16_t m_len = 0;
	char *data;

	if (infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND) {
		data = rte_pktmbuf_mtod(m, char *);

		ip = (struct rte_ipv4_hdr *)(data + ROC_IE_ON_INB_RPTR_HDR);

		if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
		    IPVERSION) {
			m_len = rte_be_to_cpu_16(ip->total_length);
		} else {
			PLT_ASSERT(((ip->version_ihl & 0xf0) >>
				    RTE_IPV4_IHL_MULTIPLIER) == 6);
			ip6 = (struct rte_ipv6_hdr *)ip;
			m_len = rte_be_to_cpu_16(ip6->payload_len) +
				sizeof(struct rte_ipv6_hdr);
		}

		m->data_len = m_len;
		m->pkt_len = m_len;
		m->data_off += ROC_IE_ON_INB_RPTR_HDR;
	}
}

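/*
 * Translate the CPT result word into an op status. A good completion with
 * a microcode error maps to AUTH_FAILED (ICV miscompare) or ERROR;
 * successful ops get security/auth/asym post-processing, and temporary
 * sessions created for sessionless ops are released.
 */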
static inline void
cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
			      struct cpt_inflight_req *infl_req,
			      struct cpt_cn9k_res_s *res)
{
	unsigned int sz;

	if (likely(res->compcode == CPT_COMP_GOOD)) {
		if (unlikely(res->uc_compcode)) {
			if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
				cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

			plt_dp_info("Request failed with microcode error");
			plt_dp_info("MC completion code 0x%x",
				    res->uc_compcode);
			goto temp_sess_free;
		}

		cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
				cn9k_cpt_sec_post_process(cop, infl_req);
				return;
			}

			/* Verify authentication data if required */
			if (unlikely(infl_req->op_flags &
				     CPT_OP_FLAGS_AUTH_VERIFY)) {
				uintptr_t *rsp = infl_req->mdata;
				compl_auth_verify(cop, (uint8_t *)rsp[0],
						  rsp[1]);
			}
		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
			struct rte_crypto_asym_op *op = cop->asym;
			uintptr_t *mdata = infl_req->mdata;
			struct cnxk_ae_sess *sess;

			sess = (struct cnxk_ae_sess *)
					op->session->sess_private_data;

			cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
		}
	} else {
		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		plt_dp_info("HW completion code 0x%x", res->compcode);

		switch (res->compcode) {
		case CPT_COMP_INSTERR:
			plt_dp_err("Request failed with instruction error");
			break;
		case CPT_COMP_FAULT:
			plt_dp_err("Request failed with DMA fault");
			break;
		case CPT_COMP_HWERR:
			plt_dp_err("Request failed with hardware error");
			break;
		default:
			plt_dp_err(
				"Request failed with unknown completion code");
		}
	}

temp_sess_free:
	if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			sym_session_clear(cn9k_cryptodev_driver_id,
					  cop->sym->session);
			sz = rte_cryptodev_sym_get_existing_header_session_size(
				cop->sym->session);
			memset(cop->sym->session, 0, sz);
			rte_mempool_put(qp->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}
	}
}

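/*
 * Event crypto adapter dequeue: get_work1 carries the inflight request
 * pointer. Post-process the op, free any metabuf and the inflight request,
 * and hand the op pointer back to the event device.
 */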
uintptr_t
cn9k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
{
	struct cpt_inflight_req *infl_req;
	struct rte_crypto_op *cop;
	struct cnxk_cpt_qp *qp;
	union cpt_res_s res;

	infl_req = (struct cpt_inflight_req *)(get_work1);
	cop = infl_req->cop;
	qp = infl_req->qp;

	res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);

	cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req, &res.cn9k);

	if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
		rte_mempool_put(qp->meta_info.pool, infl_req->mdata);

	rte_mempool_put(qp->ca.req_mp, infl_req);
	return (uintptr_t)cop;
}

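/*
 * Poll-mode dequeue: walk the pending queue from the tail, stopping at the
 * first request still marked CPT_COMP_NOT_DONE. Completed ops are
 * post-processed; a request that exceeds the timeout triggers an error log
 * and a queue pair dump.
 */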
static uint16_t
cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_inflight_req *infl_req;
	struct cnxk_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	uint64_t infl_cnt, pq_tail;
	struct rte_crypto_op *cop;
	union cpt_res_s res;
	int i;

	pend_q = &qp->pend_q;

	const uint64_t pq_mask = pend_q->pq_mask;

	pq_tail = pend_q->tail;
	infl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);
	nb_ops = RTE_MIN(nb_ops, infl_cnt);

	/* Ensure infl_cnt isn't read before data lands */
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

	for (i = 0; i < nb_ops; i++) {
		infl_req = &pend_q->req_queue[pq_tail];

		res.u64[0] = __atomic_load_n(&infl_req->res.u64[0],
					     __ATOMIC_RELAXED);

		if (unlikely(res.cn9k.compcode == CPT_COMP_NOT_DONE)) {
			if (unlikely(rte_get_timer_cycles() >
				     pend_q->time_out)) {
				plt_err("Request timed out");
				cnxk_cpt_dump_on_err(qp);
				pend_q->time_out = rte_get_timer_cycles() +
						   DEFAULT_COMMAND_TIMEOUT *
							   rte_get_timer_hz();
			}
			break;
		}

		pending_queue_advance(&pq_tail, pq_mask);

		cop = infl_req->cop;

		ops[i] = cop;

		cn9k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn9k);

		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
			rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
	}

	pend_q->tail = pq_tail;

	return i;
}

void
cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
	dev->enqueue_burst = cn9k_cpt_enqueue_burst;
	dev->dequeue_burst = cn9k_cpt_dequeue_burst;

	rte_mb();
}

static void
cn9k_cpt_dev_info_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_info *info)
{
	if (info != NULL) {
		cnxk_cpt_dev_info_get(dev, info);
		info->driver_id = cn9k_cryptodev_driver_id;
	}
}

struct rte_cryptodev_ops cn9k_cpt_ops = {
	/* Device control ops */
	.dev_configure = cnxk_cpt_dev_config,
	.dev_start = cnxk_cpt_dev_start,
	.dev_stop = cnxk_cpt_dev_stop,
	.dev_close = cnxk_cpt_dev_close,
	.dev_infos_get = cn9k_cpt_dev_info_get,

	.stats_get = NULL,
	.stats_reset = NULL,
	.queue_pair_setup = cnxk_cpt_queue_pair_setup,
	.queue_pair_release = cnxk_cpt_queue_pair_release,

	/* Symmetric crypto ops */
	.sym_session_get_size = cnxk_cpt_sym_session_get_size,
	.sym_session_configure = cnxk_cpt_sym_session_configure,
	.sym_session_clear = cnxk_cpt_sym_session_clear,

	/* Asymmetric crypto ops */
	.asym_session_get_size = cnxk_ae_session_size_get,
	.asym_session_configure = cnxk_ae_session_cfg,
	.asym_session_clear = cnxk_ae_session_clear,

};