/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

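/*
 * In dual workslot mode each event port is backed by a pair of SSO HWS LFs.
 * CN9K_DUAL_WS_PAIR_ID() maps an event port id and a slot index (0/1) to the
 * underlying HWS LF, e.g. port 3 uses HWS 6 and 7.
 */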
#define CN9K_DUAL_WS_NB_WS	    2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)

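/*
 * Resolve the Rx/Tx fastpath handler from a lookup table indexed by the
 * enabled offload flags; the tables are expanded from NIX_RX_FASTPATH_MODES
 * and NIX_TX_FASTPATH_MODES in cn9k_sso_fp_fns_set().
 */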
#define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
	deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]

#define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
	enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)]

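/*
 * Queue (HWGRP) link helpers used by the common cnxk layer; in dual workslot
 * mode both HWS of the pair are linked/unlinked to the same set of groups.
 */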
static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	int rc;

	if (dev->dual_ws) {
		dws = port;
		rc = roc_sso_hws_link(&dev->sso,
				      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
				      nb_link);
		rc |= roc_sso_hws_link(&dev->sso,
				       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
				       map, nb_link);
	} else {
		ws = port;
		rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
	}

	return rc;
}

static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	int rc;

	if (dev->dual_ws) {
		dws = port;
		rc = roc_sso_hws_unlink(&dev->sso,
					CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
					map, nb_link);
		rc |= roc_sso_hws_unlink(&dev->sso,
					 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
					 map, nb_link);
	} else {
		ws = port;
		rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
	}

	return rc;
}

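/*
 * Per-port setup: cache the group base, XAQ limit and flow-control memory in
 * the workslot(s) and program the get_work timeout (converted to usec and
 * written as value - 1) into SSOW_LF_GWS_NW_TIM.
 */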
static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t val;

	/* Set get_work tmo for HWS */
	val = NSEC2USEC(dev->deq_tmo_ns);
	val = val ? val - 1 : 0;
	if (dev->dual_ws) {
		dws = hws;
		dws->grp_base = grp_base;
		dws->fc_mem = (uint64_t *)dev->fc_iova;
		dws->xaq_lmt = dev->xaq_lmt;

		plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
		plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
	} else {
		ws = hws;
		ws->grp_base = grp_base;
		ws->fc_mem = (uint64_t *)dev->fc_iova;
		ws->xaq_lmt = dev->xaq_lmt;

		plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
	}
}

static void
cn9k_sso_hws_release(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint16_t i;

	if (dev->dual_ws) {
		dws = hws;
		for (i = 0; i < dev->nb_event_queues; i++) {
			roc_sso_hws_unlink(&dev->sso,
					   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), &i, 1);
			roc_sso_hws_unlink(&dev->sso,
					   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1), &i, 1);
		}
		memset(dws, 0, sizeof(*dws));
	} else {
		ws = hws;
		for (i = 0; i < dev->nb_event_queues; i++)
			roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1);
		memset(ws, 0, sizeof(*ws));
	}
}

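/*
 * Drain a hardware group during quiesce: keep issuing GET_WORK against the
 * group until its AQ/CQ/DS counters read zero, hand every received event to
 * the supplied callback and flush any tag still held by the workslot.
 */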
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
			  cnxk_handle_event_t fn, void *arg)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t cq_ds_cnt = 1;
	uint64_t aq_cnt = 1;
	uint64_t ds_cnt = 1;
	struct rte_event ev;
	uintptr_t ws_base;
	uint64_t val, req;

	plt_write64(0, base + SSO_LF_GGRP_QCTL);

	req = queue_id;	    /* GGRP ID */
	req |= BIT_ULL(18); /* Grouped */
	req |= BIT_ULL(16); /* WAIT */

	aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
	ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
	cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
	cq_ds_cnt &= 0x3FFF3FFF0000;

	if (dev->dual_ws) {
		dws = hws;
		ws_base = dws->base[0];
	} else {
		ws = hws;
		ws_base = ws->base;
	}

	while (aq_cnt || cq_ds_cnt || ds_cnt) {
		plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
		cn9k_sso_hws_get_work_empty(ws_base, &ev);
		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
		if (ev.sched_type != SSO_TT_EMPTY)
			cnxk_sso_hws_swtag_flush(
				ws_base + SSOW_LF_GWS_TAG,
				ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
		do {
			val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
		} while (val & BIT_ULL(56));
		aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
		ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
		cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x3FFF3FFF0000;
	}

	plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}

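/*
 * Quiesce a workslot: wait for any in-flight GET_WORK/SWTAG/desched to
 * complete, untag a pending ATOMIC/ORDERED tag and deschedule whatever work
 * the slot still holds.
 */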
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t pend_state;
	uint8_t pend_tt;
	uintptr_t base;
	uint64_t tag;
	uint8_t i;

	dws = hws;
	ws = hws;
	for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
		base = dev->dual_ws ? dws->base[i] : ws->base;
		/* Wait till getwork/swtp/waitw/desched completes. */
		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
				       BIT_ULL(56)));

		tag = plt_read64(base + SSOW_LF_GWS_TAG);
		pend_tt = (tag >> 32) & 0x3;
		if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
			if (pend_tt == SSO_TT_ATOMIC ||
			    pend_tt == SSO_TT_ORDERED)
				cnxk_sso_hws_swtag_untag(
					base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
			plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
		}

		/* Wait for desched to complete. */
		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & BIT_ULL(58));
	}
}

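/*
 * Dual workslot mode consumes two HWS per event port, so only half of the
 * available HWS can be exposed as ports; queues are capped at
 * RTE_EVENT_MAX_QUEUES_PER_DEV.
 */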
void
cn9k_sso_set_rsrc(void *arg)
{
	struct cnxk_sso_evdev *dev = arg;

	if (dev->dual_ws)
		dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
	else
		dev->max_event_ports = dev->sso.max_hws;
	dev->max_event_queues =
		dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
			      RTE_EVENT_MAX_QUEUES_PER_DEV :
			      dev->sso.max_hwgrp;
}

static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
	struct cnxk_sso_evdev *dev = arg;

	if (dev->dual_ws)
		hws = hws * CN9K_DUAL_WS_NB_WS;

	return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

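/*
 * Grow each port's workslot allocation (kept behind its cnxk cookie) so it
 * can hold the current Tx adapter data and copy that data in before the
 * ports are used on the fastpath.
 */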
static int
cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	if (dev->tx_adptr_data == NULL)
		return 0;

	for (i = 0; i < dev->nb_event_ports; i++) {
		if (dev->dual_ws) {
			struct cn9k_sso_hws_dual *dws =
				event_dev->data->ports[i];
			void *ws_cookie;

			ws_cookie = cnxk_sso_hws_get_cookie(dws);
			ws_cookie = rte_realloc_socket(
				ws_cookie,
				sizeof(struct cnxk_sso_hws_cookie) +
					sizeof(struct cn9k_sso_hws_dual) +
					dev->tx_adptr_data_sz,
				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
			if (ws_cookie == NULL)
				return -ENOMEM;
			dws = RTE_PTR_ADD(ws_cookie,
					  sizeof(struct cnxk_sso_hws_cookie));
			memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
			       dev->tx_adptr_data_sz);
			event_dev->data->ports[i] = dws;
		} else {
			struct cn9k_sso_hws *ws = event_dev->data->ports[i];
			void *ws_cookie;

			ws_cookie = cnxk_sso_hws_get_cookie(ws);
			ws_cookie = rte_realloc_socket(
				ws_cookie,
				sizeof(struct cnxk_sso_hws_cookie) +
					sizeof(struct cn9k_sso_hws_dual) +
					dev->tx_adptr_data_sz,
				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
			if (ws_cookie == NULL)
				return -ENOMEM;
			ws = RTE_PTR_ADD(ws_cookie,
					 sizeof(struct cnxk_sso_hws_cookie));
			memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
			       dev->tx_adptr_data_sz);
			event_dev->data->ports[i] = ws;
		}
	}
	rte_mb();

	return 0;
}

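/*
 * Select the eventdev fastpath handlers. Dequeue, enqueue and Tx adapter
 * functions are picked from per-offload-flag tables (single and dual
 * workslot variants, with and without multi-segment, timeout and crypto
 * adapter support) according to the negotiated Rx/Tx offloads.
 */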
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	/* Single WS modes */
	const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Dual WS modes */
	const event_dequeue_t sso_hws_dual_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Tx modes */
	const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn9k_sso_hws_tx_adptr_enq_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	event_dev->enqueue = cn9k_sso_hws_enq;
	event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
	event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
	event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				      sso_hws_deq_seg_burst);
		if (dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_seg_burst);
		}
		if (dev->is_ca_internal_port) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_ca_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_ca_seg_burst);
		}

		if (dev->is_ca_internal_port && dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo_ca_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_ca_seg_burst);
		}
	} else {
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				      sso_hws_deq_burst);
		if (dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_burst);
		}
		if (dev->is_ca_internal_port) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_ca);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_ca_burst);
		}

		if (dev->is_ca_internal_port && dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo_ca);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_ca_burst);
		}
	}
	event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;

	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				      sso_hws_tx_adptr_enq_seg);
	else
		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				      sso_hws_tx_adptr_enq);

	if (dev->dual_ws) {
		event_dev->enqueue = cn9k_sso_hws_dual_enq;
		event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
		event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
		event_dev->enqueue_forward_burst =
			cn9k_sso_hws_dual_enq_fwd_burst;
		event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;

		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_dual_deq_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_dual_deq_seg_burst);
			if (dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_tmo_seg);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_seg_burst);
			}
			if (dev->is_ca_internal_port) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_ca_seg);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_ca_seg_burst);
			}
			if (dev->is_ca_internal_port && dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue,
					sso_hws_dual_deq_tmo_ca_seg);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_ca_seg_burst);
			}
		} else {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_dual_deq);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_dual_deq_burst);
			if (dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_tmo);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_burst);
			}
			if (dev->is_ca_internal_port) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_ca);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_ca_burst);
			}
			if (dev->is_ca_internal_port && dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_tmo_ca);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_ca_burst);
			}
		}

		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
			CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
					      sso_hws_dual_tx_adptr_enq_seg);
		else
			CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
					      sso_hws_dual_tx_adptr_enq);
	}

	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
	rte_mb();
}

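/*
 * Allocate and initialise the per-port workslot memory. The first cache line
 * holds the common cnxk cookie, so the returned pointer is advanced past it;
 * gw_wdata pre-builds the GET_WORK request word, with the WAIT bit set when a
 * dequeue timeout is configured.
 */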
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	void *data;

	if (dev->dual_ws) {
		dws = rte_zmalloc("cn9k_dual_ws",
				  sizeof(struct cn9k_sso_hws_dual) +
					  RTE_CACHE_LINE_SIZE,
				  RTE_CACHE_LINE_SIZE);
		if (dws == NULL) {
			plt_err("Failed to alloc memory for port=%d", port_id);
			return NULL;
		}

		dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
		dws->base[0] = roc_sso_hws_base_get(
			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
		dws->base[1] = roc_sso_hws_base_get(
			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
		dws->hws_id = port_id;
		dws->swtag_req = 0;
		dws->vws = 0;
		if (dev->deq_tmo_ns)
			dws->gw_wdata = BIT_ULL(16);
		dws->gw_wdata |= 1;

		data = dws;
	} else {
		/* Allocate event port memory */
		ws = rte_zmalloc("cn9k_ws",
				 sizeof(struct cn9k_sso_hws) +
					 RTE_CACHE_LINE_SIZE,
				 RTE_CACHE_LINE_SIZE);
		if (ws == NULL) {
			plt_err("Failed to alloc memory for port=%d", port_id);
			return NULL;
		}

		/* First cache line is reserved for cookie */
		ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
		ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
		ws->hws_id = port_id;
		ws->swtag_req = 0;
		if (dev->deq_tmo_ns)
			ws->gw_wdata = BIT_ULL(16);
		ws->gw_wdata |= 1;

		data = ws;
	}

	return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
		  struct rte_event_dev_info *dev_info)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
	cnxk_sso_info_get(dev, dev_info);
}

static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0) {
		plt_err("Invalid event device configuration");
		return -EINVAL;
	}

	rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to initialize SSO resources");
		return -ENODEV;
	}

	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
				    cn9k_sso_hws_setup);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

	dev->configured = 1;
	rte_mb();

	return 0;
cnxk_rsrc_fini:
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
	return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
		    const struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(port_conf);
	return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
	struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
	struct cnxk_sso_evdev *dev;

	if (port == NULL)
		return;

	dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
	if (!gws_cookie->configured)
		goto free;

	cn9k_sso_hws_release(dev, port);
	memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
	rte_free(gws_cookie);
}

static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
		   const uint8_t queues[], const uint8_t priorities[],
		   uint16_t nb_links)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_links];
	uint16_t link;

	RTE_SET_USED(priorities);
	for (link = 0; link < nb_links; link++)
		hwgrp_ids[link] = queues[link];
	nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

	return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		     uint8_t queues[], uint16_t nb_unlinks)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_unlinks];
	uint16_t unlink;

	for (unlink = 0; unlink < nb_unlinks; unlink++)
		hwgrp_ids[unlink] = queues[unlink];
	nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

	return (int)nb_unlinks;
}

static int
cn9k_sso_start(struct rte_eventdev *event_dev)
{
	int rc;

	rc = cn9k_sso_updt_tx_adptr_data(event_dev);
	if (rc < 0)
		return rc;

	rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
			    cn9k_sso_hws_flush_events);
	if (rc < 0)
		return rc;

	cn9k_sso_fp_fns_set(event_dev);

	return rc;
}

static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
	cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}

static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
	return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}

static int
cn9k_sso_selftest(void)
{
	return cnxk_sso_selftest(RTE_STR(event_cn9k));
}

static int
cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int rc;

	RTE_SET_USED(event_dev);
	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
	if (rc)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;

	return 0;
}

static void
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
		      void *tstmp_info)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		if (dev->dual_ws) {
			struct cn9k_sso_hws_dual *dws =
				event_dev->data->ports[i];
			dws->lookup_mem = lookup_mem;
			dws->tstamp = tstmp_info;
		} else {
			struct cn9k_sso_hws *ws = event_dev->data->ports[i];
			ws->lookup_mem = lookup_mem;
			ws->tstamp = tstmp_info;
		}
	}
}

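/*
 * After registering the queue with the common cnxk layer, propagate the
 * ethdev's lookup memory and timestamp info (taken from Rx queue 0, as they
 * are shared by all Rx queues) to every event port and refresh the fastpath
 * handlers.
 */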
static int
cn9k_sso_rx_adapter_queue_add(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct cn9k_eth_rxq *rxq;
	void *lookup_mem;
	void *tstmp_info;
	int rc;

	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
	if (rc)
		return -EINVAL;

	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
					   queue_conf);
	if (rc)
		return -EINVAL;

	rxq = eth_dev->data->rx_queues[0];
	lookup_mem = rxq->lookup_mem;
	tstmp_info = rxq->tstamp;
	cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}

static int
cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t rx_queue_id)
{
	int rc;

	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
	if (rc)
		return -EINVAL;

	return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;

	RTE_SET_USED(dev);
	ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

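/*
 * Recompute the Tx queue's SQB flow-control budget: set aside roughly one
 * SQE per SQB (used for SQB chaining), subtract buffers consumed by inline
 * outbound security descriptors, and keep ~70% of the remainder as the
 * adjusted limit used by the event Tx fastpath.
 */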
static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cn9k_eth_txq *txq;
	struct roc_nix_sq *sq;
	int i;

	if (tx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cn9k_sso_txq_fc_update(eth_dev, i);
	} else {
		uint16_t sqes_per_sqb;

		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
		sq->nb_sqb_bufs_adj =
			sq->nb_sqb_bufs -
			RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
				sqes_per_sqb;
		if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
						(sqes_per_sqb - 1));
		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
	}
}

static int
cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint64_t tx_offloads;
	int rc;

	RTE_SET_USED(id);
	rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
	if (rc < 0)
		return rc;

	/* Can't enable tstamp if all the ports don't have it enabled. */
	tx_offloads = cnxk_eth_dev->tx_offload_flags;
	if (dev->tx_adptr_configured) {
		uint8_t tstmp_req = !!(tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
		uint8_t tstmp_ena =
			!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);

		if (tstmp_ena && !tstmp_req)
			dev->tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
		else if (!tstmp_ena && tstmp_req)
			tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
	}

	dev->tx_offloads |= tx_offloads;
	cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);
	rc = cn9k_sso_updt_tx_adptr_data(event_dev);
	if (rc < 0)
		return rc;
	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
	dev->tx_adptr_configured = 1;

	return 0;
}

static int
cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	int rc;

	RTE_SET_USED(id);
	rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
	if (rc < 0)
		return rc;
	cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);
	return cn9k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
			     const struct rte_cryptodev *cdev, uint32_t *caps)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

	*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

	return 0;
}

static int
cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
			   const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id, const struct rte_event *event)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(event);

	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

	dev->is_ca_internal_port = 1;
	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
			   const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct eventdev_ops cn9k_sso_dev_ops = {
	.dev_infos_get = cn9k_sso_info_get,
	.dev_configure = cn9k_sso_dev_configure,
	.queue_def_conf = cnxk_sso_queue_def_conf,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn9k_sso_port_setup,
	.port_release = cn9k_sso_port_release,
	.port_link = cn9k_sso_port_link,
	.port_unlink = cn9k_sso_port_unlink,
	.timeout_ticks = cnxk_sso_timeout_ticks,

	.eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

	.eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
	.eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

	.timer_adapter_caps_get = cnxk_tim_caps_get,

	.crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
	.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
	.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,

	.dump = cnxk_sso_dump,
	.dev_start = cn9k_sso_start,
	.dev_stop = cn9k_sso_stop,
	.dev_close = cn9k_sso_close,
	.dev_selftest = cn9k_sso_selftest,
};

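/*
 * CN9K SoCs use 128-byte cache lines; the per-port structures are sized and
 * aligned for that, so bail out if the PMD was built with a different
 * RTE_CACHE_LINE_SIZE.
 */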
static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	if (RTE_CACHE_LINE_SIZE != 128) {
		plt_err("Driver not compiled for CN9K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc < 0) {
		plt_err("Failed to initialize platform model");
		return rc;
	}

	event_dev->dev_ops = &cn9k_sso_dev_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		cn9k_sso_fp_fns_set(event_dev);
		return 0;
	}

	rc = cnxk_sso_init(event_dev);
	if (rc < 0)
		return rc;

	cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
	if (!dev->max_event_ports || !dev->max_event_queues) {
		plt_err("Not enough eventdev resource queues=%d ports=%d",
			dev->max_event_queues, dev->max_event_ports);
		cnxk_sso_fini(event_dev);
		return -ENODEV;
	}

	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		    event_dev->data->name, dev->max_event_queues,
		    dev->max_event_ports);

	return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(
		pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn9k_pci_sso = {
	.id_table = cn9k_pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cn9k_sso_probe,
	.remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
			      CNXK_SSO_GGRP_QOS "=<string>"
			      CNXK_SSO_FORCE_BP "=1"
			      CN9K_SSO_SINGLE_WS "=1"
			      CNXK_TIM_DISABLE_NPA "=1"
			      CNXK_TIM_CHNK_SLOTS "=<int>"
			      CNXK_TIM_RINGS_LMT "=<int>"
			      CNXK_TIM_STATS_ENA "=1");