/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_evdev.h"

int
otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int rc;

	RTE_SET_USED(event_dev);
	rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
	if (rc)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ;

	return 0;
}

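/*
 * Switch an ethdev Rx queue over to SSO delivery: the NIX CQ backing the
 * queue is disabled first, then the RQ context is rewritten so that received
 * packets are handed to the SSO as work entries tagged with the requested
 * group and tag type.
 */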
static inline int
sso_rxq_enable(struct otx2_eth_dev *dev, uint16_t qid, uint8_t tt, uint8_t ggrp,
	       uint16_t eth_port_id)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;
	int rc;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->cq.ena = 0;
	aq->cq.caching = 0;

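	/*
	 * The AQ WRITE op only updates context fields whose corresponding
	 * mask bits are set, so clear the whole mask and enable just the
	 * fields written above.
	 */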
	otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
	aq->cq_mask.ena = ~(aq->cq_mask.ena);
	aq->cq_mask.caching = ~(aq->cq_mask.caching);

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to disable cq context");
		goto fail;
	}

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->rq.sso_ena = 1;
	aq->rq.sso_tt = tt;
	aq->rq.sso_grp = ggrp;
	aq->rq.ena_wqwd = 1;
	/* Mbuf header generation:
	 * > FIRST_SKIP is a superset of WQE_SKIP; don't modify first skip as
	 * it already has data related to mbuf size, headroom, private area.
	 * > Using WQE_SKIP we can directly assign
	 *		mbuf = wqe - sizeof(struct rte_mbuf);
	 * so that the mbuf header will not have unpredictable values, while
	 * headroom and private data start at the beginning of wqe_data.
	 */
	aq->rq.wqe_skip = 1;
	aq->rq.wqe_caching = 1;
	aq->rq.spb_ena = 0;
	aq->rq.flow_tagw = 20; /* 20-bits */

	/* Flow tag calculation:
	 *
	 * rq_tag<31:24> = good/bad_utag<7:0>;
	 * rq_tag<23:0>  = [ltag]
	 *
	 * flow_tag_mask<31:0> = (1 << flow_tagw) - 1; i.e. bits <19:0> set
	 * tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
	 *
	 * Setup:
	 * ltag<23:0> = (eth_port_id & 0xF) << 20;
	 * good/bad_utag<7:0> =
	 *	((eth_port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4);
	 *
	 * TAG<31:0> on getwork = <31:28>(RTE_EVENT_TYPE_ETHDEV) |
	 *				<27:20> (eth_port_id) | <19:0> [TAG]
	 */

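	/*
	 * For example, eth_port_id 0x12 gives ltag = 0x2 << 20 and
	 * good_utag = 0x1 | (RTE_EVENT_TYPE_ETHDEV << 4), so the tag seen on
	 * getwork carries the event type in <31:28>, 0x12 in <27:20> and the
	 * 20-bit flow tag in <19:0>.
	 */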
	aq->rq.ltag = (eth_port_id & 0xF) << 20;
	aq->rq.good_utag = ((eth_port_id >> 4) & 0xF) |
				(RTE_EVENT_TYPE_ETHDEV << 4);
	aq->rq.bad_utag = aq->rq.good_utag;

	aq->rq.ena = 0;		 /* Don't enable RQ yet */
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */

	otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
	/* mask the bits to write. */
	aq->rq_mask.sso_ena      = ~(aq->rq_mask.sso_ena);
	aq->rq_mask.sso_tt       = ~(aq->rq_mask.sso_tt);
	aq->rq_mask.sso_grp      = ~(aq->rq_mask.sso_grp);
	aq->rq_mask.ena_wqwd     = ~(aq->rq_mask.ena_wqwd);
	aq->rq_mask.wqe_skip     = ~(aq->rq_mask.wqe_skip);
	aq->rq_mask.wqe_caching  = ~(aq->rq_mask.wqe_caching);
	aq->rq_mask.spb_ena      = ~(aq->rq_mask.spb_ena);
	aq->rq_mask.flow_tagw    = ~(aq->rq_mask.flow_tagw);
	aq->rq_mask.ltag         = ~(aq->rq_mask.ltag);
	aq->rq_mask.good_utag    = ~(aq->rq_mask.good_utag);
	aq->rq_mask.bad_utag     = ~(aq->rq_mask.bad_utag);
	aq->rq_mask.ena          = ~(aq->rq_mask.ena);
	aq->rq_mask.pb_caching   = ~(aq->rq_mask.pb_caching);
	aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to init rx adapter context");
		goto fail;
	}

	return 0;
fail:
	return rc;
}

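/*
 * Undo sso_rxq_enable(): re-enable the NIX CQ and restore the RQ context to
 * its default (non-SSO) settings so the queue falls back to the regular
 * poll-mode Rx path.
 */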
static inline int
sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;
	int rc;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->cq.ena = 1;
	aq->cq.caching = 1;

	otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
	aq->cq_mask.ena = ~(aq->cq_mask.ena);
	aq->cq_mask.caching = ~(aq->cq_mask.caching);

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to enable cq context");
		goto fail;
	}

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->rq.sso_ena = 0;
	aq->rq.sso_tt = SSO_TT_UNTAGGED;
	aq->rq.sso_grp = 0;
	aq->rq.ena_wqwd = 0;
	aq->rq.wqe_caching = 0;
	aq->rq.wqe_skip = 0;
	aq->rq.spb_ena = 0;
	aq->rq.flow_tagw = 0x20;
	aq->rq.ltag = 0;
	aq->rq.good_utag = 0;
	aq->rq.bad_utag = 0;
	aq->rq.ena = 1;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */

	otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
	/* mask the bits to write. */
	aq->rq_mask.sso_ena      = ~(aq->rq_mask.sso_ena);
	aq->rq_mask.sso_tt       = ~(aq->rq_mask.sso_tt);
	aq->rq_mask.sso_grp      = ~(aq->rq_mask.sso_grp);
	aq->rq_mask.ena_wqwd     = ~(aq->rq_mask.ena_wqwd);
	aq->rq_mask.wqe_caching  = ~(aq->rq_mask.wqe_caching);
	aq->rq_mask.wqe_skip     = ~(aq->rq_mask.wqe_skip);
	aq->rq_mask.spb_ena      = ~(aq->rq_mask.spb_ena);
	aq->rq_mask.flow_tagw    = ~(aq->rq_mask.flow_tagw);
	aq->rq_mask.ltag         = ~(aq->rq_mask.ltag);
	aq->rq_mask.good_utag    = ~(aq->rq_mask.good_utag);
	aq->rq_mask.bad_utag     = ~(aq->rq_mask.bad_utag);
	aq->rq_mask.ena          = ~(aq->rq_mask.ena);
	aq->rq_mask.pb_caching   = ~(aq->rq_mask.pb_caching);
	aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to clear rx adapter context");
		goto fail;
	}

	return 0;
fail:
	return rc;
}

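/*
 * Track how many events the SSO may need to hold in flight for a given
 * producer and accumulate the result in adptr_xae_cnt: the full mempool size
 * for an ethdev Rx queue, or nb_timers for a timer adapter ring. The count is
 * consumed by sso_xae_reconfigure() when resizing the XAQ/XAE resources.
 */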
void
sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
{
	int i;

	switch (event_type) {
	case RTE_EVENT_TYPE_ETHDEV:
	{
		struct otx2_eth_rxq *rxq = data;
		uint64_t *old_ptr;

		for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
			if ((uint64_t)rxq->pool == dev->rx_adptr_pools[i])
				return;
		}

		dev->rx_adptr_pool_cnt++;
		old_ptr = dev->rx_adptr_pools;
		dev->rx_adptr_pools = rte_realloc(dev->rx_adptr_pools,
						  sizeof(uint64_t) *
						  dev->rx_adptr_pool_cnt, 0);
		if (dev->rx_adptr_pools == NULL) {
			/* Can't track this pool; still account for its XAEs */
			dev->adptr_xae_cnt += rxq->pool->size;
			dev->rx_adptr_pools = old_ptr;
			dev->rx_adptr_pool_cnt--;
			return;
		}
		dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
			(uint64_t)rxq->pool;

		dev->adptr_xae_cnt += rxq->pool->size;
		break;
	}
	case RTE_EVENT_TYPE_TIMER:
	{
		struct otx2_tim_ring *timr = data;
		uint16_t *old_ring_ptr;
		uint64_t *old_sz_ptr;

		for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
			if (timr->ring_id != dev->timer_adptr_rings[i])
				continue;
			if (timr->nb_timers == dev->timer_adptr_sz[i])
				return;
			dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_sz[i] = timr->nb_timers;

			return;
		}

		dev->tim_adptr_ring_cnt++;
		old_ring_ptr = dev->timer_adptr_rings;
		old_sz_ptr = dev->timer_adptr_sz;

		dev->timer_adptr_rings = rte_realloc(dev->timer_adptr_rings,
						     sizeof(uint16_t) *
						     dev->tim_adptr_ring_cnt,
						     0);
		if (dev->timer_adptr_rings == NULL) {
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_rings = old_ring_ptr;
			dev->tim_adptr_ring_cnt--;
			return;
		}

		dev->timer_adptr_sz = rte_realloc(dev->timer_adptr_sz,
						  sizeof(uint64_t) *
						  dev->tim_adptr_ring_cnt,
						  0);

		if (dev->timer_adptr_sz == NULL) {
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_sz = old_sz_ptr;
			dev->tim_adptr_ring_cnt--;
			return;
		}

		dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
			timr->ring_id;
		dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
			timr->nb_timers;

		dev->adptr_xae_cnt += timr->nb_timers;
		break;
	}
	default:
		break;
	}
}

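/*
 * Propagate the Rx queue's lookup memory (packet type and error code
 * translation tables used by the Rx event fastpath) to every event port,
 * covering both single and dual workslot modes.
 */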
static inline void
sso_updt_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	int i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws = event_dev->data->ports[i];

			ws->lookup_mem = lookup_mem;
		} else {
			struct otx2_ssogws *ws = event_dev->data->ports[i];

			ws->lookup_mem = lookup_mem;
		}
	}
}

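/*
 * Bind one (or all, when rx_queue_id is -1) Rx queues of an otx2 ethdev to
 * the Rx adapter: grow the XAE count for the queue's mempool, reconfigure
 * the SSO if needed, point the RQ at the requested event queue and schedule
 * type, and pick up the ethdev's Rx offload flags for the event fastpath.
 */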
int
otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint16_t port = eth_dev->data->port_id;
	struct otx2_eth_rxq *rxq;
	int i, rc;

	rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
	if (rc)
		return -EINVAL;

	if (rx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
			rxq = eth_dev->data->rx_queues[i];
			sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
			rc = sso_xae_reconfigure((struct rte_eventdev *)
						 (uintptr_t)event_dev);
			rc |= sso_rxq_enable(otx2_eth_dev, i,
					     queue_conf->ev.sched_type,
					     queue_conf->ev.queue_id, port);
		}
		rxq = eth_dev->data->rx_queues[0];
		sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
	} else {
		rxq = eth_dev->data->rx_queues[rx_queue_id];
		sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
		rc = sso_xae_reconfigure((struct rte_eventdev *)
					 (uintptr_t)event_dev);
		rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
				     queue_conf->ev.sched_type,
				     queue_conf->ev.queue_id, port);
		sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
	}

	if (rc < 0) {
		otx2_err("Failed to configure Rx adapter port=%d, q=%d", port,
			 queue_conf->ev.queue_id);
		return rc;
	}

	dev->rx_offloads |= otx2_eth_dev->rx_offload_flags;
	dev->tstamp = &otx2_eth_dev->tstamp;
	sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}

int
otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t rx_queue_id)
{
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	int i, rc;

	RTE_SET_USED(event_dev);
	rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
	if (rc)
		return -EINVAL;

	if (rx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
			rc = sso_rxq_disable(dev, i);
	} else {
		rc = sso_rxq_disable(dev, (uint16_t)rx_queue_id);
	}

	if (rc < 0)
		otx2_err("Failed to clear Rx adapter config port=%d, q=%d",
			 eth_dev->data->port_id, rx_queue_id);

	return rc;
}

int
otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
			  const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

int
otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
			 const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

int
otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;

	RTE_SET_USED(dev);
	ret = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

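/*
 * Adjust the NPA aura limit of a Tx queue's SQB (send queue buffer) pool.
 * While the queue is managed by the Tx adapter the limit is clamped to
 * OTX2_SSO_SQB_LIMIT; it is restored to the queue's own nb_sqb_bufs when the
 * queue is removed from the adapter.
 */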
static int
sso_sqb_aura_limit_edit(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
{
	struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
	struct npa_aq_enq_req *aura_req;

	aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
	aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;

	aura_req->aura.limit = nb_sqb_bufs;
	aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);

	return otx2_mbox_process(npa_lf->mbox);
}

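/*
 * Record the Tx queue pointer in every event port's tx_adptr_data table.
 * Each workslot structure is preceded by a cookie cache line and followed by
 * a flat [eth_port_id][tx_queue_id] array of txq pointers, so the workslot
 * is reallocated here to cover the highest Ethernet port id seen so far.
 */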
static int
sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
		      uint16_t eth_port_id, uint16_t tx_queue_id,
		      struct otx2_eth_txq *txq)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	int i;

	for (i = 0; i < event_dev->data->nb_ports; i++) {
		dev->max_port_id = RTE_MAX(dev->max_port_id, eth_port_id);
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *old_dws;
			struct otx2_ssogws_dual *dws;

			old_dws = event_dev->data->ports[i];
			dws = rte_realloc_socket(ssogws_get_cookie(old_dws),
						 sizeof(struct otx2_ssogws_dual)
						 + RTE_CACHE_LINE_SIZE +
						 (sizeof(uint64_t) *
						    (dev->max_port_id + 1) *
						    RTE_MAX_QUEUES_PER_PORT),
						 RTE_CACHE_LINE_SIZE,
						 event_dev->data->socket_id);
			if (dws == NULL)
				return -ENOMEM;

			/* First cache line is reserved for cookie */
			dws = (struct otx2_ssogws_dual *)
				((uint8_t *)dws + RTE_CACHE_LINE_SIZE);

			((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
			 )&dws->tx_adptr_data)[eth_port_id][tx_queue_id] =
				(uint64_t)txq;
			event_dev->data->ports[i] = dws;
		} else {
			struct otx2_ssogws *old_ws;
			struct otx2_ssogws *ws;

			old_ws = event_dev->data->ports[i];
			ws = rte_realloc_socket(ssogws_get_cookie(old_ws),
						sizeof(struct otx2_ssogws) +
						RTE_CACHE_LINE_SIZE +
						(sizeof(uint64_t) *
						 (dev->max_port_id + 1) *
						 RTE_MAX_QUEUES_PER_PORT),
						RTE_CACHE_LINE_SIZE,
						event_dev->data->socket_id);
			if (ws == NULL)
				return -ENOMEM;

			/* First cache line is reserved for cookie */
			ws = (struct otx2_ssogws *)
				((uint8_t *)ws + RTE_CACHE_LINE_SIZE);

			((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
			 )&ws->tx_adptr_data)[eth_port_id][tx_queue_id] =
				(uint64_t)txq;
			event_dev->data->ports[i] = ws;
		}
	}

	return 0;
}

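/*
 * Add one (or all, when tx_queue_id is -1) Tx queues of an otx2 ethdev to
 * the Tx adapter: cap the SQB aura and publish the txq pointer to every
 * event port so event-driven Tx can submit directly to the queue.
 */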
int
otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_eth_txq *txq;
	int i, ret;

	RTE_SET_USED(id);
	if (tx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
			txq = eth_dev->data->tx_queues[i];
			sso_sqb_aura_limit_edit(txq->sqb_pool,
						OTX2_SSO_SQB_LIMIT);
			ret = sso_add_tx_queue_data(event_dev,
						    eth_dev->data->port_id, i,
						    txq);
			if (ret < 0)
				return ret;
		}
	} else {
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
		ret = sso_add_tx_queue_data(event_dev, eth_dev->data->port_id,
					    tx_queue_id, txq);
		if (ret < 0)
			return ret;
	}

	dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;
	sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}

int
otx2_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	struct otx2_eth_txq *txq;
	int i;

	RTE_SET_USED(id);
	RTE_SET_USED(event_dev);
	if (tx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
			txq = eth_dev->data->tx_queues[i];
			sso_sqb_aura_limit_edit(txq->sqb_pool,
						txq->nb_sqb_bufs);
		}
	} else {
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sso_sqb_aura_limit_edit(txq->sqb_pool, txq->nb_sqb_bufs);
	}

	return 0;
}