xref: /dpdk/drivers/net/cnxk/cnxk_ethdev.c (revision 29fd052d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <cnxk_ethdev.h>
5 
6 #include <rte_eventdev.h>
7 
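/* Helpers below derive the advertised capabilities; offloads that the
 * underlying NIX cannot service (e.g. Rx timestamping on VF/SDP or with the
 * HiGig switch header type) are masked out.
 */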
8 static inline uint64_t
9 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
10 {
11 	uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
12 
13 	if (roc_nix_is_vf_or_sdp(&dev->nix) ||
14 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
15 		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
16 
17 	return capa;
18 }
19 
20 static inline uint64_t
21 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
22 {
23 	RTE_SET_USED(dev);
24 	return CNXK_NIX_TX_OFFLOAD_CAPA;
25 }
26 
27 static inline uint32_t
28 nix_get_speed_capa(struct cnxk_eth_dev *dev)
29 {
30 	uint32_t speed_capa;
31 
32 	/* Auto negotiation disabled */
33 	speed_capa = RTE_ETH_LINK_SPEED_FIXED;
34 	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
35 		speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
36 			      RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
37 			      RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
38 	}
39 
40 	return speed_capa;
41 }
42 
43 int
44 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
45 {
46 	struct roc_nix *nix = &dev->nix;
47 
48 	if (dev->inb.inl_dev == use_inl_dev)
49 		return 0;
50 
51 	plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
52 		    dev->inb.nb_sess, !!dev->inb.inl_dev);
53 
54 	/* Change the mode */
55 	dev->inb.inl_dev = use_inl_dev;
56 
57 	/* Update RoC for NPC rule insertion */
58 	roc_nix_inb_mode_set(nix, use_inl_dev);
59 
60 	/* Setup lookup mem */
61 	return cnxk_nix_lookup_mem_sa_base_set(dev);
62 }
63 
64 static int
65 nix_security_setup(struct cnxk_eth_dev *dev)
66 {
67 	struct roc_nix *nix = &dev->nix;
68 	int i, rc = 0;
69 
70 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
71 		/* Setup Inline Inbound */
72 		rc = roc_nix_inl_inb_init(nix);
73 		if (rc) {
74 			plt_err("Failed to initialize nix inline inb, rc=%d",
75 				rc);
76 			return rc;
77 		}
78 
79 		/* By default, use the inline device for poll mode.
80 		 * This is overridden when event mode RQs are set up.
81 		 */
82 		cnxk_nix_inb_mode_set(dev, true);
83 
84 		/* Allocate memory to be used as dptr for CPT ucode
85 		 * WRITE_SA op.
86 		 */
87 		dev->inb.sa_dptr =
88 			plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
89 		if (!dev->inb.sa_dptr) {
90 			plt_err("Couldn't allocate memory for SA dptr");
91 			rc = -ENOMEM;
92 			goto cleanup;
93 		}
94 	}
95 
96 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
97 	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
98 		struct plt_bitmap *bmap;
99 		size_t bmap_sz;
100 		void *mem;
101 
102 		/* Setup enough descriptors for all tx queues */
103 		nix->outb_nb_desc = dev->outb.nb_desc;
104 		nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
105 
106 		/* Setup Inline Outbound */
107 		rc = roc_nix_inl_outb_init(nix);
108 		if (rc) {
109 			plt_err("Failed to initialize nix inline outb, rc=%d",
110 				rc);
111 			goto sa_dptr_free;
112 		}
113 
114 		dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
115 
116 		/* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
117 		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
118 			return 0;
119 
120 		/* Allocate memory to be used as dptr for CPT ucode
121 		 * WRITE_SA op.
122 		 */
123 		dev->outb.sa_dptr =
124 			plt_zmalloc(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ, 0);
125 		if (!dev->outb.sa_dptr) {
126 			plt_err("Couldn't allocate memory for SA dptr");
127 			rc = -ENOMEM;
128 			goto sa_dptr_free;
129 		}
130 
131 		rc = -ENOMEM;
132 		/* Allocate a bitmap used to allocate and free SA indexes */
133 		bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
134 		mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
135 		if (mem == NULL) {
136 			plt_err("Outbound SA bmap alloc failed");
137 
138 			rc |= roc_nix_inl_outb_fini(nix);
139 			goto sa_dptr_free;
140 		}
141 
142 		rc = -EIO;
143 		bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
144 		if (!bmap) {
145 			plt_err("Outbound SA bmap init failed");
146 
147 			rc |= roc_nix_inl_outb_fini(nix);
148 			plt_free(mem);
149 			goto sa_dptr_free;
150 		}
151 
152 		for (i = 0; i < dev->outb.max_sa; i++)
153 			plt_bitmap_set(bmap, i);
154 
155 		dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
156 		dev->outb.sa_bmap_mem = mem;
157 		dev->outb.sa_bmap = bmap;
158 	}
159 	return 0;
160 
161 sa_dptr_free:
162 	if (dev->inb.sa_dptr)
163 		plt_free(dev->inb.sa_dptr);
164 	if (dev->outb.sa_dptr)
165 		plt_free(dev->outb.sa_dptr);
166 cleanup:
167 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
168 		rc |= roc_nix_inl_inb_fini(nix);
169 	return rc;
170 }
171 
172 static int
173 nix_meter_fini(struct cnxk_eth_dev *dev)
174 {
175 	struct cnxk_meter_node *next_mtr = NULL;
176 	struct roc_nix_bpf_objs profs = {0};
177 	struct cnxk_meter_node *mtr = NULL;
178 	struct cnxk_mtr *fms = &dev->mtr;
179 	struct roc_nix *nix = &dev->nix;
180 	struct roc_nix_rq *rq;
181 	uint32_t i;
182 	int rc = 0;
183 
184 	RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
185 		for (i = 0; i < mtr->rq_num; i++) {
186 			rq = &dev->rqs[mtr->rq_id[i]];
187 			rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false);
188 		}
189 
190 		profs.level = mtr->level;
191 		profs.count = 1;
192 		profs.ids[0] = mtr->bpf_id;
193 		rc = roc_nix_bpf_free(nix, &profs, 1);
194 
195 		if (rc)
196 			return rc;
197 
198 		TAILQ_REMOVE(fms, mtr, next);
199 		plt_free(mtr);
200 	}
201 	return 0;
202 }
203 
204 static int
205 nix_security_release(struct cnxk_eth_dev *dev)
206 {
207 	struct rte_eth_dev *eth_dev = dev->eth_dev;
208 	struct cnxk_eth_sec_sess *eth_sec, *tvar;
209 	struct roc_nix *nix = &dev->nix;
210 	int rc, ret = 0;
211 
212 	/* Cleanup Inline inbound */
213 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
214 		/* Destroy inbound sessions */
215 		tvar = NULL;
216 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
217 			cnxk_eth_sec_ops.session_destroy(eth_dev,
218 							 eth_sec->sess);
219 
220 		/* Clear lookup mem */
221 		cnxk_nix_lookup_mem_sa_base_clear(dev);
222 
223 		rc = roc_nix_inl_inb_fini(nix);
224 		if (rc)
225 			plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
226 		ret |= rc;
227 
228 		if (dev->inb.sa_dptr) {
229 			plt_free(dev->inb.sa_dptr);
230 			dev->inb.sa_dptr = NULL;
231 		}
232 	}
233 
234 	/* Cleanup Inline outbound */
235 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
236 	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
237 		/* Destroy outbound sessions */
238 		tvar = NULL;
239 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
240 			cnxk_eth_sec_ops.session_destroy(eth_dev,
241 							 eth_sec->sess);
242 
243 		rc = roc_nix_inl_outb_fini(nix);
244 		if (rc)
245 			plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
246 		ret |= rc;
247 
248 		plt_bitmap_free(dev->outb.sa_bmap);
249 		plt_free(dev->outb.sa_bmap_mem);
250 		dev->outb.sa_bmap = NULL;
251 		dev->outb.sa_bmap_mem = NULL;
252 		if (dev->outb.sa_dptr) {
253 			plt_free(dev->outb.sa_dptr);
254 			dev->outb.sa_dptr = NULL;
255 		}
256 	}
257 
258 	dev->inb.inl_dev = false;
259 	roc_nix_inb_mode_set(nix, false);
260 	dev->nb_rxq_sso = 0;
261 	dev->inb.nb_sess = 0;
262 	dev->outb.nb_sess = 0;
263 	return ret;
264 }
265 
266 static void
267 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
268 {
269 	struct rte_pktmbuf_pool_private *mbp_priv;
270 	struct rte_eth_dev *eth_dev;
271 	struct cnxk_eth_dev *dev;
272 	uint32_t buffsz;
273 
274 	dev = rxq->dev;
275 	eth_dev = dev->eth_dev;
276 
277 	/* Get rx buffer size */
278 	mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
279 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
280 
281 	if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
282 		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
283 		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
284 	}
285 }
286 
287 int
288 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
289 {
290 	struct rte_eth_dev_data *data = eth_dev->data;
291 	struct cnxk_eth_rxq_sp *rxq;
292 	int rc;
293 
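	/* The slow path queue context is stored immediately before the fast
	 * path area handed to the application, hence the "- 1" below.
	 */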
294 	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
295 	/* Setup scatter mode if needed by jumbo */
296 	nix_enable_mseg_on_jumbo(rxq);
297 
298 	rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
299 	if (rc)
300 		plt_err("Failed to set default MTU size, rc=%d", rc);
301 
302 	return rc;
303 }
304 
305 static int
306 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
307 {
308 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
309 	enum roc_nix_fc_mode fc_mode = ROC_NIX_FC_FULL;
310 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
311 	int rc;
312 
313 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
314 	if (roc_model_is_cn96_ax() &&
315 	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG)
316 		fc_mode = ROC_NIX_FC_TX;
317 
318 	/* By default enable flow control */
319 	rc = roc_nix_fc_mode_set(&dev->nix, fc_mode);
320 	if (rc)
321 		return rc;
322 
323 	fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL :
324 						  RTE_ETH_FC_TX_PAUSE;
325 	return rc;
326 }
327 
328 static int
329 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
330 {
331 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
332 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
333 	struct rte_eth_fc_conf fc_cfg = {0};
334 
335 	if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
336 		return 0;
337 
338 	fc_cfg.mode = fc->mode;
339 
340 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
341 	if (roc_model_is_cn96_ax() &&
342 	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
343 	    (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
344 		fc_cfg.mode =
345 				(fc_cfg.mode == RTE_ETH_FC_FULL ||
346 				fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
347 				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
348 	}
349 
350 	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
351 }
352 
353 uint64_t
354 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
355 {
356 	uint16_t port_id = dev->eth_dev->data->port_id;
357 	struct rte_mbuf mb_def;
358 	uint64_t *tmp;
359 
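	/* The checks below guarantee that data_off, refcnt, nb_segs and port
	 * remain packed into the single 64-bit rearm_data word returned here,
	 * which the Rx path uses to reinitialize mbuf fields in one write.
	 */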
360 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
361 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
362 				 offsetof(struct rte_mbuf, data_off) !=
363 			 2);
364 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
365 				 offsetof(struct rte_mbuf, data_off) !=
366 			 4);
367 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
368 				 offsetof(struct rte_mbuf, data_off) !=
369 			 6);
370 	mb_def.nb_segs = 1;
371 	mb_def.data_off = RTE_PKTMBUF_HEADROOM +
372 			  (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
373 	mb_def.port = port_id;
374 	rte_mbuf_refcnt_set(&mb_def, 1);
375 
376 	/* Prevent compiler reordering: rearm_data covers previous fields */
377 	rte_compiler_barrier();
378 	tmp = (uint64_t *)&mb_def.rearm_data;
379 
380 	return *tmp;
381 }
382 
383 static inline uint8_t
384 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
385 {
386 	/*
387 	 * At most three segments can be supported with W8; choose
388 	 * NIX_MAXSQESZ_W16 for multi-segment offload.
389 	 */
390 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
391 		return NIX_MAXSQESZ_W16;
392 	else
393 		return NIX_MAXSQESZ_W8;
394 }
395 
396 int
397 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
398 			uint16_t nb_desc, uint16_t fp_tx_q_sz,
399 			const struct rte_eth_txconf *tx_conf)
400 {
401 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
402 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
403 	struct cnxk_eth_txq_sp *txq_sp;
404 	struct roc_nix_sq *sq;
405 	size_t txq_sz;
406 	int rc;
407 
408 	/* Free memory prior to re-allocation if needed. */
409 	if (eth_dev->data->tx_queues[qid] != NULL) {
410 		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
411 		dev_ops->tx_queue_release(eth_dev, qid);
412 		eth_dev->data->tx_queues[qid] = NULL;
413 	}
414 
415 	/* When Tx Security offload is enabled, increase tx desc count by
416 	 * max possible outbound desc count.
417 	 */
418 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
419 		nb_desc += dev->outb.nb_desc;
420 
421 	/* Setup ROC SQ */
422 	sq = &dev->sqs[qid];
423 	sq->qid = qid;
424 	sq->nb_desc = nb_desc;
425 	sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
426 
427 	rc = roc_nix_sq_init(&dev->nix, sq);
428 	if (rc) {
429 		plt_err("Failed to init sq=%d, rc=%d", qid, rc);
430 		return rc;
431 	}
432 
433 	rc = -ENOMEM;
434 	txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
435 	txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
436 	if (!txq_sp) {
437 		plt_err("Failed to alloc tx queue mem");
438 		rc |= roc_nix_sq_fini(sq);
439 		return rc;
440 	}
441 
442 	txq_sp->dev = dev;
443 	txq_sp->qid = qid;
444 	txq_sp->qconf.conf.tx = *tx_conf;
445 	/* Queue config should reflect global offloads */
446 	txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
447 	txq_sp->qconf.nb_desc = nb_desc;
448 
449 	plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
450 		    " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
451 		    qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
452 		    sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
453 
454 	/* Store start of fast path area */
455 	eth_dev->data->tx_queues[qid] = txq_sp + 1;
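	/* The fast path area starts right after struct cnxk_eth_txq_sp;
	 * cnxk_eth_txq_to_sp() recovers the slow path context by stepping
	 * back one struct from this pointer.
	 */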
456 	eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
457 	return 0;
458 }
459 
460 static void
461 cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
462 {
463 	void *txq = eth_dev->data->tx_queues[qid];
464 	struct cnxk_eth_txq_sp *txq_sp;
465 	struct cnxk_eth_dev *dev;
466 	struct roc_nix_sq *sq;
467 	int rc;
468 
469 	if (!txq)
470 		return;
471 
472 	txq_sp = cnxk_eth_txq_to_sp(txq);
473 
474 	dev = txq_sp->dev;
475 
476 	plt_nix_dbg("Releasing txq %u", qid);
477 
478 	/* Cleanup ROC SQ */
479 	sq = &dev->sqs[qid];
480 	rc = roc_nix_sq_fini(sq);
481 	if (rc)
482 		plt_err("Failed to cleanup sq, rc=%d", rc);
483 
484 	/* Finally free */
485 	plt_free(txq_sp);
486 }
487 
488 int
489 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
490 			uint16_t nb_desc, uint16_t fp_rx_q_sz,
491 			const struct rte_eth_rxconf *rx_conf,
492 			struct rte_mempool *mp)
493 {
494 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
495 	struct roc_nix *nix = &dev->nix;
496 	struct cnxk_eth_rxq_sp *rxq_sp;
497 	struct rte_mempool_ops *ops;
498 	const char *platform_ops;
499 	struct roc_nix_rq *rq;
500 	struct roc_nix_cq *cq;
501 	uint16_t first_skip;
502 	int rc = -EINVAL;
503 	size_t rxq_sz;
504 
505 	/* Sanity checks */
506 	if (rx_conf->rx_deferred_start == 1) {
507 		plt_err("Deferred Rx start is not supported");
508 		goto fail;
509 	}
510 
511 	platform_ops = rte_mbuf_platform_mempool_ops();
512 	/* This driver needs cnxk_npa mempool ops to work */
513 	ops = rte_mempool_get_ops(mp->ops_index);
514 	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
515 		plt_err("mempool ops should be of cnxk_npa type");
516 		goto fail;
517 	}
518 
519 	if (mp->pool_id == 0) {
520 		plt_err("Invalid pool_id");
521 		goto fail;
522 	}
523 
524 	/* Free memory prior to re-allocation if needed */
525 	if (eth_dev->data->rx_queues[qid] != NULL) {
526 		const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
527 
528 		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
529 		dev_ops->rx_queue_release(eth_dev, qid);
530 		eth_dev->data->rx_queues[qid] = NULL;
531 	}
532 
533 	/* Clamp up the CQ limit to the size of the packet pool aura for LBK
534 	 * to avoid meta packet drops, as LBK does not currently support
535 	 * backpressure.
536 	 */
537 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
538 		uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
539 
540 		/* Use current RQ's aura limit if inl rq is not available */
541 		if (!pkt_pool_limit)
542 			pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
543 		nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
544 	}
545 
546 	/* It's a no-op when the inline device is not used */
547 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY ||
548 	    dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
549 		roc_nix_inl_dev_xaq_realloc(mp->pool_id);
550 
551 	/* Setup ROC CQ */
552 	cq = &dev->cqs[qid];
553 	cq->qid = qid;
554 	cq->nb_desc = nb_desc;
555 	rc = roc_nix_cq_init(&dev->nix, cq);
556 	if (rc) {
557 		plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
558 		goto fail;
559 	}
560 
561 	/* Setup ROC RQ */
562 	rq = &dev->rqs[qid];
563 	rq->qid = qid;
564 	rq->aura_handle = mp->pool_id;
565 	rq->flow_tag_width = 32;
566 	rq->sso_ena = false;
567 
568 	/* Calculate first mbuf skip */
569 	first_skip = (sizeof(struct rte_mbuf));
570 	first_skip += RTE_PKTMBUF_HEADROOM;
571 	first_skip += rte_pktmbuf_priv_size(mp);
572 	rq->first_skip = first_skip;
573 	rq->later_skip = sizeof(struct rte_mbuf);
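	/* first_skip covers the mbuf header, private area and headroom for the
	 * first segment buffer; later segment buffers skip only the mbuf
	 * header.
	 */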
574 	rq->lpb_size = mp->elt_size;
575 	rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
576 
577 	/* Enable inline IPsec on the RQ; it is not used in poll mode */
578 	if (roc_nix_inl_inb_is_enabled(nix))
579 		rq->ipsech_ena = true;
580 
581 	rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
582 	if (rc) {
583 		plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
584 		goto cq_fini;
585 	}
586 
587 	/* Allocate and setup fast path rx queue */
588 	rc = -ENOMEM;
589 	rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
590 	rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
591 	if (!rxq_sp) {
592 		plt_err("Failed to alloc rx queue for rq=%d", qid);
593 		goto rq_fini;
594 	}
595 
596 	/* Setup slow path fields */
597 	rxq_sp->dev = dev;
598 	rxq_sp->qid = qid;
599 	rxq_sp->qconf.conf.rx = *rx_conf;
600 	/* Queue config should reflect global offloads */
601 	rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
602 	rxq_sp->qconf.nb_desc = nb_desc;
603 	rxq_sp->qconf.mp = mp;
604 
605 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
606 		/* Pass a tag mask used to handle error packets in the inline device.
607 		 * The ethdev RQ's tag_mask field will be overwritten later
608 		 * when SSO is set up.
609 		 */
610 		rq->tag_mask =
611 			0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
612 
613 		/* Setup rq reference for inline dev if present */
614 		rc = roc_nix_inl_dev_rq_get(rq);
615 		if (rc)
616 			goto free_mem;
617 	}
618 
619 	plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
620 		    cq->nb_desc);
621 
622 	/* Store start of fast path area */
623 	eth_dev->data->rx_queues[qid] = rxq_sp + 1;
624 	eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
625 
626 	/* Calculate the delta and frequency multiplier between the PTP HI clock
627 	 * and TSC. These are needed to derive the raw clock value from the TSC
628 	 * counter; the read_clock eth op returns the raw clock value.
629 	 */
630 	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
631 		rc = cnxk_nix_tsc_convert(dev);
632 		if (rc) {
633 			plt_err("Failed to calculate delta and freq mult");
634 			goto rq_fini;
635 		}
636 	}
637 
638 	return 0;
639 free_mem:
640 	plt_free(rxq_sp);
641 rq_fini:
642 	rc |= roc_nix_rq_fini(rq);
643 cq_fini:
644 	rc |= roc_nix_cq_fini(cq);
645 fail:
646 	return rc;
647 }
648 
649 static void
650 cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
651 {
652 	void *rxq = eth_dev->data->rx_queues[qid];
653 	struct cnxk_eth_rxq_sp *rxq_sp;
654 	struct cnxk_eth_dev *dev;
655 	struct roc_nix_rq *rq;
656 	struct roc_nix_cq *cq;
657 	int rc;
658 
659 	if (!rxq)
660 		return;
661 
662 	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
663 	dev = rxq_sp->dev;
664 	rq = &dev->rqs[qid];
665 
666 	plt_nix_dbg("Releasing rxq %u", qid);
667 
668 	/* Release rq reference for inline dev if present */
669 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
670 		roc_nix_inl_dev_rq_put(rq);
671 
672 	/* Cleanup ROC RQ */
673 	rc = roc_nix_rq_fini(rq);
674 	if (rc)
675 		plt_err("Failed to cleanup rq, rc=%d", rc);
676 
677 	/* Cleanup ROC CQ */
678 	cq = &dev->cqs[qid];
679 	rc = roc_nix_cq_fini(cq);
680 	if (rc)
681 		plt_err("Failed to cleanup cq, rc=%d", rc);
682 
683 	/* Finally free fast path area */
684 	plt_free(rxq_sp);
685 }
686 
687 uint32_t
688 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
689 		       uint8_t rss_level)
690 {
691 	uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
692 		{FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
693 		 FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
694 		{FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
695 		 FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
696 		 FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
697 		{FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
698 		 FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
699 		 FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
700 		 FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
701 		 FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
702 		 FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
703 	};
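	/* Rows of flow_key_type above are selected by rss_level (outer keys,
	 * inner keys, outer+inner keys); columns are picked via the
	 * RSS_*_INDEX macros (IPv4, IPv6, TCP, UDP, SCTP, DMAC).
	 */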
704 	uint32_t flowkey_cfg = 0;
705 
706 	dev->ethdev_rss_hf = ethdev_rss;
707 
708 	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
709 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
710 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
711 	}
712 
713 	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
714 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
715 
716 	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
717 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
718 
719 	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
720 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
721 
722 	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
723 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
724 
725 	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
726 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
727 
728 	if (ethdev_rss & RSS_IPV4_ENABLE)
729 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
730 
731 	if (ethdev_rss & RSS_IPV6_ENABLE)
732 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
733 
734 	if (ethdev_rss & RTE_ETH_RSS_TCP)
735 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
736 
737 	if (ethdev_rss & RTE_ETH_RSS_UDP)
738 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
739 
740 	if (ethdev_rss & RTE_ETH_RSS_SCTP)
741 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
742 
743 	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
744 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
745 
746 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
747 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
748 
749 	if (ethdev_rss & RTE_ETH_RSS_PORT)
750 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
751 
752 	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
753 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
754 
755 	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
756 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
757 
758 	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
759 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
760 
761 	if (ethdev_rss & RTE_ETH_RSS_GTPU)
762 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
763 
764 	return flowkey_cfg;
765 }
766 
767 static void
768 nix_free_queue_mem(struct cnxk_eth_dev *dev)
769 {
770 	plt_free(dev->rqs);
771 	plt_free(dev->cqs);
772 	plt_free(dev->sqs);
773 	dev->rqs = NULL;
774 	dev->cqs = NULL;
775 	dev->sqs = NULL;
776 }
777 
778 static int
779 nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
780 {
781 	struct rte_eth_dev *eth_dev = dev->eth_dev;
782 	int rc = 0;
783 
784 	TAILQ_INIT(&dev->mtr_profiles);
785 	TAILQ_INIT(&dev->mtr_policy);
786 	TAILQ_INIT(&dev->mtr);
787 
788 	if (eth_dev->dev_ops->mtr_ops_get == NULL)
789 		return rc;
790 
791 	return nix_mtr_capabilities_init(eth_dev);
792 }
793 
794 static int
795 nix_rss_default_setup(struct cnxk_eth_dev *dev)
796 {
797 	struct rte_eth_dev *eth_dev = dev->eth_dev;
798 	uint8_t rss_hash_level;
799 	uint32_t flowkey_cfg;
800 	uint64_t rss_hf;
801 
802 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
803 	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
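	/* RTE_ETH_RSS_LEVEL() yields 0 for PMD default, 1 for outermost and
	 * 2 for innermost headers; non-zero levels are converted below to the
	 * 0-based row index used by cnxk_rss_ethdev_to_nix().
	 */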
804 	if (rss_hash_level)
805 		rss_hash_level -= 1;
806 
807 	flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
808 	return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
809 }
810 
811 static int
812 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
813 {
814 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
815 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
816 	struct cnxk_eth_qconf *tx_qconf = NULL;
817 	struct cnxk_eth_qconf *rx_qconf = NULL;
818 	struct cnxk_eth_rxq_sp *rxq_sp;
819 	struct cnxk_eth_txq_sp *txq_sp;
820 	int i, nb_rxq, nb_txq;
821 	void **txq, **rxq;
822 
823 	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
824 	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
825 
826 	tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
827 	if (tx_qconf == NULL) {
828 		plt_err("Failed to allocate memory for tx_qconf");
829 		goto fail;
830 	}
831 
832 	rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
833 	if (rx_qconf == NULL) {
834 		plt_err("Failed to allocate memory for rx_qconf");
835 		goto fail;
836 	}
837 
838 	txq = eth_dev->data->tx_queues;
839 	for (i = 0; i < nb_txq; i++) {
840 		if (txq[i] == NULL) {
841 			tx_qconf[i].valid = false;
842 			plt_info("txq[%d] is already released", i);
843 			continue;
844 		}
845 		txq_sp = cnxk_eth_txq_to_sp(txq[i]);
846 		memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
847 		tx_qconf[i].valid = true;
848 		dev_ops->tx_queue_release(eth_dev, i);
849 		eth_dev->data->tx_queues[i] = NULL;
850 	}
851 
852 	rxq = eth_dev->data->rx_queues;
853 	for (i = 0; i < nb_rxq; i++) {
854 		if (rxq[i] == NULL) {
855 			rx_qconf[i].valid = false;
856 			plt_info("rxq[%d] is already released", i);
857 			continue;
858 		}
859 		rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
860 		memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
861 		rx_qconf[i].valid = true;
862 		dev_ops->rx_queue_release(eth_dev, i);
863 		eth_dev->data->rx_queues[i] = NULL;
864 	}
865 
866 	dev->tx_qconf = tx_qconf;
867 	dev->rx_qconf = rx_qconf;
868 	return 0;
869 
870 fail:
871 	free(tx_qconf);
872 	free(rx_qconf);
873 	return -ENOMEM;
874 }
875 
876 static int
877 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
878 {
879 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
880 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
881 	struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
882 	struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
883 	int rc, i, nb_rxq, nb_txq;
884 
885 	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
886 	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
887 
888 	rc = -ENOMEM;
889 	/* Set up Tx & Rx queues with the previous configuration so
890 	 * that the queues can be functional in cases where ports
891 	 * are started without reconfiguring the queues.
892 	 *
893 	 * The usual reconfigure sequence is as below:
894 	 * port_configure() {
895 	 *      if(reconfigure) {
896 	 *              queue_release()
897 	 *              queue_setup()
898 	 *      }
899 	 *      queue_configure() {
900 	 *              queue_release()
901 	 *              queue_setup()
902 	 *      }
903 	 * }
904 	 * port_start()
905 	 *
906 	 * In some applications' control path, queue_configure() would
907 	 * NOT be invoked for Tx/Rx queues in port_configure().
908 	 * In such cases, the queues can be functional after start as the
909 	 * queues were already set up in port_configure().
910 	 */
911 	for (i = 0; i < nb_txq; i++) {
912 		if (!tx_qconf[i].valid)
913 			continue;
914 		rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
915 					     &tx_qconf[i].conf.tx);
916 		if (rc) {
917 			plt_err("Failed to setup tx queue rc=%d", rc);
918 			for (i -= 1; i >= 0; i--)
919 				dev_ops->tx_queue_release(eth_dev, i);
920 			goto fail;
921 		}
922 	}
923 
924 	free(tx_qconf);
925 	tx_qconf = NULL;
926 
927 	for (i = 0; i < nb_rxq; i++) {
928 		if (!rx_qconf[i].valid)
929 			continue;
930 		rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
931 					     &rx_qconf[i].conf.rx,
932 					     rx_qconf[i].mp);
933 		if (rc) {
934 			plt_err("Failed to setup rx queue rc=%d", rc);
935 			for (i -= 1; i >= 0; i--)
936 				dev_ops->rx_queue_release(eth_dev, i);
937 			goto tx_queue_release;
938 		}
939 	}
940 
941 	free(rx_qconf);
942 	rx_qconf = NULL;
943 
944 	return 0;
945 
946 tx_queue_release:
947 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
948 		dev_ops->tx_queue_release(eth_dev, i);
949 fail:
950 	free(tx_qconf);
951 	free(rx_qconf);
952 
953 	return rc;
954 }
955 
956 static void
957 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
958 {
959 	/* These dummy functions are required to support applications
960 	 * which reconfigure queues without stopping the Tx and Rx
961 	 * burst threads (e.g. the KNI app).
962 	 * When the queue context is saved, txq/rxqs are released,
963 	 * which would crash the app since Rx/Tx burst may still be
964 	 * running on different lcores.
965 	 */
966 	eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
967 	eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
968 	rte_mb();
969 }
970 
971 static int
972 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
973 {
974 	uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
975 	uint8_t tun[ROC_NIX_LSO_TUN_MAX];
976 	struct roc_nix *nix = &dev->nix;
977 	int rc;
978 
979 	rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
980 	if (rc)
981 		return rc;
982 
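	/* Pack the HW LSO format indices one per byte: plain tunnel formats
	 * (V4V4, V4V6, V6V4, V6V6) in bits [31:0] and their UDP tunnel
	 * variants in bits [63:32], for lookup by the Tx path.
	 */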
983 	dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
984 			    (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
985 			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
986 			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
987 
988 	dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
989 			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
990 			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
991 			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
992 	return 0;
993 }
994 
995 static int
996 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
997 {
998 	struct roc_nix *nix = &dev->nix;
999 	int rc;
1000 
1001 	/* Nothing much to do if offload is not enabled */
1002 	if (!(dev->tx_offloads &
1003 	      (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1004 	       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
1005 		return 0;
1006 
1007 	/* Set up LSO formats in AF. It's a no-op if another ethdev has
1008 	 * already set them up.
1009 	 */
1010 	rc = roc_nix_lso_fmt_setup(nix);
1011 	if (rc)
1012 		return rc;
1013 
1014 	return nix_lso_tun_fmt_update(dev);
1015 }
1016 
1017 int
1018 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
1019 {
1020 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1021 	struct rte_eth_dev_data *data = eth_dev->data;
1022 	struct rte_eth_conf *conf = &data->dev_conf;
1023 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
1024 	struct rte_eth_txmode *txmode = &conf->txmode;
1025 	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1026 	struct roc_nix_fc_cfg fc_cfg = {0};
1027 	struct roc_nix *nix = &dev->nix;
1028 	struct rte_ether_addr *ea;
1029 	uint8_t nb_rxq, nb_txq;
1030 	uint64_t rx_cfg;
1031 	void *qs;
1032 	int rc;
1033 
1034 	rc = -EINVAL;
1035 
1036 	/* Sanity checks */
1037 	if (rte_eal_has_hugepages() == 0) {
1038 		plt_err("Huge page is not configured");
1039 		goto fail_configure;
1040 	}
1041 
1042 	if (conf->dcb_capability_en == 1) {
1043 		plt_err("dcb enable is not supported");
1044 		goto fail_configure;
1045 	}
1046 
1047 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1048 		plt_err("Flow director is not supported");
1049 		goto fail_configure;
1050 	}
1051 
1052 	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
1053 	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
1054 		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1055 		goto fail_configure;
1056 	}
1057 
1058 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
1059 		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
1060 		goto fail_configure;
1061 	}
1062 
1063 	/* Free the resources allocated from the previous configure */
1064 	if (dev->configured == 1) {
1065 		/* Unregister queue irq's */
1066 		roc_nix_unregister_queue_irqs(nix);
1067 
1068 		/* Unregister CQ irqs if present */
1069 		if (eth_dev->data->dev_conf.intr_conf.rxq)
1070 			roc_nix_unregister_cq_irqs(nix);
1071 
1072 		/* Set no-op functions */
1073 		nix_set_nop_rxtx_function(eth_dev);
1074 		/* Store queue config for later */
1075 		rc = nix_store_queue_cfg_and_then_release(eth_dev);
1076 		if (rc)
1077 			goto fail_configure;
1078 
1079 		/* Disable and free rte_meter entries */
1080 		rc = nix_meter_fini(dev);
1081 		if (rc)
1082 			goto fail_configure;
1083 
1084 		/* Cleanup security support */
1085 		rc = nix_security_release(dev);
1086 		if (rc)
1087 			goto fail_configure;
1088 
1089 		roc_nix_tm_fini(nix);
1090 		roc_nix_lf_free(nix);
1091 	}
1092 
1093 	dev->rx_offloads = rxmode->offloads;
1094 	dev->tx_offloads = txmode->offloads;
1095 
1096 	/* Prepare rx cfg */
1097 	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
1098 	if (dev->rx_offloads &
1099 	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
1100 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
1101 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
1102 	}
1103 	rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
1104 		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
1105 		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
1106 
1107 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
1108 		rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
1109 		/* Disable DROP_RE if Rx security offload is enabled and the
1110 		 * platform does not support it.
1111 		 */
1112 		if (dev->ipsecd_drop_re_dis)
1113 			rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
1114 	}
1115 
1116 	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1117 	nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1118 
1119 	/* Alloc a nix lf */
1120 	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
1121 	if (rc) {
1122 		plt_err("Failed to init nix_lf rc=%d", rc);
1123 		goto fail_configure;
1124 	}
1125 
1126 	/* Check if PTP is enabled in the PF owning this VF */
1127 	if (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix)))
1128 		dev->ptp_en = roc_nix_ptp_is_enable(nix);
1129 
1130 	dev->npc.channel = roc_nix_get_base_chan(nix);
1131 
1132 	nb_rxq = data->nb_rx_queues;
1133 	nb_txq = data->nb_tx_queues;
1134 	rc = -ENOMEM;
1135 	if (nb_rxq) {
1136 		/* Allocate memory for roc rq's and cq's */
1137 		qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
1138 		if (!qs) {
1139 			plt_err("Failed to alloc rqs");
1140 			goto free_nix_lf;
1141 		}
1142 		dev->rqs = qs;
1143 
1144 		qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
1145 		if (!qs) {
1146 			plt_err("Failed to alloc cqs");
1147 			goto free_nix_lf;
1148 		}
1149 		dev->cqs = qs;
1150 	}
1151 
1152 	if (nb_txq) {
1153 		/* Allocate memory for roc sq's */
1154 		qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
1155 		if (!qs) {
1156 			plt_err("Failed to alloc sqs");
1157 			goto free_nix_lf;
1158 		}
1159 		dev->sqs = qs;
1160 	}
1161 
1162 	/* Re-enable NIX LF error interrupts */
1163 	roc_nix_err_intr_ena_dis(nix, true);
1164 	roc_nix_ras_intr_ena_dis(nix, true);
1165 
1166 	if (nix->rx_ptp_ena &&
1167 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
1168 		plt_err("Both PTP and switch header enabled");
1169 		goto free_nix_lf;
1170 	}
1171 
1172 	rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type,
1173 				    dev->npc.pre_l2_size_offset,
1174 				    dev->npc.pre_l2_size_offset_mask,
1175 				    dev->npc.pre_l2_size_shift_dir);
1176 	if (rc) {
1177 		plt_err("Failed to enable switch type nix_lf rc=%d", rc);
1178 		goto free_nix_lf;
1179 	}
1180 
1181 	/* Setup LSO if needed */
1182 	rc = nix_lso_fmt_setup(dev);
1183 	if (rc) {
1184 		plt_err("Failed to setup nix lso format fields, rc=%d", rc);
1185 		goto free_nix_lf;
1186 	}
1187 
1188 	/* Configure RSS */
1189 	rc = nix_rss_default_setup(dev);
1190 	if (rc) {
1191 		plt_err("Failed to configure rss rc=%d", rc);
1192 		goto free_nix_lf;
1193 	}
1194 
1195 	/* Init the default TM scheduler hierarchy */
1196 	rc = roc_nix_tm_init(nix);
1197 	if (rc) {
1198 		plt_err("Failed to init traffic manager, rc=%d", rc);
1199 		goto free_nix_lf;
1200 	}
1201 
1202 	rc = nix_ingress_policer_setup(dev);
1203 	if (rc) {
1204 		plt_err("Failed to setup ingress policer rc=%d", rc);
1205 		goto free_nix_lf;
1206 	}
1207 
1208 	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
1209 	if (rc) {
1210 		plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
1211 		goto tm_fini;
1212 	}
1213 
1214 	/* Register queue IRQs */
1215 	rc = roc_nix_register_queue_irqs(nix);
1216 	if (rc) {
1217 		plt_err("Failed to register queue interrupts rc=%d", rc);
1218 		goto tm_fini;
1219 	}
1220 
1221 	/* Register cq IRQs */
1222 	if (eth_dev->data->dev_conf.intr_conf.rxq) {
1223 		if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
1224 			plt_err("Rx interrupt cannot be enabled, rxq > %d",
1225 				dev->nix.cints);
1226 			goto q_irq_fini;
1227 		}
1228 		/* The Rx interrupt feature cannot work with vector mode because
1229 		 * vector mode does not process packets unless a minimum of 4 pkts
1230 		 * are received, while CQ interrupts are generated even for 1 pkt
1231 		 * in the CQ.
1232 		 */
1233 		dev->scalar_ena = true;
1234 
1235 		rc = roc_nix_register_cq_irqs(nix);
1236 		if (rc) {
1237 			plt_err("Failed to register CQ interrupts rc=%d", rc);
1238 			goto q_irq_fini;
1239 		}
1240 	}
1241 
1242 	/* Configure loop back mode */
1243 	rc = roc_nix_mac_loopback_enable(nix,
1244 					 eth_dev->data->dev_conf.lpbk_mode);
1245 	if (rc) {
1246 		plt_err("Failed to configure cgx loop back mode rc=%d", rc);
1247 		goto cq_fini;
1248 	}
1249 
1250 	/* Setup Inline security support */
1251 	rc = nix_security_setup(dev);
1252 	if (rc)
1253 		goto cq_fini;
1254 
1255 	/* Init flow control configuration */
1256 	fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
1257 	fc_cfg.rxchan_cfg.enable = true;
1258 	rc = roc_nix_fc_config_set(nix, &fc_cfg);
1259 	if (rc) {
1260 		plt_err("Failed to initialize flow control rc=%d", rc);
1261 		goto cq_fini;
1262 	}
1263 
1264 	/* Update flow control configuration to PMD */
1265 	rc = nix_init_flow_ctrl_config(eth_dev);
1266 	if (rc) {
1267 		plt_err("Failed to initialize flow control rc=%d", rc);
1268 		goto cq_fini;
1269 	}
1270 
1271 	/* Initialize TC to SQ mapping as invalid */
1272 	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
1273 	/*
1274 	 * Restore the queue config when a reconfigure follows a previous
1275 	 * configure and the application has not invoked queue configure.
1276 	 */
1277 	if (dev->configured == 1) {
1278 		rc = nix_restore_queue_cfg(eth_dev);
1279 		if (rc)
1280 			goto sec_release;
1281 	}
1282 
1283 	/* Update the mac address */
1284 	ea = eth_dev->data->mac_addrs;
1285 	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1286 	if (rte_is_zero_ether_addr(ea))
1287 		rte_eth_random_addr((uint8_t *)ea);
1288 
1289 	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1290 
1291 	plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1292 		    " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
1293 		    eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
1294 		    dev->rx_offloads, dev->tx_offloads);
1295 
1296 	/* All good */
1297 	dev->configured = 1;
1298 	dev->nb_rxq = data->nb_rx_queues;
1299 	dev->nb_txq = data->nb_tx_queues;
1300 	return 0;
1301 
1302 sec_release:
1303 	rc |= nix_security_release(dev);
1304 cq_fini:
1305 	roc_nix_unregister_cq_irqs(nix);
1306 q_irq_fini:
1307 	roc_nix_unregister_queue_irqs(nix);
1308 tm_fini:
1309 	roc_nix_tm_fini(nix);
1310 free_nix_lf:
1311 	nix_free_queue_mem(dev);
1312 	rc |= roc_nix_lf_free(nix);
1313 fail_configure:
1314 	dev->configured = 0;
1315 	return rc;
1316 }
1317 
1318 int
1319 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1320 {
1321 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1322 	struct rte_eth_dev_data *data = eth_dev->data;
1323 	struct roc_nix_sq *sq = &dev->sqs[qid];
1324 	int rc = -EINVAL;
1325 
1326 	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1327 		return 0;
1328 
1329 	rc = roc_nix_tm_sq_aura_fc(sq, true);
1330 	if (rc) {
1331 		plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1332 		goto done;
1333 	}
1334 
1335 	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1336 done:
1337 	return rc;
1338 }
1339 
1340 int
1341 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1342 {
1343 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1344 	struct rte_eth_dev_data *data = eth_dev->data;
1345 	struct roc_nix_sq *sq = &dev->sqs[qid];
1346 	int rc;
1347 
1348 	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1349 		return 0;
1350 
1351 	rc = roc_nix_tm_sq_aura_fc(sq, false);
1352 	if (rc) {
1353 		plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1354 			rc);
1355 		goto done;
1356 	}
1357 
1358 	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1359 done:
1360 	return rc;
1361 }
1362 
1363 static int
1364 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1365 {
1366 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1367 	struct rte_eth_dev_data *data = eth_dev->data;
1368 	struct roc_nix_rq *rq = &dev->rqs[qid];
1369 	int rc;
1370 
1371 	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1372 		return 0;
1373 
1374 	rc = roc_nix_rq_ena_dis(rq, true);
1375 	if (rc) {
1376 		plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1377 		goto done;
1378 	}
1379 
1380 	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1381 done:
1382 	return rc;
1383 }
1384 
1385 static int
1386 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1387 {
1388 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1389 	struct rte_eth_dev_data *data = eth_dev->data;
1390 	struct roc_nix_rq *rq = &dev->rqs[qid];
1391 	int rc;
1392 
1393 	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1394 		return 0;
1395 
1396 	rc = roc_nix_rq_ena_dis(rq, false);
1397 	if (rc) {
1398 		plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1399 		goto done;
1400 	}
1401 
1402 	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1403 done:
1404 	return rc;
1405 }
1406 
1407 static int
1408 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1409 {
1410 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1411 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1412 	struct rte_mbuf *rx_pkts[32];
1413 	struct rte_eth_link link;
1414 	int count, i, j, rc;
1415 	void *rxq;
1416 
1417 	/* Disable all the NPC entries */
1418 	rc = roc_npc_mcam_enable_all_entries(&dev->npc, 0);
1419 	if (rc)
1420 		return rc;
1421 
1422 	/* Stop link change events */
1423 	if (!roc_nix_is_vf_or_sdp(&dev->nix))
1424 		roc_nix_mac_link_event_start_stop(&dev->nix, false);
1425 
1426 	/* Disable Rx via NPC */
1427 	roc_nix_npc_rx_ena_dis(&dev->nix, false);
1428 
1429 	roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);
1430 
1431 	/* Stop rx queues and free up pkts pending */
1432 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1433 		rc = dev_ops->rx_queue_stop(eth_dev, i);
1434 		if (rc)
1435 			continue;
1436 
1437 		rxq = eth_dev->data->rx_queues[i];
1438 		count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1439 		while (count) {
1440 			for (j = 0; j < count; j++)
1441 				rte_pktmbuf_free(rx_pkts[j]);
1442 			count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1443 		}
1444 	}
1445 
1446 	/* Stop tx queues  */
1447 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1448 		dev_ops->tx_queue_stop(eth_dev, i);
1449 
1450 	/* Bring down link status internally */
1451 	memset(&link, 0, sizeof(link));
1452 	rte_eth_linkstatus_set(eth_dev, &link);
1453 
1454 	return 0;
1455 }
1456 
1457 int
1458 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1459 {
1460 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1461 	int rc, i;
1462 
1463 	if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1464 		rc = nix_recalc_mtu(eth_dev);
1465 		if (rc)
1466 			return rc;
1467 	}
1468 
1469 	/* Start rx queues */
1470 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1471 		rc = cnxk_nix_rx_queue_start(eth_dev, i);
1472 		if (rc)
1473 			return rc;
1474 	}
1475 
1476 	/* Start tx queues  */
1477 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1478 		rc = cnxk_nix_tx_queue_start(eth_dev, i);
1479 		if (rc)
1480 			return rc;
1481 	}
1482 
1483 	/* Update Flow control configuration */
1484 	rc = nix_update_flow_ctrl_config(eth_dev);
1485 	if (rc) {
1486 		plt_err("Failed to enable flow control. error code(%d)", rc);
1487 		return rc;
1488 	}
1489 
1490 	/* Enable Rx in NPC */
1491 	rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1492 	if (rc) {
1493 		plt_err("Failed to enable NPC rx %d", rc);
1494 		return rc;
1495 	}
1496 
1497 	rc = roc_npc_mcam_enable_all_entries(&dev->npc, 1);
1498 	if (rc) {
1499 		plt_err("Failed to enable NPC entries %d", rc);
1500 		return rc;
1501 	}
1502 
1503 	cnxk_nix_toggle_flag_link_cfg(dev, true);
1504 
1505 	/* Start link change events */
1506 	if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1507 		rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1508 		if (rc) {
1509 			plt_err("Failed to start cgx link event %d", rc);
1510 			goto rx_disable;
1511 		}
1512 	}
1513 
1514 	/* Enable PTP if it is requested by the user or already
1515 	 * enabled on the PF owning this VF.
1516 	 */
1517 	memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1518 	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1519 		cnxk_eth_dev_ops.timesync_enable(eth_dev);
1520 	else
1521 		cnxk_eth_dev_ops.timesync_disable(eth_dev);
1522 
1523 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1524 		rc = rte_mbuf_dyn_rx_timestamp_register
1525 			(&dev->tstamp.tstamp_dynfield_offset,
1526 			 &dev->tstamp.rx_tstamp_dynflag);
1527 		if (rc != 0) {
1528 			plt_err("Failed to register Rx timestamp field/flag");
1529 			goto rx_disable;
1530 		}
1531 	}
1532 
1533 	cnxk_nix_toggle_flag_link_cfg(dev, false);
1534 
1535 	roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, true);
1536 
1537 	return 0;
1538 
1539 rx_disable:
1540 	roc_nix_npc_rx_ena_dis(&dev->nix, false);
1541 	cnxk_nix_toggle_flag_link_cfg(dev, false);
1542 	return rc;
1543 }
1544 
1545 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1546 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1547 
1548 /* CNXK platform independent eth dev ops */
1549 struct eth_dev_ops cnxk_eth_dev_ops = {
1550 	.mtu_set = cnxk_nix_mtu_set,
1551 	.mac_addr_add = cnxk_nix_mac_addr_add,
1552 	.mac_addr_remove = cnxk_nix_mac_addr_del,
1553 	.mac_addr_set = cnxk_nix_mac_addr_set,
1554 	.dev_infos_get = cnxk_nix_info_get,
1555 	.link_update = cnxk_nix_link_update,
1556 	.tx_queue_release = cnxk_nix_tx_queue_release,
1557 	.rx_queue_release = cnxk_nix_rx_queue_release,
1558 	.dev_stop = cnxk_nix_dev_stop,
1559 	.dev_close = cnxk_nix_dev_close,
1560 	.dev_reset = cnxk_nix_dev_reset,
1561 	.tx_queue_start = cnxk_nix_tx_queue_start,
1562 	.rx_queue_start = cnxk_nix_rx_queue_start,
1563 	.rx_queue_stop = cnxk_nix_rx_queue_stop,
1564 	.dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
1565 	.promiscuous_enable = cnxk_nix_promisc_enable,
1566 	.promiscuous_disable = cnxk_nix_promisc_disable,
1567 	.allmulticast_enable = cnxk_nix_allmulticast_enable,
1568 	.allmulticast_disable = cnxk_nix_allmulticast_disable,
1569 	.rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
1570 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
1571 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
1572 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
1573 	.priority_flow_ctrl_queue_config =
1574 				cnxk_nix_priority_flow_ctrl_queue_config,
1575 	.priority_flow_ctrl_queue_info_get =
1576 				cnxk_nix_priority_flow_ctrl_queue_info_get,
1577 	.dev_set_link_up = cnxk_nix_set_link_up,
1578 	.dev_set_link_down = cnxk_nix_set_link_down,
1579 	.get_module_info = cnxk_nix_get_module_info,
1580 	.get_module_eeprom = cnxk_nix_get_module_eeprom,
1581 	.rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
1582 	.rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
1583 	.pool_ops_supported = cnxk_nix_pool_ops_supported,
1584 	.queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
1585 	.stats_get = cnxk_nix_stats_get,
1586 	.stats_reset = cnxk_nix_stats_reset,
1587 	.xstats_get = cnxk_nix_xstats_get,
1588 	.xstats_get_names = cnxk_nix_xstats_get_names,
1589 	.xstats_reset = cnxk_nix_xstats_reset,
1590 	.xstats_get_by_id = cnxk_nix_xstats_get_by_id,
1591 	.xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
1592 	.fw_version_get = cnxk_nix_fw_version_get,
1593 	.rxq_info_get = cnxk_nix_rxq_info_get,
1594 	.txq_info_get = cnxk_nix_txq_info_get,
1595 	.tx_done_cleanup = cnxk_nix_tx_done_cleanup,
1596 	.flow_ops_get = cnxk_nix_flow_ops_get,
1597 	.get_reg = cnxk_nix_dev_get_reg,
1598 	.timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
1599 	.timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
1600 	.timesync_read_time = cnxk_nix_timesync_read_time,
1601 	.timesync_write_time = cnxk_nix_timesync_write_time,
1602 	.timesync_adjust_time = cnxk_nix_timesync_adjust_time,
1603 	.read_clock = cnxk_nix_read_clock,
1604 	.reta_update = cnxk_nix_reta_update,
1605 	.reta_query = cnxk_nix_reta_query,
1606 	.rss_hash_update = cnxk_nix_rss_hash_update,
1607 	.rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
1608 	.set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
1609 	.set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
1610 	.tm_ops_get = cnxk_nix_tm_ops_get,
1611 	.mtr_ops_get = cnxk_nix_mtr_ops_get,
1612 };
1613 
1614 static int
1615 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
1616 {
1617 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1618 	struct rte_security_ctx *sec_ctx;
1619 	struct roc_nix *nix = &dev->nix;
1620 	struct rte_pci_device *pci_dev;
1621 	int rc, max_entries;
1622 
1623 	eth_dev->dev_ops = &cnxk_eth_dev_ops;
1624 	eth_dev->rx_queue_count = cnxk_nix_rx_queue_count;
1625 	eth_dev->rx_descriptor_status = cnxk_nix_rx_descriptor_status;
1626 	eth_dev->tx_descriptor_status = cnxk_nix_tx_descriptor_status;
1627 
1628 	/* Alloc security context */
1629 	sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
1630 	if (!sec_ctx)
1631 		return -ENOMEM;
1632 	sec_ctx->device = eth_dev;
1633 	sec_ctx->ops = &cnxk_eth_sec_ops;
1634 	sec_ctx->flags =
1635 		(RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
1636 	eth_dev->security_ctx = sec_ctx;
1637 
1638 	/* For secondary processes, the primary has done all the work */
1639 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1640 		return 0;
1641 
1642 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1643 	rte_eth_copy_pci_info(eth_dev, pci_dev);
1644 
1645 	/* Parse devargs string */
1646 	rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1647 	if (rc) {
1648 		plt_err("Failed to parse devargs rc=%d", rc);
1649 		goto error;
1650 	}
1651 
1652 	/* Initialize base roc nix */
1653 	nix->pci_dev = pci_dev;
1654 	nix->hw_vlan_ins = true;
1655 	rc = roc_nix_dev_init(nix);
1656 	if (rc) {
1657 		plt_err("Failed to initialize roc nix rc=%d", rc);
1658 		goto error;
1659 	}
1660 
1661 	/* Register up msg callbacks */
1662 	roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
1663 
1664 	/* Register link info get callback */
1665 	roc_nix_mac_link_info_get_cb_register(nix,
1666 					      cnxk_eth_dev_link_status_get_cb);
1667 
1668 	dev->eth_dev = eth_dev;
1669 	dev->configured = 0;
1670 	dev->ptype_disable = 0;
1671 
1672 	TAILQ_INIT(&dev->inb.list);
1673 	TAILQ_INIT(&dev->outb.list);
1674 	rte_spinlock_init(&dev->inb.lock);
1675 	rte_spinlock_init(&dev->outb.lock);
1676 
1677 	/* For VFs, the returned max_entries will be 0, but one entry must be
1678 	 * allocated to keep the default MAC address, so set it to 1.
1679 	 */
1680 	if (roc_nix_is_vf_or_sdp(nix))
1681 		max_entries = 1;
1682 	else
1683 		max_entries = roc_nix_mac_max_entries_get(nix);
1684 
1685 	if (max_entries <= 0) {
1686 		plt_err("Failed to get max entries for mac addr");
1687 		rc = -ENOTSUP;
1688 		goto dev_fini;
1689 	}
1690 
1691 	eth_dev->data->mac_addrs =
1692 		rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
1693 	if (eth_dev->data->mac_addrs == NULL) {
1694 		plt_err("Failed to allocate memory for mac addr");
1695 		rc = -ENOMEM;
1696 		goto dev_fini;
1697 	}
1698 
1699 	dev->max_mac_entries = max_entries;
1700 	dev->dmac_filter_count = 1;
1701 
1702 	/* Get mac address */
1703 	rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
1704 	if (rc) {
1705 		plt_err("Failed to get mac addr, rc=%d", rc);
1706 		goto free_mac_addrs;
1707 	}
1708 
1709 	/* Update the mac address */
1710 	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1711 
1712 	if (!roc_nix_is_vf_or_sdp(nix)) {
1713 		/* Sync same MAC address to CGX/RPM table */
1714 		rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
1715 		if (rc) {
1716 			plt_err("Failed to set mac addr, rc=%d", rc);
1717 			goto free_mac_addrs;
1718 		}
1719 	}
1720 
1721 	/* Union of all capabilities supported by CNXK.
1722 	 * Platform specific capabilities will be
1723 	 * updated later.
1724 	 */
1725 	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1726 	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1727 	dev->speed_capa = nix_get_speed_capa(dev);
1728 
1729 	/* Initialize roc npc */
1730 	dev->npc.roc_nix = nix;
1731 	rc = roc_npc_init(&dev->npc);
1732 	if (rc)
1733 		goto free_mac_addrs;
1734 
1735 	plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
1736 		    " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1737 		    eth_dev->data->port_id, roc_nix_get_pf(nix),
1738 		    roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
1739 		    dev->rx_offload_capa, dev->tx_offload_capa);
1740 	return 0;
1741 
1742 free_mac_addrs:
1743 	rte_free(eth_dev->data->mac_addrs);
1744 dev_fini:
1745 	roc_nix_dev_fini(nix);
1746 error:
1747 	plt_err("Failed to init nix eth_dev rc=%d", rc);
1748 	return rc;
1749 }
1750 
1751 static int
1752 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
1753 {
1754 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1755 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1756 	struct rte_eth_pfc_queue_conf pfc_conf;
1757 	struct roc_nix *nix = &dev->nix;
1758 	struct rte_eth_fc_conf fc_conf;
1759 	int rc, i;
1760 
1761 	/* Disable switch hdr pkind */
1762 	roc_nix_switch_hdr_set(&dev->nix, 0, 0, 0, 0);
1763 
1764 	plt_free(eth_dev->security_ctx);
1765 	eth_dev->security_ctx = NULL;
1766 
1767 	/* Nothing to be done for secondary processes */
1768 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1769 		return 0;
1770 
1771 	/* Clear the flag since we are closing down */
1772 	dev->configured = 0;
1773 
1774 	roc_nix_npc_rx_ena_dis(nix, false);
1775 
1776 	/* Restore 802.3 Flow control configuration */
1777 	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
1778 	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1779 	fc_conf.mode = RTE_ETH_FC_NONE;
1780 	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1781 
1782 	pfc_conf.mode = RTE_ETH_FC_NONE;
1783 	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
1784 		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
1785 			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
1786 			pfc_conf.rx_pause.tc = i;
1787 			pfc_conf.tx_pause.rx_qid = i;
1788 			pfc_conf.tx_pause.tc = i;
1789 			rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
1790 				&pfc_conf);
1791 			if (rc)
1792 				plt_err("Failed to reset PFC. error code(%d)",
1793 					rc);
1794 		}
1795 	}
1796 
1797 	fc_conf.mode = RTE_ETH_FC_FULL;
1798 	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1799 
1800 	/* Disable and free rte_meter entries */
1801 	nix_meter_fini(dev);
1802 
1803 	/* Disable and free rte_flow entries */
1804 	roc_npc_fini(&dev->npc);
1805 
1806 	/* Disable link status events */
1807 	roc_nix_mac_link_event_start_stop(nix, false);
1808 
1809 	/* Unregister the link update op, this is required to stop VFs from
1810 	 * receiving link status updates on exit path.
1811 	 */
1812 	roc_nix_mac_link_cb_unregister(nix);
1813 
1814 	/* Free up SQs */
1815 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1816 		dev_ops->tx_queue_release(eth_dev, i);
1817 		eth_dev->data->tx_queues[i] = NULL;
1818 	}
1819 	eth_dev->data->nb_tx_queues = 0;
1820 
1821 	/* Free up RQ's and CQ's */
1822 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1823 		dev_ops->rx_queue_release(eth_dev, i);
1824 		eth_dev->data->rx_queues[i] = NULL;
1825 	}
1826 	eth_dev->data->nb_rx_queues = 0;
1827 
1828 	/* Free security resources */
1829 	nix_security_release(dev);
1830 
1831 	/* Free tm resources */
1832 	roc_nix_tm_fini(nix);
1833 
1834 	/* Unregister queue irqs */
1835 	roc_nix_unregister_queue_irqs(nix);
1836 
1837 	/* Unregister cq irqs */
1838 	if (eth_dev->data->dev_conf.intr_conf.rxq)
1839 		roc_nix_unregister_cq_irqs(nix);
1840 
1841 	/* Free ROC RQ's, SQ's and CQ's memory */
1842 	nix_free_queue_mem(dev);
1843 
1844 	/* Free nix lf resources */
1845 	rc = roc_nix_lf_free(nix);
1846 	if (rc)
1847 		plt_err("Failed to free nix lf, rc=%d", rc);
1848 
1849 	rte_free(eth_dev->data->mac_addrs);
1850 	eth_dev->data->mac_addrs = NULL;
1851 
1852 	rc = roc_nix_dev_fini(nix);
1853 	/* Can be freed later by PMD if NPA LF is in use */
1854 	if (rc == -EAGAIN) {
1855 		if (!reset)
1856 			eth_dev->data->dev_private = NULL;
1857 		return 0;
1858 	} else if (rc) {
1859 		plt_err("Failed in nix dev fini, rc=%d", rc);
1860 	}
1861 
1862 	return rc;
1863 }
1864 
1865 static int
1866 cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
1867 {
1868 	cnxk_eth_dev_uninit(eth_dev, false);
1869 	return 0;
1870 }
1871 
1872 static int
1873 cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
1874 {
1875 	int rc;
1876 
1877 	rc = cnxk_eth_dev_uninit(eth_dev, true);
1878 	if (rc)
1879 		return rc;
1880 
1881 	return cnxk_eth_dev_init(eth_dev);
1882 }
1883 
1884 int
1885 cnxk_nix_remove(struct rte_pci_device *pci_dev)
1886 {
1887 	struct rte_eth_dev *eth_dev;
1888 	struct roc_nix *nix;
1889 	int rc = -EINVAL;
1890 
1891 	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1892 	if (eth_dev) {
1893 		/* Cleanup eth dev */
1894 		rc = cnxk_eth_dev_uninit(eth_dev, false);
1895 		if (rc)
1896 			return rc;
1897 
1898 		rte_eth_dev_release_port(eth_dev);
1899 	}
1900 
1901 	/* Nothing to be done for secondary processes */
1902 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1903 		return 0;
1904 
1905 	/* Check if this device is hosting common resource */
1906 	nix = roc_idev_npa_nix_get();
1907 	if (nix->pci_dev != pci_dev)
1908 		return 0;
1909 
1910 	/* Try nix fini now */
1911 	rc = roc_nix_dev_fini(nix);
1912 	if (rc == -EAGAIN) {
1913 		plt_info("%s: common resource in use by other devices",
1914 			 pci_dev->name);
1915 		goto exit;
1916 	} else if (rc) {
1917 		plt_err("Failed in nix dev fini, rc=%d", rc);
1918 		goto exit;
1919 	}
1920 
1921 	/* Free device pointer as rte_ethdev does not have it anymore */
1922 	rte_free(nix);
1923 exit:
1924 	return rc;
1925 }
1926 
1927 int
1928 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1929 {
1930 	int rc;
1931 
1932 	RTE_SET_USED(pci_drv);
1933 
1934 	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
1935 					   cnxk_eth_dev_init);
1936 
1937 	/* On error on a secondary process, recheck whether the port exists in
1938 	 * the primary or is in the middle of being detached.
1939 	 */
1940 	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1941 		if (!rte_eth_dev_allocated(pci_dev->device.name))
1942 			return 0;
1943 	return rc;
1944 }
1945