xref: /dpdk/drivers/net/cnxk/cnxk_ethdev.c (revision 3c100e0e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <cnxk_ethdev.h>
5 
6 #include <rte_eventdev.h>
7 
8 static inline uint64_t
9 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
10 {
11 	uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
12 
13 	if (roc_nix_is_vf_or_sdp(&dev->nix) ||
14 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
15 		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
16 
17 	return capa;
18 }
19 
20 static inline uint64_t
21 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
22 {
23 	RTE_SET_USED(dev);
24 	return CNXK_NIX_TX_OFFLOAD_CAPA;
25 }
26 
27 static inline uint32_t
28 nix_get_speed_capa(struct cnxk_eth_dev *dev)
29 {
30 	uint32_t speed_capa;
31 
32 	/* Auto negotiation disabled */
33 	speed_capa = RTE_ETH_LINK_SPEED_FIXED;
34 	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
35 		speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
36 			      RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
37 			      RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
38 	}
39 
40 	return speed_capa;
41 }
42 
43 int
44 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
45 {
46 	struct roc_nix *nix = &dev->nix;
47 
48 	if (dev->inb.inl_dev == use_inl_dev)
49 		return 0;
50 
51 	plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
52 		    dev->inb.nb_sess, !!dev->inb.inl_dev);
53 
54 	/* Change the mode */
55 	dev->inb.inl_dev = use_inl_dev;
56 
57 	/* Update RoC for NPC rule insertion */
58 	roc_nix_inb_mode_set(nix, use_inl_dev);
59 
60 	/* Setup lookup mem */
61 	return cnxk_nix_lookup_mem_sa_base_set(dev);
62 }
63 
64 static int
65 nix_security_setup(struct cnxk_eth_dev *dev)
66 {
67 	struct roc_nix *nix = &dev->nix;
68 	int i, rc = 0;
69 
70 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
71 		/* Setup Inline Inbound */
72 		rc = roc_nix_inl_inb_init(nix);
73 		if (rc) {
74 			plt_err("Failed to initialize nix inline inb, rc=%d",
75 				rc);
76 			return rc;
77 		}
78 
79 		/* By default, use the inline device for poll mode. This is
80 		 * overridden when event mode RQs are set up.
81 		 */
82 		cnxk_nix_inb_mode_set(dev, true);
83 
84 		/* Allocate memory to be used as dptr for CPT ucode
85 		 * WRITE_SA op.
86 		 */
87 		dev->inb.sa_dptr =
88 			plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
89 		if (!dev->inb.sa_dptr) {
90 			plt_err("Couldn't allocate memory for SA dptr");
91 			rc = -ENOMEM;
92 			goto cleanup;
93 		}
94 	}
95 
96 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
97 	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
98 		struct plt_bitmap *bmap;
99 		size_t bmap_sz;
100 		void *mem;
101 
102 		/* Setup enough descriptors for all tx queues */
103 		nix->outb_nb_desc = dev->outb.nb_desc;
104 		nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
105 
106 		/* Setup Inline Outbound */
107 		rc = roc_nix_inl_outb_init(nix);
108 		if (rc) {
109 			plt_err("Failed to initialize nix inline outb, rc=%d",
110 				rc);
111 			goto sa_dptr_free;
112 		}
113 
114 		dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
115 
116 		/* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
117 		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
118 			return 0;
119 
120 		/* Allocate memory to be used as dptr for CPT ucode
121 		 * WRITE_SA op.
122 		 */
123 		dev->outb.sa_dptr =
124 			plt_zmalloc(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ, 0);
125 		if (!dev->outb.sa_dptr) {
126 			plt_err("Couldn't allocate memory for SA dptr");
127 			rc = -ENOMEM;
128 			goto sa_dptr_free;
129 		}
130 
131 		rc = -ENOMEM;
132 		/* Allocate a bitmap to alloc and free sa indexes */
133 		bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
134 		mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
135 		if (mem == NULL) {
136 			plt_err("Outbound SA bmap alloc failed");
137 
138 			rc |= roc_nix_inl_outb_fini(nix);
139 			goto sa_dptr_free;
140 		}
141 
142 		rc = -EIO;
143 		bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
144 		if (!bmap) {
145 			plt_err("Outbound SA bmap init failed");
146 
147 			rc |= roc_nix_inl_outb_fini(nix);
148 			plt_free(mem);
149 			goto sa_dptr_free;
150 		}
151 
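		/* Initialize the bitmap with every outbound SA index marked
		 * as free.
		 */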
152 		for (i = 0; i < dev->outb.max_sa; i++)
153 			plt_bitmap_set(bmap, i);
154 
155 		dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
156 		dev->outb.sa_bmap_mem = mem;
157 		dev->outb.sa_bmap = bmap;
158 
159 		dev->outb.fc_sw_mem = plt_zmalloc(dev->outb.nb_crypto_qs *
160 							  RTE_CACHE_LINE_SIZE,
161 						  RTE_CACHE_LINE_SIZE);
162 		if (!dev->outb.fc_sw_mem) {
163 			plt_err("Outbound fc sw mem alloc failed");
164 			goto sa_bmap_free;
165 		}
166 	}
167 	return 0;
168 
169 sa_bmap_free:
170 	plt_free(dev->outb.sa_bmap_mem);
171 sa_dptr_free:
172 	if (dev->inb.sa_dptr)
173 		plt_free(dev->inb.sa_dptr);
174 	if (dev->outb.sa_dptr)
175 		plt_free(dev->outb.sa_dptr);
176 cleanup:
177 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
178 		rc |= roc_nix_inl_inb_fini(nix);
179 	return rc;
180 }
181 
182 static int
183 nix_meter_fini(struct cnxk_eth_dev *dev)
184 {
185 	struct cnxk_meter_node *next_mtr = NULL;
186 	struct roc_nix_bpf_objs profs = {0};
187 	struct cnxk_meter_node *mtr = NULL;
188 	struct cnxk_mtr *fms = &dev->mtr;
189 	struct roc_nix *nix = &dev->nix;
190 	struct roc_nix_rq *rq;
191 	uint32_t i;
192 	int rc = 0;
193 
194 	RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
195 		for (i = 0; i < mtr->rq_num; i++) {
196 			rq = &dev->rqs[mtr->rq_id[i]];
197 			rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false);
198 		}
199 
200 		profs.level = mtr->level;
201 		profs.count = 1;
202 		profs.ids[0] = mtr->bpf_id;
203 		rc = roc_nix_bpf_free(nix, &profs, 1);
204 
205 		if (rc)
206 			return rc;
207 
208 		TAILQ_REMOVE(fms, mtr, next);
209 		plt_free(mtr);
210 	}
211 	return 0;
212 }
213 
214 static int
215 nix_security_release(struct cnxk_eth_dev *dev)
216 {
217 	struct rte_eth_dev *eth_dev = dev->eth_dev;
218 	struct cnxk_eth_sec_sess *eth_sec, *tvar;
219 	struct roc_nix *nix = &dev->nix;
220 	int rc, ret = 0;
221 
222 	/* Cleanup Inline inbound */
223 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
224 		/* Destroy inbound sessions */
225 		tvar = NULL;
226 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
227 			cnxk_eth_sec_ops.session_destroy(eth_dev,
228 							 eth_sec->sess);
229 
230 		/* Clear lookup mem */
231 		cnxk_nix_lookup_mem_sa_base_clear(dev);
232 
233 		rc = roc_nix_inl_inb_fini(nix);
234 		if (rc)
235 			plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
236 		ret |= rc;
237 
238 		if (dev->inb.sa_dptr) {
239 			plt_free(dev->inb.sa_dptr);
240 			dev->inb.sa_dptr = NULL;
241 		}
242 	}
243 
244 	/* Cleanup Inline outbound */
245 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
246 	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
247 		/* Destroy outbound sessions */
248 		tvar = NULL;
249 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
250 			cnxk_eth_sec_ops.session_destroy(eth_dev,
251 							 eth_sec->sess);
252 
253 		rc = roc_nix_inl_outb_fini(nix);
254 		if (rc)
255 			plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
256 		ret |= rc;
257 
258 		plt_bitmap_free(dev->outb.sa_bmap);
259 		plt_free(dev->outb.sa_bmap_mem);
260 		dev->outb.sa_bmap = NULL;
261 		dev->outb.sa_bmap_mem = NULL;
262 		if (dev->outb.sa_dptr) {
263 			plt_free(dev->outb.sa_dptr);
264 			dev->outb.sa_dptr = NULL;
265 		}
266 
267 		plt_free(dev->outb.fc_sw_mem);
268 		dev->outb.fc_sw_mem = NULL;
269 	}
270 
271 	dev->inb.inl_dev = false;
272 	roc_nix_inb_mode_set(nix, false);
273 	dev->nb_rxq_sso = 0;
274 	dev->inb.nb_sess = 0;
275 	dev->outb.nb_sess = 0;
276 	return ret;
277 }
278 
279 static void
280 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
281 {
282 	struct rte_pktmbuf_pool_private *mbp_priv;
283 	struct rte_eth_dev *eth_dev;
284 	struct cnxk_eth_dev *dev;
285 	uint32_t buffsz;
286 
287 	dev = rxq->dev;
288 	eth_dev = dev->eth_dev;
289 
290 	/* Get rx buffer size */
291 	mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
292 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
293 
294 	if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
295 		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
296 		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
297 	}
298 }
299 
300 int
301 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
302 {
303 	struct rte_eth_dev_data *data = eth_dev->data;
304 	struct cnxk_eth_rxq_sp *rxq;
305 	int rc;
306 
307 	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
308 	/* Setup scatter mode if needed by jumbo */
309 	nix_enable_mseg_on_jumbo(rxq);
310 
311 	rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
312 	if (rc)
313 		plt_err("Failed to set default MTU size, rc=%d", rc);
314 
315 	return rc;
316 }
317 
318 static int
319 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
320 {
321 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
322 	enum roc_nix_fc_mode fc_mode = ROC_NIX_FC_FULL;
323 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
324 	int rc;
325 
326 	if (roc_nix_is_sdp(&dev->nix))
327 		return 0;
328 
329 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
330 	if (roc_model_is_cn96_ax() &&
331 	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG)
332 		fc_mode = ROC_NIX_FC_TX;
333 
334 	/* By default enable flow control */
335 	rc = roc_nix_fc_mode_set(&dev->nix, fc_mode);
336 	if (rc)
337 		return rc;
338 
339 	fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL :
340 						  RTE_ETH_FC_TX_PAUSE;
341 	return rc;
342 }
343 
344 static int
345 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
346 {
347 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
348 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
349 	struct rte_eth_fc_conf fc_cfg = {0};
350 
351 	if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
352 		return 0;
353 
354 	fc_cfg.mode = fc->mode;
355 
356 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
357 	if (roc_model_is_cn96_ax() &&
358 	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
359 	    (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
360 		fc_cfg.mode =
361 				(fc_cfg.mode == RTE_ETH_FC_FULL ||
362 				fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
363 				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
364 	}
365 
366 	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
367 }
368 
369 uint64_t
370 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
371 {
372 	uint16_t port_id = dev->eth_dev->data->port_id;
373 	struct rte_mbuf mb_def;
374 	uint64_t *tmp;
375 
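	/* mb_def.rearm_data is a single 64-bit word that overlays data_off,
	 * refcnt, nb_segs and port; the build-time asserts below pin that
	 * layout so the whole word can be written in one store.
	 */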
376 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
377 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
378 				 offsetof(struct rte_mbuf, data_off) !=
379 			 2);
380 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
381 				 offsetof(struct rte_mbuf, data_off) !=
382 			 4);
383 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
384 				 offsetof(struct rte_mbuf, data_off) !=
385 			 6);
386 	mb_def.nb_segs = 1;
387 	mb_def.data_off = RTE_PKTMBUF_HEADROOM +
388 			  (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
389 	mb_def.port = port_id;
390 	rte_mbuf_refcnt_set(&mb_def, 1);
391 
392 	/* Prevent compiler reordering: rearm_data covers previous fields */
393 	rte_compiler_barrier();
394 	tmp = (uint64_t *)&mb_def.rearm_data;
395 
396 	return *tmp;
397 }
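
/* Illustrative sketch (not part of this file): the word returned above is
 * typically cached per Rx queue by the platform-specific fast path and
 * written into each received mbuf in a single store, roughly:
 *
 *	*(uint64_t *)&mbuf->rearm_data = rxq->mbuf_initializer;
 *
 * where "mbuf_initializer" is an assumed name for the cached value.
 */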
398 
399 static inline uint8_t
400 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
401 {
402 	/*
403 	 * Maximum three segments can be supported with W8, Choose
404 	 * NIX_MAXSQESZ_W16 for multi segment offload.
405 	 */
406 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
407 		return NIX_MAXSQESZ_W16;
408 	else
409 		return NIX_MAXSQESZ_W8;
410 }
411 
412 int
413 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
414 			uint16_t nb_desc, uint16_t fp_tx_q_sz,
415 			const struct rte_eth_txconf *tx_conf)
416 {
417 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
418 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
419 	struct cnxk_eth_txq_sp *txq_sp;
420 	struct roc_nix_sq *sq;
421 	size_t txq_sz;
422 	int rc;
423 
424 	/* Free memory prior to re-allocation if needed. */
425 	if (eth_dev->data->tx_queues[qid] != NULL) {
426 		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
427 		dev_ops->tx_queue_release(eth_dev, qid);
428 		eth_dev->data->tx_queues[qid] = NULL;
429 	}
430 
431 	/* When Tx Security offload is enabled, increase tx desc count by
432 	 * max possible outbound desc count.
433 	 */
434 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
435 		nb_desc += dev->outb.nb_desc;
436 
437 	/* Setup ROC SQ */
438 	sq = &dev->sqs[qid];
439 	sq->qid = qid;
440 	sq->nb_desc = nb_desc;
441 	sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
442 
443 	rc = roc_nix_sq_init(&dev->nix, sq);
444 	if (rc) {
445 		plt_err("Failed to init sq=%d, rc=%d", qid, rc);
446 		return rc;
447 	}
448 
449 	rc = -ENOMEM;
450 	txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
451 	txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
452 	if (!txq_sp) {
453 		plt_err("Failed to alloc tx queue mem");
454 		rc |= roc_nix_sq_fini(sq);
455 		return rc;
456 	}
457 
458 	txq_sp->dev = dev;
459 	txq_sp->qid = qid;
460 	txq_sp->qconf.conf.tx = *tx_conf;
461 	/* Queue config should reflect global offloads */
462 	txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
463 	txq_sp->qconf.nb_desc = nb_desc;
464 
465 	plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
466 		    " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
467 		    qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
468 		    sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
469 
470 	/* Store start of fast path area */
471 	eth_dev->data->tx_queues[qid] = txq_sp + 1;
472 	eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
473 	return 0;
474 }
475 
476 static void
477 cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
478 {
479 	void *txq = eth_dev->data->tx_queues[qid];
480 	struct cnxk_eth_txq_sp *txq_sp;
481 	struct cnxk_eth_dev *dev;
482 	struct roc_nix_sq *sq;
483 	int rc;
484 
485 	if (!txq)
486 		return;
487 
488 	txq_sp = cnxk_eth_txq_to_sp(txq);
489 
490 	dev = txq_sp->dev;
491 
492 	plt_nix_dbg("Releasing txq %u", qid);
493 
494 	/* Cleanup ROC SQ */
495 	sq = &dev->sqs[qid];
496 	rc = roc_nix_sq_fini(sq);
497 	if (rc)
498 		plt_err("Failed to cleanup sq, rc=%d", rc);
499 
500 	/* Finally free */
501 	plt_free(txq_sp);
502 }
503 
504 int
505 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
506 			uint16_t nb_desc, uint16_t fp_rx_q_sz,
507 			const struct rte_eth_rxconf *rx_conf,
508 			struct rte_mempool *mp)
509 {
510 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
511 	struct roc_nix *nix = &dev->nix;
512 	struct cnxk_eth_rxq_sp *rxq_sp;
513 	struct rte_mempool_ops *ops;
514 	const char *platform_ops;
515 	struct roc_nix_rq *rq;
516 	struct roc_nix_cq *cq;
517 	uint16_t first_skip;
518 	int rc = -EINVAL;
519 	size_t rxq_sz;
520 
521 	/* Sanity checks */
522 	if (rx_conf->rx_deferred_start == 1) {
523 		plt_err("Deferred Rx start is not supported");
524 		goto fail;
525 	}
526 
527 	platform_ops = rte_mbuf_platform_mempool_ops();
528 	/* This driver needs cnxk_npa mempool ops to work */
529 	ops = rte_mempool_get_ops(mp->ops_index);
530 	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
531 		plt_err("mempool ops should be of cnxk_npa type");
532 		goto fail;
533 	}
534 
535 	if (mp->pool_id == 0) {
536 		plt_err("Invalid pool_id");
537 		goto fail;
538 	}
539 
540 	/* Free memory prior to re-allocation if needed */
541 	if (eth_dev->data->rx_queues[qid] != NULL) {
542 		const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
543 
544 		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
545 		dev_ops->rx_queue_release(eth_dev, qid);
546 		eth_dev->data->rx_queues[qid] = NULL;
547 	}
548 
549 	/* It's a no-op when the inline device is not used */
550 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY ||
551 	    dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
552 		roc_nix_inl_dev_xaq_realloc(mp->pool_id);
553 
554 	/* Setup ROC CQ */
555 	cq = &dev->cqs[qid];
556 	cq->qid = qid;
557 	cq->nb_desc = nb_desc;
558 	rc = roc_nix_cq_init(&dev->nix, cq);
559 	if (rc) {
560 		plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
561 		goto fail;
562 	}
563 
564 	/* Setup ROC RQ */
565 	rq = &dev->rqs[qid];
566 	rq->qid = qid;
567 	rq->aura_handle = mp->pool_id;
568 	rq->flow_tag_width = 32;
569 	rq->sso_ena = false;
570 
571 	/* Calculate first mbuf skip */
572 	first_skip = (sizeof(struct rte_mbuf));
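	/* first_skip is the byte offset from the buffer start to where the
	 * HW must place packet data: past the mbuf header, the private area
	 * and the headroom.
	 */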
573 	first_skip += RTE_PKTMBUF_HEADROOM;
574 	first_skip += rte_pktmbuf_priv_size(mp);
575 	rq->first_skip = first_skip;
576 	rq->later_skip = sizeof(struct rte_mbuf);
577 	rq->lpb_size = mp->elt_size;
578 	rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
579 
580 	/* Enable Inline IPSec on RQ, will not be used for Poll mode */
581 	if (roc_nix_inl_inb_is_enabled(nix))
582 		rq->ipsech_ena = true;
583 
584 	rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
585 	if (rc) {
586 		plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
587 		goto cq_fini;
588 	}
589 
590 	/* Allocate and setup fast path rx queue */
591 	rc = -ENOMEM;
592 	rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
593 	rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
594 	if (!rxq_sp) {
595 		plt_err("Failed to alloc rx queue for rq=%d", qid);
596 		goto rq_fini;
597 	}
598 
599 	/* Setup slow path fields */
600 	rxq_sp->dev = dev;
601 	rxq_sp->qid = qid;
602 	rxq_sp->qconf.conf.rx = *rx_conf;
603 	/* Queue config should reflect global offloads */
604 	rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
605 	rxq_sp->qconf.nb_desc = nb_desc;
606 	rxq_sp->qconf.mp = mp;
607 
608 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
609 		/* Pass a tagmask used to handle error packets in inline device.
610 		 * Ethdev rq's tag_mask field will be overwritten later
611 		 * when sso is setup.
612 		 */
613 		rq->tag_mask =
614 			0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
615 
616 		/* Setup rq reference for inline dev if present */
617 		rc = roc_nix_inl_dev_rq_get(rq);
618 		if (rc)
619 			goto free_mem;
620 	}
621 
622 	plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
623 		    cq->nb_desc);
624 
625 	/* Store start of fast path area */
626 	eth_dev->data->rx_queues[qid] = rxq_sp + 1;
627 	eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
628 
629 	/* Calculate the delta and freq mult between the PTP HI clock and tsc.
630 	 * These are needed to derive the raw clock value from the tsc counter.
631 	 * The read_clock eth op returns the raw clock value.
632 	 */
633 	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
634 		rc = cnxk_nix_tsc_convert(dev);
635 		if (rc) {
636 			plt_err("Failed to calculate delta and freq mult");
637 			goto rq_fini;
638 		}
639 	}
640 
641 	return 0;
642 free_mem:
643 	plt_free(rxq_sp);
644 rq_fini:
645 	rc |= roc_nix_rq_fini(rq);
646 cq_fini:
647 	rc |= roc_nix_cq_fini(cq);
648 fail:
649 	return rc;
650 }
651 
652 static void
653 cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
654 {
655 	void *rxq = eth_dev->data->rx_queues[qid];
656 	struct cnxk_eth_rxq_sp *rxq_sp;
657 	struct cnxk_eth_dev *dev;
658 	struct roc_nix_rq *rq;
659 	struct roc_nix_cq *cq;
660 	int rc;
661 
662 	if (!rxq)
663 		return;
664 
665 	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
666 	dev = rxq_sp->dev;
667 	rq = &dev->rqs[qid];
668 
669 	plt_nix_dbg("Releasing rxq %u", qid);
670 
671 	/* Release rq reference for inline dev if present */
672 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
673 		roc_nix_inl_dev_rq_put(rq);
674 
675 	/* Cleanup ROC RQ */
676 	rc = roc_nix_rq_fini(rq);
677 	if (rc)
678 		plt_err("Failed to cleanup rq, rc=%d", rc);
679 
680 	/* Cleanup ROC CQ */
681 	cq = &dev->cqs[qid];
682 	rc = roc_nix_cq_fini(cq);
683 	if (rc)
684 		plt_err("Failed to cleanup cq, rc=%d", rc);
685 
686 	/* Finally free fast path area */
687 	plt_free(rxq_sp);
688 }
689 
690 uint32_t
691 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
692 		       uint8_t rss_level)
693 {
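	/* Rows of flow_key_type[] correspond to the RSS hash level (outer
	 * only, inner only, outer+inner); columns follow the RSS_*_INDEX
	 * order used below: IPv4, IPv6, TCP, UDP, SCTP, DMAC.
	 */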
694 	uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
695 		{FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
696 		 FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
697 		{FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
698 		 FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
699 		 FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
700 		{FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
701 		 FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
702 		 FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
703 		 FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
704 		 FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
705 		 FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
706 	};
707 	uint32_t flowkey_cfg = 0;
708 
709 	dev->ethdev_rss_hf = ethdev_rss;
710 
711 	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
712 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
713 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
714 	}
715 
716 	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
717 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
718 
719 	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
720 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
721 
722 	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
723 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
724 
725 	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
726 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
727 
728 	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
729 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
730 
731 	if (ethdev_rss & RSS_IPV4_ENABLE)
732 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
733 
734 	if (ethdev_rss & RSS_IPV6_ENABLE)
735 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
736 
737 	if (ethdev_rss & RTE_ETH_RSS_TCP)
738 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
739 
740 	if (ethdev_rss & RTE_ETH_RSS_UDP)
741 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
742 
743 	if (ethdev_rss & RTE_ETH_RSS_SCTP)
744 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
745 
746 	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
747 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
748 
749 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
750 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
751 
752 	if (ethdev_rss & RTE_ETH_RSS_PORT)
753 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
754 
755 	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
756 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
757 
758 	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
759 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
760 
761 	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
762 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
763 
764 	if (ethdev_rss & RTE_ETH_RSS_GTPU)
765 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
766 
767 	return flowkey_cfg;
768 }
769 
770 static void
771 nix_free_queue_mem(struct cnxk_eth_dev *dev)
772 {
773 	plt_free(dev->rqs);
774 	plt_free(dev->cqs);
775 	plt_free(dev->sqs);
776 	dev->rqs = NULL;
777 	dev->cqs = NULL;
778 	dev->sqs = NULL;
779 }
780 
781 static int
782 nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
783 {
784 	struct rte_eth_dev *eth_dev = dev->eth_dev;
785 	int rc = 0;
786 
787 	TAILQ_INIT(&dev->mtr_profiles);
788 	TAILQ_INIT(&dev->mtr_policy);
789 	TAILQ_INIT(&dev->mtr);
790 
791 	if (eth_dev->dev_ops->mtr_ops_get == NULL)
792 		return rc;
793 
794 	return nix_mtr_capabilities_init(eth_dev);
795 }
796 
797 static int
798 nix_rss_default_setup(struct cnxk_eth_dev *dev)
799 {
800 	struct rte_eth_dev *eth_dev = dev->eth_dev;
801 	uint8_t rss_hash_level;
802 	uint32_t flowkey_cfg;
803 	uint64_t rss_hf;
804 
805 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
806 	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
807 	if (rss_hash_level)
808 		rss_hash_level -= 1;
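	/* RTE_ETH_RSS_LEVEL() yields 0 (PMD default), 1 (outermost) or
	 * 2 (innermost); non-zero values are converted to the 0-based row
	 * index expected by cnxk_rss_ethdev_to_nix().
	 */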
809 
810 	flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
811 	return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
812 }
813 
814 static int
815 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
816 {
817 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
818 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
819 	struct cnxk_eth_qconf *tx_qconf = NULL;
820 	struct cnxk_eth_qconf *rx_qconf = NULL;
821 	struct cnxk_eth_rxq_sp *rxq_sp;
822 	struct cnxk_eth_txq_sp *txq_sp;
823 	int i, nb_rxq, nb_txq;
824 	void **txq, **rxq;
825 
826 	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
827 	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
828 
829 	tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
830 	if (tx_qconf == NULL) {
831 		plt_err("Failed to allocate memory for tx_qconf");
832 		goto fail;
833 	}
834 
835 	rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
836 	if (rx_qconf == NULL) {
837 		plt_err("Failed to allocate memory for rx_qconf");
838 		goto fail;
839 	}
840 
841 	txq = eth_dev->data->tx_queues;
842 	for (i = 0; i < nb_txq; i++) {
843 		if (txq[i] == NULL) {
844 			tx_qconf[i].valid = false;
845 			plt_info("txq[%d] is already released", i);
846 			continue;
847 		}
848 		txq_sp = cnxk_eth_txq_to_sp(txq[i]);
849 		memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
850 		tx_qconf[i].valid = true;
851 		dev_ops->tx_queue_release(eth_dev, i);
852 		eth_dev->data->tx_queues[i] = NULL;
853 	}
854 
855 	rxq = eth_dev->data->rx_queues;
856 	for (i = 0; i < nb_rxq; i++) {
857 		if (rxq[i] == NULL) {
858 			rx_qconf[i].valid = false;
859 			plt_info("rxq[%d] is already released", i);
860 			continue;
861 		}
862 		rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
863 		memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
864 		rx_qconf[i].valid = true;
865 		dev_ops->rx_queue_release(eth_dev, i);
866 		eth_dev->data->rx_queues[i] = NULL;
867 	}
868 
869 	dev->tx_qconf = tx_qconf;
870 	dev->rx_qconf = rx_qconf;
871 	return 0;
872 
873 fail:
874 	free(tx_qconf);
875 	free(rx_qconf);
876 	return -ENOMEM;
877 }
878 
879 static int
880 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
881 {
882 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
883 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
884 	struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
885 	struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
886 	int rc, i, nb_rxq, nb_txq;
887 
888 	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
889 	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
890 
891 	rc = -ENOMEM;
892 	/* Setup tx & rx queues with the previous configuration so
893 	 * that the queues can be functional in cases where ports
894 	 * are started without reconfiguring queues.
895 	 *
896 	 * The usual reconfig sequence is like below:
897 	 * port_configure() {
898 	 *      if(reconfigure) {
899 	 *              queue_release()
900 	 *              queue_setup()
901 	 *      }
902 	 *      queue_configure() {
903 	 *              queue_release()
904 	 *              queue_setup()
905 	 *      }
906 	 * }
907 	 * port_start()
908 	 *
909 	 * In some application's control path, queue_configure() would
910 	 * NOT be invoked for TXQs/RXQs in port_configure().
911 	 * In such cases, queues can be functional after start as the
912 	 * queues are already setup in port_configure().
913 	 */
914 	for (i = 0; i < nb_txq; i++) {
915 		if (!tx_qconf[i].valid)
916 			continue;
917 		rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
918 					     &tx_qconf[i].conf.tx);
919 		if (rc) {
920 			plt_err("Failed to setup tx queue rc=%d", rc);
921 			for (i -= 1; i >= 0; i--)
922 				dev_ops->tx_queue_release(eth_dev, i);
923 			goto fail;
924 		}
925 	}
926 
927 	free(tx_qconf);
928 	tx_qconf = NULL;
929 
930 	for (i = 0; i < nb_rxq; i++) {
931 		if (!rx_qconf[i].valid)
932 			continue;
933 		rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
934 					     &rx_qconf[i].conf.rx,
935 					     rx_qconf[i].mp);
936 		if (rc) {
937 			plt_err("Failed to setup rx queue rc=%d", rc);
938 			for (i -= 1; i >= 0; i--)
939 				dev_ops->rx_queue_release(eth_dev, i);
940 			goto tx_queue_release;
941 		}
942 	}
943 
944 	free(rx_qconf);
945 	rx_qconf = NULL;
946 
947 	return 0;
948 
949 tx_queue_release:
950 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
951 		dev_ops->tx_queue_release(eth_dev, i);
952 fail:
953 	free(tx_qconf);
954 	free(rx_qconf);
955 
956 	return rc;
957 }
958 
959 static void
960 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
961 {
962 	/* These dummy functions are required to support applications
963 	 * that reconfigure queues without stopping the tx burst and
964 	 * rx burst threads (e.g. the kni app).
965 	 * When the queue context is saved, txqs/rxqs are released,
966 	 * which would crash the app since rx/tx burst may still be
967 	 * running on different lcores.
968 	 */
969 	eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
970 	eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
971 	rte_mb();
972 }
973 
974 static int
975 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
976 {
977 	uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
978 	uint8_t tun[ROC_NIX_LSO_TUN_MAX];
979 	struct roc_nix *nix = &dev->nix;
980 	int rc;
981 
982 	rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
983 	if (rc)
984 		return rc;
985 
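	/* Pack one 8-bit LSO format index per tunnel type: bytes 0-3 hold
	 * the plain tunnel formats and bytes 4-7 the UDP tunnel formats,
	 * each in V4V4, V4V6, V6V4, V6V6 order.
	 */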
986 	dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
987 			    (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
988 			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
989 			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
990 
991 	dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
992 			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
993 			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
994 			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
995 	return 0;
996 }
997 
998 static int
999 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
1000 {
1001 	struct roc_nix *nix = &dev->nix;
1002 	int rc;
1003 
1004 	/* Nothing much to do if offload is not enabled */
1005 	if (!(dev->tx_offloads &
1006 	      (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1007 	       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
1008 		return 0;
1009 
1010 	/* Setup LSO formats in AF. It's a no-op if another ethdev has
1011 	 * already set it up.
1012 	 */
1013 	rc = roc_nix_lso_fmt_setup(nix);
1014 	if (rc)
1015 		return rc;
1016 
1017 	return nix_lso_tun_fmt_update(dev);
1018 }
1019 
1020 int
1021 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
1022 {
1023 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1024 	struct rte_eth_dev_data *data = eth_dev->data;
1025 	struct rte_eth_conf *conf = &data->dev_conf;
1026 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
1027 	struct rte_eth_txmode *txmode = &conf->txmode;
1028 	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1029 	struct roc_nix_fc_cfg fc_cfg = {0};
1030 	struct roc_nix *nix = &dev->nix;
1031 	struct rte_ether_addr *ea;
1032 	uint8_t nb_rxq, nb_txq;
1033 	uint64_t rx_cfg;
1034 	void *qs;
1035 	int rc;
1036 
1037 	rc = -EINVAL;
1038 
1039 	/* Sanity checks */
1040 	if (rte_eal_has_hugepages() == 0) {
1041 		plt_err("Huge page is not configured");
1042 		goto fail_configure;
1043 	}
1044 
1045 	if (conf->dcb_capability_en == 1) {
1046 		plt_err("dcb enable is not supported");
1047 		goto fail_configure;
1048 	}
1049 
1050 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1051 		plt_err("Flow director is not supported");
1052 		goto fail_configure;
1053 	}
1054 
1055 	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
1056 	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
1057 		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1058 		goto fail_configure;
1059 	}
1060 
1061 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
1062 		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
1063 		goto fail_configure;
1064 	}
1065 
1066 	/* Free the resources allocated from the previous configure */
1067 	if (dev->configured == 1) {
1068 		/* Unregister queue irq's */
1069 		roc_nix_unregister_queue_irqs(nix);
1070 
1071 		/* Unregister CQ irqs if present */
1072 		if (eth_dev->data->dev_conf.intr_conf.rxq)
1073 			roc_nix_unregister_cq_irqs(nix);
1074 
1075 		/* Set no-op functions */
1076 		nix_set_nop_rxtx_function(eth_dev);
1077 		/* Store queue config for later */
1078 		rc = nix_store_queue_cfg_and_then_release(eth_dev);
1079 		if (rc)
1080 			goto fail_configure;
1081 
1082 		/* Disable and free rte_meter entries */
1083 		rc = nix_meter_fini(dev);
1084 		if (rc)
1085 			goto fail_configure;
1086 
1087 		/* Cleanup security support */
1088 		rc = nix_security_release(dev);
1089 		if (rc)
1090 			goto fail_configure;
1091 
1092 		roc_nix_tm_fini(nix);
1093 		roc_nix_lf_free(nix);
1094 	}
1095 
1096 	dev->rx_offloads = rxmode->offloads;
1097 	dev->tx_offloads = txmode->offloads;
1098 
1099 	/* Prepare rx cfg */
1100 	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
1101 	if (dev->rx_offloads &
1102 	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
1103 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
1104 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
1105 	}
1106 	rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
1107 		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
1108 		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
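	/* Enable outer/inner L3/L4 length checks and drop packets flagged
	 * with receive errors.
	 */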
1109 
1110 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
1111 		rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
1112 		/* Disable drop re if rx offload security is enabled and
1113 		 * platform does not support it.
1114 		 */
1115 		if (dev->ipsecd_drop_re_dis)
1116 			rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
1117 	}
1118 
1119 	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1120 	nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1121 
1122 	if (roc_nix_is_lbk(nix))
1123 		nix->enable_loop = eth_dev->data->dev_conf.lpbk_mode;
1124 
1125 	/* Alloc a nix lf */
1126 	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
1127 	if (rc) {
1128 		plt_err("Failed to init nix_lf rc=%d", rc);
1129 		goto fail_configure;
1130 	}
1131 
1132 	/* Check if PTP is enabled in the PF owning this VF */
1133 	if (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix)))
1134 		dev->ptp_en = roc_nix_ptp_is_enable(nix);
1135 
1136 	dev->npc.channel = roc_nix_get_base_chan(nix);
1137 
1138 	nb_rxq = data->nb_rx_queues;
1139 	nb_txq = data->nb_tx_queues;
1140 	rc = -ENOMEM;
1141 	if (nb_rxq) {
1142 		/* Allocate memory for roc rq's and cq's */
1143 		qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
1144 		if (!qs) {
1145 			plt_err("Failed to alloc rqs");
1146 			goto free_nix_lf;
1147 		}
1148 		dev->rqs = qs;
1149 
1150 		qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
1151 		if (!qs) {
1152 			plt_err("Failed to alloc cqs");
1153 			goto free_nix_lf;
1154 		}
1155 		dev->cqs = qs;
1156 	}
1157 
1158 	if (nb_txq) {
1159 		/* Allocate memory for roc sq's */
1160 		qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
1161 		if (!qs) {
1162 			plt_err("Failed to alloc sqs");
1163 			goto free_nix_lf;
1164 		}
1165 		dev->sqs = qs;
1166 	}
1167 
1168 	/* Re-enable NIX LF error interrupts */
1169 	roc_nix_err_intr_ena_dis(nix, true);
1170 	roc_nix_ras_intr_ena_dis(nix, true);
1171 
1172 	if (nix->rx_ptp_ena &&
1173 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
1174 		plt_err("Both PTP and switch header enabled");
1175 		goto free_nix_lf;
1176 	}
1177 
1178 	rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type,
1179 				    dev->npc.pre_l2_size_offset,
1180 				    dev->npc.pre_l2_size_offset_mask,
1181 				    dev->npc.pre_l2_size_shift_dir);
1182 	if (rc) {
1183 		plt_err("Failed to enable switch type nix_lf rc=%d", rc);
1184 		goto free_nix_lf;
1185 	}
1186 
1187 	/* Setup LSO if needed */
1188 	rc = nix_lso_fmt_setup(dev);
1189 	if (rc) {
1190 		plt_err("Failed to setup nix lso format fields, rc=%d", rc);
1191 		goto free_nix_lf;
1192 	}
1193 
1194 	/* Configure RSS */
1195 	rc = nix_rss_default_setup(dev);
1196 	if (rc) {
1197 		plt_err("Failed to configure rss rc=%d", rc);
1198 		goto free_nix_lf;
1199 	}
1200 
1201 	/* Init the default TM scheduler hierarchy */
1202 	rc = roc_nix_tm_init(nix);
1203 	if (rc) {
1204 		plt_err("Failed to init traffic manager, rc=%d", rc);
1205 		goto free_nix_lf;
1206 	}
1207 
1208 	rc = nix_ingress_policer_setup(dev);
1209 	if (rc) {
1210 		plt_err("Failed to setup ingress policer rc=%d", rc);
1211 		goto free_nix_lf;
1212 	}
1213 
1214 	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
1215 	if (rc) {
1216 		plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
1217 		goto tm_fini;
1218 	}
1219 
1220 	/* Register queue IRQs */
1221 	rc = roc_nix_register_queue_irqs(nix);
1222 	if (rc) {
1223 		plt_err("Failed to register queue interrupts rc=%d", rc);
1224 		goto tm_fini;
1225 	}
1226 
1227 	/* Register cq IRQs */
1228 	if (eth_dev->data->dev_conf.intr_conf.rxq) {
1229 		if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
1230 			plt_err("Rx interrupt cannot be enabled, rxq > %d",
1231 				dev->nix.cints);
1232 			goto q_irq_fini;
1233 		}
1234 		/* Rx interrupt feature cannot work with vector mode because
1235 		 * vector mode does not process packets unless at least 4 pkts are
1236 		 * received, while cq interrupts are generated even for 1 pkt
1237 		 * in the CQ.
1238 		 */
1239 		dev->scalar_ena = true;
1240 
1241 		rc = roc_nix_register_cq_irqs(nix);
1242 		if (rc) {
1243 			plt_err("Failed to register CQ interrupts rc=%d", rc);
1244 			goto q_irq_fini;
1245 		}
1246 	}
1247 
1248 	if (roc_nix_is_lbk(nix))
1249 		goto skip_lbk_setup;
1250 
1251 	/* Configure loop back mode */
1252 	rc = roc_nix_mac_loopback_enable(nix,
1253 					 eth_dev->data->dev_conf.lpbk_mode);
1254 	if (rc) {
1255 		plt_err("Failed to configure cgx loop back mode rc=%d", rc);
1256 		goto cq_fini;
1257 	}
1258 
1259 skip_lbk_setup:
1260 	/* Setup Inline security support */
1261 	rc = nix_security_setup(dev);
1262 	if (rc)
1263 		goto cq_fini;
1264 
1265 	/* Init flow control configuration */
1266 	fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
1267 	fc_cfg.rxchan_cfg.enable = true;
1268 	rc = roc_nix_fc_config_set(nix, &fc_cfg);
1269 	if (rc) {
1270 		plt_err("Failed to initialize flow control rc=%d", rc);
1271 		goto cq_fini;
1272 	}
1273 
1274 	/* Update flow control configuration to PMD */
1275 	rc = nix_init_flow_ctrl_config(eth_dev);
1276 	if (rc) {
1277 		plt_err("Failed to initialize flow control rc=%d", rc);
1278 		goto cq_fini;
1279 	}
1280 
1281 	/* Initialize TC to SQ mapping as invalid */
1282 	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
1283 	/*
1284 	 * Restore queue config for the case where one reconfigure follows
1285 	 * another and the application did not invoke queue configure.
1286 	 */
1287 	if (dev->configured == 1) {
1288 		rc = nix_restore_queue_cfg(eth_dev);
1289 		if (rc)
1290 			goto sec_release;
1291 	}
1292 
1293 	/* Update the mac address */
1294 	ea = eth_dev->data->mac_addrs;
1295 	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1296 	if (rte_is_zero_ether_addr(ea))
1297 		rte_eth_random_addr((uint8_t *)ea);
1298 
1299 	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1300 
1301 	plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1302 		    " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
1303 		    eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
1304 		    dev->rx_offloads, dev->tx_offloads);
1305 
1306 	/* All good */
1307 	dev->configured = 1;
1308 	dev->nb_rxq = data->nb_rx_queues;
1309 	dev->nb_txq = data->nb_tx_queues;
1310 	return 0;
1311 
1312 sec_release:
1313 	rc |= nix_security_release(dev);
1314 cq_fini:
1315 	roc_nix_unregister_cq_irqs(nix);
1316 q_irq_fini:
1317 	roc_nix_unregister_queue_irqs(nix);
1318 tm_fini:
1319 	roc_nix_tm_fini(nix);
1320 free_nix_lf:
1321 	nix_free_queue_mem(dev);
1322 	rc |= roc_nix_lf_free(nix);
1323 fail_configure:
1324 	dev->configured = 0;
1325 	return rc;
1326 }
1327 
1328 int
1329 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1330 {
1331 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1332 	struct rte_eth_dev_data *data = eth_dev->data;
1333 	struct roc_nix_sq *sq = &dev->sqs[qid];
1334 	int rc = -EINVAL;
1335 
1336 	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1337 		return 0;
1338 
1339 	rc = roc_nix_tm_sq_aura_fc(sq, true);
1340 	if (rc) {
1341 		plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1342 		goto done;
1343 	}
1344 
1345 	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1346 done:
1347 	return rc;
1348 }
1349 
1350 int
1351 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1352 {
1353 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1354 	struct rte_eth_dev_data *data = eth_dev->data;
1355 	struct roc_nix_sq *sq = &dev->sqs[qid];
1356 	int rc;
1357 
1358 	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1359 		return 0;
1360 
1361 	rc = roc_nix_tm_sq_aura_fc(sq, false);
1362 	if (rc) {
1363 		plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1364 			rc);
1365 		goto done;
1366 	}
1367 
1368 	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1369 done:
1370 	return rc;
1371 }
1372 
1373 static int
1374 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1375 {
1376 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1377 	struct rte_eth_dev_data *data = eth_dev->data;
1378 	struct roc_nix_rq *rq = &dev->rqs[qid];
1379 	int rc;
1380 
1381 	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1382 		return 0;
1383 
1384 	rc = roc_nix_rq_ena_dis(rq, true);
1385 	if (rc) {
1386 		plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1387 		goto done;
1388 	}
1389 
1390 	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1391 done:
1392 	return rc;
1393 }
1394 
1395 static int
1396 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1397 {
1398 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1399 	struct rte_eth_dev_data *data = eth_dev->data;
1400 	struct roc_nix_rq *rq = &dev->rqs[qid];
1401 	int rc;
1402 
1403 	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1404 		return 0;
1405 
1406 	rc = roc_nix_rq_ena_dis(rq, false);
1407 	if (rc) {
1408 		plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1409 		goto done;
1410 	}
1411 
1412 	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1413 done:
1414 	return rc;
1415 }
1416 
1417 static int
1418 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1419 {
1420 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1421 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1422 	struct rte_mbuf *rx_pkts[32];
1423 	struct rte_eth_link link;
1424 	int count, i, j, rc;
1425 	void *rxq;
1426 
1427 	/* Disable all the NPC entries */
1428 	rc = roc_npc_mcam_enable_all_entries(&dev->npc, 0);
1429 	if (rc)
1430 		return rc;
1431 
1432 	/* Stop link change events */
1433 	if (!roc_nix_is_vf_or_sdp(&dev->nix))
1434 		roc_nix_mac_link_event_start_stop(&dev->nix, false);
1435 
1436 	/* Disable Rx via NPC */
1437 	roc_nix_npc_rx_ena_dis(&dev->nix, false);
1438 
1439 	roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);
1440 
1441 	/* Stop rx queues and free up pkts pending */
1442 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1443 		rc = dev_ops->rx_queue_stop(eth_dev, i);
1444 		if (rc)
1445 			continue;
1446 
1447 		rxq = eth_dev->data->rx_queues[i];
1448 		count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1449 		while (count) {
1450 			for (j = 0; j < count; j++)
1451 				rte_pktmbuf_free(rx_pkts[j]);
1452 			count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1453 		}
1454 	}
1455 
1456 	/* Stop tx queues  */
1457 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1458 		dev_ops->tx_queue_stop(eth_dev, i);
1459 
1460 	/* Bring down link status internally */
1461 	memset(&link, 0, sizeof(link));
1462 	rte_eth_linkstatus_set(eth_dev, &link);
1463 
1464 	return 0;
1465 }
1466 
1467 int
1468 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1469 {
1470 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1471 	int rc, i;
1472 
1473 	if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1474 		rc = nix_recalc_mtu(eth_dev);
1475 		if (rc)
1476 			return rc;
1477 	}
1478 
1479 	/* Start rx queues */
1480 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1481 		rc = cnxk_nix_rx_queue_start(eth_dev, i);
1482 		if (rc)
1483 			return rc;
1484 	}
1485 
1486 	/* Start tx queues  */
1487 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1488 		rc = cnxk_nix_tx_queue_start(eth_dev, i);
1489 		if (rc)
1490 			return rc;
1491 	}
1492 
1493 	/* Update Flow control configuration */
1494 	rc = nix_update_flow_ctrl_config(eth_dev);
1495 	if (rc) {
1496 		plt_err("Failed to enable flow control. error code(%d)", rc);
1497 		return rc;
1498 	}
1499 
1500 	/* Enable Rx in NPC */
1501 	rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1502 	if (rc) {
1503 		plt_err("Failed to enable NPC rx %d", rc);
1504 		return rc;
1505 	}
1506 
1507 	rc = roc_npc_mcam_enable_all_entries(&dev->npc, 1);
1508 	if (rc) {
1509 		plt_err("Failed to enable NPC entries %d", rc);
1510 		return rc;
1511 	}
1512 
1513 	cnxk_nix_toggle_flag_link_cfg(dev, true);
1514 
1515 	/* Start link change events */
1516 	if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1517 		rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1518 		if (rc) {
1519 			plt_err("Failed to start cgx link event %d", rc);
1520 			goto rx_disable;
1521 		}
1522 	}
1523 
1524 	/* Enable PTP if it is requested by the user or already
1525 	 * enabled on PF owning this VF
1526 	 */
1527 	memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1528 	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1529 		cnxk_eth_dev_ops.timesync_enable(eth_dev);
1530 	else
1531 		cnxk_eth_dev_ops.timesync_disable(eth_dev);
1532 
1533 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1534 		rc = rte_mbuf_dyn_rx_timestamp_register
1535 			(&dev->tstamp.tstamp_dynfield_offset,
1536 			 &dev->tstamp.rx_tstamp_dynflag);
1537 		if (rc != 0) {
1538 			plt_err("Failed to register Rx timestamp field/flag");
1539 			goto rx_disable;
1540 		}
1541 	}
1542 
1543 	cnxk_nix_toggle_flag_link_cfg(dev, false);
1544 
1545 	roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, true);
1546 
1547 	return 0;
1548 
1549 rx_disable:
1550 	roc_nix_npc_rx_ena_dis(&dev->nix, false);
1551 	cnxk_nix_toggle_flag_link_cfg(dev, false);
1552 	return rc;
1553 }
1554 
1555 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1556 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1557 
1558 /* CNXK platform independent eth dev ops */
1559 struct eth_dev_ops cnxk_eth_dev_ops = {
1560 	.mtu_set = cnxk_nix_mtu_set,
1561 	.mac_addr_add = cnxk_nix_mac_addr_add,
1562 	.mac_addr_remove = cnxk_nix_mac_addr_del,
1563 	.mac_addr_set = cnxk_nix_mac_addr_set,
1564 	.dev_infos_get = cnxk_nix_info_get,
1565 	.link_update = cnxk_nix_link_update,
1566 	.tx_queue_release = cnxk_nix_tx_queue_release,
1567 	.rx_queue_release = cnxk_nix_rx_queue_release,
1568 	.dev_stop = cnxk_nix_dev_stop,
1569 	.dev_close = cnxk_nix_dev_close,
1570 	.dev_reset = cnxk_nix_dev_reset,
1571 	.tx_queue_start = cnxk_nix_tx_queue_start,
1572 	.rx_queue_start = cnxk_nix_rx_queue_start,
1573 	.rx_queue_stop = cnxk_nix_rx_queue_stop,
1574 	.dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
1575 	.promiscuous_enable = cnxk_nix_promisc_enable,
1576 	.promiscuous_disable = cnxk_nix_promisc_disable,
1577 	.allmulticast_enable = cnxk_nix_allmulticast_enable,
1578 	.allmulticast_disable = cnxk_nix_allmulticast_disable,
1579 	.rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
1580 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
1581 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
1582 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
1583 	.priority_flow_ctrl_queue_config =
1584 				cnxk_nix_priority_flow_ctrl_queue_config,
1585 	.priority_flow_ctrl_queue_info_get =
1586 				cnxk_nix_priority_flow_ctrl_queue_info_get,
1587 	.dev_set_link_up = cnxk_nix_set_link_up,
1588 	.dev_set_link_down = cnxk_nix_set_link_down,
1589 	.get_module_info = cnxk_nix_get_module_info,
1590 	.get_module_eeprom = cnxk_nix_get_module_eeprom,
1591 	.rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
1592 	.rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
1593 	.pool_ops_supported = cnxk_nix_pool_ops_supported,
1594 	.queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
1595 	.stats_get = cnxk_nix_stats_get,
1596 	.stats_reset = cnxk_nix_stats_reset,
1597 	.xstats_get = cnxk_nix_xstats_get,
1598 	.xstats_get_names = cnxk_nix_xstats_get_names,
1599 	.xstats_reset = cnxk_nix_xstats_reset,
1600 	.xstats_get_by_id = cnxk_nix_xstats_get_by_id,
1601 	.xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
1602 	.fw_version_get = cnxk_nix_fw_version_get,
1603 	.rxq_info_get = cnxk_nix_rxq_info_get,
1604 	.txq_info_get = cnxk_nix_txq_info_get,
1605 	.tx_done_cleanup = cnxk_nix_tx_done_cleanup,
1606 	.flow_ops_get = cnxk_nix_flow_ops_get,
1607 	.get_reg = cnxk_nix_dev_get_reg,
1608 	.timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
1609 	.timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
1610 	.timesync_read_time = cnxk_nix_timesync_read_time,
1611 	.timesync_write_time = cnxk_nix_timesync_write_time,
1612 	.timesync_adjust_time = cnxk_nix_timesync_adjust_time,
1613 	.read_clock = cnxk_nix_read_clock,
1614 	.reta_update = cnxk_nix_reta_update,
1615 	.reta_query = cnxk_nix_reta_query,
1616 	.rss_hash_update = cnxk_nix_rss_hash_update,
1617 	.rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
1618 	.set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
1619 	.set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
1620 	.tm_ops_get = cnxk_nix_tm_ops_get,
1621 	.mtr_ops_get = cnxk_nix_mtr_ops_get,
1622 };
1623 
1624 static int
1625 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
1626 {
1627 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1628 	struct rte_security_ctx *sec_ctx;
1629 	struct roc_nix *nix = &dev->nix;
1630 	struct rte_pci_device *pci_dev;
1631 	int rc, max_entries;
1632 
1633 	eth_dev->dev_ops = &cnxk_eth_dev_ops;
1634 	eth_dev->rx_queue_count = cnxk_nix_rx_queue_count;
1635 	eth_dev->rx_descriptor_status = cnxk_nix_rx_descriptor_status;
1636 	eth_dev->tx_descriptor_status = cnxk_nix_tx_descriptor_status;
1637 
1638 	/* Alloc security context */
1639 	sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
1640 	if (!sec_ctx)
1641 		return -ENOMEM;
1642 	sec_ctx->device = eth_dev;
1643 	sec_ctx->ops = &cnxk_eth_sec_ops;
1644 	sec_ctx->flags =
1645 		(RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
1646 	eth_dev->security_ctx = sec_ctx;
1647 
1648 	/* For secondary processes, the primary has done all the work */
1649 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1650 		return 0;
1651 
1652 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1653 	rte_eth_copy_pci_info(eth_dev, pci_dev);
1654 
1655 	/* Parse devargs string */
1656 	rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1657 	if (rc) {
1658 		plt_err("Failed to parse devargs rc=%d", rc);
1659 		goto error;
1660 	}
1661 
1662 	/* Initialize base roc nix */
1663 	nix->pci_dev = pci_dev;
1664 	nix->hw_vlan_ins = true;
1665 	nix->port_id = eth_dev->data->port_id;
1666 	rc = roc_nix_dev_init(nix);
1667 	if (rc) {
1668 		plt_err("Failed to initialize roc nix rc=%d", rc);
1669 		goto error;
1670 	}
1671 
1672 	/* Register up msg callbacks */
1673 	roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
1674 
1675 	/* Register link info get callback */
1676 	roc_nix_mac_link_info_get_cb_register(nix,
1677 					      cnxk_eth_dev_link_status_get_cb);
1678 
1679 	dev->eth_dev = eth_dev;
1680 	dev->configured = 0;
1681 	dev->ptype_disable = 0;
1682 
1683 	TAILQ_INIT(&dev->inb.list);
1684 	TAILQ_INIT(&dev->outb.list);
1685 	rte_spinlock_init(&dev->inb.lock);
1686 	rte_spinlock_init(&dev->outb.lock);
1687 
1688 	/* For VFs, the returned max_entries will be 0, but one entry must be
1689 	 * allocated to keep the default MAC address, so set it to 1.
1690 	 */
1691 	if (roc_nix_is_vf_or_sdp(nix))
1692 		max_entries = 1;
1693 	else
1694 		max_entries = roc_nix_mac_max_entries_get(nix);
1695 
1696 	if (max_entries <= 0) {
1697 		plt_err("Failed to get max entries for mac addr");
1698 		rc = -ENOTSUP;
1699 		goto dev_fini;
1700 	}
1701 
1702 	eth_dev->data->mac_addrs =
1703 		rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
1704 	if (eth_dev->data->mac_addrs == NULL) {
1705 		plt_err("Failed to allocate memory for mac addr");
1706 		rc = -ENOMEM;
1707 		goto dev_fini;
1708 	}
1709 
1710 	dev->max_mac_entries = max_entries;
1711 	dev->dmac_filter_count = 1;
1712 
1713 	/* Get mac address */
1714 	rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
1715 	if (rc) {
1716 		plt_err("Failed to get mac addr, rc=%d", rc);
1717 		goto free_mac_addrs;
1718 	}
1719 
1720 	/* Update the mac address */
1721 	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1722 
1723 	if (!roc_nix_is_vf_or_sdp(nix)) {
1724 		/* Sync same MAC address to CGX/RPM table */
1725 		rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
1726 		if (rc) {
1727 			plt_err("Failed to set mac addr, rc=%d", rc);
1728 			goto free_mac_addrs;
1729 		}
1730 	}
1731 
1732 	/* Union of all capabilities supported by CNXK.
1733 	 * Platform specific capabilities will be
1734 	 * updated later.
1735 	 */
1736 	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1737 	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1738 	dev->speed_capa = nix_get_speed_capa(dev);
1739 
1740 	/* Initialize roc npc */
1741 	dev->npc.roc_nix = nix;
1742 	rc = roc_npc_init(&dev->npc);
1743 	if (rc)
1744 		goto free_mac_addrs;
1745 
1746 	plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
1747 		    " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1748 		    eth_dev->data->port_id, roc_nix_get_pf(nix),
1749 		    roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
1750 		    dev->rx_offload_capa, dev->tx_offload_capa);
1751 	return 0;
1752 
1753 free_mac_addrs:
1754 	rte_free(eth_dev->data->mac_addrs);
1755 dev_fini:
1756 	roc_nix_dev_fini(nix);
1757 error:
1758 	plt_err("Failed to init nix eth_dev rc=%d", rc);
1759 	return rc;
1760 }
1761 
1762 static int
1763 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
1764 {
1765 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1766 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1767 	struct rte_eth_pfc_queue_conf pfc_conf;
1768 	struct roc_nix *nix = &dev->nix;
1769 	struct rte_eth_fc_conf fc_conf;
1770 	int rc, i;
1771 
1772 	plt_free(eth_dev->security_ctx);
1773 	eth_dev->security_ctx = NULL;
1774 
1775 	/* Nothing to be done for secondary processes */
1776 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1777 		return 0;
1778 
1779 	/* Disable switch hdr pkind */
1780 	roc_nix_switch_hdr_set(&dev->nix, 0, 0, 0, 0);
1781 
1782 	/* Clear the flag since we are closing down */
1783 	dev->configured = 0;
1784 
1785 	roc_nix_npc_rx_ena_dis(nix, false);
1786 
1787 	/* Restore 802.3 Flow control configuration */
1788 	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
1789 	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1790 	fc_conf.mode = RTE_ETH_FC_NONE;
1791 	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1792 
1793 	pfc_conf.mode = RTE_ETH_FC_NONE;
1794 	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
1795 		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
1796 			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
1797 			pfc_conf.rx_pause.tc = i;
1798 			pfc_conf.tx_pause.rx_qid = i;
1799 			pfc_conf.tx_pause.tc = i;
1800 			rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
1801 				&pfc_conf);
1802 			if (rc)
1803 				plt_err("Failed to reset PFC. error code(%d)",
1804 					rc);
1805 		}
1806 	}
1807 
1808 	fc_conf.mode = RTE_ETH_FC_FULL;
1809 	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1810 
1811 	/* Disable and free rte_meter entries */
1812 	nix_meter_fini(dev);
1813 
1814 	/* Disable and free rte_flow entries */
1815 	roc_npc_fini(&dev->npc);
1816 
1817 	/* Disable link status events */
1818 	roc_nix_mac_link_event_start_stop(nix, false);
1819 
1820 	/* Unregister the link update op, this is required to stop VFs from
1821 	 * receiving link status updates on exit path.
1822 	 */
1823 	roc_nix_mac_link_cb_unregister(nix);
1824 
1825 	/* Free up SQs */
1826 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1827 		dev_ops->tx_queue_release(eth_dev, i);
1828 		eth_dev->data->tx_queues[i] = NULL;
1829 	}
1830 	eth_dev->data->nb_tx_queues = 0;
1831 
1832 	/* Free up RQ's and CQ's */
1833 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1834 		dev_ops->rx_queue_release(eth_dev, i);
1835 		eth_dev->data->rx_queues[i] = NULL;
1836 	}
1837 	eth_dev->data->nb_rx_queues = 0;
1838 
1839 	/* Free security resources */
1840 	nix_security_release(dev);
1841 
1842 	/* Free tm resources */
1843 	roc_nix_tm_fini(nix);
1844 
1845 	/* Unregister queue irqs */
1846 	roc_nix_unregister_queue_irqs(nix);
1847 
1848 	/* Unregister cq irqs */
1849 	if (eth_dev->data->dev_conf.intr_conf.rxq)
1850 		roc_nix_unregister_cq_irqs(nix);
1851 
1852 	/* Free ROC RQ's, SQ's and CQ's memory */
1853 	nix_free_queue_mem(dev);
1854 
1855 	/* Free nix lf resources */
1856 	rc = roc_nix_lf_free(nix);
1857 	if (rc)
1858 		plt_err("Failed to free nix lf, rc=%d", rc);
1859 
1860 	rte_free(eth_dev->data->mac_addrs);
1861 	eth_dev->data->mac_addrs = NULL;
1862 
1863 	rc = roc_nix_dev_fini(nix);
1864 	/* Can be freed later by PMD if NPA LF is in use */
1865 	if (rc == -EAGAIN) {
1866 		if (!reset)
1867 			eth_dev->data->dev_private = NULL;
1868 		return 0;
1869 	} else if (rc) {
1870 		plt_err("Failed in nix dev fini, rc=%d", rc);
1871 	}
1872 
1873 	return rc;
1874 }
1875 
1876 static int
1877 cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
1878 {
1879 	cnxk_eth_dev_uninit(eth_dev, false);
1880 	return 0;
1881 }
1882 
1883 static int
1884 cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
1885 {
1886 	int rc;
1887 
1888 	rc = cnxk_eth_dev_uninit(eth_dev, true);
1889 	if (rc)
1890 		return rc;
1891 
1892 	return cnxk_eth_dev_init(eth_dev);
1893 }
1894 
1895 int
1896 cnxk_nix_remove(struct rte_pci_device *pci_dev)
1897 {
1898 	struct rte_eth_dev *eth_dev;
1899 	struct roc_nix *nix;
1900 	int rc = -EINVAL;
1901 
1902 	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1903 	if (eth_dev) {
1904 		/* Cleanup eth dev */
1905 		rc = cnxk_eth_dev_uninit(eth_dev, false);
1906 		if (rc)
1907 			return rc;
1908 
1909 		rte_eth_dev_release_port(eth_dev);
1910 	}
1911 
1912 	/* Nothing to be done for secondary processes */
1913 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1914 		return 0;
1915 
1916 	/* Check if this device is hosting common resource */
1917 	nix = roc_idev_npa_nix_get();
1918 	if (!nix || nix->pci_dev != pci_dev)
1919 		return 0;
1920 
1921 	/* Try nix fini now */
1922 	rc = roc_nix_dev_fini(nix);
1923 	if (rc == -EAGAIN) {
1924 		plt_info("%s: common resource in use by other devices",
1925 			 pci_dev->name);
1926 		goto exit;
1927 	} else if (rc) {
1928 		plt_err("Failed in nix dev fini, rc=%d", rc);
1929 		goto exit;
1930 	}
1931 
1932 	/* Free device pointer as rte_ethdev does not have it anymore */
1933 	rte_free(nix);
1934 exit:
1935 	return rc;
1936 }
1937 
1938 int
1939 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1940 {
1941 	int rc;
1942 
1943 	RTE_SET_USED(pci_drv);
1944 
1945 	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
1946 					   cnxk_eth_dev_init);
1947 
1948 	/* On error on a secondary process, recheck if the port exists in the
1949 	 * primary or is in the middle of detaching.
1950 	 */
1951 	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1952 		if (!rte_eth_dev_allocated(pci_dev->device.name))
1953 			return 0;
1954 	return rc;
1955 }
1956