xref: /dpdk/drivers/net/cnxk/cn9k_ethdev.c (revision 29fd052d)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include "cn9k_ethdev.h"
#include "cn9k_flow.h"
#include "cn9k_rx.h"
#include "cn9k_tx.h"

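/* Translate the ethdev Rx offload configuration into NIX Rx fastpath flags */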
static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	uint16_t flags = 0;

	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
		flags |= NIX_RX_OFFLOAD_RSS_F;

	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		flags |= NIX_RX_MULTI_SEG_F;

	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
		flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	if (!dev->ptype_disable)
		flags |= NIX_RX_OFFLOAD_PTYPE_F;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		flags |= NIX_RX_OFFLOAD_SECURITY_F;

	if (dev->rx_mark_update)
		flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;

	return flags;
}

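/* Translate the ethdev Tx offload configuration into NIX Tx fastpath flags */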
static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t conf = dev->tx_offloads;
	uint16_t flags = 0;

	/* Fastpath depends on these mbuf flag values and field offsets */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, buf_iova) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_mbuf, buf_iova) + 16);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, ol_flags) + 12);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));

	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;

	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		flags |= NIX_TX_MULTI_SEG_F;

	/* Enable inner checksum for TSO */
	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	/* Enable inner and outer checksum for tunnel TSO */
	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
		flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
		flags |= NIX_TX_OFFLOAD_SECURITY_F;

	if (dev->tx_mark)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	return flags;
}

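/* Toggle packet type parsing in the Rx fastpath and update the Rx burst function */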
static int
cn9k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (ptype_mask) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 0;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 1;
	}

	cn9k_eth_set_rx_function(eth_dev);
	return 0;
}

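/* Pre-compute the default send header word0 (descriptor size and SQ) for a Tx queue */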
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
		      uint16_t qid)
{
	union nix_send_hdr_w0_u send_hdr_w0;

	/* Initialize the fields based on a basic single-segment packet */
	send_hdr_w0.u = 0;
	if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
		send_hdr_w0.sizem1 = 2;
		if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
			/* Default: one seg packet would have:
			 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
			 * => 8/2 - 1 = 3
			 */
			send_hdr_w0.sizem1 = 3;

			/* The offset for send_mem is calculated as
			 * send_hdr->w0.sizem1 * 2.
			 */
			txq->ts_mem = dev->tstamp.tx_tstamp_iova;
		}
	} else {
		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
		send_hdr_w0.sizem1 = 1;
	}
	send_hdr_w0.sq = qid;
	txq->send_hdr_w0 = send_hdr_w0.u;
	rte_wmb();
}

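/* Platform specific Tx queue setup done on top of the common cnxk Tx queue setup */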
static int
cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, unsigned int socket,
			const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t mark_fmt, mark_flag;
	struct roc_cpt_lf *inl_lf;
	struct cn9k_eth_txq *txq;
	struct roc_nix_sq *sq;
	uint16_t crypto_qid;
	int rc;

	RTE_SET_USED(socket);

	/* Common Tx queue setup */
	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn9k_eth_txq), tx_conf);
	if (rc)
		return rc;

	sq = &dev->sqs[qid];
	/* Update fast path queue */
	txq = eth_dev->data->tx_queues[qid];
	txq->fc_mem = sq->fc;
	txq->lmt_addr = sq->lmt_addr;
	txq->io_addr = sq->io_addr;
	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

	/* Fetch CPT LF info for outbound if present */
	if (dev->outb.lf_base) {
		crypto_qid = qid % dev->outb.nb_crypto_qs;
		inl_lf = dev->outb.lf_base + crypto_qid;

		txq->cpt_io_addr = inl_lf->io_addr;
		txq->cpt_fc = inl_lf->fc_addr;
		txq->cpt_desc = inl_lf->nb_desc * 0.7;
		txq->sa_base = (uint64_t)dev->outb.sa_base;
		txq->sa_base |= eth_dev->data->port_id;
		PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
	}

	mark_fmt = roc_nix_tm_mark_format_get(&dev->nix, &mark_flag);
	txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
	txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;

	nix_form_default_desc(dev, txq, qid);
	txq->lso_tun_fmt = dev->lso_tun_fmt;
	return 0;
}

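/* Platform specific Rx queue setup done on top of the common cnxk Rx queue setup */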
static int
cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, unsigned int socket,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cn9k_eth_rxq *rxq;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	int rc;

	RTE_SET_USED(socket);

	/* CQ errata needs a min 4K ring */
	if (dev->cq_min_4k && nb_desc < 4096)
		nb_desc = 4096;

	/* Common Rx queue setup */
	rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn9k_eth_rxq), rx_conf, mp);
	if (rc)
		return rc;

	rq = &dev->rqs[qid];
	cq = &dev->cqs[qid];

	/* Update fast path queue */
	rxq = eth_dev->data->rx_queues[qid];
	rxq->rq = qid;
	rxq->desc = (uintptr_t)cq->desc_base;
	rxq->cq_door = cq->door;
	rxq->cq_status = cq->status;
	rxq->wdata = cq->wdata;
	rxq->head = cq->head;
	rxq->qmask = cq->qmask;
	rxq->tstamp = &dev->tstamp;

	/* Offset from the start of the mbuf to the packet data is first_skip */
	rxq->data_off = rq->first_skip;
	rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);

	/* Lookup mem */
	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
	return 0;
}

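/* Stop the Tx queue via common code and clear cached flow control credits */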
static int
cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
	int rc;

	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
	if (rc)
		return rc;

	/* Clear fc cache pkts to trigger worker stop */
	txq->fc_cache_pkts = 0;
	return 0;
}

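/* Platform specific device configure: validate offloads and derive fastpath flags */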
static int
cn9k_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_conf *conf = &eth_dev->data->dev_conf;
	struct rte_eth_txmode *txmode = &conf->txmode;
	int rc;

	/* Platform specific checks */
	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
	     (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
		plt_err("Outer IP and SCTP checksum unsupported");
		return -EINVAL;
	}

	/* Common nix configure */
	rc = cnxk_nix_configure(eth_dev);
	if (rc)
		return rc;

	/* Update offload flags */
	dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);

	plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
		    " tx_offload_flags=0x%x",
		    eth_dev->data->port_id, dev->rx_offload_flags,
		    dev->tx_offload_flags);
	return 0;
}

/* Enable PTP configuration for VFs */
static void
nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (nix_recalc_mtu(eth_dev))
		plt_err("Failed to set MTU size for ptp");

	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	/* Set up the function pointers as per the new offload flags */
	cn9k_eth_set_rx_function(eth_dev);
	cn9k_eth_set_tx_function(eth_dev);
}

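/* One-shot Rx burst handler used to apply PTP config on a VF from the datapath,
 * outside of mbox context.
 */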
static uint16_t
nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	struct cn9k_eth_rxq *rxq = queue;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct rte_eth_dev *eth_dev;

	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	eth_dev = rxq_sp->dev->eth_dev;
	nix_ptp_enable_vf(eth_dev);

	return 0;
}

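/* ROC callback invoked when the PF notifies a PTP enable/disable change over mbox */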
static int
cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
{
	struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
	struct rte_eth_dev *eth_dev;
	struct cn9k_eth_rxq *rxq;
	int i;

	if (!dev)
		return -EINVAL;

	eth_dev = dev->eth_dev;
	if (!eth_dev)
		return -EINVAL;

	dev->ptp_en = ptp_en;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
	}

	if (roc_nix_is_vf_or_sdp(nix) && !(roc_nix_is_sdp(nix)) &&
	    !(roc_nix_is_lbk(nix))) {
		/* In the VF case, the MTU cannot be set directly from this
		 * function as it runs in the context of an MBOX request
		 * (PF->VF), and setting the MTU requires another MBOX
		 * message to be sent (VF->PF).
		 */
		eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
		rte_mb();
	}

	return 0;
}

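/* Enable IEEE 1588 timestamping: set offload flags and refresh default Tx descriptors */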
static int
cn9k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int i, rc;

	rc = cnxk_nix_timesync_enable(eth_dev);
	if (rc)
		return rc;

	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

	/* Update the Rx/Tx burst functions as the rx[tx]_offload_flags
	 * have changed.
	 */
	cn9k_eth_set_rx_function(eth_dev);
	cn9k_eth_set_tx_function(eth_dev);
	return 0;
}

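/* Disable IEEE 1588 timestamping: clear offload flags and refresh default Tx descriptors */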
static int
cn9k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int i, rc;

	rc = cnxk_nix_timesync_disable(eth_dev);
	if (rc)
		return rc;

	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

	/* Update the Rx/Tx burst functions as the rx[tx]_offload_flags
	 * have changed.
	 */
	cn9k_eth_set_rx_function(eth_dev);
	cn9k_eth_set_tx_function(eth_dev);
	return 0;
}

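/* Platform specific device start done on top of the common cnxk start */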
static int
cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Common eth dev start */
	rc = cnxk_nix_dev_start(eth_dev);
	if (rc)
		return rc;

	/* Update the VF about the data offset shifted by 8 bytes if PTP is
	 * already enabled in the PF owning this VF.
	 */
	if (dev->ptp_en && (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix))))
		nix_ptp_enable_vf(eth_dev);

	/* Recompute the rx[tx]_offload_flags as the rx[tx]_offloads
	 * may have changed.
	 */
	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

	cn9k_eth_set_tx_function(eth_dev);
	cn9k_eth_set_rx_function(eth_dev);
	return 0;
}

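/* Read back and convert the latest Tx timestamp captured by the hardware */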
static int
cn9k_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
				    struct timespec *timestamp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_timesync_info *tstamp = &dev->tstamp;
	uint64_t ns;

	if (*tstamp->tx_tstamp == 0)
		return -EINVAL;

	ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp);
	*timestamp = rte_ns_to_timespec(ns);
	*tstamp->tx_tstamp = 0;
	rte_wmb();

	return 0;
}

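/* Negotiate which Rx metadata (user flag/mark) the application wants delivered */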
static int
cn9k_nix_rx_metadata_negotiate(struct rte_eth_dev *eth_dev, uint64_t *features)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	*features &=
		(RTE_ETH_RX_METADATA_USER_FLAG | RTE_ETH_RX_METADATA_USER_MARK);

	if (*features) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
		dev->rx_mark_update = true;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
		dev->rx_mark_update = false;
	}

	cn9k_eth_set_rx_function(eth_dev);

	return 0;
}

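/* Configure TM VLAN DEI marking and propagate the marking format to all Tx queues */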
static int
cn9k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
			  int mark_yellow, int mark_red,
			  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int rc, i;

	rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
				       mark_red, error);

	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn9k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

static int
cn9k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
			int mark_yellow, int mark_red,
			struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int rc, i;

	rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
				     error);
	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn9k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

static int
cn9k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
			 int mark_yellow, int mark_red,
			 struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int rc, i;

	rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
				      mark_red, error);
	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn9k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
	cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
	cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
	cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
	cnxk_eth_dev_ops.dev_start = cn9k_nix_dev_start;
	cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
	cnxk_eth_dev_ops.timesync_enable = cn9k_nix_timesync_enable;
	cnxk_eth_dev_ops.timesync_disable = cn9k_nix_timesync_disable;
	cnxk_eth_dev_ops.mtr_ops_get = NULL;
	cnxk_eth_dev_ops.rx_metadata_negotiate = cn9k_nix_rx_metadata_negotiate;
	cnxk_eth_dev_ops.timesync_read_tx_timestamp =
		cn9k_nix_timesync_read_tx_timestamp;
}

/* Update platform specific traffic management (TM) ops */
static void
nix_tm_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_tm_ops.mark_vlan_dei = cn9k_nix_tm_mark_vlan_dei;
	cnxk_tm_ops.mark_ip_ecn = cn9k_nix_tm_mark_ip_ecn;
	cnxk_tm_ops.mark_ip_dscp = cn9k_nix_tm_mark_ip_dscp;
}

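/* Update platform specific rte_flow ops */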
static void
npc_flow_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_flow_ops.create = cn9k_flow_create;
	cnxk_flow_ops.destroy = cn9k_flow_destroy;
}

static int
cn9k_nix_remove(struct rte_pci_device *pci_dev)
{
	return cnxk_nix_remove(pci_dev);
}

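/* PCI probe: override platform ops, run the common cnxk probe and apply
 * CN9K specific capabilities and errata.
 */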
static int
cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	int rc;

	if (RTE_CACHE_LINE_SIZE != 128) {
		plt_err("Driver not compiled for CN9K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc) {
		plt_err("Failed to initialize platform model, rc=%d", rc);
		return rc;
	}

	nix_eth_dev_ops_override();
	nix_tm_ops_override();
	npc_flow_ops_override();

	cn9k_eth_sec_ops_override();

	/* Common probe */
	rc = cnxk_nix_probe(pci_drv, pci_dev);
	if (rc)
		return rc;

	/* Find eth dev allocated */
	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return -ENOENT;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		cn9k_eth_set_tx_function(eth_dev);
		cn9k_eth_set_rx_function(eth_dev);
		return 0;
	}

	dev = cnxk_eth_pmd_priv(eth_dev);
	/* Update capabilities already set for TSO.
	 * TSO is not supported on earlier chip revisions.
	 */
	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
		dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
					  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
					  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
					  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);

	/* 50G and 100G are supported only on CN9K board versions C0
	 * and above.
	 */
	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
	}

	dev->hwcap = 0;
	dev->inb.no_inl_dev = 1;

	/* Register mbox up-message callback for PTP information */
	roc_nix_ptp_info_cb_register(&dev->nix, cn9k_nix_ptp_info_update_cb);

	/* Apply HW errata workarounds */
	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
		dev->cq_min_4k = 1;
	return 0;
}

static const struct rte_pci_id cn9k_pci_nix_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_AF_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn9k_pci_nix = {
	.id_table = cn9k_pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = cn9k_nix_probe,
	.remove = cn9k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn9k, cn9k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn9k, cn9k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn9k, "vfio-pci");