/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include "cn10k_ethdev.h"
#include "cn10k_flow.h"
#include "cn10k_rx.h"
#include "cn10k_tx.h"

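/* Translate the ethdev Rx offload configuration into the NIX_RX_OFFLOAD_*
 * flag word used to select the cn10k Rx fast-path function.
 */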
static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	uint16_t flags = 0;

	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
		flags |= NIX_RX_OFFLOAD_RSS_F;

	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		flags |= NIX_RX_MULTI_SEG_F;

	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
		flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	if (!dev->ptype_disable)
		flags |= NIX_RX_OFFLOAD_PTYPE_F;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		flags |= NIX_RX_OFFLOAD_SECURITY_F;

	if (dev->rx_mark_update)
		flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;

	return flags;
}

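/* Translate the ethdev Tx offload configuration into the NIX_TX_OFFLOAD_*
 * flag word. The build-time asserts below pin the mbuf flag values and
 * field offsets that the Tx fast path relies on.
 */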
static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t conf = dev->tx_offloads;
	uint16_t flags = 0;

	/* The fast path depends on these exact flag values and mbuf field offsets */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, buf_iova) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_mbuf, buf_iova) + 16);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, ol_flags) + 12);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));

	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;

	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		flags |= NIX_TX_MULTI_SEG_F;

	/* Enable Inner checksum for TSO */
	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	/* Enable Inner and Outer checksum for Tunnel TSO */
	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
		flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
		flags |= NIX_TX_OFFLOAD_SECURITY_F;

	if (dev->tx_mark)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	return flags;
}

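/* Enable/disable packet type parsing in the Rx fast path and reselect the
 * Rx burst function accordingly.
 */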
static int
cn10k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (ptype_mask) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 0;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 1;
	}

	cn10k_eth_set_rx_function(eth_dev);
	return 0;
}

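/* Pre-compute the default send header word 0 (descriptor size and SQ id)
 * for this Tx queue; per the arithmetic in the comments below, sizem1 is
 * the descriptor size in 16B units minus one.
 */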
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn10k_eth_txq *txq,
		      uint16_t qid)
{
	union nix_send_hdr_w0_u send_hdr_w0;

	/* Initialize the fields for a basic single-segment packet */
	send_hdr_w0.u = 0;
	if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
		send_hdr_w0.sizem1 = 2;
		if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
			/* Default: one seg packet would have:
			 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
			 * => 8/2 - 1 = 3
			 */
			send_hdr_w0.sizem1 = 3;

			/* The Tx path derives the send_mem offset from
			 * send_hdr->w0.sizem1 * 2
			 */
			txq->ts_mem = dev->tstamp.tx_tstamp_iova;
		}
	} else {
		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
		send_hdr_w0.sizem1 = 1;
	}
	send_hdr_w0.sq = qid;
	txq->send_hdr_w0 = send_hdr_w0.u;
	rte_wmb();
}

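/* cn10k Tx queue setup: performs the common cnxk setup and then fills in
 * the fast-path fields (LMT base, FC memory, CPT outbound info, marking
 * state and the default send header).
 */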
static int
cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			 uint16_t nb_desc, unsigned int socket,
			 const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	struct roc_cpt_lf *inl_lf;
	struct cn10k_eth_txq *txq;
	struct roc_nix_sq *sq;
	uint16_t crypto_qid;
	int rc;

	RTE_SET_USED(socket);

	/* Common Tx queue setup */
	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn10k_eth_txq), tx_conf);
	if (rc)
		return rc;

	sq = &dev->sqs[qid];
	/* Update fast path queue */
	txq = eth_dev->data->tx_queues[qid];
	txq->fc_mem = sq->fc;
	/* Store lmt base in tx queue for easy access */
	txq->lmt_base = nix->lmt_base;
	txq->io_addr = sq->io_addr;
	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

	/* Fetch CPT LF info for outbound if present */
	if (dev->outb.lf_base) {
		crypto_qid = qid % dev->outb.nb_crypto_qs;
		inl_lf = dev->outb.lf_base + crypto_qid;

		txq->cpt_io_addr = inl_lf->io_addr;
		txq->cpt_fc = inl_lf->fc_addr;
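		/* Use only ~70% of the CPT LF descriptors for this queue,
		 * presumably to leave headroom for in-flight CPT work.
		 */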
		txq->cpt_desc = inl_lf->nb_desc * 0.7;
		txq->sa_base = (uint64_t)dev->outb.sa_base;
		txq->sa_base |= eth_dev->data->port_id;
		PLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16));
	}

	/* Restore marking flag from roc */
	mark_fmt = roc_nix_tm_mark_format_get(nix, &mark_flag);
	txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
	txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;

	nix_form_default_desc(dev, txq, qid);
	txq->lso_tun_fmt = dev->lso_tun_fmt;
	return 0;
}

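/* cn10k Rx queue setup: performs the common cnxk setup and then mirrors the
 * CQ/RQ state into the fast-path receive queue structure.
 */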
static int
cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			 uint16_t nb_desc, unsigned int socket,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct cn10k_eth_rxq *rxq;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	int rc;

	RTE_SET_USED(socket);

	/* CQ errata requires a minimum 4K entry ring */
	if (dev->cq_min_4k && nb_desc < 4096)
		nb_desc = 4096;

	/* Common Rx queue setup */
	rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn10k_eth_rxq), rx_conf, mp);
	if (rc)
		return rc;

	rq = &dev->rqs[qid];
	cq = &dev->cqs[qid];

	/* Update fast path queue */
	rxq = eth_dev->data->rx_queues[qid];
	rxq->rq = qid;
	rxq->desc = (uintptr_t)cq->desc_base;
	rxq->cq_door = cq->door;
	rxq->cq_status = cq->status;
	rxq->wdata = cq->wdata;
	rxq->head = cq->head;
	rxq->qmask = cq->qmask;
	rxq->tstamp = &dev->tstamp;

	/* Offset from the start of the mbuf to the packet data is first_skip */
	rxq->data_off = rq->first_skip;
	rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);

	/* Setup security related info */
	if (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F) {
		rxq->lmt_base = dev->nix.lmt_base;
		rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,
							   dev->inb.inl_dev);
	}
	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	rxq->aura_handle = rxq_sp->qconf.mp->pool_id;

	/* Lookup mem */
	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
	return 0;
}

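/* Stop the Tx queue and reset the cached flow-control packet count so the
 * fast path stops submitting work.
 */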
static int
cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
	int rc;

	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
	if (rc)
		return rc;

	/* Clear fc cache pkts to trigger worker stop */
	txq->fc_cache_pkts = 0;
	return 0;
}

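/* cn10k device configure: runs the common cnxk configure and refreshes the
 * platform-specific Rx/Tx offload flag words.
 */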
static int
cn10k_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	/* Common nix configure */
	rc = cnxk_nix_configure(eth_dev);
	if (rc)
		return rc;

	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
		/* Register callback to handle security error work */
		roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);
	}

	/* Update offload flags */
	dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);

	/* Reset reassembly dynfield/dynflag offsets */
	dev->reass_dynfield_off = -1;
	dev->reass_dynflag_bit = -1;

	plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
		    " tx_offload_flags=0x%x",
		    eth_dev->data->port_id, dev->rx_offload_flags,
		    dev->tx_offload_flags);
	return 0;
}

/* Function to enable ptp config for VFs */
static void
nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (nix_recalc_mtu(eth_dev))
		plt_err("Failed to set MTU size for ptp");

	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	/* Setting up the function pointers as per new offload flags */
	cn10k_eth_set_rx_function(eth_dev);
	cn10k_eth_set_tx_function(eth_dev);
}

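/* Dummy Rx burst function installed on a VF when the PF enables PTP. It runs
 * from the application's Rx polling context to complete the PTP setup
 * (including the MTU mailbox exchange that cannot be issued from the mbox
 * handler itself) and then restores the real Rx/Tx burst functions.
 */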
static uint16_t
nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	struct cn10k_eth_rxq *rxq = queue;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct rte_eth_dev *eth_dev;

	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	eth_dev = rxq_sp->dev->eth_dev;
	nix_ptp_enable_vf(eth_dev);

	return 0;
}

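/* Callback invoked from the ROC layer when the PF toggles PTP. It refreshes
 * the per-queue mbuf initializer and, on a VF, defers the MTU update to the
 * datapath via nix_ptp_vf_burst.
 */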
static int
cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
{
	struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
	struct rte_eth_dev *eth_dev;
	struct cn10k_eth_rxq *rxq;
	int i;

	if (!dev)
		return -EINVAL;

	eth_dev = dev->eth_dev;
	if (!eth_dev)
		return -EINVAL;

	dev->ptp_en = ptp_en;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
	}

	if (roc_nix_is_vf_or_sdp(nix) && !(roc_nix_is_sdp(nix)) &&
	    !(roc_nix_is_lbk(nix))) {
		/* For a VF, the MTU cannot be set directly from this function
		 * since it runs as part of an MBOX request (PF->VF), while
		 * setting the MTU itself requires another MBOX message to be
		 * sent (VF->PF).
		 */
		eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
		rte_mb();
	}

	return 0;
}

static int
cn10k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int i, rc;

	rc = cnxk_nix_timesync_enable(eth_dev);
	if (rc)
		return rc;

	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

	/* Reselect the Rx/Tx burst functions since the
	 * rx/tx_offload_flags have changed.
	 */
	cn10k_eth_set_rx_function(eth_dev);
	cn10k_eth_set_tx_function(eth_dev);
	return 0;
}

static int
cn10k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int i, rc;

	rc = cnxk_nix_timesync_disable(eth_dev);
	if (rc)
		return rc;

	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

	/* Reselect the Rx/Tx burst functions since the
	 * rx/tx_offload_flags have changed.
	 */
	cn10k_eth_set_rx_function(eth_dev);
	cn10k_eth_set_tx_function(eth_dev);
	return 0;
}

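/* Read back the Tx timestamp captured at tx_tstamp_iova. As implied by the
 * conversion below, the 64-bit value packs seconds in the upper 32 bits and
 * nanoseconds in the lower 32 bits; it is cleared once consumed.
 */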
static int
cn10k_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
				     struct timespec *timestamp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_timesync_info *tstamp = &dev->tstamp;
	uint64_t ns;

	if (*tstamp->tx_tstamp == 0)
		return -EINVAL;

	*tstamp->tx_tstamp = ((*tstamp->tx_tstamp >> 32) * NSEC_PER_SEC) +
		(*tstamp->tx_tstamp & 0xFFFFFFFFUL);
	ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp);
	*timestamp = rte_ns_to_timespec(ns);
	*tstamp->tx_tstamp = 0;
	rte_wmb();

	return 0;
}

static int
cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Common eth dev start */
	rc = cnxk_nix_dev_start(eth_dev);
	if (rc)
		return rc;

	/* Update the VF about the data offset shift (8 bytes) if PTP is
	 * already enabled in the PF owning this VF.
	 */
	if (dev->ptp_en && (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix))))
		nix_ptp_enable_vf(eth_dev);

	/* Refresh rx/tx_offload_flags and reselect the Rx/Tx burst
	 * functions in case the offload configuration has changed.
	 */
	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

	cn10k_eth_set_tx_function(eth_dev);
	cn10k_eth_set_rx_function(eth_dev);
	return 0;
}

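/* Negotiate delivery of Rx metadata (user flag/mark) and toggle the
 * mark-update Rx fast-path flag accordingly.
 */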
static int
cn10k_nix_rx_metadata_negotiate(struct rte_eth_dev *eth_dev, uint64_t *features)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	*features &=
		(RTE_ETH_RX_METADATA_USER_FLAG | RTE_ETH_RX_METADATA_USER_MARK);

	if (*features) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
		dev->rx_mark_update = true;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
		dev->rx_mark_update = false;
	}

	cn10k_eth_set_rx_function(eth_dev);

	return 0;
}

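/* IP reassembly is only offered when the Rx security offload is enabled;
 * the timeout and fragment limit are programmed through the ROC layer in
 * cn10k_nix_reassembly_conf_set() below.
 */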
static int
cn10k_nix_reassembly_capability_get(struct rte_eth_dev *eth_dev,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc = -ENOTSUP;
	RTE_SET_USED(eth_dev);

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
		reassembly_capa->timeout_ms = 60 * 1000;
		reassembly_capa->max_frags = 4;
		reassembly_capa->flags = RTE_ETH_DEV_REASSEMBLY_F_IPV4 |
					 RTE_ETH_DEV_REASSEMBLY_F_IPV6;
		rc = 0;
	}

	return rc;
}

static int
cn10k_nix_reassembly_conf_get(struct rte_eth_dev *eth_dev,
		struct rte_eth_ip_reassembly_params *conf)
{
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(conf);
	return -ENOTSUP;
}

static int
cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc = 0;

	rc = roc_nix_reassembly_configure(conf->timeout_ms,
				conf->max_frags);
	if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		dev->rx_offload_flags |= NIX_RX_REAS_F;

	return rc;
}

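/* The three TM marking callbacks below (VLAN DEI, IP ECN, IP DSCP) share the
 * same pattern: apply the common cnxk handler, re-read the mark format/flag
 * from ROC, update the VLAN/QinQ Tx offload flag and the per-queue marking
 * state, and reselect the Tx burst function.
 */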
static int
cn10k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
			   int mark_yellow, int mark_red,
			   struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int rc, i;

	rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
				       mark_red, error);

	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn10k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

static int
cn10k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
			 int mark_yellow, int mark_red,
			 struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int rc, i;

	rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
				     error);
	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn10k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

static int
cn10k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
			  int mark_yellow, int mark_red,
			  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int rc, i;

	rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
				      mark_red, error);
	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn10k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
	cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
	cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
	cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
	cnxk_eth_dev_ops.dev_start = cn10k_nix_dev_start;
	cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
	cnxk_eth_dev_ops.timesync_enable = cn10k_nix_timesync_enable;
	cnxk_eth_dev_ops.timesync_disable = cn10k_nix_timesync_disable;
	cnxk_eth_dev_ops.rx_metadata_negotiate =
		cn10k_nix_rx_metadata_negotiate;
	cnxk_eth_dev_ops.timesync_read_tx_timestamp =
		cn10k_nix_timesync_read_tx_timestamp;
	cnxk_eth_dev_ops.ip_reassembly_capability_get =
			cn10k_nix_reassembly_capability_get;
	cnxk_eth_dev_ops.ip_reassembly_conf_get = cn10k_nix_reassembly_conf_get;
	cnxk_eth_dev_ops.ip_reassembly_conf_set = cn10k_nix_reassembly_conf_set;
}

/* Update platform specific tm ops */
static void
nix_tm_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_tm_ops.mark_vlan_dei = cn10k_nix_tm_mark_vlan_dei;
	cnxk_tm_ops.mark_ip_ecn = cn10k_nix_tm_mark_ip_ecn;
	cnxk_tm_ops.mark_ip_dscp = cn10k_nix_tm_mark_ip_dscp;
}

static void
npc_flow_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_flow_ops.create = cn10k_flow_create;
	cnxk_flow_ops.destroy = cn10k_flow_destroy;
}

static int
cn10k_nix_remove(struct rte_pci_device *pci_dev)
{
	return cnxk_nix_remove(pci_dev);
}

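/* PCI probe: overrides the platform-specific ops tables, runs the common
 * cnxk probe, and on the primary process applies the CN10K A0 DROP_RE
 * restrictions and registers the PTP info callback.
 */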
static int
cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	int rc;

	if (RTE_CACHE_LINE_SIZE != 64) {
		plt_err("Driver not compiled for CN10K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc) {
		plt_err("Failed to initialize platform model, rc=%d", rc);
		return rc;
	}

	nix_eth_dev_ops_override();
	nix_tm_ops_override();
	npc_flow_ops_override();

	cn10k_eth_sec_ops_override();

	/* Common probe */
	rc = cnxk_nix_probe(pci_drv, pci_dev);
	if (rc)
		return rc;

	/* Find eth dev allocated */
	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return -ENOENT;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		cn10k_eth_set_tx_function(eth_dev);
		cn10k_eth_set_rx_function(eth_dev);
		return 0;
	}

	dev = cnxk_eth_pmd_priv(eth_dev);

	/* DROP_RE is not supported with inline IPsec on CN10K A0, nor when
	 * vector mode is enabled.
	 */
	if ((roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
	     roc_model_is_cnf10kb_a0()) &&
	    !roc_env_is_asim()) {
		dev->ipsecd_drop_re_dis = 1;
		dev->vec_drop_re_dis = 1;
	}

	/* Register mbox up-message callback for PTP information updates */
	roc_nix_ptp_info_cb_register(&dev->nix, cn10k_nix_ptp_info_update_cb);

	return 0;
}

static const struct rte_pci_id cn10k_pci_nix_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_AF_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn10k_pci_nix = {
	.id_table = cn10k_pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = cn10k_nix_probe,
	.remove = cn10k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn10k, cn10k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn10k, cn10k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn10k, "vfio-pci");