/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include "cn10k_ethdev.h"
#include "cn10k_flow.h"
#include "cn10k_rx.h"
#include "cn10k_tx.h"

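/* Translate the ethdev Rx offload configuration into NIX_RX_OFFLOAD_*
 * fastpath flag bits used to select the Rx burst function.
 */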
static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	uint16_t flags = 0;

	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
		flags |= NIX_RX_OFFLOAD_RSS_F;

	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		flags |= NIX_RX_MULTI_SEG_F;

	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
		flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	if (!dev->ptype_disable)
		flags |= NIX_RX_OFFLOAD_PTYPE_F;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		flags |= NIX_RX_OFFLOAD_SECURITY_F;

	if (dev->rx_mark_update)
		flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;

	return flags;
}

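/* Translate the ethdev Tx offload configuration into NIX_TX_OFFLOAD_*
 * fastpath flag bits used to select the Tx burst function.
 */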
static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t conf = dev->tx_offloads;
	uint16_t flags = 0;

	/* Fastpath is dependent on these enums */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, buf_iova) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_mbuf, buf_iova) + 16);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, ol_flags) + 12);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));

	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;

	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		flags |= NIX_TX_MULTI_SEG_F;

	/* Enable Inner checksum for TSO */
	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	/* Enable Inner and Outer checksum for Tunnel TSO */
	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
		flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
		flags |= NIX_TX_OFFLOAD_SECURITY_F;

	if (dev->tx_mark)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	return flags;
}

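/* Enable or disable packet type parsing in the Rx fastpath as per the
 * requested ptype mask and reselect the Rx burst function accordingly.
 */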
static int
cn10k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (ptype_mask) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 0;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 1;
	}

	cn10k_eth_set_rx_function(eth_dev);
	return 0;
}

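/* Precompute send header word0 (descriptor size and SQ id) for a single
 * segment packet so the Tx fastpath can reuse it for every descriptor.
 */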
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn10k_eth_txq *txq,
		      uint16_t qid)
{
	union nix_send_hdr_w0_u send_hdr_w0;

	/* Initialize the fields based on basic single segment packet */
	send_hdr_w0.u = 0;
	if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
		send_hdr_w0.sizem1 = 2;
		if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
			/* Default: one seg packet would have:
			 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
			 * => 8/2 - 1 = 3
			 */
			send_hdr_w0.sizem1 = 3;

			/* To calculate the offset for send_mem,
			 * send_hdr->w0.sizem1 * 2
			 */
			txq->ts_mem = dev->tstamp.tx_tstamp_iova;
		}
	} else {
		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
		send_hdr_w0.sizem1 = 1;
	}
	send_hdr_w0.sq = qid;
	txq->send_hdr_w0 = send_hdr_w0.u;
	rte_wmb();
}

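/* Platform specific Tx queue setup: run the common cnxk setup, then populate
 * the fastpath queue with LMT base, flow control memory, CPT info for inline
 * outbound IPsec, marking state and the default send header.
 */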
static int
cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			 uint16_t nb_desc, unsigned int socket,
			 const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	struct roc_cpt_lf *inl_lf;
	struct cn10k_eth_txq *txq;
	struct roc_nix_sq *sq;
	uint16_t crypto_qid;
	int rc;

	RTE_SET_USED(socket);

	/* Common Tx queue setup */
	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn10k_eth_txq), tx_conf);
	if (rc)
		return rc;

	sq = &dev->sqs[qid];
	/* Update fast path queue */
	txq = eth_dev->data->tx_queues[qid];
	txq->fc_mem = sq->fc;
	/* Store lmt base in tx queue for easy access */
	txq->lmt_base = nix->lmt_base;
	txq->io_addr = sq->io_addr;
	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

	/* Fetch CPT LF info for outbound if present */
	if (dev->outb.lf_base) {
		crypto_qid = qid % dev->outb.nb_crypto_qs;
		inl_lf = dev->outb.lf_base + crypto_qid;

		txq->cpt_io_addr = inl_lf->io_addr;
		txq->cpt_fc = inl_lf->fc_addr;
		txq->cpt_fc_sw = (int32_t *)((uintptr_t)dev->outb.fc_sw_mem +
					     crypto_qid * RTE_CACHE_LINE_SIZE);

		/* Budget ~70% of the CPT LF descriptors for this Tx queue */
		txq->cpt_desc = inl_lf->nb_desc * 0.7;
		/* SA base is 2^16 aligned (asserted below), so the port id
		 * can be carried in its low bits for the fastpath.
		 */
		txq->sa_base = (uint64_t)dev->outb.sa_base;
		txq->sa_base |= eth_dev->data->port_id;
		PLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16));
	}

	/* Restore marking flag from roc */
	mark_fmt = roc_nix_tm_mark_format_get(nix, &mark_flag);
	txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
	txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;

	nix_form_default_desc(dev, txq, qid);
	txq->lso_tun_fmt = dev->lso_tun_fmt;
	return 0;
}

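/* Platform specific Rx queue setup: run the common cnxk setup, then populate
 * the fastpath queue with CQ ring details, mbuf initializer, inline IPsec SA
 * base and the ptype lookup memory.
 */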
static int
cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			 uint16_t nb_desc, unsigned int socket,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct cn10k_eth_rxq *rxq;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	int rc;

	RTE_SET_USED(socket);

	/* CQ Errata needs min 4K ring */
	if (dev->cq_min_4k && nb_desc < 4096)
		nb_desc = 4096;

	/* Common Rx queue setup */
	rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn10k_eth_rxq), rx_conf, mp);
	if (rc)
		return rc;

	/* Do initial mtu setup for RQ0 before device start */
	if (!qid) {
		rc = nix_recalc_mtu(eth_dev);
		if (rc)
			return rc;

		/* Update offload flags */
		dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
		dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);
	}

	rq = &dev->rqs[qid];
	cq = &dev->cqs[qid];

	/* Update fast path queue */
	rxq = eth_dev->data->rx_queues[qid];
	rxq->rq = qid;
	rxq->desc = (uintptr_t)cq->desc_base;
	rxq->cq_door = cq->door;
	rxq->cq_status = cq->status;
	rxq->wdata = cq->wdata;
	rxq->head = cq->head;
	rxq->qmask = cq->qmask;
	rxq->tstamp = &dev->tstamp;

	/* Data offset from data to start of mbuf is first_skip */
	rxq->data_off = rq->first_skip;
	rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);

	/* Setup security related info */
	if (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F) {
		rxq->lmt_base = dev->nix.lmt_base;
		rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,
							   dev->inb.inl_dev);
	}
	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	rxq->aura_handle = rxq_sp->qconf.mp->pool_id;

	/* Lookup mem */
	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
	return 0;
}

static int
cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
	int rc;

	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
	if (rc)
		return rc;

	/* Clear fc cache pkts to trigger worker stop */
	txq->fc_cache_pkts = 0;
	return 0;
}

static int
cn10k_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	/* Common nix configure */
	rc = cnxk_nix_configure(eth_dev);
	if (rc)
		return rc;

	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
		/* Register callback to handle security error work */
		roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);
	}

	/* Update offload flags */
	dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);

	/* Reset reassembly dynfield/flag offset */
	dev->reass_dynfield_off = -1;
	dev->reass_dynflag_bit = -1;

	plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
		    " tx_offload_flags=0x%x",
		    eth_dev->data->port_id, dev->rx_offload_flags,
		    dev->tx_offload_flags);
	return 0;
}

/* Function to enable ptp config for VFs */
static void
nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (nix_recalc_mtu(eth_dev))
		plt_err("Failed to set MTU size for ptp");

	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	/* Setting up the function pointers as per new offload flags */
	cn10k_eth_set_rx_function(eth_dev);
	cn10k_eth_set_tx_function(eth_dev);
}

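/* Dummy Rx burst installed on a VF when the PF reports PTP enable: it only
 * re-applies the timestamp offload via nix_ptp_enable_vf() (which also
 * restores the real burst functions) and returns zero packets.
 */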
static uint16_t
nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	struct cn10k_eth_rxq *rxq = queue;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct rte_eth_dev *eth_dev;

	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	eth_dev = rxq_sp->dev->eth_dev;
	nix_ptp_enable_vf(eth_dev);

	return 0;
}

static int
cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
{
	struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
	struct rte_eth_dev *eth_dev;
	struct cn10k_eth_rxq *rxq;
	int i;

	if (!dev)
		return -EINVAL;

	eth_dev = dev->eth_dev;
	if (!eth_dev)
		return -EINVAL;

	dev->ptp_en = ptp_en;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
	}

	if (roc_nix_is_vf_or_sdp(nix) && !(roc_nix_is_sdp(nix)) &&
	    !(roc_nix_is_lbk(nix))) {
		/* In case of VF, setting of MTU cannot be done directly in this
		 * function as this is running as part of MBOX request(PF->VF)
		 * and MTU setting also requires MBOX message to be
		 * sent(VF->PF)
		 */
		eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
		rte_mb();
	}

	return 0;
}

static int
cn10k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int i, rc;

	rc = cnxk_nix_timesync_enable(eth_dev);
	if (rc)
		return rc;

	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

	/* Setting up the rx[tx]_offload_flags due to change
	 * in rx[tx]_offloads.
	 */
	cn10k_eth_set_rx_function(eth_dev);
	cn10k_eth_set_tx_function(eth_dev);
	return 0;
}

static int
cn10k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int i, rc;

	rc = cnxk_nix_timesync_disable(eth_dev);
	if (rc)
		return rc;

	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

	/* Setting up the rx[tx]_offload_flags due to change
	 * in rx[tx]_offloads.
	 */
	cn10k_eth_set_rx_function(eth_dev);
	cn10k_eth_set_tx_function(eth_dev);
	return 0;
}

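/* The captured Tx timestamp is stored as seconds in the upper 32 bits and
 * nanoseconds in the lower 32 bits; convert it to nanoseconds before feeding
 * it to the timecounter.
 */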
static int
cn10k_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
				     struct timespec *timestamp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_timesync_info *tstamp = &dev->tstamp;
	uint64_t ns;

	if (*tstamp->tx_tstamp == 0)
		return -EINVAL;

	*tstamp->tx_tstamp = ((*tstamp->tx_tstamp >> 32) * NSEC_PER_SEC) +
			     (*tstamp->tx_tstamp & 0xFFFFFFFFUL);
	ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp);
	*timestamp = rte_ns_to_timespec(ns);
	*tstamp->tx_tstamp = 0;
	rte_wmb();

	return 0;
}

static int
cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Common eth dev start */
	rc = cnxk_nix_dev_start(eth_dev);
	if (rc)
		return rc;

	/* Update VF about data off shifted by 8 bytes if PTP already
	 * enabled in PF owning this VF
	 */
	if (dev->ptp_en && (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix))))
		nix_ptp_enable_vf(eth_dev);

	/* Setting up the rx[tx]_offload_flags due to change
	 * in rx[tx]_offloads.
	 */
	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

	cn10k_eth_set_tx_function(eth_dev);
	cn10k_eth_set_rx_function(eth_dev);
	return 0;
}

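/* Negotiate Rx metadata delivery (user flag/mark). Enabling it switches the
 * Rx fastpath to the mark-update variant.
 */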
static int
cn10k_nix_rx_metadata_negotiate(struct rte_eth_dev *eth_dev, uint64_t *features)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	*features &=
		(RTE_ETH_RX_METADATA_USER_FLAG | RTE_ETH_RX_METADATA_USER_MARK);

	if (*features) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
		dev->rx_mark_update = true;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
		dev->rx_mark_update = false;
	}

	cn10k_eth_set_rx_function(eth_dev);

	return 0;
}

static int
cn10k_nix_reassembly_capability_get(struct rte_eth_dev *eth_dev,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc = -ENOTSUP;
	RTE_SET_USED(eth_dev);

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
		reassembly_capa->timeout_ms = 60 * 1000;
		reassembly_capa->max_frags = 4;
		reassembly_capa->flags = RTE_ETH_DEV_REASSEMBLY_F_IPV4 |
					 RTE_ETH_DEV_REASSEMBLY_F_IPV6;
		rc = 0;
	}

	return rc;
}

static int
cn10k_nix_reassembly_conf_get(struct rte_eth_dev *eth_dev,
		struct rte_eth_ip_reassembly_params *conf)
{
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(conf);
	return -ENOTSUP;
}

static int
cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc = 0;

	if (!conf->flags) {
		/* Clear offload flags on disable */
		dev->rx_offload_flags &= ~NIX_RX_REAS_F;
		return 0;
	}

	rc = roc_nix_reassembly_configure(conf->timeout_ms,
					  conf->max_frags);
	if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		dev->rx_offload_flags |= NIX_RX_REAS_F;

	return rc;
}

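/* The three traffic manager marking callbacks below follow the same pattern:
 * apply the marking via the common cnxk handler, then refresh the Tx offload
 * flags, the per-queue marking state and the Tx burst function.
 */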
static int
cn10k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
			   int mark_yellow, int mark_red,
			   struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int rc, i;

	rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
				       mark_red, error);

	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn10k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

static int
cn10k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
			 int mark_yellow, int mark_red,
			 struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int rc, i;

	rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
				     error);
	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn10k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

static int
cn10k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
			  int mark_yellow, int mark_red,
			  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int rc, i;

	rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
				      mark_red, error);
	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn10k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
	cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
	cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
	cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
	cnxk_eth_dev_ops.dev_start = cn10k_nix_dev_start;
	cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
	cnxk_eth_dev_ops.timesync_enable = cn10k_nix_timesync_enable;
	cnxk_eth_dev_ops.timesync_disable = cn10k_nix_timesync_disable;
	cnxk_eth_dev_ops.rx_metadata_negotiate =
		cn10k_nix_rx_metadata_negotiate;
	cnxk_eth_dev_ops.timesync_read_tx_timestamp =
		cn10k_nix_timesync_read_tx_timestamp;
	cnxk_eth_dev_ops.ip_reassembly_capability_get =
		cn10k_nix_reassembly_capability_get;
	cnxk_eth_dev_ops.ip_reassembly_conf_get = cn10k_nix_reassembly_conf_get;
	cnxk_eth_dev_ops.ip_reassembly_conf_set = cn10k_nix_reassembly_conf_set;
}

/* Update platform specific tm ops */
static void
nix_tm_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_tm_ops.mark_vlan_dei = cn10k_nix_tm_mark_vlan_dei;
	cnxk_tm_ops.mark_ip_ecn = cn10k_nix_tm_mark_ip_ecn;
	cnxk_tm_ops.mark_ip_dscp = cn10k_nix_tm_mark_ip_dscp;
}

static void
npc_flow_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_flow_ops.create = cn10k_flow_create;
	cnxk_flow_ops.destroy = cn10k_flow_destroy;
}

static int
cn10k_nix_remove(struct rte_pci_device *pci_dev)
{
	return cnxk_nix_remove(pci_dev);
}

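/* CN10K probe: override the platform specific ops, run the common cnxk probe
 * and finish fastpath/PTP setup on the primary process.
 */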
static int
cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	int rc;

	if (RTE_CACHE_LINE_SIZE != 64) {
		plt_err("Driver not compiled for CN10K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc) {
		plt_err("Failed to initialize platform model, rc=%d", rc);
		return rc;
	}

	nix_eth_dev_ops_override();
	nix_tm_ops_override();
	npc_flow_ops_override();

	cn10k_eth_sec_ops_override();

	/* Common probe */
	rc = cnxk_nix_probe(pci_drv, pci_dev);
	if (rc)
		return rc;

	/* Find eth dev allocated */
	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev) {
		/* Ignore if ethdev is in mid of detach state in secondary */
		if (rte_eal_process_type() != RTE_PROC_PRIMARY)
			return 0;
		return -ENOENT;
	}

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		cn10k_eth_set_tx_function(eth_dev);
		cn10k_eth_set_rx_function(eth_dev);
		return 0;
	}

	dev = cnxk_eth_pmd_priv(eth_dev);

	/* DROP_RE is not supported with inline IPSec for CN10K A0 and
	 * when vector mode is enabled.
	 */
	if (roc_errata_nix_has_no_drop_re() && !roc_env_is_asim()) {
		dev->ipsecd_drop_re_dis = 1;
		dev->vec_drop_re_dis = 1;
	}

	/* Register up msg callbacks for PTP information */
	roc_nix_ptp_info_cb_register(&dev->nix, cn10k_nix_ptp_info_update_cb);

	return 0;
}

static const struct rte_pci_id cn10k_pci_nix_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_AF_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn10k_pci_nix = {
	.id_table = cn10k_pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = cn10k_nix_probe,
	.remove = cn10k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn10k, cn10k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn10k, cn10k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn10k, "vfio-pci");