/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_ethdev.h>
#include <rte_mbuf_pool_ops.h>

#include "otx2_ethdev.h"

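/* Update the device MTU. The resulting frame size is validated against
 * the NIX min/max FRS limits and the Rx buffer size (scattered Rx must
 * already be enabled if a frame cannot fit into a single mbuf), then
 * programmed into HW via two mailbox requests: one that also updates
 * the SMQ, and one that sets the Rx MAXLEN alone.
 */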
int
otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        uint32_t buffsz, frame_size = mtu + NIX_L2_OVERHEAD;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_frs_cfg *req;
        int rc;

        frame_size += NIX_TIMESYNC_RX_OFFSET * otx2_ethdev_is_ptp_en(dev);

        /* Check if MTU is within the allowed range */
        if (frame_size < NIX_MIN_FRS || frame_size > NIX_MAX_FRS)
                return -EINVAL;

        buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

        /* Refuse MTU that requires the support of scattered packets
         * when this feature has not been enabled before.
         */
        if (data->dev_started && frame_size > buffsz &&
            !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
                return -EINVAL;

        /* Check <seg size> * <max_seg> >= max_frame */
        if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
            (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
                return -EINVAL;

        req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
        req->update_smq = true;
        if (otx2_dev_is_sdp(dev))
                req->sdp_link = true;
        /* FRS HW config should exclude FCS but include NPC VTAG insert size */
        req->maxlen = frame_size - RTE_ETHER_CRC_LEN + NIX_MAX_VTAG_ACT_SIZE;

        rc = otx2_mbox_process(mbox);
        if (rc)
                return rc;

        /* Now just update Rx MAXLEN */
        req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
        req->maxlen = frame_size - RTE_ETHER_CRC_LEN;
        if (otx2_dev_is_sdp(dev))
                req->sdp_link = true;

        rc = otx2_mbox_process(mbox);
        if (rc)
                return rc;

        if (frame_size > RTE_ETHER_MAX_LEN)
                dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        /* Update max_rx_pkt_len */
        data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        return rc;
}

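/* Re-derive the MTU from the configured max_rx_pkt_len and program it,
 * enabling multi-segment Rx first if jumbo frames require it.
 */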
int
otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_dev_data *data = eth_dev->data;
        struct otx2_eth_rxq *rxq;
        uint16_t mtu;
        int rc;

        rxq = data->rx_queues[0];

        /* Setup scatter mode if needed by jumbo */
        otx2_nix_enable_mseg_on_jumbo(rxq);

        /* Setup MTU based on max_rx_pkt_len */
        mtu = data->dev_conf.rxmode.max_rx_pkt_len - NIX_L2_OVERHEAD;

        rc = otx2_nix_mtu_set(eth_dev, mtu);
        if (rc)
                otx2_err("Failed to set default MTU size %d", rc);

        return rc;
}

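/* Toggle promiscuous mode at the CGX (MAC) level. VF and SDP
 * interfaces have no CGX mapping, so this is a no-op for them.
 */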
static void
nix_cgx_promisc_config(struct rte_eth_dev *eth_dev, int en)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_mbox *mbox = dev->mbox;

        if (otx2_dev_is_vf_or_sdp(dev))
                return;

        if (en)
                otx2_mbox_alloc_msg_cgx_promisc_enable(mbox);
        else
                otx2_mbox_alloc_msg_cgx_promisc_disable(mbox);

        otx2_mbox_process(mbox);
}

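/* Toggle promiscuous mode at the NIX Rx level by reprogramming the Rx
 * mode through the AF mailbox; VFs rely on the PF configuration and
 * are skipped here.
 */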
void
otx2_nix_promisc_config(struct rte_eth_dev *eth_dev, int en)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_rx_mode *req;

        if (otx2_dev_is_vf(dev))
                return;

        req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox);

        if (en)
                req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC;

        otx2_mbox_process(mbox);
        eth_dev->data->promiscuous = en;
        otx2_nix_vlan_update_promisc(eth_dev, en);
}

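/* ethdev promiscuous_enable/disable callbacks: update both the NIX Rx
 * mode and the CGX MAC level filter. On disable, the NIX Rx mode is
 * restored taking the DMAC filter state into account.
 */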
int
otx2_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
        otx2_nix_promisc_config(eth_dev, 1);
        nix_cgx_promisc_config(eth_dev, 1);

        return 0;
}

int
otx2_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

        otx2_nix_promisc_config(eth_dev, dev->dmac_filter_enable);
        nix_cgx_promisc_config(eth_dev, 0);
        dev->dmac_filter_enable = false;

        return 0;
}

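/* Toggle all-multicast Rx mode. When turning it off, promiscuous mode
 * is preserved if it is still enabled on the port.
 */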
static void
nix_allmulticast_config(struct rte_eth_dev *eth_dev, int en)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_rx_mode *req;

        if (otx2_dev_is_vf(dev))
                return;

        req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox);

        if (en)
                req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_ALLMULTI;
        else if (eth_dev->data->promiscuous)
                req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC;

        otx2_mbox_process(mbox);
}

int
otx2_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        nix_allmulticast_config(eth_dev, 1);

        return 0;
}

int
otx2_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        nix_allmulticast_config(eth_dev, 0);

        return 0;
}

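/* Fill rte_eth_rxq_info from the queue's saved configuration; the
 * threshold fields are unused by this PMD and reported as zero.
 */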
void
otx2_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                      struct rte_eth_rxq_info *qinfo)
{
        struct otx2_eth_rxq *rxq;

        rxq = eth_dev->data->rx_queues[queue_id];

        qinfo->mp = rxq->pool;
        qinfo->scattered_rx = eth_dev->data->scattered_rx;
        qinfo->nb_desc = rxq->qconf.nb_desc;

        qinfo->conf.rx_free_thresh = 0;
        qinfo->conf.rx_drop_en = 0;
        qinfo->conf.rx_deferred_start = 0;
        qinfo->conf.offloads = rxq->offloads;
}

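/* Fill rte_eth_txq_info from the queue's saved configuration; the Tx
 * threshold fields are likewise unused and reported as zero.
 */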
void
otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                      struct rte_eth_txq_info *qinfo)
{
        struct otx2_eth_txq *txq;

        txq = eth_dev->data->tx_queues[queue_id];

        qinfo->nb_desc = txq->qconf.nb_desc;

        qinfo->conf.tx_thresh.pthresh = 0;
        qinfo->conf.tx_thresh.hthresh = 0;
        qinfo->conf.tx_thresh.wthresh = 0;

        qinfo->conf.tx_free_thresh = 0;
        qinfo->conf.tx_rs_thresh = 0;
        qinfo->conf.offloads = txq->offloads;
        qinfo->conf.tx_deferred_start = 0;
}

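/* Report the active Rx burst mode as a human-readable string: the
 * scalar/vector variant followed by the offload flags baked into the
 * selected Rx burst function.
 */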
int
otx2_rx_burst_mode_get(struct rte_eth_dev *eth_dev,
                       __rte_unused uint16_t queue_id,
                       struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint16_t flags;
                const char *output;
        } rx_offload_map[] = {
                        {NIX_RX_OFFLOAD_RSS_F, "RSS,"},
                        {NIX_RX_OFFLOAD_PTYPE_F, " Ptype,"},
                        {NIX_RX_OFFLOAD_CHECKSUM_F, " Checksum,"},
                        {NIX_RX_OFFLOAD_VLAN_STRIP_F, " VLAN Strip,"},
                        {NIX_RX_OFFLOAD_MARK_UPDATE_F, " Mark Update,"},
                        {NIX_RX_OFFLOAD_TSTAMP_F, " Timestamp,"},
                        {NIX_RX_MULTI_SEG_F, " Scattered,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
                                                 "Scalar, Rx Offloads:"
        };
        uint32_t i;

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Rx offload info */
        for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
                if (dev->rx_offload_flags & rx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         rx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}

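/* Same as above for the Tx path: report the scalar/vector variant and
 * the Tx offload flags in use.
 */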
int
otx2_tx_burst_mode_get(struct rte_eth_dev *eth_dev,
                       __rte_unused uint16_t queue_id,
                       struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint16_t flags;
                const char *output;
        } tx_offload_map[] = {
                        {NIX_TX_OFFLOAD_L3_L4_CSUM_F, " Inner L3/L4 csum,"},
                        {NIX_TX_OFFLOAD_OL3_OL4_CSUM_F, " Outer L3/L4 csum,"},
                        {NIX_TX_OFFLOAD_VLAN_QINQ_F, " VLAN Insertion,"},
                        {NIX_TX_OFFLOAD_MBUF_NOFF_F, " MBUF free disable,"},
                        {NIX_TX_OFFLOAD_TSTAMP_F, " Timestamp,"},
                        {NIX_TX_OFFLOAD_TSO_F, " TSO,"},
                        {NIX_TX_MULTI_SEG_F, " Scattered,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
                                                 "Scalar, Tx Offloads:"
        };
        uint32_t i;

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Tx offload info */
        for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
                if (dev->tx_offload_flags & tx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         tx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}

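/* Read the CQ head/tail pointers from NIX_LF_CQ_OP_STATUS. The atomic
 * add with the queue index in bits [63:32] returns the 64-bit status
 * word, from which the 20-bit tail and head indices are extracted.
 * On an operation or CQ error the queue is reported as empty.
 */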
static void
nix_rx_head_tail_get(struct otx2_eth_dev *dev,
                     uint32_t *head, uint32_t *tail, uint16_t queue_idx)
{
        uint64_t reg, val;

        if (head == NULL || tail == NULL)
                return;

        reg = (((uint64_t)queue_idx) << 32);
        val = otx2_atomic64_add_nosync(reg, (int64_t *)
                                       (dev->base + NIX_LF_CQ_OP_STATUS));
        if (val & (OP_ERR | CQ_ERR))
                val = 0;

        *tail = (uint32_t)(val & 0xFFFFF);
        *head = (uint32_t)((val >> 20) & 0xFFFFF);
}

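/* Number of used descriptors in an Rx queue: the distance from head
 * to tail modulo the ring size.
 */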
uint32_t
otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t queue_idx)
{
        struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[queue_idx];
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint32_t head, tail;

        nix_rx_head_tail_get(dev, &head, &tail, queue_idx);
        return (tail - head) % rxq->qlen;
}

static inline int
nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset)
{
        /* Check if the given offset (queue index) has a packet filled by HW */
        if (tail > head && offset <= tail && offset >= head)
                return 1;
        /* Wrap around case */
        if (head > tail && (offset >= head || offset <= tail))
                return 1;

        return 0;
}

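/* Legacy rx_descriptor_done callback: report whether the CQ entry at
 * the given offset has been filled by HW.
 */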
int
otx2_nix_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
        struct otx2_eth_rxq *rxq = rx_queue;
        uint32_t head, tail;

        nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
                             &head, &tail, rxq->rq);

        return nix_offset_has_packet(head, tail, offset);
}

int
otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
        struct otx2_eth_rxq *rxq = rx_queue;
        uint32_t head, tail;

        if (rxq->qlen <= offset)
                return -EINVAL;

        nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
                             &head, &tail, rxq->rq);

        if (nix_offset_has_packet(head, tail, offset))
                return RTE_ETH_RX_DESC_DONE;
        else
                return RTE_ETH_RX_DESC_AVAIL;
}

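/* Read the SQ head/tail pointers from NIX_LF_SQ_OP_STATUS; the 6-bit
 * head and tail fields sit at bit positions 20 and 28 of the status
 * word respectively.
 */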
static void
nix_tx_head_tail_get(struct otx2_eth_dev *dev,
                     uint32_t *head, uint32_t *tail, uint16_t queue_idx)
{
        uint64_t reg, val;

        if (head == NULL || tail == NULL)
                return;

        reg = (((uint64_t)queue_idx) << 32);
        val = otx2_atomic64_add_nosync(reg, (int64_t *)
                                       (dev->base + NIX_LF_SQ_OP_STATUS));
        if (val & OP_ERR)
                val = 0;

        *tail = (uint32_t)((val >> 28) & 0x3F);
        *head = (uint32_t)((val >> 20) & 0x3F);
}

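/* Report whether the Tx descriptor at the given offset has been
 * processed by HW or is still pending in the send queue.
 */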
int
otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
        struct otx2_eth_txq *txq = tx_queue;
        uint32_t head, tail;

        if (txq->qconf.nb_desc <= offset)
                return -EINVAL;

        nix_tx_head_tail_get(txq->dev, &head, &tail, txq->sq);

        if (nix_offset_has_packet(head, tail, offset))
                return RTE_ETH_TX_DESC_DONE;
        else
                return RTE_ETH_TX_DESC_FULL;
}

/* It is a NOP for octeontx2 as HW frees the buffer on xmit */
int
otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
        RTE_SET_USED(txq);
        RTE_SET_USED(free_cnt);

        return 0;
}

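/* There is no firmware version as such; the loaded MKEX profile name
 * is reported instead. Per the fw_version_get convention, the required
 * buffer size is returned when fw_size is too small.
 */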
int
otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
                        size_t fw_size)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int rc = (int)fw_size;

        if (fw_size > sizeof(dev->mkex_pfl_name))
                rc = sizeof(dev->mkex_pfl_name);

        rc = strlcpy(fw_version, (char *)dev->mkex_pfl_name, rc);

        rc += 1; /* Add the size of '\0' */
        if (fw_size < (uint32_t)rc)
                return rc;

        return 0;
}

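/* Only the platform mempool ops are supported by this PMD */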
int
otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
        RTE_SET_USED(eth_dev);

        if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
                return 0;

        return -ENOTSUP;
}

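/* Only generic (rte_flow) filtering is supported; RTE_ETH_FILTER_GET
 * returns the PMD's rte_flow ops.
 */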
int
otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                         enum rte_filter_type filter_type,
                         enum rte_filter_op filter_op, void *arg)
{
        RTE_SET_USED(eth_dev);

        if (filter_type != RTE_ETH_FILTER_GENERIC) {
                otx2_err("Unsupported filter type %d", filter_type);
                return -ENOTSUP;
        }

        if (filter_op == RTE_ETH_FILTER_GET) {
                *(const void **)arg = &otx2_flow_ops;
                return 0;
        }

        otx2_err("Invalid filter_op %d", filter_op);
        return -EINVAL;
}

static struct cgx_fw_data *
nix_get_fwdata(struct otx2_eth_dev *dev)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct cgx_fw_data *rsp = NULL;
        int rc;

        otx2_mbox_alloc_msg_cgx_get_aux_link_info(mbox);

        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc) {
                otx2_err("Failed to get fw data: %d", rc);
                return NULL;
        }

        return rsp;
}

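/* SFP/QSFP module identification and EEPROM contents come from the
 * CGX firmware data fetched above.
 */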
int
otx2_nix_get_module_info(struct rte_eth_dev *eth_dev,
                         struct rte_eth_dev_module_info *modinfo)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct cgx_fw_data *rsp;

        rsp = nix_get_fwdata(dev);
        if (rsp == NULL)
                return -EIO;

        modinfo->type = rsp->fwdata.sfp_eeprom.sff_id;
        modinfo->eeprom_len = SFP_EEPROM_SIZE;

        return 0;
}

int
otx2_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
                           struct rte_dev_eeprom_info *info)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct cgx_fw_data *rsp;

        if (!info->data || !info->length ||
            (info->offset + info->length > SFP_EEPROM_SIZE))
                return -EINVAL;

        rsp = nix_get_fwdata(dev);
        if (rsp == NULL)
                return -EIO;

        otx2_mbox_memcpy(info->data, rsp->fwdata.sfp_eeprom.buf + info->offset,
                         info->length);

        return 0;
}

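/* ethdev infos_get callback: expose device limits, offload
 * capabilities, descriptor limits and default configurations.
 */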
int
otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

        devinfo->min_rx_bufsize = NIX_MIN_FRS;
        devinfo->max_rx_pktlen = NIX_MAX_FRS;
        devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_mac_addrs = dev->max_mac_entries;
        devinfo->max_vfs = pci_dev->max_vfs;
        devinfo->max_mtu = devinfo->max_rx_pktlen - NIX_L2_OVERHEAD;
        devinfo->min_mtu = devinfo->min_rx_bufsize - NIX_L2_OVERHEAD;

        devinfo->rx_offload_capa = dev->rx_offload_capa;
        devinfo->tx_offload_capa = dev->tx_offload_capa;
        devinfo->rx_queue_offload_capa = 0;
        devinfo->tx_queue_offload_capa = 0;

        devinfo->reta_size = dev->rss_info.rss_size;
        devinfo->hash_key_size = NIX_HASH_KEY_SIZE;
        devinfo->flow_type_rss_offloads = NIX_RSS_OFFLOAD;

        devinfo->default_rxconf = (struct rte_eth_rxconf) {
                .rx_drop_en = 0,
                .offloads = 0,
        };

        devinfo->default_txconf = (struct rte_eth_txconf) {
                .offloads = 0,
        };

        devinfo->default_rxportconf = (struct rte_eth_dev_portconf) {
                .ring_size = NIX_RX_DEFAULT_RING_SZ,
        };

        devinfo->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = UINT16_MAX,
                .nb_min = NIX_RX_MIN_DESC,
                .nb_align = NIX_RX_MIN_DESC_ALIGN,
                .nb_seg_max = NIX_RX_NB_SEG_MAX,
                .nb_mtu_seg_max = NIX_RX_NB_SEG_MAX,
        };
        devinfo->rx_desc_lim.nb_max =
                RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
                                    NIX_RX_MIN_DESC_ALIGN);

        devinfo->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = UINT16_MAX,
                .nb_min = 1,
                .nb_align = 1,
                .nb_seg_max = NIX_TX_NB_SEG_MAX,
                .nb_mtu_seg_max = NIX_TX_NB_SEG_MAX,
        };

        /* Auto negotiation disabled */
        devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
        if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) {
                devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
                        ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;

                /* 50G and 100G to be supported for board version C0
                 * and above.
                 */
                if (!otx2_dev_is_Ax(dev))
                        devinfo->speed_capa |= ETH_LINK_SPEED_50G |
                                               ETH_LINK_SPEED_100G;
        }

        devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                            RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

        return 0;
}