1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
3 */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <fcntl.h>
13 #include <inttypes.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <rte_bus_pci.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_eal.h>
27 #include <rte_alarm.h>
28 #include <rte_ether.h>
29 #include <ethdev_driver.h>
30 #include <ethdev_pci.h>
31 #include <rte_string_fns.h>
32 #include <rte_malloc.h>
33 #include <rte_dev.h>
34
35 #include "base/vmxnet3_defs.h"
36
37 #include "vmxnet3_ring.h"
38 #include "vmxnet3_logs.h"
39 #include "vmxnet3_ethdev.h"
40
41 #define VMXNET3_TX_MAX_SEG UINT8_MAX
42
43 #define VMXNET3_TX_OFFLOAD_CAP \
44 (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
45 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
46 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
47 RTE_ETH_TX_OFFLOAD_TCP_TSO | \
48 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
49
50 #define VMXNET3_RX_OFFLOAD_CAP \
51 (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
52 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
53 RTE_ETH_RX_OFFLOAD_SCATTER | \
54 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
55 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
56 RTE_ETH_RX_OFFLOAD_TCP_LRO | \
57 RTE_ETH_RX_OFFLOAD_RSS_HASH)
58
59 int vmxnet3_segs_dynfield_offset = -1;
60
61 static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
62 static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
63 static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
64 static int vmxnet3_dev_start(struct rte_eth_dev *dev);
65 static int vmxnet3_dev_stop(struct rte_eth_dev *dev);
66 static int vmxnet3_dev_close(struct rte_eth_dev *dev);
67 static int vmxnet3_dev_reset(struct rte_eth_dev *dev);
68 static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
69 static int vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
70 static int vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
71 static int vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
72 static int vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
73 static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
74 int wait_to_complete);
75 static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
76 int wait_to_complete);
77 static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
78 static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
79 struct rte_eth_stats *stats);
80 static int vmxnet3_dev_stats_reset(struct rte_eth_dev *dev);
81 static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
82 struct rte_eth_xstat_name *xstats,
83 unsigned int n);
84 static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
85 struct rte_eth_xstat *xstats, unsigned int n);
86 static int vmxnet3_dev_info_get(struct rte_eth_dev *dev,
87 struct rte_eth_dev_info *dev_info);
88 static const uint32_t *
89 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
90 static int vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
91 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
92 uint16_t vid, int on);
93 static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
94 static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
95 struct rte_ether_addr *mac_addr);
96 static void vmxnet3_process_events(struct rte_eth_dev *dev);
97 static void vmxnet3_interrupt_handler(void *param);
98 static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
99 uint16_t queue_id);
100 static int vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
101 uint16_t queue_id);
102
103 /*
104 * The set of PCI devices this driver supports
105 */
106 #define VMWARE_PCI_VENDOR_ID 0x15AD
107 #define VMWARE_DEV_ID_VMXNET3 0x07B0
108 static const struct rte_pci_id pci_id_vmxnet3_map[] = {
109 { RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
110 { .vendor_id = 0, /* sentinel */ },
111 };
112
113 static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
114 .dev_configure = vmxnet3_dev_configure,
115 .dev_start = vmxnet3_dev_start,
116 .dev_stop = vmxnet3_dev_stop,
117 .dev_close = vmxnet3_dev_close,
118 .dev_reset = vmxnet3_dev_reset,
119 .promiscuous_enable = vmxnet3_dev_promiscuous_enable,
120 .promiscuous_disable = vmxnet3_dev_promiscuous_disable,
121 .allmulticast_enable = vmxnet3_dev_allmulticast_enable,
122 .allmulticast_disable = vmxnet3_dev_allmulticast_disable,
123 .link_update = vmxnet3_dev_link_update,
124 .stats_get = vmxnet3_dev_stats_get,
125 .xstats_get_names = vmxnet3_dev_xstats_get_names,
126 .xstats_get = vmxnet3_dev_xstats_get,
127 .stats_reset = vmxnet3_dev_stats_reset,
128 .mac_addr_set = vmxnet3_mac_addr_set,
129 .dev_infos_get = vmxnet3_dev_info_get,
130 .dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
131 .mtu_set = vmxnet3_dev_mtu_set,
132 .vlan_filter_set = vmxnet3_dev_vlan_filter_set,
133 .vlan_offload_set = vmxnet3_dev_vlan_offload_set,
134 .rx_queue_setup = vmxnet3_dev_rx_queue_setup,
135 .rx_queue_release = vmxnet3_dev_rx_queue_release,
136 .tx_queue_setup = vmxnet3_dev_tx_queue_setup,
137 .tx_queue_release = vmxnet3_dev_tx_queue_release,
138 .rx_queue_intr_enable = vmxnet3_dev_rx_queue_intr_enable,
139 .rx_queue_intr_disable = vmxnet3_dev_rx_queue_intr_disable,
140 };
141
142 struct vmxnet3_xstats_name_off {
143 char name[RTE_ETH_XSTATS_NAME_SIZE];
144 unsigned int offset;
145 };
146
147 /* tx_qX_ is prepended to the name string here */
148 static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
149 {"drop_total", offsetof(struct vmxnet3_txq_stats, drop_total)},
150 {"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
151 {"drop_tso", offsetof(struct vmxnet3_txq_stats, drop_tso)},
152 {"tx_ring_full", offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
153 };
154
155 /* rx_qX_ is prepended to the name string here */
156 static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
157 {"drop_total", offsetof(struct vmxnet3_rxq_stats, drop_total)},
158 {"drop_err", offsetof(struct vmxnet3_rxq_stats, drop_err)},
159 {"drop_fcs", offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
160 {"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
161 };
162
163 static const struct rte_memzone *
164 gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
165 const char *post_string, int socket_id,
166 uint16_t align, bool reuse)
167 {
168 char z_name[RTE_MEMZONE_NAMESIZE];
169 const struct rte_memzone *mz;
170
171 snprintf(z_name, sizeof(z_name), "eth_p%d_%s",
172 dev->data->port_id, post_string);
173
174 mz = rte_memzone_lookup(z_name);
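/*
 * Without reuse, free any stale zone registered under this name and allocate
 * a fresh IOVA-contiguous one; with reuse, return the existing zone if found.
 */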
175 if (!reuse) {
176 if (mz)
177 rte_memzone_free(mz);
178 return rte_memzone_reserve_aligned(z_name, size, socket_id,
179 RTE_MEMZONE_IOVA_CONTIG, align);
180 }
181
182 if (mz)
183 return mz;
184
185 return rte_memzone_reserve_aligned(z_name, size, socket_id,
186 RTE_MEMZONE_IOVA_CONTIG, align);
187 }
188
189 /*
190 * Enable the given interrupt
191 */
192 static void
193 vmxnet3_enable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
194 {
195 PMD_INIT_FUNC_TRACE();
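/* Each vector has an 8-byte IMR slot in BAR0: writing 0 unmasks it, 1 masks it */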
196 VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 0);
197 }
198
199 /*
200 * Disable the given interrupt
201 */
202 static void
203 vmxnet3_disable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
204 {
205 PMD_INIT_FUNC_TRACE();
206 VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 1);
207 }
208
209 /*
210 * Enable all intrs used by the device
211 */
212 static void
213 vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
214 {
215 Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
216
217 PMD_INIT_FUNC_TRACE();
218
219 devRead->intrConf.intrCtrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
220
221 if (hw->intr.lsc_only) {
222 vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
223 } else {
224 int i;
225
226 for (i = 0; i < hw->intr.num_intrs; i++)
227 vmxnet3_enable_intr(hw, i);
228 }
229 }
230
231 /*
232 * Disable all intrs used by the device
233 */
234 static void
235 vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw)
236 {
237 int i;
238
239 PMD_INIT_FUNC_TRACE();
240
241 hw->shared->devRead.intrConf.intrCtrl |=
242 rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
243 for (i = 0; i < hw->num_intrs; i++)
244 vmxnet3_disable_intr(hw, i);
245 }
246
247 /*
248 * Gets tx data ring descriptor size.
249 */
250 static uint16_t
251 eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
252 {
253 uint16 txdata_desc_size;
254
255 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
256 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
257 txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
258
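/* Fall back to the default descriptor size if the reported value is out of range or misaligned */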
259 return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
260 txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
261 txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
262 sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
263 }
264
265 /*
266 * It returns 0 on success.
267 */
268 static int
269 eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
270 {
271 struct rte_pci_device *pci_dev;
272 struct vmxnet3_hw *hw = eth_dev->data->dev_private;
273 uint32_t mac_hi, mac_lo, ver;
274 struct rte_eth_link link;
275 static const struct rte_mbuf_dynfield vmxnet3_segs_dynfield_desc = {
276 .name = VMXNET3_SEGS_DYNFIELD_NAME,
277 .size = sizeof(vmxnet3_segs_dynfield_t),
278 .align = __alignof__(vmxnet3_segs_dynfield_t),
279 };
280
281 PMD_INIT_FUNC_TRACE();
282
283 eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
284 eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
285 eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
286 eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
287 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
288
289 /* extra mbuf field is required to guess MSS */
290 vmxnet3_segs_dynfield_offset =
291 rte_mbuf_dynfield_register(&vmxnet3_segs_dynfield_desc);
292 if (vmxnet3_segs_dynfield_offset < 0) {
293 PMD_INIT_LOG(ERR, "Cannot register mbuf field.");
294 return -rte_errno;
295 }
296
297 /*
298 * for secondary processes, we don't initialize any further as primary
299 * has already done this work.
300 */
301 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
302 return 0;
303
304 rte_eth_copy_pci_info(eth_dev, pci_dev);
305 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
306
307 /* Vendor and Device ID need to be set before init of shared code */
308 hw->device_id = pci_dev->id.device_id;
309 hw->vendor_id = pci_dev->id.vendor_id;
310 hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
311 hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
312
313 hw->num_rx_queues = 1;
314 hw->num_tx_queues = 1;
315 hw->bufs_per_pkt = 1;
316
317 /* Check h/w version compatibility with driver. */
318 ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
319 PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
320
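/* Select the highest revision supported by both device and driver; hw->version stores it 1-based */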
321 if (ver & (1 << VMXNET3_REV_4)) {
322 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
323 1 << VMXNET3_REV_4);
324 hw->version = VMXNET3_REV_4 + 1;
325 } else if (ver & (1 << VMXNET3_REV_3)) {
326 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
327 1 << VMXNET3_REV_3);
328 hw->version = VMXNET3_REV_3 + 1;
329 } else if (ver & (1 << VMXNET3_REV_2)) {
330 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
331 1 << VMXNET3_REV_2);
332 hw->version = VMXNET3_REV_2 + 1;
333 } else if (ver & (1 << VMXNET3_REV_1)) {
334 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
335 1 << VMXNET3_REV_1);
336 hw->version = VMXNET3_REV_1 + 1;
337 } else {
338 PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
339 return -EIO;
340 }
341
342 PMD_INIT_LOG(INFO, "Using device v%d", hw->version);
343
344 /* Check UPT version compatibility with driver. */
345 ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
346 PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
347 if (ver & 0x1)
348 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
349 else {
350 PMD_INIT_LOG(ERR, "Incompatible UPT version.");
351 return -EIO;
352 }
353
354 /* Getting MAC Address */
355 mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
356 mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
357 memcpy(hw->perm_addr, &mac_lo, 4);
358 memcpy(hw->perm_addr + 4, &mac_hi, 2);
359
360 /* Allocate memory for storing MAC addresses */
361 eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", RTE_ETHER_ADDR_LEN *
362 VMXNET3_MAX_MAC_ADDRS, 0);
363 if (eth_dev->data->mac_addrs == NULL) {
364 PMD_INIT_LOG(ERR,
365 "Failed to allocate %d bytes needed to store MAC addresses",
366 RTE_ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
367 return -ENOMEM;
368 }
369 /* Copy the permanent MAC address */
370 rte_ether_addr_copy((struct rte_ether_addr *)hw->perm_addr,
371 &eth_dev->data->mac_addrs[0]);
372
373 PMD_INIT_LOG(DEBUG, "MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
374 hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
375 hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
376
377 /* Put device in Quiesce Mode */
378 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
379
380 /* allow untagged pkts */
381 VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
382
383 hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
384 eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);
385
386 hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
387 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
388 RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
389 hw->rxdata_desc_size);
390
391 /* clear shadow stats */
392 memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
393 memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
394
395 /* clear snapshot stats */
396 memset(hw->snapshot_tx_stats, 0, sizeof(hw->snapshot_tx_stats));
397 memset(hw->snapshot_rx_stats, 0, sizeof(hw->snapshot_rx_stats));
398
399 /* set the initial link status */
400 memset(&link, 0, sizeof(link));
401 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
402 link.link_speed = RTE_ETH_SPEED_NUM_10G;
403 link.link_autoneg = RTE_ETH_LINK_FIXED;
404 rte_eth_linkstatus_set(eth_dev, &link);
405
406 return 0;
407 }
408
409 static int
410 eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
411 {
412 struct vmxnet3_hw *hw = eth_dev->data->dev_private;
413
414 PMD_INIT_FUNC_TRACE();
415
416 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
417 return 0;
418
419 if (hw->adapter_stopped == 0) {
420 PMD_INIT_LOG(DEBUG, "Device has not been closed.");
421 return -EBUSY;
422 }
423
424 return 0;
425 }
426
427 static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
428 struct rte_pci_device *pci_dev)
429 {
430 return rte_eth_dev_pci_generic_probe(pci_dev,
431 sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
432 }
433
434 static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
435 {
436 return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
437 }
438
439 static struct rte_pci_driver rte_vmxnet3_pmd = {
440 .id_table = pci_id_vmxnet3_map,
441 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
442 .probe = eth_vmxnet3_pci_probe,
443 .remove = eth_vmxnet3_pci_remove,
444 };
445
446 static void
447 vmxnet3_alloc_intr_resources(struct rte_eth_dev *dev)
448 {
449 struct vmxnet3_hw *hw = dev->data->dev_private;
450 uint32_t cfg;
451 int nvec = 1; /* for link event */
452
453 /* intr settings */
454 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
455 VMXNET3_CMD_GET_CONF_INTR);
456 cfg = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
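/* GET_CONF_INTR reports the interrupt type in bits 1:0 and the mask mode in bits 3:2 */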
457 hw->intr.type = cfg & 0x3;
458 hw->intr.mask_mode = (cfg >> 2) & 0x3;
459
460 if (hw->intr.type == VMXNET3_IT_AUTO)
461 hw->intr.type = VMXNET3_IT_MSIX;
462
463 if (hw->intr.type == VMXNET3_IT_MSIX) {
464 /* only support shared tx/rx intr */
465 if (hw->num_tx_queues != hw->num_rx_queues)
466 goto msix_err;
467
468 nvec += hw->num_rx_queues;
469 hw->intr.num_intrs = nvec;
470 return;
471 }
472
473 msix_err:
474 /* the tx/rx queue interrupt will be disabled */
475 hw->intr.num_intrs = 2;
476 hw->intr.lsc_only = TRUE;
477 PMD_INIT_LOG(INFO, "Enabled MSI-X with %d vectors", hw->intr.num_intrs);
478 }
479
480 static int
481 vmxnet3_dev_configure(struct rte_eth_dev *dev)
482 {
483 const struct rte_memzone *mz;
484 struct vmxnet3_hw *hw = dev->data->dev_private;
485 size_t size;
486
487 PMD_INIT_FUNC_TRACE();
488
489 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
490 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
491
492 if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
493 dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
494 PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
495 return -EINVAL;
496 }
497
498 if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
499 PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
500 return -EINVAL;
501 }
502
503 size = dev->data->nb_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
504 dev->data->nb_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
505
506 if (size > UINT16_MAX)
507 return -EINVAL;
508
509 hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
510 hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
511
512 /*
513 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
514 * on current socket
515 */
516 mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
517 "shared", rte_socket_id(), 8, 1);
518
519 if (mz == NULL) {
520 PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
521 return -ENOMEM;
522 }
523 memset(mz->addr, 0, mz->len);
524
525 hw->shared = mz->addr;
526 hw->sharedPA = mz->iova;
527
528 /*
529 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
530 * on current socket.
531 *
532 * We cannot reuse this memzone from previous allocation as its size
533 * depends on the number of tx and rx queues, which could be different
534 * from one config to another.
535 */
536 mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
537 VMXNET3_QUEUE_DESC_ALIGN, 0);
538 if (mz == NULL) {
539 PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
540 return -ENOMEM;
541 }
542 memset(mz->addr, 0, mz->len);
543
544 hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
545 hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
546
547 hw->queueDescPA = mz->iova;
548 hw->queue_desc_len = (uint16_t)size;
549
550 if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
551 /* Allocate memory structure for UPT1_RSSConf and configure */
552 mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
553 "rss_conf", rte_socket_id(),
554 RTE_CACHE_LINE_SIZE, 1);
555 if (mz == NULL) {
556 PMD_INIT_LOG(ERR,
557 "ERROR: Creating rss_conf structure zone");
558 return -ENOMEM;
559 }
560 memset(mz->addr, 0, mz->len);
561
562 hw->rss_conf = mz->addr;
563 hw->rss_confPA = mz->iova;
564 }
565
566 vmxnet3_alloc_intr_resources(dev);
567
568 return 0;
569 }
570
571 static void
572 vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
573 {
574 uint32_t val;
575
576 PMD_INIT_LOG(DEBUG,
577 "Writing MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
578 addr[0], addr[1], addr[2],
579 addr[3], addr[4], addr[5]);
580
581 memcpy(&val, addr, 4);
582 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
583
584 memcpy(&val, addr + 4, 2);
585 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
586 }
587
588 /*
589 * Configure the hardware to generate MSI-X interrupts.
590 * If MSI-X setup fails, the caller falls back to link-state-change (LSC)
591 * interrupts only, with the Rx queue interrupts left disabled.
592 *
593 * Returns 0 on success and -1 otherwise.
594 */
595 static int
596 vmxnet3_configure_msix(struct rte_eth_dev *dev)
597 {
598 struct vmxnet3_hw *hw = dev->data->dev_private;
599 struct rte_intr_handle *intr_handle = dev->intr_handle;
600 uint16_t intr_vector;
601 int i;
602
603 hw->intr.event_intr_idx = 0;
604
605 /* only vfio-pci driver can support interrupt mode. */
606 if (!rte_intr_cap_multiple(intr_handle) ||
607 dev->data->dev_conf.intr_conf.rxq == 0)
608 return -1;
609
610 intr_vector = dev->data->nb_rx_queues;
611 if (intr_vector > VMXNET3_MAX_RX_QUEUES) {
612 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
613 VMXNET3_MAX_RX_QUEUES);
614 return -ENOTSUP;
615 }
616
617 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
618 PMD_INIT_LOG(ERR, "Failed to enable fastpath event fd");
619 return -1;
620 }
621
622 if (rte_intr_dp_is_en(intr_handle)) {
623 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
624 dev->data->nb_rx_queues)) {
625 PMD_INIT_LOG(ERR, "Failed to allocate %d Rx queues intr_vec",
626 dev->data->nb_rx_queues);
627 rte_intr_efd_disable(intr_handle);
628 return -ENOMEM;
629 }
630 }
631
632 if (!rte_intr_allow_others(intr_handle) &&
633 dev->data->dev_conf.intr_conf.lsc != 0) {
634 PMD_INIT_LOG(ERR, "not enough intr vector to support both Rx interrupt and LSC");
635 rte_intr_vec_list_free(intr_handle);
636 rte_intr_efd_disable(intr_handle);
637 return -1;
638 }
639
640 /* if we cannot allocate one MSI-X vector per queue, don't enable
641 * interrupt mode.
642 */
643 if (hw->intr.num_intrs !=
644 (rte_intr_nb_efd_get(intr_handle) + 1)) {
645 PMD_INIT_LOG(ERR, "Device configured with %d Rx intr vectors, expecting %d",
646 hw->intr.num_intrs,
647 rte_intr_nb_efd_get(intr_handle) + 1);
648 rte_intr_vec_list_free(intr_handle);
649 rte_intr_efd_disable(intr_handle);
650 return -1;
651 }
652
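/* Rx queue i uses MSI-X vector i + 1; vector 0 is reserved for link state change events */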
653 for (i = 0; i < dev->data->nb_rx_queues; i++)
654 if (rte_intr_vec_list_index_set(intr_handle, i, i + 1))
655 return -rte_errno;
656
657 for (i = 0; i < hw->intr.num_intrs; i++)
658 hw->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
659
660 PMD_INIT_LOG(INFO, "intr type %u, mode %u, %u vectors allocated",
661 hw->intr.type, hw->intr.mask_mode, hw->intr.num_intrs);
662
663 return 0;
664 }
665
666 static int
667 vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
668 {
669 struct vmxnet3_hw *hw = dev->data->dev_private;
670 Vmxnet3_DriverShared *shared = hw->shared;
671 Vmxnet3_CmdInfo *cmdInfo;
672 struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
673 uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
674 uint32_t num, i, j, size;
675
676 if (hw->memRegsPA == 0) {
677 const struct rte_memzone *mz;
678
679 size = sizeof(Vmxnet3_MemRegs) +
680 (VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
681 sizeof(Vmxnet3_MemoryRegion);
682
683 mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
684 1);
685 if (mz == NULL) {
686 PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
687 return -ENOMEM;
688 }
689 memset(mz->addr, 0, mz->len);
690 hw->memRegs = mz->addr;
691 hw->memRegsPA = mz->iova;
692 }
693
694 num = hw->num_rx_queues;
695
696 for (i = 0; i < num; i++) {
697 vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
698
699 mp[i] = rxq->mp;
700 index[i] = 1 << i;
701 }
702
703 /*
704 * The same mempool could be used by multiple queues. In such a case,
705 * remove duplicate mempool entries. Only one entry is kept with
706 * bitmask indicating queues that are using this mempool.
707 */
708 for (i = 1; i < num; i++) {
709 for (j = 0; j < i; j++) {
710 if (mp[i] == mp[j]) {
711 mp[i] = NULL;
712 index[j] |= 1 << i;
713 break;
714 }
715 }
716 }
717
718 j = 0;
719 for (i = 0; i < num; i++) {
720 if (mp[i] == NULL)
721 continue;
722
723 Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
724
725 mr->startPA =
726 (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
727 mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
728 STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
729 mr->txQueueBits = index[i];
730 mr->rxQueueBits = index[i];
731
732 PMD_INIT_LOG(INFO,
733 "index: %u startPA: %" PRIu64 " length: %u, "
734 "rxBits: %x",
735 j, mr->startPA, mr->length, mr->rxQueueBits);
736 j++;
737 }
738 hw->memRegs->numRegs = j;
739 PMD_INIT_LOG(INFO, "numRegs: %u", j);
740
741 size = sizeof(Vmxnet3_MemRegs) +
742 (j - 1) * sizeof(Vmxnet3_MemoryRegion);
743
744 cmdInfo = &shared->cu.cmdInfo;
745 cmdInfo->varConf.confVer = 1;
746 cmdInfo->varConf.confLen = size;
747 cmdInfo->varConf.confPA = hw->memRegsPA;
748
749 return 0;
750 }
751
752 static int
753 vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
754 {
755 struct rte_eth_conf port_conf = dev->data->dev_conf;
756 struct vmxnet3_hw *hw = dev->data->dev_private;
757 struct rte_intr_handle *intr_handle = dev->intr_handle;
758 uint32_t mtu = dev->data->mtu;
759 Vmxnet3_DriverShared *shared = hw->shared;
760 Vmxnet3_DSDevRead *devRead = &shared->devRead;
761 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
762 uint32_t i;
763 int ret;
764
765 hw->mtu = mtu;
766
767 shared->magic = VMXNET3_REV1_MAGIC;
768 devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
769
770 /* Setting up Guest OS information */
771 devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
772 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
773 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
774 devRead->misc.driverInfo.vmxnet3RevSpt = 1;
775 devRead->misc.driverInfo.uptVerSpt = 1;
776
777 devRead->misc.mtu = rte_le_to_cpu_32(mtu);
778 devRead->misc.queueDescPA = hw->queueDescPA;
779 devRead->misc.queueDescLen = hw->queue_desc_len;
780 devRead->misc.numTxQueues = hw->num_tx_queues;
781 devRead->misc.numRxQueues = hw->num_rx_queues;
782
783 for (i = 0; i < hw->num_tx_queues; i++) {
784 Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
785 vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];
786
787 txq->shared = &hw->tqd_start[i];
788
789 tqd->ctrl.txNumDeferred = 0;
790 tqd->ctrl.txThreshold = 1;
791 tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
792 tqd->conf.compRingBasePA = txq->comp_ring.basePA;
793 tqd->conf.dataRingBasePA = txq->data_ring.basePA;
794
795 tqd->conf.txRingSize = txq->cmd_ring.size;
796 tqd->conf.compRingSize = txq->comp_ring.size;
797 tqd->conf.dataRingSize = txq->data_ring.size;
798 tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
799
800 if (hw->intr.lsc_only)
801 tqd->conf.intrIdx = 1;
802 else
803 tqd->conf.intrIdx =
804 rte_intr_vec_list_index_get(intr_handle,
805 i);
806 tqd->status.stopped = TRUE;
807 tqd->status.error = 0;
808 memset(&tqd->stats, 0, sizeof(tqd->stats));
809 }
810
811 for (i = 0; i < hw->num_rx_queues; i++) {
812 Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
813 vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
814
815 rxq->shared = &hw->rqd_start[i];
816
817 rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
818 rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
819 rqd->conf.compRingBasePA = rxq->comp_ring.basePA;
820
821 rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size;
822 rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
823 rqd->conf.compRingSize = rxq->comp_ring.size;
824
825 if (hw->intr.lsc_only)
826 rqd->conf.intrIdx = 1;
827 else
828 rqd->conf.intrIdx =
829 rte_intr_vec_list_index_get(intr_handle,
830 i);
831 rqd->status.stopped = TRUE;
832 rqd->status.error = 0;
833 memset(&rqd->stats, 0, sizeof(rqd->stats));
834 }
835
836 /* intr settings */
837 devRead->intrConf.autoMask = hw->intr.mask_mode == VMXNET3_IMM_AUTO;
838 devRead->intrConf.numIntrs = hw->intr.num_intrs;
839 for (i = 0; i < hw->intr.num_intrs; i++)
840 devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];
841
842 devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
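/* Keep all interrupts masked here; vmxnet3_dev_start() unmasks them once the device is activated */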
843 devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
844
845 /* Start with no VMXNET3_RXM_xxx bits set in rxMode */
846 devRead->rxFilterConf.rxMode = 0;
847
848 /* Setting up feature flags */
849 if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
850 devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
851
852 if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
853 devRead->misc.uptFeatures |= VMXNET3_F_LRO;
854 devRead->misc.maxNumRxSG = 0;
855 }
856
857 if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
858 ret = vmxnet3_rss_configure(dev);
859 if (ret != VMXNET3_SUCCESS)
860 return ret;
861
862 devRead->misc.uptFeatures |= VMXNET3_F_RSS;
863 devRead->rssConfDesc.confVer = 1;
864 devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
865 devRead->rssConfDesc.confPA = hw->rss_confPA;
866 }
867
868 ret = vmxnet3_dev_vlan_offload_set(dev,
869 RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
870 if (ret)
871 return ret;
872
873 vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);
874
875 return VMXNET3_SUCCESS;
876 }
877
878 /*
879 * Configure device link speed and setup link.
880 * Must be called after eth_vmxnet3_dev_init, otherwise it might fail.
881 * It returns 0 on success.
882 */
883 static int
884 vmxnet3_dev_start(struct rte_eth_dev *dev)
885 {
886 int ret;
887 struct vmxnet3_hw *hw = dev->data->dev_private;
888
889 PMD_INIT_FUNC_TRACE();
890
891 /* Save stats before it is reset by CMD_ACTIVATE */
892 vmxnet3_hw_stats_save(hw);
893
894 /* configure MSI-X */
895 ret = vmxnet3_configure_msix(dev);
896 if (ret < 0) {
897 /* revert to lsc only */
898 hw->intr.num_intrs = 2;
899 hw->intr.lsc_only = TRUE;
900 }
901
902 ret = vmxnet3_setup_driver_shared(dev);
903 if (ret != VMXNET3_SUCCESS)
904 return ret;
905
906 /* Exchange shared data with device */
907 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
908 VMXNET3_GET_ADDR_LO(hw->sharedPA));
909 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
910 VMXNET3_GET_ADDR_HI(hw->sharedPA));
911
912 /* Activate device by register write */
913 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
914 ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
915
916 if (ret != 0) {
917 PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
918 return -EINVAL;
919 }
920
921 /* Setup memory region for rx buffers */
922 ret = vmxnet3_dev_setup_memreg(dev);
923 if (ret == 0) {
924 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
925 VMXNET3_CMD_REGISTER_MEMREGS);
926 ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
927 if (ret != 0)
928 PMD_INIT_LOG(DEBUG,
929 "Failed in setup memory region cmd\n");
930 ret = 0;
931 } else {
932 PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
933 }
934
935 if (VMXNET3_VERSION_GE_4(hw) &&
936 dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
937 /* Check for additional RSS */
938 ret = vmxnet3_v4_rss_configure(dev);
939 if (ret != VMXNET3_SUCCESS) {
940 PMD_INIT_LOG(ERR, "Failed to configure v4 RSS");
941 return ret;
942 }
943 }
944
945 /*
946 * Load RX queues with blank mbufs and update next2fill index for device
947 * Update RxMode of the device
948 */
949 ret = vmxnet3_dev_rxtx_init(dev);
950 if (ret != VMXNET3_SUCCESS) {
951 PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
952 return ret;
953 }
954
955 hw->adapter_stopped = FALSE;
956
957 /* Setting proper Rx Mode and issue Rx Mode Update command */
958 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
959
960 /* Setup interrupt callback */
961 rte_intr_callback_register(dev->intr_handle,
962 vmxnet3_interrupt_handler, dev);
963
964 if (rte_intr_enable(dev->intr_handle) < 0) {
965 PMD_INIT_LOG(ERR, "interrupt enable failed");
966 return -EIO;
967 }
968
969 /* enable all intrs */
970 vmxnet3_enable_all_intrs(hw);
971
972 vmxnet3_process_events(dev);
973
974 /*
975 * Update link state from device since this won't be
976 * done upon starting with lsc in use. This is done
977 * only after enabling interrupts to avoid any race
978 * where the link state could change without an
979 * interrupt being fired.
980 */
981 __vmxnet3_dev_link_update(dev, 0);
982
983 return VMXNET3_SUCCESS;
984 }
985
986 /*
987 * Stop device: disable rx and tx functions to allow for reconfiguring.
988 */
989 static int
990 vmxnet3_dev_stop(struct rte_eth_dev *dev)
991 {
992 struct rte_eth_link link;
993 struct vmxnet3_hw *hw = dev->data->dev_private;
994 struct rte_intr_handle *intr_handle = dev->intr_handle;
995 int ret;
996
997 PMD_INIT_FUNC_TRACE();
998
999 if (hw->adapter_stopped == 1) {
1000 PMD_INIT_LOG(DEBUG, "Device already stopped.");
1001 return 0;
1002 }
1003
1004 do {
1005 /* Unregister has lock to make sure there is no running cb.
1006 * This has to happen first since vmxnet3_interrupt_handler
1007 * reenables interrupts by calling vmxnet3_enable_intr
1008 */
1009 ret = rte_intr_callback_unregister(intr_handle,
1010 vmxnet3_interrupt_handler,
1011 (void *)-1);
1012 } while (ret == -EAGAIN);
1013
1014 if (ret < 0)
1015 PMD_DRV_LOG(ERR, "Error attempting to unregister intr cb: %d",
1016 ret);
1017
1018 PMD_INIT_LOG(DEBUG, "Disabled %d intr callbacks", ret);
1019
1020 /* disable interrupts */
1021 vmxnet3_disable_all_intrs(hw);
1022
1023 rte_intr_disable(intr_handle);
1024
1025 /* Clean datapath event and queue/vector mapping */
1026 rte_intr_efd_disable(intr_handle);
1027 rte_intr_vec_list_free(intr_handle);
1028
1029 /* quiesce the device first */
1030 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
1031 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
1032 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
1033
1034 /* reset the device */
1035 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
1036 PMD_INIT_LOG(DEBUG, "Device reset.");
1037
1038 vmxnet3_dev_clear_queues(dev);
1039
1040 /* Clear recorded link status */
1041 memset(&link, 0, sizeof(link));
1042 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1043 link.link_speed = RTE_ETH_SPEED_NUM_10G;
1044 link.link_autoneg = RTE_ETH_LINK_FIXED;
1045 rte_eth_linkstatus_set(dev, &link);
1046
1047 hw->adapter_stopped = 1;
1048 dev->data->dev_started = 0;
1049
1050 return 0;
1051 }
1052
1053 static void
1054 vmxnet3_free_queues(struct rte_eth_dev *dev)
1055 {
1056 int i;
1057
1058 PMD_INIT_FUNC_TRACE();
1059
1060 for (i = 0; i < dev->data->nb_rx_queues; i++)
1061 vmxnet3_dev_rx_queue_release(dev, i);
1062 dev->data->nb_rx_queues = 0;
1063
1064 for (i = 0; i < dev->data->nb_tx_queues; i++)
1065 vmxnet3_dev_tx_queue_release(dev, i);
1066 dev->data->nb_tx_queues = 0;
1067 }
1068
1069 /*
1070 * Reset and stop device.
1071 */
1072 static int
1073 vmxnet3_dev_close(struct rte_eth_dev *dev)
1074 {
1075 int ret;
1076 PMD_INIT_FUNC_TRACE();
1077 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1078 return 0;
1079
1080 ret = vmxnet3_dev_stop(dev);
1081 vmxnet3_free_queues(dev);
1082
1083 return ret;
1084 }
1085
1086 static int
1087 vmxnet3_dev_reset(struct rte_eth_dev *dev)
1088 {
1089 int ret;
1090
1091 ret = eth_vmxnet3_dev_uninit(dev);
1092 if (ret)
1093 return ret;
1094 ret = eth_vmxnet3_dev_init(dev);
1095 return ret;
1096 }
1097
1098 static void
1099 vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1100 struct UPT1_TxStats *res)
1101 {
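/* Hardware counters are cleared by CMD_ACTIVATE, so add the values saved in vmxnet3_hw_stats_save() to the live queue counters */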
1102 #define VMXNET3_UPDATE_TX_STAT(h, i, f, r) \
1103 ((r)->f = (h)->tqd_start[(i)].stats.f + \
1104 (h)->saved_tx_stats[(i)].f)
1105
1106 VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
1107 VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
1108 VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
1109 VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
1110 VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
1111 VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
1112 VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
1113 VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);
1114
1115 #undef VMXNET3_UPDATE_TX_STAT
1116 }
1117
1118 static void
1119 vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1120 struct UPT1_RxStats *res)
1121 {
1122 #define VMXNET3_UPDATE_RX_STAT(h, i, f, r) \
1123 ((r)->f = (h)->rqd_start[(i)].stats.f + \
1124 (h)->saved_rx_stats[(i)].f)
1125
1126 VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
1127 VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
1128 VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
1129 VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
1130 VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
1131 VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
1132 VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
1133 VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);
1134
1135 #undef VMXNET3_UPDATE_RX_STAT
1136 }
1137
1138 static void
1139 vmxnet3_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1140 struct UPT1_TxStats *res)
1141 {
1142 vmxnet3_hw_tx_stats_get(hw, q, res);
1143
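/* Subtract the snapshot taken at the last stats_reset so the reported counters start from zero */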
1144 #define VMXNET3_REDUCE_SNAPSHOT_TX_STAT(h, i, f, r) \
1145 ((r)->f -= (h)->snapshot_tx_stats[(i)].f)
1146
1147 VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastPktsTxOK, res);
1148 VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastPktsTxOK, res);
1149 VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastPktsTxOK, res);
1150 VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastBytesTxOK, res);
1151 VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastBytesTxOK, res);
1152 VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastBytesTxOK, res);
1153 VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxError, res);
1154 VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxDiscard, res);
1155
1156 #undef VMXNET3_REDUCE_SNAPSHOT_TX_STAT
1157 }
1158
1159 static void
1160 vmxnet3_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1161 struct UPT1_RxStats *res)
1162 {
1163 vmxnet3_hw_rx_stats_get(hw, q, res);
1164
1165 #define VMXNET3_REDUCE_SNAPSHOT_RX_STAT(h, i, f, r) \
1166 ((r)->f -= (h)->snapshot_rx_stats[(i)].f)
1167
1168 VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastPktsRxOK, res);
1169 VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastPktsRxOK, res);
1170 VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastPktsRxOK, res);
1171 VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastBytesRxOK, res);
1172 VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastBytesRxOK, res);
1173 VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastBytesRxOK, res);
1174 VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxError, res);
1175 VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxOutOfBuf, res);
1176
1177 #undef VMXNET3_REDUCE_SNAPSHOT_RX_STAT
1178 }
1179
1180 static void
1181 vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
1182 {
1183 unsigned int i;
1184
1185 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1186
1187 RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
1188
1189 for (i = 0; i < hw->num_tx_queues; i++)
1190 vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
1191 for (i = 0; i < hw->num_rx_queues; i++)
1192 vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
1193 }
1194
1195 static int
1196 vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
1197 struct rte_eth_xstat_name *xstats_names,
1198 unsigned int n)
1199 {
1200 unsigned int i, t, count = 0;
1201 unsigned int nstats =
1202 dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
1203 dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
1204
1205 if (!xstats_names || n < nstats)
1206 return nstats;
1207
1208 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1209 if (!dev->data->rx_queues[i])
1210 continue;
1211
1212 for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
1213 snprintf(xstats_names[count].name,
1214 sizeof(xstats_names[count].name),
1215 "rx_q%u_%s", i,
1216 vmxnet3_rxq_stat_strings[t].name);
1217 count++;
1218 }
1219 }
1220
1221 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1222 if (!dev->data->tx_queues[i])
1223 continue;
1224
1225 for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
1226 snprintf(xstats_names[count].name,
1227 sizeof(xstats_names[count].name),
1228 "tx_q%u_%s", i,
1229 vmxnet3_txq_stat_strings[t].name);
1230 count++;
1231 }
1232 }
1233
1234 return count;
1235 }
1236
1237 static int
1238 vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1239 unsigned int n)
1240 {
1241 unsigned int i, t, count = 0;
1242 unsigned int nstats =
1243 dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
1244 dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
1245
1246 if (n < nstats)
1247 return nstats;
1248
1249 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1250 struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
1251
1252 if (rxq == NULL)
1253 continue;
1254
1255 for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
1256 xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
1257 vmxnet3_rxq_stat_strings[t].offset);
1258 xstats[count].id = count;
1259 count++;
1260 }
1261 }
1262
1263 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1264 struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
1265
1266 if (txq == NULL)
1267 continue;
1268
1269 for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
1270 xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
1271 vmxnet3_txq_stat_strings[t].offset);
1272 xstats[count].id = count;
1273 count++;
1274 }
1275 }
1276
1277 return count;
1278 }
1279
1280 static int
1281 vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1282 {
1283 unsigned int i;
1284 struct vmxnet3_hw *hw = dev->data->dev_private;
1285 struct UPT1_TxStats txStats;
1286 struct UPT1_RxStats rxStats;
1287
1288 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1289
1290 RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
1291 for (i = 0; i < hw->num_tx_queues; i++) {
1292 vmxnet3_tx_stats_get(hw, i, &txStats);
1293
1294 stats->q_opackets[i] = txStats.ucastPktsTxOK +
1295 txStats.mcastPktsTxOK +
1296 txStats.bcastPktsTxOK;
1297
1298 stats->q_obytes[i] = txStats.ucastBytesTxOK +
1299 txStats.mcastBytesTxOK +
1300 txStats.bcastBytesTxOK;
1301
1302 stats->opackets += stats->q_opackets[i];
1303 stats->obytes += stats->q_obytes[i];
1304 stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
1305 }
1306
1307 RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
1308 for (i = 0; i < hw->num_rx_queues; i++) {
1309 vmxnet3_rx_stats_get(hw, i, &rxStats);
1310
1311 stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
1312 rxStats.mcastPktsRxOK +
1313 rxStats.bcastPktsRxOK;
1314
1315 stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
1316 rxStats.mcastBytesRxOK +
1317 rxStats.bcastBytesRxOK;
1318
1319 stats->ipackets += stats->q_ipackets[i];
1320 stats->ibytes += stats->q_ibytes[i];
1321
1322 stats->q_errors[i] = rxStats.pktsRxError;
1323 stats->ierrors += rxStats.pktsRxError;
1324 stats->imissed += rxStats.pktsRxOutOfBuf;
1325 }
1326
1327 return 0;
1328 }
1329
1330 static int
1331 vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
1332 {
1333 unsigned int i;
1334 struct vmxnet3_hw *hw = dev->data->dev_private;
1335 struct UPT1_TxStats txStats = {0};
1336 struct UPT1_RxStats rxStats = {0};
1337
1338 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1339
1340 RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
1341
1342 for (i = 0; i < hw->num_tx_queues; i++) {
1343 vmxnet3_hw_tx_stats_get(hw, i, &txStats);
1344 memcpy(&hw->snapshot_tx_stats[i], &txStats,
1345 sizeof(hw->snapshot_tx_stats[0]));
1346 }
1347 for (i = 0; i < hw->num_rx_queues; i++) {
1348 vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
1349 memcpy(&hw->snapshot_rx_stats[i], &rxStats,
1350 sizeof(hw->snapshot_rx_stats[0]));
1351 }
1352
1353 return 0;
1354 }
1355
1356 static int
1357 vmxnet3_dev_info_get(struct rte_eth_dev *dev,
1358 struct rte_eth_dev_info *dev_info)
1359 {
1360 struct vmxnet3_hw *hw = dev->data->dev_private;
1361
1362 dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
1363 dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
1364 dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
1365 dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
1366 dev_info->min_mtu = VMXNET3_MIN_MTU;
1367 dev_info->max_mtu = VMXNET3_MAX_MTU;
1368 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
1369 dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
1370
1371 dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
1372
1373 if (VMXNET3_VERSION_GE_4(hw)) {
1374 dev_info->flow_type_rss_offloads |= VMXNET3_V4_RSS_MASK;
1375 }
1376
1377 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1378 .nb_max = VMXNET3_RX_RING_MAX_SIZE,
1379 .nb_min = VMXNET3_DEF_RX_RING_SIZE,
1380 .nb_align = 1,
1381 };
1382
1383 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1384 .nb_max = VMXNET3_TX_RING_MAX_SIZE,
1385 .nb_min = VMXNET3_DEF_TX_RING_SIZE,
1386 .nb_align = 1,
1387 .nb_seg_max = VMXNET3_TX_MAX_SEG,
1388 .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
1389 };
1390
1391 dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
1392 dev_info->rx_queue_offload_capa = 0;
1393 dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
1394 dev_info->tx_queue_offload_capa = 0;
1395
1396 return 0;
1397 }
1398
1399 static const uint32_t *
1400 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1401 {
1402 static const uint32_t ptypes[] = {
1403 RTE_PTYPE_L3_IPV4_EXT,
1404 RTE_PTYPE_L3_IPV4,
1405 RTE_PTYPE_UNKNOWN
1406 };
1407
1408 if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
1409 return ptypes;
1410 return NULL;
1411 }
1412
1413 static int
1414 vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, __rte_unused uint16_t mtu)
1415 {
1416 if (dev->data->dev_started) {
1417 PMD_DRV_LOG(ERR, "Port %d must be stopped to configure MTU",
1418 dev->data->port_id);
1419 return -EBUSY;
1420 }
1421
1422 return 0;
1423 }
1424
1425 static int
1426 vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1427 {
1428 struct vmxnet3_hw *hw = dev->data->dev_private;
1429
1430 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
1431 vmxnet3_write_mac(hw, mac_addr->addr_bytes);
1432 return 0;
1433 }
1434
1435 /* return 0 means link status changed, -1 means not changed */
1436 static int
1437 __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
1438 __rte_unused int wait_to_complete)
1439 {
1440 struct vmxnet3_hw *hw = dev->data->dev_private;
1441 struct rte_eth_link link;
1442 uint32_t ret;
1443
1444 memset(&link, 0, sizeof(link));
1445
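/* Bit 0 of the GET_LINK result is the link-up flag; the PMD always reports a fixed 10G full-duplex link */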
1446 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
1447 ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1448
1449 if (ret & 0x1)
1450 link.link_status = RTE_ETH_LINK_UP;
1451 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1452 link.link_speed = RTE_ETH_SPEED_NUM_10G;
1453 link.link_autoneg = RTE_ETH_LINK_FIXED;
1454
1455 return rte_eth_linkstatus_set(dev, &link);
1456 }
1457
1458 static int
1459 vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1460 {
1461 /* Link status doesn't change for stopped dev */
1462 if (dev->data->dev_started == 0)
1463 return -1;
1464
1465 return __vmxnet3_dev_link_update(dev, wait_to_complete);
1466 }
1467
1468 /* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
1469 static void
1470 vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
1471 {
1472 struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1473
1474 if (set)
1475 rxConf->rxMode = rxConf->rxMode | feature;
1476 else
1477 rxConf->rxMode = rxConf->rxMode & (~feature);
1478
1479 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
1480 }
1481
1482 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
1483 static int
1484 vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
1485 {
1486 struct vmxnet3_hw *hw = dev->data->dev_private;
1487 uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1488
1489 memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
1490 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
1491
1492 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1493 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1494
1495 return 0;
1496 }
1497
1498 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
1499 static int
1500 vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
1501 {
1502 struct vmxnet3_hw *hw = dev->data->dev_private;
1503 uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1504 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1505
1506 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1507 memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1508 else
1509 memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
1510 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
1511 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1512 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1513
1514 return 0;
1515 }
1516
1517 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
1518 static int
1519 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
1520 {
1521 struct vmxnet3_hw *hw = dev->data->dev_private;
1522
1523 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
1524
1525 return 0;
1526 }
1527
1528 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
1529 static int
1530 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
1531 {
1532 struct vmxnet3_hw *hw = dev->data->dev_private;
1533
1534 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
1535
1536 return 0;
1537 }
1538
1539 /* Enable/disable filter on vlan */
1540 static int
1541 vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
1542 {
1543 struct vmxnet3_hw *hw = dev->data->dev_private;
1544 struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1545 uint32_t *vf_table = rxConf->vfTable;
1546
1547 /* save state for restore */
1548 if (on)
1549 VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
1550 else
1551 VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
1552
1553 /* don't change active filter if in promiscuous mode */
1554 if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
1555 return 0;
1556
1557 /* set in hardware */
1558 if (on)
1559 VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
1560 else
1561 VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
1562
1563 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1564 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1565 return 0;
1566 }
1567
1568 static int
1569 vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1570 {
1571 struct vmxnet3_hw *hw = dev->data->dev_private;
1572 Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
1573 uint32_t *vf_table = devRead->rxFilterConf.vfTable;
1574 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1575
1576 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1577 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1578 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1579 else
1580 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1581
1582 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1583 VMXNET3_CMD_UPDATE_FEATURE);
1584 }
1585
1586 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1587 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1588 memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1589 else
1590 memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
1591
1592 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1593 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1594 }
1595
1596 return 0;
1597 }
1598
1599 static void
1600 vmxnet3_process_events(struct rte_eth_dev *dev)
1601 {
1602 struct vmxnet3_hw *hw = dev->data->dev_private;
1603 uint32_t events = hw->shared->ecr;
1604
1605 if (!events)
1606 return;
1607
1608 /*
1609 * ECR bits when written with 1b are cleared. Hence write
1610 * events back to ECR so that the bits which were set will be reset.
1611 */
1612 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
1613
1614 /* Check if link state has changed */
1615 if (events & VMXNET3_ECR_LINK) {
1616 PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
1617 if (vmxnet3_dev_link_update(dev, 0) == 0)
1618 rte_eth_dev_callback_process(dev,
1619 RTE_ETH_EVENT_INTR_LSC,
1620 NULL);
1621 }
1622
1623 /* Check if there is an error on xmit/recv queues */
1624 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
1625 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1626 VMXNET3_CMD_GET_QUEUE_STATUS);
1627
1628 if (hw->tqd_start->status.stopped)
1629 PMD_DRV_LOG(ERR, "tq error 0x%x",
1630 hw->tqd_start->status.error);
1631
1632 if (hw->rqd_start->status.stopped)
1633 PMD_DRV_LOG(ERR, "rq error 0x%x",
1634 hw->rqd_start->status.error);
1635
1636 /* The device has to be reset to recover from a queue error */
1638 }
1639
1640 if (events & VMXNET3_ECR_DIC)
1641 PMD_DRV_LOG(DEBUG, "Device implementation change event.");
1642
1643 if (events & VMXNET3_ECR_DEBUG)
1644 PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
1645 }
1646
1647 static void
1648 vmxnet3_interrupt_handler(void *param)
1649 {
1650 struct rte_eth_dev *dev = param;
1651 struct vmxnet3_hw *hw = dev->data->dev_private;
1652 Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
1653 uint32_t events;
1654
1655 PMD_INIT_FUNC_TRACE();
1656 vmxnet3_disable_intr(hw, devRead->intrConf.eventIntrIdx);
1657
1658 events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
1659 if (events == 0)
1660 goto done;
1661
1662 RTE_LOG(DEBUG, PMD, "Reading events: 0x%X", events);
1663 vmxnet3_process_events(dev);
1664 done:
1665 vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
1666 }
1667
1668 static int
1669 vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1670 {
1671 struct vmxnet3_hw *hw = dev->data->dev_private;
1672
1673 vmxnet3_enable_intr(hw,
1674 rte_intr_vec_list_index_get(dev->intr_handle,
1675 queue_id));
1676
1677 return 0;
1678 }
1679
1680 static int
1681 vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1682 {
1683 struct vmxnet3_hw *hw = dev->data->dev_private;
1684
1685 vmxnet3_disable_intr(hw,
1686 rte_intr_vec_list_index_get(dev->intr_handle, queue_id));
1687
1688 return 0;
1689 }
1690
1691 RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
1692 RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
1693 RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
1694 RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_init, init, NOTICE);
1695 RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_driver, driver, NOTICE);
1696