/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev_vf.c
 *
 * Netronome vNIC VF DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_alarm.h>

#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"

#include "nfp_common.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_ctrl.h"

static void nfp_netvf_read_mac(struct nfp_net_hw *hw);
static int nfp_netvf_start(struct rte_eth_dev *dev);
static int nfp_netvf_stop(struct rte_eth_dev *dev);
static int nfp_netvf_set_link_up(struct rte_eth_dev *dev);
static int nfp_netvf_set_link_down(struct rte_eth_dev *dev);
static int nfp_netvf_close(struct rte_eth_dev *dev);
static int nfp_netvf_init(struct rte_eth_dev *eth_dev);
static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev);
static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev);

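/*
 * Read the vNIC MAC address from the config BAR. The firmware exposes
 * it at NFP_NET_CFG_MACADDR as two 32-bit big-endian words: bytes 0-3
 * in the first word and bytes 4-5 in the upper half of the second.
 */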
static void
nfp_netvf_read_mac(struct nfp_net_hw *hw)
{
	uint32_t tmp;

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
	memcpy(&hw->mac_addr[0], &tmp, 4);

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
	memcpy(&hw->mac_addr[4], &tmp, 2);
}

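/*
 * dev_start callback: enable the configured Tx/Rx queues, wire up the
 * Rx queue interrupts if requested, apply offload and RSS settings,
 * then enable the vNIC through the firmware reconfig mechanism and
 * populate the Rx free lists.
 */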
static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (rte_intr_type_get(intr_handle) ==
						RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					     "supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

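	/*
	 * Commit the new control word; nfp_net_reconfig() signals the
	 * firmware and waits for the update to be acknowledged.
	 */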
	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for the configured rx queues.
	 * This requires the queues being enabled beforehand.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app exits and
	 * then the system releases all the memory allocated, even memory
	 * coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exit, but it is better if we tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

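/*
 * dev_stop callback: disable all queues in the device and reset the
 * Tx/Rx software rings; the queue memory itself is only released by
 * the queue release callbacks at close time.
 */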
static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;

	PMD_INIT_LOG(DEBUG, "Stop");

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
	}

	return 0;
}

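/* Set the link up. Link control is not supported on VFs. */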
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Reset and stop the device. The device cannot be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
		nfp_net_tx_queue_release(dev, i);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
		nfp_net_rx_queue_release(dev, i);
	}

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
				     nfp_net_dev_interrupt_handler,
				     (void *)dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			     (void *)dev);

	/*
	 * The ixgbe PMD disables the pcie master on the
	 * device. The i40e does not...
	 */

	return 0;
}

/* Initialise and register VF driver with DPDK Application */
static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_netvf_start,
	.dev_stop		= nfp_netvf_stop,
	.dev_set_link_up	= nfp_netvf_set_link_up,
	.dev_set_link_down	= nfp_netvf_set_link_down,
	.dev_close		= nfp_netvf_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.mac_addr_set		= nfp_set_mac_addr,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.tx_queue_setup		= nfp_net_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
	.rx_queue_intr_enable	= nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable	= nfp_rx_queue_intr_disable,
};

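/*
 * dev_init callback: check the 40-bit DMA constraint, map the control
 * and queue BARs, read the firmware capabilities, set up the port MAC
 * address and register the link-state-change interrupt handler.
 */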
static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;

	uint64_t tx_bar_off = 0, rx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;
	int err;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* NFP cannot handle DMA addresses requiring more than 40 bits */
	if (rte_mem_check_dma_mask(40)) {
		RTE_LOG(ERR, PMD, "device %s cannot be used: "
				  "restricted dma mask to 40 bits!\n",
				  pci_dev->device.name);
		return -ENODEV;
	}

	hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
	eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
			"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP6000_VF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		err = -ENODEV;
		goto dev_err_ctrl_map;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

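	/*
	 * Each queue controller pair occupies NFP_QCP_QUEUE_ADDR_SZ bytes,
	 * so the start-queue indexes read above translate directly into
	 * byte offsets into the queue BAR (mem_resource[2]).
	 */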
	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
		     tx_bar_off;
	hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
		     rx_bar_off;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

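	/*
	 * NFD versions before 2 use a fixed Rx packet offset; later
	 * versions report the offset through the config BAR.
	 */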
	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
			   NFD_CFG_MAJOR_VERSION_of(hw->ver),
			   NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);

	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
		     hw->cap & NFP_NET_CFG_CTRL_PROMISC   ? "PROMISC "   : "",
		     hw->cap & NFP_NET_CFG_CTRL_L2BC      ? "L2BCFILT "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_L2MC      ? "L2MCFILT "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXCSUM    ? "RXCSUM "    : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXCSUM    ? "TXCSUM "    : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXVLAN    ? "RXVLAN "    : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXVLAN    ? "TXVLAN "    : "",
		     hw->cap & NFP_NET_CFG_CTRL_SCATTER   ? "SCATTER "   : "",
		     hw->cap & NFP_NET_CFG_CTRL_GATHER    ? "GATHER "    : "",
		     hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO       ? "TSO "       : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO2      ? "TSOv2 "     : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS       ? "RSS "       : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS2      ? "RSSv2 "     : "");

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
		     hw->max_rx_queues, hw->max_tx_queues);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		err = -ENOMEM;
		goto dev_err_queues_map;
	}

	nfp_netvf_read_mac(hw);

	tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d",
				   port);
		/* Using random mac addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
		     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(pci_dev->intr_handle,
					   nfp_net_dev_interrupt_handler,
					   (void *)eth_dev);
		/* Telling the firmware about the LSC interrupt entry */
		nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
		/* Recording current stats counters values */
		nfp_net_stats_reset(eth_dev);
	}

	return 0;

dev_err_queues_map:
	nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
	nfp_cpp_area_free(hw->ctrl_area);

	return err;
}

static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
	/* VF cleanup, just free private port data */
	return nfp_netvf_close(eth_dev);
}

static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct nfp_net_adapter), nfp_netvf_init);
}

static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}

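/*
 * PCI driver definition: the VF PMD needs its BARs mapped by the PCI
 * bus driver and supports link status change (LSC) interrupts.
 */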
static struct rte_pci_driver rte_nfp_net_vf_pmd = {
	.id_table = pci_id_nfp_vf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_nfp_vf_pci_probe,
	.remove = eth_nfp_vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */