/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Cesnet
 * Copyright(c) 2019 Netcope Technologies, a.s. <[email protected]>
 * All rights reserved.
 */

#include <nfb/nfb.h>
#include <nfb/ndp.h>
#include <netcope/rxmac.h>
#include <netcope/txmac.h>

#include <ethdev_pci.h>
#include <rte_kvargs.h>

#include "nfb_stats.h"
#include "nfb_rx.h"
#include "nfb_tx.h"
#include "nfb_rxmode.h"
#include "nfb.h"

/**
 * Default MAC addr
 */
static const struct rte_ether_addr eth_addr = {
	.addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
};

/**
 * Open all RX MAC components
 *
 * @param nfb
 *   Pointer to nfb device.
 * @param[out] rxmac
 *   Pointer to output array of nc_rxmac
 * @param[out] max_rxmac
 *   Pointer to output max index of rxmac
 */
static void
nfb_nc_rxmac_init(struct nfb_device *nfb,
		struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC],
		uint16_t *max_rxmac)
{
	*max_rxmac = 0;
	while ((rxmac[*max_rxmac] = nc_rxmac_open_index(nfb, *max_rxmac)))
		++(*max_rxmac);
}

/**
 * Open all TX MAC components
 *
 * @param nfb
 *   Pointer to nfb device.
 * @param[out] txmac
 *   Pointer to output array of nc_txmac
 * @param[out] max_txmac
 *   Pointer to output max index of txmac
 */
static void
nfb_nc_txmac_init(struct nfb_device *nfb,
		struct nc_txmac *txmac[RTE_MAX_NC_TXMAC],
		uint16_t *max_txmac)
{
	*max_txmac = 0;
	while ((txmac[*max_txmac] = nc_txmac_open_index(nfb, *max_txmac)))
		++(*max_txmac);
}

/**
 * Close all RX MAC components
 *
 * @param rxmac
 *   Pointer to array of nc_rxmac
 * @param max_rxmac
 *   Maximum index of rxmac
 */
static void
nfb_nc_rxmac_deinit(struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC],
		uint16_t max_rxmac)
{
	uint16_t i;
	for (i = 0; i < max_rxmac; i++) {
		nc_rxmac_close(rxmac[i]);
		rxmac[i] = NULL;
	}
}

/**
 * Close all TX MAC components
 *
 * @param txmac
 *   Pointer to array of nc_txmac
 * @param max_txmac
 *   Maximum index of txmac
 */
static void
nfb_nc_txmac_deinit(struct nc_txmac *txmac[RTE_MAX_NC_TXMAC],
		uint16_t max_txmac)
{
	uint16_t i;
	for (i = 0; i < max_txmac; i++) {
		nc_txmac_close(txmac[i]);
		txmac[i] = NULL;
	}
}

/**
 * DPDK callback to start the device.
 *
 * Start device by starting all configured queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
nfb_eth_dev_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	uint16_t nb_rx = dev->data->nb_rx_queues;
	uint16_t nb_tx = dev->data->nb_tx_queues;

	for (i = 0; i < nb_rx; i++) {
		ret = nfb_eth_rx_queue_start(dev, i);
		if (ret != 0)
			goto err_rx;
	}

	for (i = 0; i < nb_tx; i++) {
		ret = nfb_eth_tx_queue_start(dev, i);
		if (ret != 0)
			goto err_tx;
	}

	return 0;

err_tx:
	for (i = 0; i < nb_tx; i++)
		nfb_eth_tx_queue_stop(dev, i);
err_rx:
	for (i = 0; i < nb_rx; i++)
		nfb_eth_rx_queue_stop(dev, i);
	return ret;
}

/**
 * DPDK callback to stop the device.
 *
 * Stop device by stopping all configured queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
static int
nfb_eth_dev_stop(struct rte_eth_dev *dev)
{
	uint16_t i;
	uint16_t nb_rx = dev->data->nb_rx_queues;
	uint16_t nb_tx = dev->data->nb_tx_queues;

	dev->data->dev_started = 0;

	for (i = 0; i < nb_tx; i++)
		nfb_eth_tx_queue_stop(dev, i);

	for (i = 0; i < nb_rx; i++)
		nfb_eth_rx_queue_stop(dev, i);

	return 0;
}

/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
nfb_eth_dev_configure(struct rte_eth_dev *dev)
{
	int ret;
	struct pmd_internals *internals = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
		ret = rte_mbuf_dyn_rx_timestamp_register
				(&nfb_timestamp_dynfield_offset,
				&nfb_timestamp_rx_dynflag);
		if (ret != 0) {
			RTE_LOG(ERR, PMD, "Cannot register Rx timestamp"
					" field/flag %d\n", ret);
			nfb_close(internals->nfb);
			return -rte_errno;
		}
	}

	return 0;
}

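/**
 * Get the minimum MAC address table size across all RX MAC components.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Number of MAC address slots usable on every RX MAC (at least 1).
 */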
static uint32_t
nfb_eth_get_max_mac_address_count(struct rte_eth_dev *dev)
{
	uint16_t i;
	uint32_t c;
	uint32_t ret = (uint32_t)-1;
	struct pmd_internals *internals = dev->data->dev_private;

	/*
	 * Go through all RX MAC components in firmware and find
	 * the minimal indicated space size for MAC addresses.
	 */
	for (i = 0; i < internals->max_rxmac; i++) {
		c = nc_rxmac_mac_address_count(internals->rxmac[i]);
		ret = RTE_MIN(c, ret);
	}

	/* The ethdev API requires at least 1 MAC address; report one even
	 * when no usable RX MAC is present.
	 */
	if (internals->max_rxmac == 0 || ret == 0)
		ret = 1;

	return ret;
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] dev_info
 *   Info structure output buffer.
 *
 * @return
 *   0 on success.
 */
static int
nfb_eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->max_mac_addrs = nfb_eth_get_max_mac_address_count(dev);

	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
	dev_info->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_TIMESTAMP;

	return 0;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
nfb_eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	uint16_t i;
	uint16_t nb_rx = dev->data->nb_rx_queues;
	uint16_t nb_tx = dev->data->nb_tx_queues;
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = nfb_eth_dev_stop(dev);

	nfb_nc_rxmac_deinit(internals->rxmac, internals->max_rxmac);
	nfb_nc_txmac_deinit(internals->txmac, internals->max_txmac);

	for (i = 0; i < nb_rx; i++) {
		nfb_eth_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < nb_tx; i++) {
		nfb_eth_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

	return ret;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
nfb_eth_link_update(struct rte_eth_dev *dev,
		int wait_to_complete __rte_unused)
{
	uint16_t i;
	struct nc_rxmac_status status;
	struct rte_eth_link link;
	memset(&link, 0, sizeof(link));

	struct pmd_internals *internals = dev->data->dev_private;

	status.speed = MAC_SPEED_UNKNOWN;

	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = RTE_ETH_LINK_SPEED_FIXED;

	if (internals->rxmac[0] != NULL) {
		nc_rxmac_read_status(internals->rxmac[0], &status);

		switch (status.speed) {
		case MAC_SPEED_10G:
			link.link_speed = RTE_ETH_SPEED_NUM_10G;
			break;
		case MAC_SPEED_40G:
			link.link_speed = RTE_ETH_SPEED_NUM_40G;
			break;
		case MAC_SPEED_100G:
			link.link_speed = RTE_ETH_SPEED_NUM_100G;
			break;
		default:
			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
			break;
		}
	}

	for (i = 0; i < internals->max_rxmac; ++i) {
		nc_rxmac_read_status(internals->rxmac[i], &status);

		if (status.enabled && status.link_up) {
			link.link_status = RTE_ETH_LINK_UP;
			break;
		}
	}

	rte_eth_linkstatus_set(dev, &link);

	return 0;
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
nfb_eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = (struct pmd_internals *)
		dev->data->dev_private;

	uint16_t i;
	for (i = 0; i < internals->max_rxmac; ++i)
		nc_rxmac_enable(internals->rxmac[i]);

	for (i = 0; i < internals->max_txmac; ++i)
		nc_txmac_enable(internals->txmac[i]);

	return 0;
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
nfb_eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = (struct pmd_internals *)
		dev->data->dev_private;

	uint16_t i;
	for (i = 0; i < internals->max_rxmac; ++i)
		nc_rxmac_disable(internals->rxmac[i]);

	for (i = 0; i < internals->max_txmac; ++i)
		nc_txmac_disable(internals->txmac[i]);

	return 0;
}

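/**
 * Convert a MAC address to the 48-bit integer format used by the RX MAC.
 *
 * @param mac_addr
 *   MAC address to convert.
 *
 * @return
 *   MAC address as an integer, most significant byte first.
 */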
static uint64_t
nfb_eth_mac_addr_conv(struct rte_ether_addr *mac_addr)
{
	int i;
	uint64_t res = 0;
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
		res <<= 8;
		res |= mac_addr->addr_bytes[i] & 0xFF;
	}
	return res;
}

/**
 * DPDK callback to set the primary MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
nfb_eth_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr)
{
	unsigned int i;
	uint64_t mac;
	struct rte_eth_dev_data *data = dev->data;
	struct pmd_internals *internals = (struct pmd_internals *)
		data->dev_private;

	mac = nfb_eth_mac_addr_conv(mac_addr);
	/* No real multi-port support yet; configure all RX MACs the same */
	for (i = 0; i < internals->max_rxmac; ++i)
		nc_rxmac_set_mac(internals->rxmac[i], 0, mac, 1);

	return 0;
}

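/**
 * DPDK callback to add a MAC address at the given table index.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 * @param index
 *   MAC address table index.
 * @param pool
 *   VMDq pool index (unused).
 *
 * @return
 *   0 on success.
 */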
static int
nfb_eth_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool __rte_unused)
{
	unsigned int i;
	uint64_t mac;
	struct rte_eth_dev_data *data = dev->data;
	struct pmd_internals *internals = (struct pmd_internals *)
		data->dev_private;

	mac = nfb_eth_mac_addr_conv(mac_addr);
	for (i = 0; i < internals->max_rxmac; ++i)
		nc_rxmac_set_mac(internals->rxmac[i], index, mac, 1);

	return 0;
}

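/**
 * DPDK callback to remove the MAC address at the given table index.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address table index to clear.
 */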
static void
nfb_eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	unsigned int i;
	struct rte_eth_dev_data *data = dev->data;
	struct pmd_internals *internals = (struct pmd_internals *)
		data->dev_private;

	for (i = 0; i < internals->max_rxmac; ++i)
		nc_rxmac_set_mac(internals->rxmac[i], index, 0, 0);
}

static const struct eth_dev_ops ops = {
	.dev_start = nfb_eth_dev_start,
	.dev_stop = nfb_eth_dev_stop,
	.dev_set_link_up = nfb_eth_dev_set_link_up,
	.dev_set_link_down = nfb_eth_dev_set_link_down,
	.dev_close = nfb_eth_dev_close,
	.dev_configure = nfb_eth_dev_configure,
	.dev_infos_get = nfb_eth_dev_info,
	.promiscuous_enable = nfb_eth_promiscuous_enable,
	.promiscuous_disable = nfb_eth_promiscuous_disable,
	.allmulticast_enable = nfb_eth_allmulticast_enable,
	.allmulticast_disable = nfb_eth_allmulticast_disable,
	.rx_queue_start = nfb_eth_rx_queue_start,
	.rx_queue_stop = nfb_eth_rx_queue_stop,
	.tx_queue_start = nfb_eth_tx_queue_start,
	.tx_queue_stop = nfb_eth_tx_queue_stop,
	.rx_queue_setup = nfb_eth_rx_queue_setup,
	.tx_queue_setup = nfb_eth_tx_queue_setup,
	.rx_queue_release = nfb_eth_rx_queue_release,
	.tx_queue_release = nfb_eth_tx_queue_release,
	.link_update = nfb_eth_link_update,
	.stats_get = nfb_eth_stats_get,
	.stats_reset = nfb_eth_stats_reset,
	.mac_addr_set = nfb_eth_mac_addr_set,
	.mac_addr_add = nfb_eth_mac_addr_add,
	.mac_addr_remove = nfb_eth_mac_addr_remove,
};

/**
 * DPDK callback to initialize an ethernet device
 *
 * @param dev
 *   Pointer to ethernet device structure
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
nfb_eth_dev_init(struct rte_eth_dev *dev)
{
	uint32_t mac_count;
	struct rte_eth_dev_data *data = dev->data;
	struct pmd_internals *internals = (struct pmd_internals *)
		data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_pci_addr *pci_addr = &pci_dev->addr;
	struct rte_ether_addr eth_addr_init;
	struct rte_kvargs *kvlist;

	RTE_LOG(INFO, PMD, "Initializing NFB device (" PCI_PRI_FMT ")\n",
		pci_addr->domain, pci_addr->bus, pci_addr->devid,
		pci_addr->function);

	snprintf(internals->nfb_dev, PATH_MAX,
		"/dev/nfb/by-pci-slot/" PCI_PRI_FMT,
		pci_addr->domain, pci_addr->bus, pci_addr->devid,
		pci_addr->function);

	/* Check validity of device args */
	if (dev->device->devargs != NULL &&
			dev->device->devargs->args != NULL &&
			strlen(dev->device->devargs->args) > 0) {
		kvlist = rte_kvargs_parse(dev->device->devargs->args,
					VALID_KEYS);
		if (kvlist == NULL) {
			RTE_LOG(ERR, PMD, "Failed to parse device arguments %s",
				dev->device->devargs->args);
			rte_kvargs_free(kvlist);
			return -EINVAL;
		}
		rte_kvargs_free(kvlist);
	}

	/*
	 * Get the number of available DMA RX and TX queues, which is the
	 * maximum number of queues that can be created, and store it in
	 * the private device data structure.
	 */
	internals->nfb = nfb_open(internals->nfb_dev);
	if (internals->nfb == NULL) {
		RTE_LOG(ERR, PMD, "nfb_open(): failed to open %s",
			internals->nfb_dev);
		return -EINVAL;
	}
	data->nb_rx_queues = ndp_get_rx_queue_available_count(internals->nfb);
	data->nb_tx_queues = ndp_get_tx_queue_available_count(internals->nfb);

	RTE_LOG(INFO, PMD, "Available NDP queues RX: %u TX: %u\n",
		data->nb_rx_queues, data->nb_tx_queues);

	nfb_nc_rxmac_init(internals->nfb,
		internals->rxmac,
		&internals->max_rxmac);
	nfb_nc_txmac_init(internals->nfb,
		internals->txmac,
		&internals->max_txmac);

	/* Set rx, tx burst functions */
	dev->rx_pkt_burst = nfb_eth_ndp_rx;
	dev->tx_pkt_burst = nfb_eth_ndp_tx;

	/* Set function callbacks for Ethernet API */
	dev->dev_ops = &ops;

	/* Get link state */
	nfb_eth_link_update(dev, 0);

	/* Allocate space for MAC addresses */
	mac_count = nfb_eth_get_max_mac_address_count(dev);
	data->mac_addrs = rte_zmalloc(data->name,
		sizeof(struct rte_ether_addr) * mac_count, RTE_CACHE_LINE_SIZE);
	if (data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
		nfb_close(internals->nfb);
		return -EINVAL;
	}

	rte_eth_random_addr(eth_addr_init.addr_bytes);
	eth_addr_init.addr_bytes[0] = eth_addr.addr_bytes[0];
	eth_addr_init.addr_bytes[1] = eth_addr.addr_bytes[1];
	eth_addr_init.addr_bytes[2] = eth_addr.addr_bytes[2];

	nfb_eth_mac_addr_set(dev, &eth_addr_init);
	rte_ether_addr_copy(&eth_addr_init, &dev->data->mac_addrs[0]);

	data->promiscuous = nfb_eth_promiscuous_get(dev);
	data->all_multicast = nfb_eth_allmulticast_get(dev);

	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	RTE_LOG(INFO, PMD, "NFB device ("
		PCI_PRI_FMT ") successfully initialized\n",
		pci_addr->domain, pci_addr->bus, pci_addr->devid,
		pci_addr->function);

	return 0;
}

/**
 * DPDK callback to uninitialize an ethernet device
 *
 * @param dev
 *   Pointer to ethernet device structure
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
nfb_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_pci_addr *pci_addr = &pci_dev->addr;

	nfb_eth_dev_close(dev);

	RTE_LOG(INFO, PMD, "NFB device ("
		PCI_PRI_FMT ") successfully uninitialized\n",
		pci_addr->domain, pci_addr->bus, pci_addr->devid,
		pci_addr->function);

	return 0;
}

static const struct rte_pci_id nfb_pci_id_table[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_40G2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_100G2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_200G2QL) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_SILICOM, PCI_DEVICE_ID_FB2CGG3) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_SILICOM, PCI_DEVICE_ID_FB2CGG3D) },
	{ .vendor_id = 0, }
};

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns Ethernet devices out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (nfb_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
nfb_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct pmd_internals), nfb_eth_dev_init);
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all Ethernet devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
nfb_eth_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfb_eth_dev_uninit);
}

static struct rte_pci_driver nfb_eth_driver = {
	.id_table = nfb_pci_id_table,
	.probe = nfb_eth_pci_probe,
	.remove = nfb_eth_pci_remove,
};

RTE_PMD_REGISTER_PCI(RTE_NFB_DRIVER_NAME, nfb_eth_driver);
RTE_PMD_REGISTER_PCI_TABLE(RTE_NFB_DRIVER_NAME, nfb_pci_id_table);
RTE_PMD_REGISTER_KMOD_DEP(RTE_NFB_DRIVER_NAME, "* nfb");