/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_common.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_service_component.h>
#include <rte_alarm.h>
#include "eal_firmware.h"

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"

#include "nfp_common.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_ctrl.h"
#include "nfp_cpp_bridge.h"


static int nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port);
static int nfp_net_start(struct rte_eth_dev *dev);
static int nfp_net_stop(struct rte_eth_dev *dev);
static int nfp_net_set_link_up(struct rte_eth_dev *dev);
static int nfp_net_set_link_down(struct rte_eth_dev *dev);
static int nfp_net_close(struct rte_eth_dev *dev);
static int nfp_net_init(struct rte_eth_dev *eth_dev);
static int nfp_fw_upload(struct rte_pci_device *dev,
			 struct nfp_nsp *nsp, char *card);
static int nfp_fw_setup(struct rte_pci_device *dev,
			struct nfp_cpp *cpp,
			struct nfp_eth_table *nfp_eth_table,
			struct nfp_hwinfo *hwinfo);
static int nfp_init_phyports(struct nfp_pf_dev *pf_dev);
static int nfp_pf_init(struct rte_pci_device *pci_dev);
static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev);
static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			    struct rte_pci_device *dev);
static int nfp_pci_uninit(struct rte_eth_dev *eth_dev);
static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev);

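/*
 * Read the MAC address of one physical port from the NSP ethernet port
 * table rather than from the vNIC config BAR. nfp_eth_read_ports()
 * malloc()s the table, so it is released with free() before returning.
 */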
static int
nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port)
{
	struct nfp_eth_table *nfp_eth_table;
	struct nfp_net_hw *hw = NULL;

	/* Grab a pointer to the correct physical port */
	hw = pf_dev->ports[port];

	nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		return -EIO;
	}

	nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
			 (uint8_t *)&nfp_eth_table->ports[port].mac_addr);

	free(nfp_eth_table);
	return 0;
}

static int
nfp_net_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (pf_dev->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					  "with NFP multiport PF");
			return -EINVAL;
		}
		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						  "supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

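	/*
	 * Apply the new configuration: write the control word, then let
	 * nfp_net_reconfig() post the update mask and poll until the
	 * firmware acknowledges the reconfiguration.
	 */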
	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		nfp_eth_set_configured(dev->process_private,
				       hw->nfp_idx, 1);

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function means the app is exiting,
	 * and the system will then release all the allocated memory,
	 * even the memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * The app has to exit either way, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
nfp_net_stop(struct rte_eth_dev *dev)
{
	int i;
	struct nfp_net_hw *hw;
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;

	PMD_INIT_LOG(DEBUG, "Stop");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		nfp_eth_set_configured(dev->process_private,
				       hw->nfp_idx, 0);

	return 0;
}

/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link up");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		return nfp_eth_set_configured(dev->process_private,
					      hw->nfp_idx, 1);
}

/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link down");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		return nfp_eth_set_configured(dev->process_private,
					      hw->nfp_idx, 0);
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
		nfp_net_tx_queue_release(dev, i);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
		nfp_net_rx_queue_release(dev, i);
	}

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			     (void *)dev);

	/*
	 * Only free PF resources after all physical ports have been closed.
	 * Mark this port as unused and free device priv resources.
	 */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
	pf_dev->ports[hw->idx] = NULL;
	rte_eth_dev_release_port(dev);

	for (i = 0; i < pf_dev->total_phyports; i++) {
		/* Check to see if ports are still in use */
		if (pf_dev->ports[i])
			return 0;
	}

	/* Now it is safe to free all PF resources */
	PMD_INIT_LOG(INFO, "Freeing PF resources");
	nfp_cpp_area_free(pf_dev->ctrl_area);
	nfp_cpp_area_free(pf_dev->hwqueues_area);
	free(pf_dev->hwinfo);
	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	rte_free(pf_dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
				     nfp_net_dev_interrupt_handler,
				     (void *)dev);

	/*
	 * The ixgbe PMD disables the pcie master on the
	 * device. The i40e does not...
	 */

	return 0;
}

/* Ethdev operations implemented by the NFP PF PMD */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure = nfp_net_configure,
	.dev_start = nfp_net_start,
	.dev_stop = nfp_net_stop,
	.dev_set_link_up = nfp_net_set_link_up,
	.dev_set_link_down = nfp_net_set_link_down,
	.dev_close = nfp_net_close,
	.promiscuous_enable = nfp_net_promisc_enable,
	.promiscuous_disable = nfp_net_promisc_disable,
	.link_update = nfp_net_link_update,
	.stats_get = nfp_net_stats_get,
	.stats_reset = nfp_net_stats_reset,
	.dev_infos_get = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set = nfp_net_dev_mtu_set,
	.mac_addr_set = nfp_set_mac_addr,
	.vlan_offload_set = nfp_net_vlan_offload_set,
	.reta_update = nfp_net_reta_update,
	.reta_query = nfp_net_reta_query,
	.rss_hash_update = nfp_net_rss_hash_update,
	.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
	.rx_queue_setup = nfp_net_rx_queue_setup,
	.rx_queue_release = nfp_net_rx_queue_release,
	.tx_queue_setup = nfp_net_tx_queue_setup,
	.tx_queue_release = nfp_net_tx_queue_release,
	.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
};

static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;

	uint64_t tx_bar_off = 0, rx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;
	int err;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Use backpointer here to the PF of this eth_dev */
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);

	/* NFP can not handle DMA addresses requiring more than 40 bits */
	if (rte_mem_check_dma_mask(40)) {
		RTE_LOG(ERR, PMD,
			"device %s can not be used: restricted dma mask to 40 bits!\n",
			pci_dev->device.name);
		return -ENODEV;
	}

	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
	if (port < 0 || port > 7) {
		PMD_DRV_LOG(ERR, "Port value is wrong");
		return -ENODEV;
	}

	/*
	 * Use the PF array of physical ports to get a pointer to
	 * this specific port.
	 */
	hw = pf_dev->ports[port];

	PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
			    "NFP internal port number: %d",
		     port, hw->nfp_idx);

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
	eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	if (port == 0) {
		hw->ctrl_bar = pf_dev->ctrl_bar;
	} else {
		if (!pf_dev->ctrl_bar)
			return -ENODEV;
		/*
		 * Use the port offset in the PF ctrl_bar for this
		 * port's control bar.
		 */
		hw->ctrl_bar = pf_dev->ctrl_bar +
			       (port * NFP_PF_CSR_SLICE_SIZE);
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		err = -ENODEV;
		goto dev_err_ctrl_map;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

	hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
	hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
	eth_dev->data->dev_private = hw;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
		     NFD_CFG_MAJOR_VERSION_of(hw->ver),
		     NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);

	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
		     hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
		     hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
		     hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
		     hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
		     hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
		     hw->max_rx_queues, hw->max_tx_queues);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		err = -ENOMEM;
		goto dev_err_queues_map;
	}

	nfp_net_pf_read_mac(pf_dev, port);
	nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);

	tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d",
			     port);
		/* Generate a random MAC address instead */
		rte_eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			    &eth_dev->data->mac_addrs[0]);

	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
			   "mac=" RTE_ETHER_ADDR_PRT_FMT,
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(pci_dev->intr_handle,
					   nfp_net_dev_interrupt_handler,
					   (void *)eth_dev);
		/* Telling the firmware about the LSC interrupt entry */
		nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
		/* Recording current stats counters values */
		nfp_net_stats_reset(eth_dev);
	}

	return 0;

dev_err_queues_map:
	nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
	nfp_cpp_area_free(hw->ctrl_area);

	return err;
}

#define DEFAULT_FW_PATH "/lib/firmware/netronome"
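
/*
 * nfp_fw_upload() below searches DEFAULT_FW_PATH for, in order of priority:
 * a per-device image ("serial-<serial>.nffw"), a per-PCI-address image
 * ("pci-<name>.nffw") and finally a generic image for the card type and
 * media ("nic_<model>_<N>x<speed>.nffw").
 */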

static int
nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
{
	struct nfp_cpp *cpp = nsp->cpp;
	void *fw_buf;
	char fw_name[125];
	char serial[40];
	size_t fsize;

	/* Looking for firmware file in order of priority */

	/* First try to find a firmware image specific for this device */
	snprintf(serial, sizeof(serial),
		 "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
		 cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
		 cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
		 cpp->interface & 0xff);

	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
		 serial);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Then try the PCI name */
	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
		 dev->device.name);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Finally try the card type and media */
	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
		PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
		return -ENOENT;
	}

load_fw:
	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
		    fw_name, fsize);
	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
	nfp_nsp_load_fw(nsp, fw_buf, fsize);
	PMD_DRV_LOG(INFO, "Done");

	free(fw_buf);

	return 0;
}

static int
nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
	     struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
{
	struct nfp_nsp *nsp;
	const char *nfp_fw_model;
	char card_desc[100];
	int err = 0;

	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");

	if (nfp_fw_model) {
		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
	} else {
		PMD_DRV_LOG(ERR, "firmware model NOT found");
		return -EIO;
	}

	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
			    nfp_eth_table->count);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
		    nfp_eth_table->count);

	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);

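	/*
	 * Illustrative example: an "AMDA0096-0001" assembly with two 10G
	 * ports would yield "nic_AMDA0096-0001_2x10.nffw".
	 */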
	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
		 nfp_fw_model, nfp_eth_table->count,
		 nfp_eth_table->ports[0].speed / 1000);

	nsp = nfp_nsp_open(cpp);
	if (!nsp) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	nfp_nsp_device_soft_reset(nsp);
	err = nfp_fw_upload(dev, nsp, card_desc);

	nfp_nsp_close(nsp);
	return err;
}

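/*
 * Allocate and initialise one ethdev per physical port on the PF. The
 * ctrl/tx/rx BAR mappings and the rest of the per-port setup are done by
 * nfp_net_init(), called for each port below.
 */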
static int
nfp_init_phyports(struct nfp_pf_dev *pf_dev)
{
	struct nfp_net_hw *hw;
	struct rte_eth_dev *eth_dev;
	struct nfp_eth_table *nfp_eth_table = NULL;
	int ret = 0;
	int i;

	nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
	if (!nfp_eth_table) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto error;
	}

	/* Loop through all physical ports on PF */
	for (i = 0; i < pf_dev->total_phyports; i++) {
		const unsigned int numa_node = rte_socket_id();
		char port_name[RTE_ETH_NAME_MAX_LEN];

		snprintf(port_name, sizeof(port_name), "%s_port%d",
			 pf_dev->pci_dev->device.name, i);

		/* Allocate an eth_dev for this phyport */
		eth_dev = rte_eth_dev_allocate(port_name);
		if (!eth_dev) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		/* Allocate memory for this phyport */
		eth_dev->data->dev_private =
			rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
					   RTE_CACHE_LINE_SIZE, numa_node);
		if (!eth_dev->data->dev_private) {
			ret = -ENOMEM;
			rte_eth_dev_release_port(eth_dev);
			goto port_cleanup;
		}

		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

		/* Add this device to the PF's array of physical ports */
		pf_dev->ports[i] = hw;

		hw->pf_dev = pf_dev;
		hw->cpp = pf_dev->cpp;
		hw->eth_dev = eth_dev;
		hw->idx = i;
		hw->nfp_idx = nfp_eth_table->ports[i].index;
		hw->is_phyport = true;

		eth_dev->device = &pf_dev->pci_dev->device;

		/*
		 * The ctrl/tx/rx BAR mappings and the remaining init
		 * happen in nfp_net_init().
		 */
		ret = nfp_net_init(eth_dev);
		if (ret) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		rte_eth_dev_probing_finish(eth_dev);

	} /* End loop, all ports on this PF */
	ret = 0;
	goto eth_table_cleanup;

port_cleanup:
	for (i = 0; i < pf_dev->total_phyports; i++) {
		if (pf_dev->ports[i] && pf_dev->ports[i]->eth_dev) {
			struct rte_eth_dev *tmp_dev;
			tmp_dev = pf_dev->ports[i]->eth_dev;
			rte_eth_dev_release_port(tmp_dev);
			pf_dev->ports[i] = NULL;
		}
	}
eth_table_cleanup:
	free(nfp_eth_table);
error:
	return ret;
}

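/*
 * Primary-process bring-up for the NFP PF: acquire a CPP handle, read the
 * hwinfo and NSP ethernet tables, load firmware if required, read the
 * firmware run-time symbol table, map the ctrl and queue BARs and finally
 * create one ethdev per physical port.
 */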
static int
nfp_pf_init(struct rte_pci_device *pci_dev)
{
	struct nfp_pf_dev *pf_dev = NULL;
	struct nfp_cpp *cpp;
	struct nfp_hwinfo *hwinfo;
	struct nfp_rtsym_table *sym_tbl;
	struct nfp_eth_table *nfp_eth_table = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int total_ports;
	int ret = -ENODEV;
	int err;

	if (!pci_dev)
		return ret;

	/*
	 * When the device is bound to UIO it could, by mistake, be used by
	 * two DPDK apps at once, and the UIO driver does not prevent it.
	 * That could lead to a serious problem when configuring the NFP CPP
	 * interface. We avoid it by telling the CPP init code to use a lock
	 * file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (!cpp) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		ret = -EIO;
		goto error;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (!hwinfo) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto error;
	}

	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (!nfp_eth_table) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (!sym_tbl) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				  " symbol table");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (total_ports != (int)nfp_eth_table->count) {
		PMD_DRV_LOG(ERR, "Inconsistent number of ports");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	PMD_INIT_LOG(INFO, "Total physical ports: %d", total_ports);

	if (total_ports <= 0 || total_ports > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		ret = -ENODEV;
		goto sym_tbl_cleanup;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (!pf_dev) {
		ret = -ENOMEM;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->total_phyports = total_ports;

	if (total_ports > 1)
		pf_dev->multiport = true;

	pf_dev->pci_dev = pci_dev;

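	/*
	 * Each vNIC is given a 32 KiB slice of the ctrl BAR mapped below;
	 * this stride should match the per-port NFP_PF_CSR_SLICE_SIZE
	 * offset used in nfp_net_init() (assumption based on that code).
	 */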
	/* Map the PF ctrl BAR through the firmware symbol table */
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
					 pf_dev->total_phyports * 32768,
					 &pf_dev->ctrl_area);
	if (!pf_dev->ctrl_bar) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_bar0");
		ret = -EIO;
		goto pf_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* configure access to tx/rx vNIC BARs */
	pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
					     NFP_PCIE_QUEUE(0),
					     NFP_QCP_QUEUE_AREA_SZ,
					     &pf_dev->hwqueues_area);
	if (!pf_dev->hw_queues) {
		PMD_INIT_LOG(ERR, "nfp_cpp_map_area fails for net.qc");
		ret = -EIO;
		goto ctrl_area_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);

	/*
	 * Initialize and prep physical ports now.
	 * This will loop through all physical ports.
	 */
	ret = nfp_init_phyports(pf_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Could not create physical ports");
		goto hwqueues_cleanup;
	}

	/* register the CPP bridge service here for primary use */
	nfp_register_cpp_service(pf_dev->cpp);

	return 0;

hwqueues_cleanup:
	nfp_cpp_area_free(pf_dev->hwqueues_area);
ctrl_area_cleanup:
	nfp_cpp_area_free(pf_dev->ctrl_area);
pf_cleanup:
	rte_free(pf_dev);
sym_tbl_cleanup:
	free(sym_tbl);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
error:
	return ret;
}

/*
 * When attaching to the NFP4000/6000 PF on a secondary process there
 * is no need to initialise the PF again. Only minimal work is required
 * here.
 */
static int
nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
{
	struct nfp_cpp *cpp;
	struct nfp_rtsym_table *sym_tbl;
	int total_ports;
	int i;
	int err;

	if (!pci_dev)
		return -ENODEV;

	/*
	 * When the device is bound to UIO it could, by mistake, be used by
	 * two DPDK apps at once, and the UIO driver does not prevent it.
	 * That could lead to a serious problem when configuring the NFP CPP
	 * interface. We avoid it by telling the CPP init code to use a lock
	 * file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (!cpp) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	/*
	 * We don't have access to the PF created in the primary process
	 * here, so we have to read the number of ports from firmware.
	 */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (!sym_tbl) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				  " symbol table");
		return -EIO;
	}

	total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	/* Sanity-check the symbol, mirroring the primary-process path */
	if (err != 0 || total_ports <= 0 || total_ports > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		return -ENODEV;
	}

	for (i = 0; i < total_ports; i++) {
		struct rte_eth_dev *eth_dev;
		char port_name[RTE_ETH_NAME_MAX_LEN];

		snprintf(port_name, sizeof(port_name), "%s_port%d",
			 pci_dev->device.name, i);

		PMD_DRV_LOG(DEBUG, "Secondary attaching to port %s",
			    port_name);
		eth_dev = rte_eth_dev_attach_secondary(port_name);
		if (!eth_dev) {
			RTE_LOG(ERR, EAL,
				"secondary process attach failed, "
				"ethdev doesn't exist\n");
			return -ENODEV;
		}
		eth_dev->process_private = cpp;
		eth_dev->dev_ops = &nfp_net_eth_dev_ops;
		eth_dev->rx_queue_count = nfp_net_rx_queue_count;
		eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
		eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
		rte_eth_dev_probing_finish(eth_dev);
	}

	/* Register the CPP bridge service for the secondary too */
	nfp_register_cpp_service(cpp);

	return 0;
}

static int
nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		 struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return nfp_pf_init(dev);
	else
		return nfp_pf_secondary_init(dev);
}

static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int
nfp_pci_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	uint16_t port_id;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Free up all physical ports under PF */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		rte_eth_dev_close(port_id);
	/*
	 * Ports can be closed and freed but hotplugging is not
	 * currently supported.
	 */
	return -ENOTSUP;
}

static int
eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
}

static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */