/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* sysconf() */
#include <unistd.h>

#include <rte_errno.h>
#include <rte_alarm.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"
#include "sfc_sw_stats.h"
#include "sfc_switch.h"
#include "sfc_nic_dma.h"
bool
sfc_repr_supported(const struct sfc_adapter *sa)
{
	if (!sa->switchdev)
		return false;

	/*
	 * Representor proxy should use service lcore on PF's socket
	 * (sa->socket_id) to be efficient. But the proxy will fall back
	 * to any socket if it is not possible to get the service core
	 * on the same socket. Check that at least one service core on
	 * any socket is available.
	 */
	if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE)
		return false;

	return true;
}

bool
sfc_repr_available(const struct sfc_adapter_shared *sas)
{
	return sas->nb_repr_rxq > 0 && sas->nb_repr_txq > 0;
}

int
sfc_dma_alloc(struct sfc_adapter *sa, const char *name, uint16_t id,
	      efx_nic_dma_addr_type_t addr_type, size_t len, int socket_id,
	      efsys_mem_t *esmp)
{
	const struct rte_memzone *mz;
	int rc;

	sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
		     name, id, len, socket_id);

	mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
				      sysconf(_SC_PAGESIZE), socket_id);
	if (mz == NULL) {
		sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
			name, (unsigned int)id, (unsigned int)len, socket_id,
			rte_strerror(rte_errno));
		return ENOMEM;
	}
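	/*
	 * The NIC addresses memory by IOVA; a memzone without a valid
	 * IOVA cannot be used for DMA.
	 */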
	if (mz->iova == RTE_BAD_IOVA) {
		(void)rte_memzone_free(mz);
		return EFAULT;
	}

	rc = sfc_nic_dma_mz_map(sa, mz, addr_type, &esmp->esm_addr);
	if (rc != 0) {
		(void)rte_memzone_free(mz);
		return rc;
	}

	esmp->esm_mz = mz;
	esmp->esm_base = mz->addr;

	sfc_info(sa,
		 "DMA name=%s id=%u len=%zu socket_id=%d => virt=%p iova=%lx",
		 name, id, len, socket_id, esmp->esm_base,
		 (unsigned long)esmp->esm_addr);

	return 0;
}

void
sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
{
	int rc;

	sfc_log_init(sa, "name=%s", esmp->esm_mz->name);

	rc = rte_memzone_free(esmp->esm_mz);
	if (rc != 0)
		sfc_err(sa, "rte_memzone_free() failed: %d", rc);

	memset(esmp, 0, sizeof(*esmp));
}

static uint32_t
sfc_phy_cap_from_link_speeds(uint32_t speeds)
{
	uint32_t phy_caps = 0;

	if (~speeds & RTE_ETH_LINK_SPEED_FIXED) {
		phy_caps |= (1 << EFX_PHY_CAP_AN);
		/*
		 * If no speeds are specified in the mask, any supported
		 * speed may be negotiated.
		 */
		if (speeds == RTE_ETH_LINK_SPEED_AUTONEG)
			phy_caps |=
				(1 << EFX_PHY_CAP_1000FDX) |
				(1 << EFX_PHY_CAP_10000FDX) |
				(1 << EFX_PHY_CAP_25000FDX) |
				(1 << EFX_PHY_CAP_40000FDX) |
				(1 << EFX_PHY_CAP_50000FDX) |
				(1 << EFX_PHY_CAP_100000FDX);
	}
	if (speeds & RTE_ETH_LINK_SPEED_1G)
		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
	if (speeds & RTE_ETH_LINK_SPEED_10G)
		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
	if (speeds & RTE_ETH_LINK_SPEED_25G)
		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
	if (speeds & RTE_ETH_LINK_SPEED_40G)
		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
	if (speeds & RTE_ETH_LINK_SPEED_50G)
		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
	if (speeds & RTE_ETH_LINK_SPEED_100G)
		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);

	return phy_caps;
}

/*
 * Check requested device level configuration.
 * Receive and transmit configuration is checked in corresponding
 * modules.
 */
static int
sfc_check_conf(struct sfc_adapter *sa)
{
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
	int rc = 0;

	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",
			conf->link_speeds);
		rc = EINVAL;
	}

#if !EFSYS_OPT_LOOPBACK
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");
		rc = EINVAL;
	}
#endif

	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");
		rc = EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");
		rc = EINVAL;
	}

	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");
		rc = EINVAL;
	}

	if (conf->intr_conf.rxq != 0 &&
	    (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_INTR) == 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
		rc = EINVAL;
	}

	return rc;
}

/*
 * Find out maximum number of receive and transmit queues which could be
 * advertised.
 *
 * NIC is kept initialized on success to allow other modules to acquire
 * defaults and capabilities.
 */
static int
sfc_estimate_resource_limits(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	efx_drv_limits_t limits;
	int rc;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/*
	 * Divide by number of functions to guarantee that all functions
	 * will get promised resources.
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

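	/*
	 * FW-assisted TSOv2 contexts are presumably shared among hardware
	 * PFs, so assume an even split per PF when bounding the TxQ count
	 * such that each queue can get a TSO context.
	 */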
	if (sa->tso && encp->enc_fw_assisted_tso_v2_enabled)
		limits.edl_max_txq_count =
			MIN(limits.edl_max_txq_count,
			    encp->enc_fw_assisted_tso_v2_n_contexts /
			    encp->enc_hw_pf_count);

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);

	/*
	 * Configure the minimum resources required for the driver
	 * to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
				 &txq_allocated);
	if (rc != 0)
		goto fail_get_vi_pool;

	/*
	 * The firmware may still allocate more than the maximum;
	 * enforce the limits.
	 */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/*
	 * Subtract the management EVQ, which is not used for traffic.
	 * The resource allocation strategy is as follows:
	 * - one EVQ for management
	 * - one EVQ for each ethdev RXQ
	 * - one EVQ for each ethdev TXQ
	 * - one EVQ and one RXQ for optional MAE counters.
	 */
	if (evq_allocated == 0) {
		sfc_err(sa, "count of allocated EvQ is 0");
		rc = ENOMEM;
		goto fail_allocate_evq;
	}
	evq_allocated--;

	/*
	 * Reserve the absolutely required minimum.
	 * Right now we use a separate EVQ for Rx and Tx.
	 */
	if (rxq_allocated > 0 && evq_allocated > 0) {
		sa->rxq_max = 1;
		rxq_allocated--;
		evq_allocated--;
	}
	if (txq_allocated > 0 && evq_allocated > 0) {
		sa->txq_max = 1;
		txq_allocated--;
		evq_allocated--;
	}

	if (sfc_mae_counter_rxq_required(sa) &&
	    rxq_allocated > 0 && evq_allocated > 0) {
		rxq_allocated--;
		evq_allocated--;
		sas->counters_rxq_allocated = true;
	} else {
		sas->counters_rxq_allocated = false;
	}

	if (sfc_repr_supported(sa) &&
	    evq_allocated >= SFC_REPR_PROXY_NB_RXQ_MIN +
	    SFC_REPR_PROXY_NB_TXQ_MIN &&
	    rxq_allocated >= SFC_REPR_PROXY_NB_RXQ_MIN &&
	    txq_allocated >= SFC_REPR_PROXY_NB_TXQ_MIN) {
		unsigned int extra;

		txq_allocated -= SFC_REPR_PROXY_NB_TXQ_MIN;
		rxq_allocated -= SFC_REPR_PROXY_NB_RXQ_MIN;
		evq_allocated -= SFC_REPR_PROXY_NB_RXQ_MIN +
			SFC_REPR_PROXY_NB_TXQ_MIN;

		sas->nb_repr_rxq = SFC_REPR_PROXY_NB_RXQ_MIN;
		sas->nb_repr_txq = SFC_REPR_PROXY_NB_TXQ_MIN;

		/* Allocate extra representor RxQs up to the maximum */
		extra = MIN(evq_allocated, rxq_allocated);
		extra = MIN(extra,
			    SFC_REPR_PROXY_NB_RXQ_MAX - sas->nb_repr_rxq);
		evq_allocated -= extra;
		rxq_allocated -= extra;
		sas->nb_repr_rxq += extra;

		/* Allocate extra representor TxQs up to the maximum */
		extra = MIN(evq_allocated, txq_allocated);
		extra = MIN(extra,
			    SFC_REPR_PROXY_NB_TXQ_MAX - sas->nb_repr_txq);
		evq_allocated -= extra;
		txq_allocated -= extra;
		sas->nb_repr_txq += extra;
	} else {
		sas->nb_repr_rxq = 0;
		sas->nb_repr_txq = 0;
	}

	/* Add remaining allocated queues */
	sa->rxq_max += MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max += MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */
	return 0;

fail_allocate_evq:
fail_get_vi_pool:
	efx_nic_fini(sa->nic);
fail_nic_init:
	return rc;
}

static int
sfc_set_drv_limits(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	const struct rte_eth_dev_data *data = sa->eth_dev->data;
	uint32_t rxq_reserved = sfc_nb_reserved_rxq(sas);
	uint32_t txq_reserved = sfc_nb_txq_reserved(sas);
	efx_drv_limits_t lim;

	memset(&lim, 0, sizeof(lim));

	/*
	 * Limits are strict since they take into account the initial
	 * estimation. The resource allocation strategy is described in
	 * sfc_estimate_resource_limits().
	 */
	lim.edl_min_evq_count = lim.edl_max_evq_count =
		1 + data->nb_rx_queues + data->nb_tx_queues +
		rxq_reserved + txq_reserved;
	lim.edl_min_rxq_count = lim.edl_max_rxq_count =
		data->nb_rx_queues + rxq_reserved;
	lim.edl_min_txq_count = lim.edl_max_txq_count =
		data->nb_tx_queues + txq_reserved;

	return efx_nic_set_drv_limits(sa->nic, &lim);
}

static int
sfc_set_fw_subvariant(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
	unsigned int txq_index;
	efx_nic_fw_subvariant_t req_fw_subvariant;
	efx_nic_fw_subvariant_t cur_fw_subvariant;
	int rc;

	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
		sfc_info(sa, "no-Tx-checksum subvariant not supported");
		return 0;
	}

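	/*
	 * Aggregate per-queue offloads of initialized TxQs on top of the
	 * port-level configuration: a queue-level offload may require Tx
	 * checksumming even when the port-level config does not.
	 */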
	for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) {
		struct sfc_txq_info *txq_info = &sas->txq_info[txq_index];

		if (txq_info->state & SFC_TXQ_INITIALIZED)
			tx_offloads |= txq_info->offloads;
	}

	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
	else
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;

	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to get FW subvariant: %d", rc);
		return rc;
	}
	sfc_info(sa, "FW subvariant is %u vs required %u",
		 cur_fw_subvariant, req_fw_subvariant);

	if (cur_fw_subvariant == req_fw_subvariant)
		return 0;

	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to set FW subvariant %u: %d",
			req_fw_subvariant, rc);
		return rc;
	}
	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);

	return 0;
}

static int
sfc_try_start(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ETHDEV_STARTING);

	sfc_log_init(sa, "set FW subvariant");
	rc = sfc_set_fw_subvariant(sa);
	if (rc != 0)
		goto fail_set_fw_subvariant;

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
	if (rc != 0)
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	sfc_log_init(sa, "reconfigure NIC DMA");
	rc = efx_nic_dma_reconfigure(sa->nic);
	if (rc != 0) {
		sfc_err(sa, "cannot reconfigure NIC DMA: %s", rte_strerror(rc));
		goto fail_nic_dma_reconfigure;
	}

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Refresh (since it may change on NIC reset/restart) the copy of
	 * supported tunnel encapsulations kept in shared memory, used
	 * when the supported Rx packet type classes are queried.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc != 0)
			goto fail_tunnel_reconfigure;
	}

	rc = sfc_intr_start(sa);
	if (rc != 0)
		goto fail_intr_start;

	rc = sfc_ev_start(sa);
	if (rc != 0)
		goto fail_ev_start;

	rc = sfc_port_start(sa);
	if (rc != 0)
		goto fail_port_start;

	rc = sfc_rx_start(sa);
	if (rc != 0)
		goto fail_rx_start;

	rc = sfc_tx_start(sa);
	if (rc != 0)
		goto fail_tx_start;

	rc = sfc_flow_start(sa);
	if (rc != 0)
		goto fail_flows_insert;

	rc = sfc_repr_proxy_start(sa);
	if (rc != 0)
		goto fail_repr_proxy_start;

	sfc_log_init(sa, "done");
	return 0;

fail_repr_proxy_start:
	sfc_flow_stop(sa);

fail_flows_insert:
	sfc_tx_stop(sa);

fail_tx_start:
	sfc_rx_stop(sa);

fail_rx_start:
	sfc_port_stop(sa);

fail_port_start:
	sfc_ev_stop(sa);

fail_ev_start:
	sfc_intr_stop(sa);

fail_intr_start:
fail_tunnel_reconfigure:
fail_nic_dma_reconfigure:
	efx_nic_fini(sa->nic);

fail_nic_init:
fail_set_drv_limits:
fail_set_fw_subvariant:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

int
sfc_start(struct sfc_adapter *sa)
{
	unsigned int start_tries = 3;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ETHDEV_CONFIGURED:
		break;
	case SFC_ETHDEV_STARTED:
		sfc_notice(sa, "already started");
		return 0;
	default:
		rc = EINVAL;
		goto fail_bad_state;
	}

	sa->state = SFC_ETHDEV_STARTING;

	rc = 0;
	do {
		/*
		 * FIXME Try to recreate vSwitch on start retry.
		 * vSwitch is absent after MC-reboot-like events and
		 * we should recreate it. Maybe we need a proper
		 * indication instead of guessing.
		 */
		if (rc != 0) {
			sfc_sriov_vswitch_destroy(sa);
			rc = sfc_sriov_vswitch_create(sa);
			if (rc != 0)
				goto fail_sriov_vswitch_create;
		}
		rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));

	if (rc != 0)
		goto fail_try_start;

	sa->state = SFC_ETHDEV_STARTED;
	sfc_log_init(sa, "done");
	return 0;

fail_try_start:
fail_sriov_vswitch_create:
	sa->state = SFC_ETHDEV_CONFIGURED;
fail_bad_state:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ETHDEV_STARTED:
		break;
	case SFC_ETHDEV_CONFIGURED:
		sfc_notice(sa, "already stopped");
		return;
	default:
		sfc_err(sa, "stop in unexpected state %u", sa->state);
		SFC_ASSERT(B_FALSE);
		return;
	}

	sa->state = SFC_ETHDEV_STOPPING;

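	/* Stop modules in reverse order relative to sfc_try_start() */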
	sfc_repr_proxy_stop(sa);
	sfc_flow_stop(sa);
	sfc_tx_stop(sa);
	sfc_rx_stop(sa);
	sfc_port_stop(sa);
	sfc_ev_stop(sa);
	sfc_intr_stop(sa);
	efx_nic_fini(sa->nic);

	sa->state = SFC_ETHDEV_CONFIGURED;
	sfc_log_init(sa, "done");
}

static int
sfc_restart(struct sfc_adapter *sa)
{
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ETHDEV_STARTED)
		return EINVAL;

	sfc_stop(sa);

	rc = sfc_start(sa);
	if (rc != 0)
		sfc_err(sa, "restart failed");

	return rc;
}

static void
sfc_restart_if_required(void *arg)
{
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
				1, 0)) {
		sfc_adapter_lock(sa);
		if (sa->state == SFC_ETHDEV_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
	}
}

void
sfc_schedule_restart(struct sfc_adapter *sa)
{
	int rc;

	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))
		return;

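	/*
	 * The alarm fires after 1 us in the EAL interrupt thread;
	 * rte_atomic32_test_and_set() above guarantees that at most
	 * one alarm is armed at a time.
	 */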
	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
	if (rc == -ENOTSUP)
		sfc_warn(sa, "alarms are not supported, restart is pending");
	else if (rc != 0)
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
	else
		sfc_notice(sa, "restart scheduled");
}

int
sfc_configure(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ETHDEV_INITIALIZED ||
		   sa->state == SFC_ETHDEV_CONFIGURED);
	sa->state = SFC_ETHDEV_CONFIGURING;

	rc = sfc_check_conf(sa);
	if (rc != 0)
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
	if (rc != 0)
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
	if (rc != 0)
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
	if (rc != 0)
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
	if (rc != 0)
		goto fail_tx_configure;

	rc = sfc_sw_xstats_configure(sa);
	if (rc != 0)
		goto fail_sw_xstats_configure;

	sa->state = SFC_ETHDEV_CONFIGURED;
	sfc_log_init(sa, "done");
	return 0;

fail_sw_xstats_configure:
	sfc_tx_close(sa);

fail_tx_configure:
	sfc_rx_close(sa);

fail_rx_configure:
	sfc_port_close(sa);

fail_port_configure:
	sfc_intr_close(sa);

fail_intr_configure:
fail_check_conf:
	sa->state = SFC_ETHDEV_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_close(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ETHDEV_CONFIGURED);
	sa->state = SFC_ETHDEV_CLOSING;

	sfc_sw_xstats_close(sa);
	sfc_tx_close(sa);
	sfc_rx_close(sa);
	sfc_port_close(sa);
	sfc_intr_close(sa);

	sa->state = SFC_ETHDEV_INITIALIZED;
	sfc_log_init(sa, "done");
}

static int
sfc_mem_bar_init(struct sfc_adapter *sa, const efx_bar_region_t *mem_ebrp)
{
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efsys_bar_t *ebp = &sa->mem_bar;
	struct rte_mem_resource *res =
		&pci_dev->mem_resource[mem_ebrp->ebr_index];

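	/* The BAR is already mapped by the PCI bus driver; just record it */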
	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
	ebp->esb_rid = mem_ebrp->ebr_index;
	ebp->esb_dev = pci_dev;
	ebp->esb_base = res->addr;

	sa->fcw_offset = mem_ebrp->ebr_offset;

	return 0;
}

static void
sfc_mem_bar_fini(struct sfc_adapter *sa)
{
	efsys_bar_t *ebp = &sa->mem_bar;

	SFC_BAR_LOCK_DESTROY(ebp);
	memset(ebp, 0, sizeof(*ebp));
}

/*
 * A fixed RSS key which has a property of being symmetric
 * (symmetrical flows are distributed to the same CPU)
 * and also known to give a uniform distribution
 * (a good distribution of traffic between different CPUs).
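 * The repeating 0x6d 0x5a byte pattern is a widely used symmetric
 * RSS key.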
 */
static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
};

static int
sfc_rss_attach(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc;

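	/*
	 * Bring up the minimum of libefx modules (interrupts, events, Rx)
	 * needed to query RSS capabilities, then shut them down again below.
	 */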
	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
	if (rc != 0)
		goto fail_intr_init;

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
	if (rc != 0)
		goto fail_scale_support_get;

	rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
	if (rc != 0)
		goto fail_hash_support_get;

	rc = sfc_rx_hash_init(sa);
	if (rc != 0)
		goto fail_rx_hash_init;

	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);

	rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
	memset(&rss->dummy_ctx, 0, sizeof(rss->dummy_ctx));
	rss->dummy_ctx.conf.qid_span = 1;
	rss->dummy_ctx.dummy = true;

	return 0;

fail_rx_hash_init:
fail_hash_support_get:
fail_scale_support_get:
	efx_rx_fini(sa->nic);

fail_rx_init:
	efx_ev_fini(sa->nic);

fail_ev_init:
	efx_intr_fini(sa->nic);

fail_intr_init:
	return rc;
}

static void
sfc_rss_detach(struct sfc_adapter *sa)
{
	sfc_rx_hash_fini(sa);
}

int
sfc_attach(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);
	if (rc != 0)
		goto fail_nic_reset;

	rc = sfc_sriov_attach(sa);
	if (rc != 0)
		goto fail_sriov_attach;

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
	if (rc != 0)
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Make a copy of supported tunnel encapsulations in shared
	 * memory to be used when the supported Rx packet type classes
	 * are queried.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
			  encp->enc_tso_v3_enabled;
		if (!sa->tso)
			sfc_info(sa, "TSO support isn't available on this adapter");
	}

	if (sa->tso &&
	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
	     (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
	      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
				encp->enc_tso_v3_enabled;
		if (!sa->tso_encap)
			sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");
	}

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
	if (rc != 0)
		goto fail_estimate_rsrc_limits;

	sa->evq_max_entries = encp->enc_evq_max_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries));

	sa->evq_min_entries = encp->enc_evq_min_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries));

	sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));

	sa->rxq_min_entries = encp->enc_rxq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries));

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	sa->txq_min_entries = encp->enc_txq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries));

	rc = sfc_intr_attach(sa);
	if (rc != 0)
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);
	if (rc != 0)
		goto fail_ev_attach;

	rc = sfc_port_attach(sa);
	if (rc != 0)
		goto fail_port_attach;

	rc = sfc_rss_attach(sa);
	if (rc != 0)
		goto fail_rss_attach;

	rc = sfc_flow_rss_attach(sa);
	if (rc != 0)
		goto fail_flow_rss_attach;

	rc = sfc_filter_attach(sa);
	if (rc != 0)
		goto fail_filter_attach;

	rc = sfc_mae_counter_rxq_attach(sa);
	if (rc != 0)
		goto fail_mae_counter_rxq_attach;

	rc = sfc_mae_attach(sa);
	if (rc != 0)
		goto fail_mae_attach;

	rc = sfc_mae_switchdev_init(sa);
	if (rc != 0)
		goto fail_mae_switchdev_init;

	rc = sfc_repr_proxy_attach(sa);
	if (rc != 0)
		goto fail_repr_proxy_attach;

	sfc_log_init(sa, "fini nic");
	efx_nic_fini(enp);

	sfc_flow_init(sa);

	rc = sfc_sw_xstats_init(sa);
	if (rc != 0)
		goto fail_sw_xstats_init;

	/*
	 * Create the vSwitch so that VFs are usable even when the PF
	 * is not yet started as a DPDK port. VFs should be able to
	 * talk to each other even if the PF is down.
	 */
	rc = sfc_sriov_vswitch_create(sa);
	if (rc != 0)
		goto fail_sriov_vswitch_create;

	sa->state = SFC_ETHDEV_INITIALIZED;

	sfc_log_init(sa, "done");
	return 0;

fail_sriov_vswitch_create:
	sfc_sw_xstats_close(sa);

fail_sw_xstats_init:
	sfc_flow_fini(sa);
	sfc_repr_proxy_detach(sa);

fail_repr_proxy_attach:
	sfc_mae_switchdev_fini(sa);

fail_mae_switchdev_init:
	sfc_mae_detach(sa);

fail_mae_attach:
	sfc_mae_counter_rxq_detach(sa);

fail_mae_counter_rxq_attach:
	sfc_filter_detach(sa);

fail_filter_attach:
	sfc_flow_rss_detach(sa);

fail_flow_rss_attach:
	sfc_rss_detach(sa);

fail_rss_attach:
	sfc_port_detach(sa);

fail_port_attach:
	sfc_ev_detach(sa);

fail_ev_attach:
	sfc_intr_detach(sa);

fail_intr_attach:
	efx_nic_fini(sa->nic);

fail_estimate_rsrc_limits:
fail_tunnel_init:
	efx_tunnel_fini(sa->nic);
	sfc_sriov_detach(sa);

fail_sriov_attach:
fail_nic_reset:

	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_pre_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(!sfc_adapter_is_locked(sa));

	sfc_repr_proxy_pre_detach(sa);

	sfc_log_init(sa, "done");
}

void
sfc_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_sriov_vswitch_destroy(sa);

	sfc_flow_fini(sa);

	sfc_repr_proxy_detach(sa);
	sfc_mae_switchdev_fini(sa);
	sfc_mae_detach(sa);
	sfc_mae_counter_rxq_detach(sa);
	sfc_filter_detach(sa);
	sfc_flow_rss_detach(sa);
	sfc_rss_detach(sa);
	sfc_port_detach(sa);
	sfc_ev_detach(sa);
	sfc_intr_detach(sa);
	efx_tunnel_fini(sa->nic);
	sfc_sriov_detach(sa);

	sa->state = SFC_ETHDEV_UNINITIALIZED;
}

static int
sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
			     const char *value_str, void *opaque)
{
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
		*value = EFX_FW_VARIANT_DONT_CARE;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
		*value = EFX_FW_VARIANT_FULL_FEATURED;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
		*value = EFX_FW_VARIANT_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
		*value = EFX_FW_VARIANT_PACKED_STREAM;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
		*value = EFX_FW_VARIANT_DPDK;
	else
		return -EINVAL;

	return 0;
}

static int
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
{
	efx_nic_fw_info_t enfi;
	int rc;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return rc;
	else if (!enfi.enfi_dpcpu_fw_ids_valid)
		return ENOTSUP;

	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 * firmware id
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;
		break;

	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;
		break;

	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;
		break;

	case EFX_RXDP_DPDK_FW_ID:
		*efv = EFX_FW_VARIANT_DPDK;
		break;

	default:
		/*
		 * Other firmware variants are not considered, since they are
		 * not supported in the device parameters
		 */
		*efv = EFX_FW_VARIANT_DONT_CARE;
		break;
	}

	return 0;
}

static const char *
sfc_fw_variant2str(efx_fw_variant_t efv)
{
	switch (efv) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
	case EFX_RXDP_LOW_LATENCY_FW_ID:
		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
	case EFX_RXDP_PACKED_STREAM_FW_ID:
		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
	case EFX_RXDP_DPDK_FW_ID:
		return SFC_KVARG_FW_VARIANT_DPDK;
	default:
		return "unknown";
	}
}

static int
sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
{
	int rc;
	long value;

	value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;

	rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
				sfc_kvarg_long_handler, &value);
	if (rc != 0)
		return rc;

	if (value < 0 ||
	    (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
		sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
			    "was set (%ld);", value);
		sfc_err(sa, "it must not be less than 0 or greater than %u",
			EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
		return EINVAL;
	}

	sa->rxd_wait_timeout_ns = value;
	return 0;
}

static int
sfc_nic_probe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;
	int rc;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
				&preferred_efv);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
		return rc;
	}

	rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
	if (rc != 0)
		return rc;

	rc = efx_nic_probe(enp, preferred_efv);
	if (rc == EACCES) {
		/* Unprivileged functions cannot set FW variant */
		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
	}
	if (rc != 0)
		return rc;

	rc = sfc_get_fw_variant(sa, &efv);
	if (rc == ENOTSUP) {
		sfc_warn(sa, "FW variant cannot be obtained");
		return 0;
	}
	if (rc != 0)
		return rc;

	/* Check that the firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));
	}

	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));

	return 0;
}

int
sfc_probe(struct sfc_adapter *sa)
{
	efx_bar_region_t mem_ebrp;
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efx_nic_t *enp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "get family");
	rc = sfc_efx_family(pci_dev, &mem_ebrp, &sa->family);
	if (rc != 0)
		goto fail_family;
	sfc_log_init(sa,
		     "family is %u, membar is %u, function control window offset is %lu",
		     sa->family, mem_ebrp.ebr_index, mem_ebrp.ebr_offset);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, &mem_ebrp);
	if (rc != 0)
		goto fail_mem_bar_init;

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, mem_ebrp.ebr_offset,
			    &sa->nic_lock, &enp);
	if (rc != 0)
		goto fail_nic_create;
	sa->nic = enp;

	rc = sfc_mcdi_init(sa);
	if (rc != 0)
		goto fail_mcdi_init;

	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);
	if (rc != 0)
		goto fail_nic_probe;

	sfc_log_init(sa, "done");
	return 0;

fail_nic_probe:
	sfc_mcdi_fini(sa);

fail_mcdi_init:
	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

fail_nic_create:
	sfc_mem_bar_fini(sa);

fail_mem_bar_init:
fail_family:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_unprobe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	sfc_mcdi_fini(sa);

	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free the device private data which is passed as the
	 * callback opaque data. A new alarm cannot be scheduled since
	 * MCDI is shut down.
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_mae_clear_switch_port(sa->mae.switch_domain_id,
				  sa->mae.switch_port_id);

	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sfc_flow_fini(sa);
	sa->state = SFC_ETHDEV_UNINITIALIZED;
}

uint32_t
sfc_register_logtype(const struct rte_pci_addr *pci_addr,
		     const char *lt_prefix_str, uint32_t ll_default)
{
	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;
	char *lt_str = NULL;
	int ret;

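	/* Guard against size_t overflow when sizing the logtype string */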
	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
	} else {
		return sfc_logtype_driver;
	}

	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
	if (lt_str == NULL)
		return sfc_logtype_driver;

	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	lt_str[lt_str_size_max - 1] = '\0';

	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
	rte_free(lt_str);

	if (ret < 0)
		return sfc_logtype_driver;

	return ret;
}

struct sfc_hw_switch_id {
	char board_sn[RTE_SIZEOF_FIELD(efx_nic_board_info_t, enbi_serial)];
};

int
sfc_hw_switch_id_init(struct sfc_adapter *sa,
		      struct sfc_hw_switch_id **idp)
{
	efx_nic_board_info_t board_info;
	struct sfc_hw_switch_id *id;
	int rc;

	if (idp == NULL)
		return EINVAL;

	id = rte_zmalloc("sfc_hw_switch_id", sizeof(*id), 0);
	if (id == NULL)
		return ENOMEM;

	rc = efx_nic_get_board_info(sa->nic, &board_info);
	if (rc != 0) {
		rte_free(id);
		return rc;
	}

	memcpy(id->board_sn, board_info.enbi_serial, sizeof(id->board_sn));

	*idp = id;

	return 0;
}

void
sfc_hw_switch_id_fini(__rte_unused struct sfc_adapter *sa,
		      struct sfc_hw_switch_id *id)
{
	rte_free(id);
}

bool
sfc_hw_switch_ids_equal(const struct sfc_hw_switch_id *left,
			const struct sfc_hw_switch_id *right)
{
	return strncmp(left->board_sn, right->board_sn,
		       sizeof(left->board_sn)) == 0;
}