/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"

/** Default MAC statistics update period is 1 second */
#define SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF      MS_PER_S

/** Microseconds to sleep between attempts to get a statistics update */
#define SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US  10

/** Number of attempts to await arrival of freshly generated statistics */
#define SFC_MAC_STATS_UPDATE_NB_ATTEMPTS        50

/**
 * Update MAC statistics in the buffer.
 *
 * @param sa    Adapter
 *
 * @return Status code
 * @retval 0       Success
 * @retval EAGAIN  Try again
 * @retval ENOMEM  Memory allocation failure
 */
int
sfc_port_update_mac_stats(struct sfc_adapter *sa)
{
        struct sfc_port *port = &sa->port;
        efsys_mem_t *esmp = &port->mac_stats_dma_mem;
        uint32_t *genp = NULL;
        uint32_t gen_old;
        unsigned int nb_attempts = 0;
        int rc;

        SFC_ASSERT(rte_spinlock_is_locked(&port->mac_stats_lock));

        if (sa->state != SFC_ADAPTER_STARTED)
                return EINVAL;

        /*
         * If periodic statistics DMA is disabled or not supported,
         * upload the statistics manually, throttling requests to the
         * configured period when a non-zero period has been requested.
         */
        if (!port->mac_stats_periodic_dma_supported ||
            (port->mac_stats_update_period_ms == 0)) {
                if (port->mac_stats_update_period_ms != 0) {
                        uint64_t timestamp = sfc_get_system_msecs();

                        if ((timestamp -
                             port->mac_stats_last_request_timestamp) <
                            port->mac_stats_update_period_ms)
                                return 0;

                        port->mac_stats_last_request_timestamp = timestamp;
                }

                rc = efx_mac_stats_upload(sa->nic, esmp);
                if (rc != 0)
                        return rc;

                genp = &port->mac_stats_update_generation;
                gen_old = *genp;
        }

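        /*
         * Poll for arrival of the freshly uploaded statistics: the
         * generation count changes once a new snapshot has been parsed
         * into the buffer. When periodic DMA is in use (genp == NULL),
         * a single update pass is sufficient.
         */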
        do {
                if (nb_attempts > 0)
                        rte_delay_us(SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US);

                rc = efx_mac_stats_update(sa->nic, esmp,
                                          port->mac_stats_buf, genp);
                if (rc != 0)
                        return rc;

        } while ((genp != NULL) && (*genp == gen_old) &&
                 (++nb_attempts < SFC_MAC_STATS_UPDATE_NB_ATTEMPTS));

        return 0;
}

static void
sfc_port_reset_sw_stats(struct sfc_adapter *sa)
{
        struct sfc_port *port = &sa->port;

        /*
         * Reset accumulated diff statistics explicitly: the check that
         * prevents statistics from going backwards would otherwise
         * discard the reset.
         */
        port->ipackets = 0;
}

int
sfc_port_reset_mac_stats(struct sfc_adapter *sa)
{
        struct sfc_port *port = &sa->port;
        int rc;

        rte_spinlock_lock(&port->mac_stats_lock);
        rc = efx_mac_stats_clear(sa->nic);
        if (rc == 0)
                sfc_port_reset_sw_stats(sa);
        rte_spinlock_unlock(&port->mac_stats_lock);

        return rc;
}

static int
sfc_port_init_dev_link(struct sfc_adapter *sa)
{
        struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
        int rc;
        efx_link_mode_t link_mode;
        struct rte_eth_link current_link;

        rc = efx_port_poll(sa->nic, &link_mode);
        if (rc != 0)
                return rc;

        sfc_port_link_mode_to_info(link_mode, &current_link);

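        /*
         * Update the link structure with a single atomic 64-bit store
         * so that lock-free readers of dev_link never observe a
         * half-updated value.
         */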
        EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
        rte_atomic64_set((rte_atomic64_t *)dev_link,
                         *(uint64_t *)&current_link);

        return 0;
}

#if EFSYS_OPT_LOOPBACK

static efx_link_mode_t
sfc_port_phy_caps_to_max_link_speed(uint32_t phy_caps)
{
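        /*
         * Check capabilities from the highest speed down so that the
         * fastest advertised speed wins.
         */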
        if (phy_caps & (1u << EFX_PHY_CAP_100000FDX))
                return EFX_LINK_100000FDX;
        if (phy_caps & (1u << EFX_PHY_CAP_50000FDX))
                return EFX_LINK_50000FDX;
        if (phy_caps & (1u << EFX_PHY_CAP_40000FDX))
                return EFX_LINK_40000FDX;
        if (phy_caps & (1u << EFX_PHY_CAP_25000FDX))
                return EFX_LINK_25000FDX;
        if (phy_caps & (1u << EFX_PHY_CAP_10000FDX))
                return EFX_LINK_10000FDX;
        if (phy_caps & (1u << EFX_PHY_CAP_1000FDX))
                return EFX_LINK_1000FDX;
        return EFX_LINK_UNKNOWN;
}

#endif

int
sfc_port_start(struct sfc_adapter *sa)
{
        struct sfc_port *port = &sa->port;
        int rc;
        uint32_t phy_adv_cap;
        const uint32_t phy_pause_caps =
                ((1u << EFX_PHY_CAP_PAUSE) | (1u << EFX_PHY_CAP_ASYM));
        unsigned int i;

        sfc_log_init(sa, "entry");

        sfc_log_init(sa, "init filters");
        rc = efx_filter_init(sa->nic);
        if (rc != 0)
                goto fail_filter_init;

        sfc_log_init(sa, "init port");
        rc = efx_port_init(sa->nic);
        if (rc != 0)
                goto fail_port_init;

#if EFSYS_OPT_LOOPBACK
        if (sa->eth_dev->data->dev_conf.lpbk_mode != 0) {
                efx_link_mode_t link_mode;

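                /*
                 * Configure the requested loopback mode at the fastest
                 * link speed currently advertised by the PHY.
                 */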
                link_mode =
                        sfc_port_phy_caps_to_max_link_speed(port->phy_adv_cap);
                sfc_log_init(sa, "set loopback link_mode=%u type=%u", link_mode,
                             sa->eth_dev->data->dev_conf.lpbk_mode);
                rc = efx_port_loopback_set(sa->nic, link_mode,
                                sa->eth_dev->data->dev_conf.lpbk_mode);
                if (rc != 0)
                        goto fail_loopback_set;
        }
#endif

        sfc_log_init(sa, "set flow control to %#x autoneg=%u",
                     port->flow_ctrl, port->flow_ctrl_autoneg);
        rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl,
                               port->flow_ctrl_autoneg);
        if (rc != 0)
                goto fail_mac_fcntl_set;

        /* Preserve pause capabilities set by above efx_mac_fcntl_set() */
        efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_CURRENT, &phy_adv_cap);
        SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0);
        phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps);

        /*
         * No controls for FEC yet. Use default FEC mode.
         * I.e. advertise everything supported (*_FEC=1), but do not request
         * anything explicitly (*_FEC_REQUESTED=0).
         */
        phy_adv_cap |= port->phy_adv_cap_mask &
                (1u << EFX_PHY_CAP_BASER_FEC |
                 1u << EFX_PHY_CAP_RS_FEC |
                 1u << EFX_PHY_CAP_25G_BASER_FEC);

        sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap);
        rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap);
        if (rc != 0)
                goto fail_phy_adv_cap_set;

        sfc_log_init(sa, "set MAC PDU %u", (unsigned int)port->pdu);
        rc = efx_mac_pdu_set(sa->nic, port->pdu);
        if (rc != 0)
                goto fail_mac_pdu_set;

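        /*
         * In flow isolated mode traffic filtering is fully controlled
         * by flow rules, so leave the MAC address and Rx mode filters
         * untouched.
         */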
        if (!sfc_sa2shared(sa)->isolated) {
                struct rte_ether_addr *addr = &port->default_mac_addr;

                sfc_log_init(sa, "set MAC address");
                rc = efx_mac_addr_set(sa->nic, addr->addr_bytes);
                if (rc != 0)
                        goto fail_mac_addr_set;

                sfc_log_init(sa, "set MAC filters");
                port->promisc = (sa->eth_dev->data->promiscuous != 0) ?
                                B_TRUE : B_FALSE;
                port->allmulti = (sa->eth_dev->data->all_multicast != 0) ?
                                 B_TRUE : B_FALSE;
                rc = sfc_set_rx_mode_unchecked(sa);
                if (rc != 0)
                        goto fail_mac_filter_set;

                sfc_log_init(sa, "set multicast address list");
                rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
                                                port->nb_mcast_addrs);
                if (rc != 0)
                        goto fail_mcast_address_list_set;
        }

        if (port->mac_stats_reset_pending) {
                rc = sfc_port_reset_mac_stats(sa);
                if (rc != 0)
                        sfc_err(sa, "statistics reset failed (requested "
                                    "before the port was started)");

                port->mac_stats_reset_pending = B_FALSE;
        }

        efx_mac_stats_get_mask(sa->nic, port->mac_stats_mask,
                               sizeof(port->mac_stats_mask));

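        /* Count the statistics actually supported by the NIC/firmware */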
        for (i = 0, port->mac_stats_nb_supported = 0; i < EFX_MAC_NSTATS; ++i)
                if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
                        port->mac_stats_nb_supported++;

        port->mac_stats_update_generation = 0;

        if (port->mac_stats_update_period_ms != 0) {
                /*
                 * Update MAC stats using periodic DMA;
                 * any positive update interval different from
                 * 1000 ms can be set only on SFN8xxx provided
                 * that FW version is 6.2.1.1033 or higher
                 */
                sfc_log_init(sa, "request MAC stats DMA'ing");
                rc = efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
                                            port->mac_stats_update_period_ms,
                                            B_FALSE);
                if (rc == 0) {
                        port->mac_stats_periodic_dma_supported = B_TRUE;
                } else if (rc == EOPNOTSUPP) {
                        port->mac_stats_periodic_dma_supported = B_FALSE;
                        port->mac_stats_last_request_timestamp = 0;
                } else {
                        goto fail_mac_stats_periodic;
                }
        }

        if ((port->mac_stats_update_period_ms != 0) &&
            port->mac_stats_periodic_dma_supported) {
                /*
                 * Request an explicit MAC stats upload immediately to
                 * avoid reading back bogus figures if the user requests
                 * statistics before periodic DMA has actually started.
                 */
                rc = efx_mac_stats_upload(sa->nic, &port->mac_stats_dma_mem);
                if (rc != 0)
                        goto fail_mac_stats_upload;
        }

        sfc_log_init(sa, "disable MAC drain");
        rc = efx_mac_drain(sa->nic, B_FALSE);
        if (rc != 0)
                goto fail_mac_drain;

        /* Synchronize link status knowledge */
        rc = sfc_port_init_dev_link(sa);
        if (rc != 0)
                goto fail_port_init_dev_link;

        sfc_log_init(sa, "done");
        return 0;

fail_port_init_dev_link:
        (void)efx_mac_drain(sa->nic, B_TRUE);

fail_mac_drain:
fail_mac_stats_upload:
        (void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
                                     0, B_FALSE);

fail_mac_stats_periodic:
fail_mcast_address_list_set:
fail_mac_filter_set:
fail_mac_addr_set:
fail_mac_pdu_set:
fail_phy_adv_cap_set:
fail_mac_fcntl_set:
#if EFSYS_OPT_LOOPBACK
fail_loopback_set:
#endif
        efx_port_fini(sa->nic);

fail_port_init:
        efx_filter_fini(sa->nic);

fail_filter_init:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_port_stop(struct sfc_adapter *sa)
{
        sfc_log_init(sa, "entry");

        efx_mac_drain(sa->nic, B_TRUE);

        (void)efx_mac_stats_periodic(sa->nic, &sa->port.mac_stats_dma_mem,
                                     0, B_FALSE);

        efx_port_fini(sa->nic);
        efx_filter_fini(sa->nic);

        sfc_log_init(sa, "done");
}

int
sfc_port_configure(struct sfc_adapter *sa)
{
        const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
        struct sfc_port *port = &sa->port;
        const struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;

        sfc_log_init(sa, "entry");

        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
                port->pdu = rxmode->max_rx_pkt_len;
        else
                port->pdu = EFX_MAC_PDU(dev_data->mtu);

        return 0;
}

void
sfc_port_close(struct sfc_adapter *sa)
{
        sfc_log_init(sa, "entry");
}

int
sfc_port_attach(struct sfc_adapter *sa)
{
        struct sfc_port *port = &sa->port;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        const struct rte_ether_addr *from;
        uint32_t mac_nstats;
        size_t mac_stats_size;
        long kvarg_stats_update_period_ms;
        int rc;

        sfc_log_init(sa, "entry");

        efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_PERM, &port->phy_adv_cap_mask);

        /* Enable flow control by default */
        port->flow_ctrl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
        port->flow_ctrl_autoneg = B_TRUE;

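        /*
         * Use the MAC address reported by the NIC as the default
         * address; the build-time check below guarantees it fits
         * struct rte_ether_addr.
         */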
        RTE_BUILD_BUG_ON(sizeof(encp->enc_mac_addr) != sizeof(*from));
        from = (const struct rte_ether_addr *)(encp->enc_mac_addr);
        rte_ether_addr_copy(from, &port->default_mac_addr);

        port->max_mcast_addrs = EFX_MAC_MULTICAST_LIST_MAX;
        port->nb_mcast_addrs = 0;
        port->mcast_addrs = rte_calloc_socket("mcast_addr_list_buf",
                                              port->max_mcast_addrs,
                                              EFX_MAC_ADDR_LEN, 0,
                                              sa->socket_id);
        if (port->mcast_addrs == NULL) {
                rc = ENOMEM;
                goto fail_mcast_addr_list_buf_alloc;
        }

        rte_spinlock_init(&port->mac_stats_lock);

        rc = ENOMEM;
        port->mac_stats_buf = rte_calloc_socket("mac_stats_buf", EFX_MAC_NSTATS,
                                                sizeof(uint64_t), 0,
                                                sa->socket_id);
        if (port->mac_stats_buf == NULL)
                goto fail_mac_stats_buf_alloc;

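        /*
         * Allocate DMA-mapped memory for MAC statistics, rounded up to
         * the NIC DMA buffer granularity (EFX_BUF_SIZE).
         */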
        mac_nstats = efx_nic_cfg_get(sa->nic)->enc_mac_stats_nstats;
        mac_stats_size = RTE_ALIGN(mac_nstats * sizeof(uint64_t), EFX_BUF_SIZE);
        rc = sfc_dma_alloc(sa, "mac_stats", 0, mac_stats_size,
                           sa->socket_id, &port->mac_stats_dma_mem);
        if (rc != 0)
                goto fail_mac_stats_dma_alloc;

        port->mac_stats_reset_pending = B_FALSE;

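        /*
         * Pick up the optional statistics update period device argument;
         * it defaults to 1000 ms and must fit into a uint16_t.
         */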
        kvarg_stats_update_period_ms = SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF;

        rc = sfc_kvargs_process(sa, SFC_KVARG_STATS_UPDATE_PERIOD_MS,
                                sfc_kvarg_long_handler,
                                &kvarg_stats_update_period_ms);
        if ((rc == 0) &&
            ((kvarg_stats_update_period_ms < 0) ||
             (kvarg_stats_update_period_ms > UINT16_MAX))) {
                sfc_err(sa, "wrong '" SFC_KVARG_STATS_UPDATE_PERIOD_MS "' "
                            "was set (%ld);", kvarg_stats_update_period_ms);
                sfc_err(sa, "it must not be less than 0 "
                            "or greater than %" PRIu16, UINT16_MAX);
                rc = EINVAL;
                goto fail_kvarg_stats_update_period_ms;
        } else if (rc != 0) {
                goto fail_kvarg_stats_update_period_ms;
        }

        port->mac_stats_update_period_ms = kvarg_stats_update_period_ms;

        sfc_log_init(sa, "done");
        return 0;

fail_kvarg_stats_update_period_ms:
        sfc_dma_free(sa, &port->mac_stats_dma_mem);

fail_mac_stats_dma_alloc:
        rte_free(port->mac_stats_buf);

fail_mac_stats_buf_alloc:
        rte_free(port->mcast_addrs);

fail_mcast_addr_list_buf_alloc:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_port_detach(struct sfc_adapter *sa)
{
        struct sfc_port *port = &sa->port;

        sfc_log_init(sa, "entry");

        sfc_dma_free(sa, &port->mac_stats_dma_mem);
        rte_free(port->mac_stats_buf);

        rte_free(port->mcast_addrs);

        sfc_log_init(sa, "done");
}

static boolean_t
sfc_get_requested_all_ucast(struct sfc_port *port)
{
        return port->promisc;
}

static boolean_t
sfc_get_requested_all_mcast(struct sfc_port *port)
{
        return port->promisc || port->allmulti;
}

int
sfc_set_rx_mode_unchecked(struct sfc_adapter *sa)
{
        struct sfc_port *port = &sa->port;
        boolean_t requested_all_ucast = sfc_get_requested_all_ucast(port);
        boolean_t requested_all_mcast = sfc_get_requested_all_mcast(port);
        int rc;

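        /*
         * Unknown unicast/multicast acceptance follows the promiscuous
         * and all-multicast requests; the remaining MAC filtering
         * (multicast list, broadcast) stays enabled.
         */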
        rc = efx_mac_filter_set(sa->nic, requested_all_ucast, B_TRUE,
                                requested_all_mcast, B_TRUE);
        if (rc != 0)
                return rc;

        return 0;
}

int
sfc_set_rx_mode(struct sfc_adapter *sa)
{
        struct sfc_port *port = &sa->port;
        boolean_t old_all_ucast;
        boolean_t old_all_mcast;
        boolean_t requested_all_ucast = sfc_get_requested_all_ucast(port);
        boolean_t requested_all_mcast = sfc_get_requested_all_mcast(port);
        boolean_t actual_all_ucast;
        boolean_t actual_all_mcast;
        int rc;

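        /*
         * Remember the current filter settings so they can be restored
         * if the new request is not honoured in full.
         */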
        efx_mac_filter_get_all_ucast_mcast(sa->nic, &old_all_ucast,
                                           &old_all_mcast);

        rc = sfc_set_rx_mode_unchecked(sa);
        if (rc != 0)
                return rc;

        efx_mac_filter_get_all_ucast_mcast(sa->nic, &actual_all_ucast,
                                           &actual_all_mcast);

        if (actual_all_ucast != requested_all_ucast ||
            actual_all_mcast != requested_all_mcast) {
                /*
                 * MAC filter set succeeded but not all requested modes
                 * were applied. The rollback is necessary to bring back the
                 * consistent old state.
                 */
                (void)efx_mac_filter_set(sa->nic, old_all_ucast, B_TRUE,
                                         old_all_mcast, B_TRUE);

                return EPERM;
        }

        return 0;
}

void
sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
                           struct rte_eth_link *link_info)
{
        SFC_ASSERT(link_mode < EFX_LINK_NMODES);

        memset(link_info, 0, sizeof(*link_info));
        if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
                link_info->link_status = ETH_LINK_DOWN;
        else
                link_info->link_status = ETH_LINK_UP;

        switch (link_mode) {
        case EFX_LINK_10HDX:
                link_info->link_speed = ETH_SPEED_NUM_10M;
                link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        case EFX_LINK_10FDX:
                link_info->link_speed = ETH_SPEED_NUM_10M;
                link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case EFX_LINK_100HDX:
                link_info->link_speed = ETH_SPEED_NUM_100M;
                link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        case EFX_LINK_100FDX:
                link_info->link_speed = ETH_SPEED_NUM_100M;
                link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case EFX_LINK_1000HDX:
                link_info->link_speed = ETH_SPEED_NUM_1G;
                link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        case EFX_LINK_1000FDX:
                link_info->link_speed = ETH_SPEED_NUM_1G;
                link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case EFX_LINK_10000FDX:
                link_info->link_speed = ETH_SPEED_NUM_10G;
                link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case EFX_LINK_25000FDX:
                link_info->link_speed = ETH_SPEED_NUM_25G;
                link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case EFX_LINK_40000FDX:
                link_info->link_speed = ETH_SPEED_NUM_40G;
                link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case EFX_LINK_50000FDX:
                link_info->link_speed = ETH_SPEED_NUM_50G;
                link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case EFX_LINK_100000FDX:
                link_info->link_speed = ETH_SPEED_NUM_100G;
                link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        default:
                SFC_ASSERT(B_FALSE);
                /* FALLTHROUGH */
        case EFX_LINK_UNKNOWN:
        case EFX_LINK_DOWN:
                link_info->link_speed = ETH_SPEED_NUM_NONE;
                link_info->link_duplex = 0;
                break;
        }

        link_info->link_autoneg = ETH_LINK_AUTONEG;
}