/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_RIVERHEAD

	__checkReturn	efx_rc_t
rhead_board_cfg(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t end_padding;
	uint32_t bandwidth;
	efx_rc_t rc;

	if ((rc = efx_mcdi_nic_board_cfg(enp)) != 0)
		goto fail1;

	/*
	 * The tunnel encapsulation initialization happens unconditionally
	 * for now.
	 */
	encp->enc_tunnel_encapsulations_supported =
	    (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
	    (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
	    (1u << EFX_TUNNEL_PROTOCOL_NVGRE);

	/*
	 * Software limitation inherited from EF10. The limit is not raised
	 * here because the hardware does not report one; it enforces the
	 * limit internally, and adding a tunnel fails when there is no
	 * space for more UDP tunnels.
	 */
	encp->enc_tunnel_config_udp_entries_max = EFX_TUNNEL_MAXNENTRIES;

	encp->enc_clk_mult = 1; /* not used for Riverhead */

	/*
	 * FIXME There are TxSend and TxSeg descriptors on Riverhead.
	 * TxSeg is bigger than TxSend.
	 */
	encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_GZ_TX_SEND_LEN);
	/* No boundary crossing limits */
	encp->enc_tx_dma_desc_boundary = 0;

	/*
	 * Initialise design parameters to either a runtime value read from
	 * the design parameters area or the well-known default value
	 * (see SF-119689-TC section 4.4 for details).
	 * FIXME: Read design parameters area values.
	 */
	encp->enc_tx_tso_max_header_ndescs =
	    ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT;
	encp->enc_tx_tso_max_header_length =
	    ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
	encp->enc_tx_tso_max_payload_ndescs =
	    ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
	encp->enc_tx_tso_max_payload_length =
	    ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
	encp->enc_tx_tso_max_nframes =
	    ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;

	/*
	 * Riverhead does not restrict the TCP header offset for TSO sends.
	 */
	encp->enc_tx_tso_tcp_header_offset_limit = UINT32_MAX;

	/*
	 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
	 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
	 * resources (allocated to this PCIe function), which is zero until
	 * after we have allocated VIs.
	 */
	encp->enc_evq_limit = 1024;
	encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
	encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;

	encp->enc_buftbl_limit = UINT32_MAX;

	/*
	 * Riverhead event queue creation completes
	 * immediately (no initial event).
	 */
	encp->enc_evq_init_done_ev_supported = B_FALSE;

	/*
	 * Enable firmware workarounds for hardware errata.
	 * Expected responses are:
	 * - 0 (zero):
	 *	Success: workaround enabled or disabled as requested.
	 * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
	 *	Firmware does not support the MC_CMD_WORKAROUND request.
	 *	(assume that the workaround is not supported).
	 * - MC_CMD_ERR_ENOENT (reported as ENOENT):
	 *	Firmware does not support the requested workaround.
	 * - MC_CMD_ERR_EPERM (reported as EACCES):
	 *	Unprivileged function cannot enable/disable workarounds.
	 *
	 * See efx_mcdi_request_errcode() for MCDI error translations.
	 */

	/*
	 * The replay engine on Riverhead should suppress duplicate packets
	 * delivered to the same RxQ (e.g. when both an exact multicast
	 * filter and the all-multicast filter match).
	 */
	encp->enc_bug26807_workaround = B_FALSE;

	/*
	 * Checksums for TSO sends should always be correct on Riverhead.
	 * FIXME: revisit when TSO support is implemented.
	 */
	encp->enc_bug61297_workaround = B_FALSE;

	encp->enc_evq_max_nevs = RHEAD_EVQ_MAXNEVS;
	encp->enc_evq_min_nevs = RHEAD_EVQ_MINNEVS;
	encp->enc_rxq_max_ndescs = RHEAD_RXQ_MAXNDESCS;
	encp->enc_rxq_min_ndescs = RHEAD_RXQ_MINNDESCS;
	encp->enc_txq_max_ndescs = RHEAD_TXQ_MAXNDESCS;
	encp->enc_txq_min_ndescs = RHEAD_TXQ_MINNDESCS;

	/* Riverhead FW does not support event queue timers yet. */
	encp->enc_evq_timer_quantum_ns = 0;
	encp->enc_evq_timer_max_us = 0;
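	/*
	 * The extended width event descriptor size is only advertised when
	 * support for extended width events is compiled in.
	 */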
#if EFSYS_OPT_EV_EXTENDED_WIDTH
	encp->enc_ev_ew_desc_size = RHEAD_EVQ_EW_DESC_SIZE;
#else
	encp->enc_ev_ew_desc_size = 0;
#endif

	encp->enc_ev_desc_size = RHEAD_EVQ_DESC_SIZE;
	encp->enc_rx_desc_size = RHEAD_RXQ_DESC_SIZE;
	encp->enc_tx_desc_size = RHEAD_TXQ_DESC_SIZE;

	/* No required alignment for WPTR updates */
	encp->enc_rx_push_align = 1;

	/* Riverhead supports a single Rx prefix size. */
	encp->enc_rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN;

	/* Alignment for receive packet DMA buffers. */
	encp->enc_rx_buf_align_start = 1;

	/* Get the RX DMA end padding alignment configuration. */
	if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
		if (rc != EACCES)
			goto fail2;

		/* Assume largest tail padding size supported by hardware. */
		end_padding = 128;
	}
	encp->enc_rx_buf_align_end = end_padding;

	/* FIXME: It should be extracted from design parameters (Bug 86844) */
	encp->enc_rx_scatter_max = 7;

	/*
	 * Riverhead stores a single global copy of VPD, not per-PF as on
	 * Huntington.
	 */
	encp->enc_vpd_is_global = B_TRUE;

	rc = ef10_nic_get_port_mode_bandwidth(enp, &bandwidth);
	if (rc != 0)
		goto fail3;
	encp->enc_required_pcie_bandwidth_mbps = bandwidth;
	encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
rhead_nic_probe(
	__in		efx_nic_t *enp)
{
	const efx_nic_ops_t *enop = enp->en_enop;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp));

	/* Read and clear any assertion state */
	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
		goto fail1;

	/* Exit the assertion handler */
	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
		if (rc != EACCES)
			goto fail2;

	if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
		goto fail3;

	/* Get remaining controller-specific board config */
	if ((rc = enop->eno_board_cfg(enp)) != 0)
		goto fail4;

	/*
	 * Set default driver config limits (based on board config).
	 *
	 * FIXME: For now allocate a fixed number of VIs which is likely to be
	 * sufficient and small enough to allow multiple functions on the same
	 * port.
	 */
	edcp->edc_min_vi_count = edcp->edc_max_vi_count =
	    MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));

	/*
	 * The client driver must configure and enable PIO buffer support,
	 * but there is no PIO support on Riverhead anyway.
	 */
	edcp->edc_max_piobuf_count = 0;
	edcp->edc_pio_alloc_size = 0;

#if EFSYS_OPT_MAC_STATS
	/* Wipe the MAC statistics */
	if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
		goto fail5;
#endif

#if EFSYS_OPT_LOOPBACK
	if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
		goto fail6;
#endif

	return (0);

#if EFSYS_OPT_LOOPBACK
fail6:
	EFSYS_PROBE(fail6);
#endif
#if EFSYS_OPT_MAC_STATS
fail5:
	EFSYS_PROBE(fail5);
#endif
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
rhead_nic_set_drv_limits(
	__inout		efx_nic_t *enp,
	__in		efx_drv_limits_t *edlp)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
	uint32_t min_evq_count, max_evq_count;
	uint32_t min_rxq_count, max_rxq_count;
	uint32_t min_txq_count, max_txq_count;
	efx_rc_t rc;

	if (edlp == NULL) {
		rc = EINVAL;
		goto fail1;
	}

	/* Get minimum required and maximum usable VI limits */
	min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
	min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
	min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);

	edcp->edc_min_vi_count =
	    MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));

	max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
	max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
	max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);

	edcp->edc_max_vi_count =
	    MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));

	/* There is no PIO support on Riverhead */
	edcp->edc_max_piobuf_count = 0;
	edcp->edc_pio_alloc_size = 0;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
rhead_nic_reset(
	__in		efx_nic_t *enp)
{
	efx_rc_t rc;

	/* ef10_nic_reset() is called to recover from BADASSERT failures. */
	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
		goto fail1;
	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
		goto fail2;

	if ((rc = efx_mcdi_entity_reset(enp)) != 0)
		goto fail3;

	/* Clear RX/TX DMA queue errors */
	enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
rhead_nic_init(
	__in		efx_nic_t *enp)
{
	const efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
	uint32_t min_vi_count, max_vi_count;
	uint32_t vi_count, vi_base, vi_shift;
	uint32_t vi_window_size;
	efx_rc_t rc;
	boolean_t alloc_vadaptor = B_TRUE;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp));
	EFSYS_ASSERT3U(edcp->edc_max_piobuf_count, ==, 0);

	/* Enable reporting of some events (e.g. link change) */
	if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
		goto fail1;

	min_vi_count = edcp->edc_min_vi_count;
	max_vi_count = edcp->edc_max_vi_count;

	/* Ensure that the previously attached driver's VIs are freed */
	if ((rc = efx_mcdi_free_vis(enp)) != 0)
		goto fail2;

	/*
	 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
	 * fails then retrying the request for fewer VI resources may succeed.
	 */
	vi_count = 0;
	if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
	    &vi_base, &vi_count, &vi_shift)) != 0)
		goto fail3;

	EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);

	if (vi_count < min_vi_count) {
		rc = ENOMEM;
		goto fail4;
	}

	enp->en_arch.ef10.ena_vi_base = vi_base;
	enp->en_arch.ef10.ena_vi_count = vi_count;
	enp->en_arch.ef10.ena_vi_shift = vi_shift;

	EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
	    EFX_VI_WINDOW_SHIFT_INVALID);
	EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
	    EFX_VI_WINDOW_SHIFT_64K);
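	/* Derive the per-VI register window size from the configured shift. */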
	vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;

	/* Save UC memory mapping details */
	enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
	enp->en_arch.ef10.ena_uc_mem_map_size =
	    vi_window_size * enp->en_arch.ef10.ena_vi_count;

	/* No WC memory mapping since PIO is not supported */
	enp->en_arch.ef10.ena_pio_write_vi_base = 0;
	enp->en_arch.ef10.ena_wc_mem_map_offset = 0;
	enp->en_arch.ef10.ena_wc_mem_map_size = 0;

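	/* MCDI v2 commands support a larger maximum SDU length than v1. */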
	enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;

	/*
	 * For the SR-IOV use case, a vAdaptor is allocated for the PF and its
	 * associated VFs during NIC initialization, when the vSwitch is
	 * created and vPorts are allocated. Hence, skip vAdaptor allocation
	 * for EVB and update the vPort ID in the NIC structure with the one
	 * allocated for the PF.
	 */

	enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
#if EFSYS_OPT_EVB
	if ((enp->en_vswitchp != NULL) && (enp->en_vswitchp->ev_evcp != NULL)) {
		/* For EVB use vPort allocated on vSwitch */
		enp->en_vport_id = enp->en_vswitchp->ev_evcp->evc_vport_id;
		alloc_vadaptor = B_FALSE;
	}
#endif
	if (alloc_vadaptor != B_FALSE) {
		/* Allocate a vAdaptor attached to our upstream vPort/pPort */
		if ((rc = ef10_upstream_port_vadaptor_alloc(enp)) != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);

fail4:
	EFSYS_PROBE(fail4);

	(void) efx_mcdi_free_vis(enp);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
rhead_nic_get_vi_pool(
	__in		efx_nic_t *enp,
	__out		uint32_t *vi_countp)
{
	/*
	 * Report VIs that the client driver can use.
	 * Do not include VIs used for PIO buffer writes.
	 */
	*vi_countp = enp->en_arch.ef10.ena_vi_count;

	return (0);
}

	__checkReturn	efx_rc_t
rhead_nic_get_bar_region(
	__in		efx_nic_t *enp,
	__in		efx_nic_region_t region,
	__out		uint32_t *offsetp,
	__out		size_t *sizep)
{
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp));

	/*
	 * TODO: Specify host memory mapping alignment and granularity
	 * in efx_drv_limits_t so that they can be taken into account
	 * when allocating extra VIs for PIO writes.
	 */
	switch (region) {
	case EFX_REGION_VI:
		/* UC mapped memory BAR region for VI registers */
		*offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
		*sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
		break;

	case EFX_REGION_PIO_WRITE_VI:
		/* WC mapped memory BAR region for piobuf writes */
		*offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
		*sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
		break;

	default:
		rc = EINVAL;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	boolean_t
rhead_nic_hw_unavailable(
	__in		efx_nic_t *enp)
{
	efx_dword_t dword;

	if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
		return (B_TRUE);

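	/*
	 * A read of all-ones indicates that the hardware is no longer
	 * responding (e.g. it has been reset or removed).
	 */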
	EFX_BAR_FCW_READD(enp, ER_GZ_MC_SFT_STATUS, &dword);
	if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
		goto unavail;

	return (B_FALSE);

unavail:
	rhead_nic_set_hw_unavailable(enp);

	return (B_TRUE);
}

			void
rhead_nic_set_hw_unavailable(
	__in		efx_nic_t *enp)
{
	EFSYS_PROBE(hw_unavail);
	enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
}

			void
rhead_nic_fini(
	__in		efx_nic_t *enp)
{
	boolean_t do_vadaptor_free = B_TRUE;

#if EFSYS_OPT_EVB
	if (enp->en_vswitchp != NULL) {
		/*
		 * For SR-IOV the vAdaptor is freed with the vSwitch,
		 * so do not free it here.
		 */
		do_vadaptor_free = B_FALSE;
	}
#endif
	if (do_vadaptor_free != B_FALSE) {
		(void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
		enp->en_vport_id = EVB_PORT_ID_NULL;
	}

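	/*
	 * Release the VIs allocated in rhead_nic_init();
	 * errors are ignored during teardown.
	 */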
	(void) efx_mcdi_free_vis(enp);
	enp->en_arch.ef10.ena_vi_count = 0;
}

			void
rhead_nic_unprobe(
	__in		efx_nic_t *enp)
{
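	/* Detach the driver from the NIC; the result is ignored on unprobe. */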
	(void) efx_mcdi_drv_attach(enp, B_FALSE);
}

#if EFSYS_OPT_DIAG

	__checkReturn	efx_rc_t
rhead_nic_register_test(
	__in		efx_nic_t *enp)
{
	efx_rc_t rc;

	/* FIXME */
	_NOTE(ARGUNUSED(enp))
	_NOTE(CONSTANTCONDITION)
	if (B_FALSE) {
		rc = ENOTSUP;
		goto fail1;
	}
	/* FIXME */

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_DIAG */

	__checkReturn	efx_rc_t
rhead_nic_xilinx_cap_tbl_read_ef100_locator(
	__in		efsys_bar_t *esbp,
	__in		efsys_dma_addr_t offset,
	__out		efx_bar_region_t *ebrp)
{
	efx_oword_t entry;
	uint32_t rev;
	uint32_t len;
	efx_rc_t rc;

	/*
	 * The Xilinx Capabilities Table requires 32-bit aligned reads.
	 * See SF-119689-TC section 4.2.2 "Discovery Steps".
	 */
	EFSYS_BAR_READD(esbp, offset +
	    (EFX_LOW_BIT(ESF_GZ_CFGBAR_ENTRY_FORMAT) / 8),
	    &entry.eo_dword[0], B_FALSE);
	EFSYS_BAR_READD(esbp, offset +
	    (EFX_LOW_BIT(ESF_GZ_CFGBAR_ENTRY_SIZE) / 8),
	    &entry.eo_dword[1], B_FALSE);

	rev = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_ENTRY_REV);
	len = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_ENTRY_SIZE);

	if (rev != ESE_GZ_CFGBAR_ENTRY_REV_EF100 ||
	    len < ESE_GZ_CFGBAR_ENTRY_SIZE_EF100) {
		rc = EINVAL;
		goto fail1;
	}

	EFSYS_BAR_READD(esbp, offset +
	    (EFX_LOW_BIT(ESF_GZ_CFGBAR_EF100_BAR) / 8),
	    &entry.eo_dword[2], B_FALSE);

	ebrp->ebr_index = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_EF100_BAR);
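	/*
	 * The function control window offset field is stored right-shifted;
	 * shift it back to recover the byte offset within the BAR.
	 */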
	ebrp->ebr_offset = EFX_OWORD_FIELD32(entry,
	    ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF) <<
	    ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT;
	ebrp->ebr_type = EFX_BAR_TYPE_MEM;
	ebrp->ebr_length = 0;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_RIVERHEAD */