1 /* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2012-2019 Solarflare Communications Inc.
5 */
6
7 #include "efx.h"
8 #include "efx_impl.h"
9 #if EFSYS_OPT_MON_MCDI
10 #include "mcdi_mon.h"
11 #endif
12
13 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
14
15 #include "ef10_tlv_layout.h"
16
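/*
 * All MCDI helpers in this file follow the same request pattern: one
 * payload buffer (sized by EFX_MCDI_DECLARE_BUF for both the request
 * and the response) is used as emr_in_buf and emr_out_buf, the command
 * is issued with efx_mcdi_execute() (or the _quiet variant where a
 * failure is expected and should not be logged), emr_rc carries the
 * firmware result, and emr_out_length_used is validated before any
 * response field is read. An illustrative skeleton (MC_CMD_EXAMPLE is
 * not a real command) looks like:
 *
 *	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_EXAMPLE_IN_LEN,
 *	    MC_CMD_EXAMPLE_OUT_LEN);
 *
 *	req.emr_cmd = MC_CMD_EXAMPLE;
 *	req.emr_in_buf = payload;
 *	req.emr_in_length = MC_CMD_EXAMPLE_IN_LEN;
 *	req.emr_out_buf = payload;
 *	req.emr_out_length = MC_CMD_EXAMPLE_OUT_LEN;
 *
 *	efx_mcdi_execute(enp, &req);
 *	if (req.emr_rc != 0 ||
 *	    req.emr_out_length_used < MC_CMD_EXAMPLE_OUT_LEN)
 *		(handle the error)
 */
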
17 __checkReturn efx_rc_t
18 efx_mcdi_get_port_assignment(
19 __in efx_nic_t *enp,
20 __out uint32_t *portp)
21 {
22 efx_mcdi_req_t req;
23 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
24 MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
25 efx_rc_t rc;
26
27 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
28
29 req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
30 req.emr_in_buf = payload;
31 req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
32 req.emr_out_buf = payload;
33 req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
34
35 efx_mcdi_execute(enp, &req);
36
37 if (req.emr_rc != 0) {
38 rc = req.emr_rc;
39 goto fail1;
40 }
41
42 if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
43 rc = EMSGSIZE;
44 goto fail2;
45 }
46
47 *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
48
49 return (0);
50
51 fail2:
52 EFSYS_PROBE(fail2);
53 fail1:
54 EFSYS_PROBE1(fail1, efx_rc_t, rc);
55
56 return (rc);
57 }
58
59 __checkReturn efx_rc_t
60 efx_mcdi_get_port_modes(
61 __in efx_nic_t *enp,
62 __out uint32_t *modesp,
63 __out_opt uint32_t *current_modep,
64 __out_opt uint32_t *default_modep)
65 {
66 efx_mcdi_req_t req;
67 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN,
68 MC_CMD_GET_PORT_MODES_OUT_LEN);
69 efx_rc_t rc;
70
71 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
72
73 req.emr_cmd = MC_CMD_GET_PORT_MODES;
74 req.emr_in_buf = payload;
75 req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
76 req.emr_out_buf = payload;
77 req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
78
79 efx_mcdi_execute(enp, &req);
80
81 if (req.emr_rc != 0) {
82 rc = req.emr_rc;
83 goto fail1;
84 }
85
86 /*
87 * Require only Modes and DefaultMode fields, unless the current mode
88 * was requested (CurrentMode field was added for Medford).
89 */
90 if (req.emr_out_length_used <
91 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
92 rc = EMSGSIZE;
93 goto fail2;
94 }
95 if ((current_modep != NULL) && (req.emr_out_length_used <
96 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
97 rc = EMSGSIZE;
98 goto fail3;
99 }
100
101 *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
102
103 if (current_modep != NULL) {
104 *current_modep = MCDI_OUT_DWORD(req,
105 GET_PORT_MODES_OUT_CURRENT_MODE);
106 }
107
108 if (default_modep != NULL) {
109 *default_modep = MCDI_OUT_DWORD(req,
110 GET_PORT_MODES_OUT_DEFAULT_MODE);
111 }
112
113 return (0);
114
115 fail3:
116 EFSYS_PROBE(fail3);
117 fail2:
118 EFSYS_PROBE(fail2);
119 fail1:
120 EFSYS_PROBE1(fail1, efx_rc_t, rc);
121
122 return (rc);
123 }
124
125 __checkReturn efx_rc_t
126 ef10_nic_get_port_mode_bandwidth(
127 __in efx_nic_t *enp,
128 __out uint32_t *bandwidth_mbpsp)
129 {
130 uint32_t port_modes;
131 uint32_t current_mode;
132 efx_port_t *epp = &(enp->en_port);
133
134 uint32_t single_lane;
135 uint32_t dual_lane;
136 uint32_t quad_lane;
137 uint32_t bandwidth;
138 efx_rc_t rc;
139
140 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
141 &current_mode, NULL)) != 0) {
142 /* No port mode info available. */
143 goto fail1;
144 }
145
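/*
 * Derive per-lane-group bandwidth from the PHY capability mask: a
 * single lane counts as 25G (else 10G), a dual-lane group as 50G
 * (else 20G) and a quad-lane group as 100G (else 40G), depending on
 * which speed capabilities are advertised.
 */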
146 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_25000FDX))
147 single_lane = 25000;
148 else
149 single_lane = 10000;
150
151 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_50000FDX))
152 dual_lane = 50000;
153 else
154 dual_lane = 20000;
155
156 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_100000FDX))
157 quad_lane = 100000;
158 else
159 quad_lane = 40000;
160
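/*
 * The Medford-style TLV_PORT_MODE_<n>x<l>_<n>x<l> names encode how
 * many ports of how many lanes each of the two port groups carries
 * (NA meaning that group is unused), so the aggregate bandwidth is
 * the sum over all configured ports.
 */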
161 switch (current_mode) {
162 case TLV_PORT_MODE_1x1_NA: /* mode 0 */
163 bandwidth = single_lane;
164 break;
165 case TLV_PORT_MODE_1x2_NA: /* mode 10 */
166 case TLV_PORT_MODE_NA_1x2: /* mode 11 */
167 bandwidth = dual_lane;
168 break;
169 case TLV_PORT_MODE_1x1_1x1: /* mode 2 */
170 bandwidth = single_lane + single_lane;
171 break;
172 case TLV_PORT_MODE_4x1_NA: /* mode 4 */
173 case TLV_PORT_MODE_NA_4x1: /* mode 8 */
174 bandwidth = 4 * single_lane;
175 break;
176 case TLV_PORT_MODE_2x1_2x1: /* mode 5 */
177 bandwidth = (2 * single_lane) + (2 * single_lane);
178 break;
179 case TLV_PORT_MODE_1x2_1x2: /* mode 12 */
180 bandwidth = dual_lane + dual_lane;
181 break;
182 case TLV_PORT_MODE_1x2_2x1: /* mode 17 */
183 case TLV_PORT_MODE_2x1_1x2: /* mode 18 */
184 bandwidth = dual_lane + (2 * single_lane);
185 break;
186 /* Legacy Medford-only mode. Do not use (see bug63270) */
187 case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2: /* mode 9 */
188 bandwidth = 4 * single_lane;
189 break;
190 case TLV_PORT_MODE_1x4_NA: /* mode 1 */
191 case TLV_PORT_MODE_NA_1x4: /* mode 22 */
192 bandwidth = quad_lane;
193 break;
194 case TLV_PORT_MODE_2x2_NA: /* mode 13 */
195 case TLV_PORT_MODE_NA_2x2: /* mode 14 */
196 bandwidth = 2 * dual_lane;
197 break;
198 case TLV_PORT_MODE_1x4_2x1: /* mode 6 */
199 case TLV_PORT_MODE_2x1_1x4: /* mode 7 */
200 bandwidth = quad_lane + (2 * single_lane);
201 break;
202 case TLV_PORT_MODE_1x4_1x2: /* mode 15 */
203 case TLV_PORT_MODE_1x2_1x4: /* mode 16 */
204 bandwidth = quad_lane + dual_lane;
205 break;
206 case TLV_PORT_MODE_1x4_1x4: /* mode 3 */
207 bandwidth = quad_lane + quad_lane;
208 break;
209 default:
210 rc = EINVAL;
211 goto fail2;
212 }
213
214 *bandwidth_mbpsp = bandwidth;
215
216 return (0);
217
218 fail2:
219 EFSYS_PROBE(fail2);
220 fail1:
221 EFSYS_PROBE1(fail1, efx_rc_t, rc);
222
223 return (rc);
224 }
225
226 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
227
228 #if EFX_OPTS_EF10()
229
230 __checkReturn efx_rc_t
231 efx_mcdi_vadaptor_alloc(
232 __in efx_nic_t *enp,
233 __in uint32_t port_id)
234 {
235 efx_mcdi_req_t req;
236 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN,
237 MC_CMD_VADAPTOR_ALLOC_OUT_LEN);
238 efx_rc_t rc;
239
240 req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
241 req.emr_in_buf = payload;
242 req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
243 req.emr_out_buf = payload;
244 req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
245
246 MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
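/*
 * Request permission to change the MAC address later without removing
 * installed filters, when the datapath firmware advertises support for
 * this (see ef10_get_datapath_caps()).
 */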
247 MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
248 VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
249 enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
250
251 efx_mcdi_execute(enp, &req);
252
253 if (req.emr_rc != 0) {
254 rc = req.emr_rc;
255 goto fail1;
256 }
257
258 return (0);
259
260 fail1:
261 EFSYS_PROBE1(fail1, efx_rc_t, rc);
262
263 return (rc);
264 }
265
266 __checkReturn efx_rc_t
267 efx_mcdi_vadaptor_free(
268 __in efx_nic_t *enp,
269 __in uint32_t port_id)
270 {
271 efx_mcdi_req_t req;
272 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN,
273 MC_CMD_VADAPTOR_FREE_OUT_LEN);
274 efx_rc_t rc;
275
276 req.emr_cmd = MC_CMD_VADAPTOR_FREE;
277 req.emr_in_buf = payload;
278 req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
279 req.emr_out_buf = payload;
280 req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
281
282 MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
283
284 efx_mcdi_execute(enp, &req);
285
286 if (req.emr_rc != 0) {
287 rc = req.emr_rc;
288 goto fail1;
289 }
290
291 return (0);
292
293 fail1:
294 EFSYS_PROBE1(fail1, efx_rc_t, rc);
295
296 return (rc);
297 }
298
299 #endif /* EFX_OPTS_EF10() */
300
301 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
302
303 __checkReturn efx_rc_t
304 efx_mcdi_get_mac_address_pf(
305 __in efx_nic_t *enp,
306 __out_ecount_opt(6) uint8_t mac_addrp[6])
307 {
308 efx_mcdi_req_t req;
309 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
310 MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
311 efx_rc_t rc;
312
313 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
314
315 req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
316 req.emr_in_buf = payload;
317 req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
318 req.emr_out_buf = payload;
319 req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
320
321 efx_mcdi_execute(enp, &req);
322
323 if (req.emr_rc != 0) {
324 rc = req.emr_rc;
325 goto fail1;
326 }
327
328 if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
329 rc = EMSGSIZE;
330 goto fail2;
331 }
332
333 if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
334 rc = ENOENT;
335 goto fail3;
336 }
337
338 if (mac_addrp != NULL) {
339 uint8_t *addrp;
340
341 addrp = MCDI_OUT2(req, uint8_t,
342 GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
343
344 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
345 }
346
347 return (0);
348
349 fail3:
350 EFSYS_PROBE(fail3);
351 fail2:
352 EFSYS_PROBE(fail2);
353 fail1:
354 EFSYS_PROBE1(fail1, efx_rc_t, rc);
355
356 return (rc);
357 }
358
359 __checkReturn efx_rc_t
360 efx_mcdi_get_mac_address_vf(
361 __in efx_nic_t *enp,
362 __out_ecount_opt(6) uint8_t mac_addrp[6])
363 {
364 efx_mcdi_req_t req;
365 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
366 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
367 efx_rc_t rc;
368
369 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
370
371 req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
372 req.emr_in_buf = payload;
373 req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
374 req.emr_out_buf = payload;
375 req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
376
377 MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
378 EVB_PORT_ID_ASSIGNED);
379
380 efx_mcdi_execute(enp, &req);
381
382 if (req.emr_rc != 0) {
383 rc = req.emr_rc;
384 goto fail1;
385 }
386
387 if (req.emr_out_length_used <
388 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
389 rc = EMSGSIZE;
390 goto fail2;
391 }
392
393 if (MCDI_OUT_DWORD(req,
394 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
395 rc = ENOENT;
396 goto fail3;
397 }
398
399 if (mac_addrp != NULL) {
400 uint8_t *addrp;
401
402 addrp = MCDI_OUT2(req, uint8_t,
403 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
404
405 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
406 }
407
408 return (0);
409
410 fail3:
411 EFSYS_PROBE(fail3);
412 fail2:
413 EFSYS_PROBE(fail2);
414 fail1:
415 EFSYS_PROBE1(fail1, efx_rc_t, rc);
416
417 return (rc);
418 }
419
420 __checkReturn efx_rc_t
421 efx_mcdi_get_clock(
422 __in efx_nic_t *enp,
423 __out uint32_t *sys_freqp,
424 __out uint32_t *dpcpu_freqp)
425 {
426 efx_mcdi_req_t req;
427 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN,
428 MC_CMD_GET_CLOCK_OUT_LEN);
429 efx_rc_t rc;
430
431 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
432
433 req.emr_cmd = MC_CMD_GET_CLOCK;
434 req.emr_in_buf = payload;
435 req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
436 req.emr_out_buf = payload;
437 req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
438
439 efx_mcdi_execute(enp, &req);
440
441 if (req.emr_rc != 0) {
442 rc = req.emr_rc;
443 goto fail1;
444 }
445
446 if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
447 rc = EMSGSIZE;
448 goto fail2;
449 }
450
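/* Reject a response that reports either clock frequency as zero. */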
451 *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
452 if (*sys_freqp == 0) {
453 rc = EINVAL;
454 goto fail3;
455 }
456 *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
457 if (*dpcpu_freqp == 0) {
458 rc = EINVAL;
459 goto fail4;
460 }
461
462 return (0);
463
464 fail4:
465 EFSYS_PROBE(fail4);
466 fail3:
467 EFSYS_PROBE(fail3);
468 fail2:
469 EFSYS_PROBE(fail2);
470 fail1:
471 EFSYS_PROBE1(fail1, efx_rc_t, rc);
472
473 return (rc);
474 }
475
476 __checkReturn efx_rc_t
477 efx_mcdi_get_rxdp_config(
478 __in efx_nic_t *enp,
479 __out uint32_t *end_paddingp)
480 {
481 efx_mcdi_req_t req;
482 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN,
483 MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
484 uint32_t end_padding;
485 efx_rc_t rc;
486
487 req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
488 req.emr_in_buf = payload;
489 req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
490 req.emr_out_buf = payload;
491 req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
492
493 efx_mcdi_execute(enp, &req);
494 if (req.emr_rc != 0) {
495 rc = req.emr_rc;
496 goto fail1;
497 }
498
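/*
 * Translate the firmware's RX DMA end padding setting into a byte
 * count; the GET response reuses the MC_CMD_SET_RXDP_CONFIG pad
 * length enum values.
 */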
499 if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
500 GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
501 /* RX DMA end padding is disabled */
502 end_padding = 0;
503 } else {
504 switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
505 GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
506 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
507 end_padding = 64;
508 break;
509 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
510 end_padding = 128;
511 break;
512 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
513 end_padding = 256;
514 break;
515 default:
516 rc = ENOTSUP;
517 goto fail2;
518 }
519 }
520
521 *end_paddingp = end_padding;
522
523 return (0);
524
525 fail2:
526 EFSYS_PROBE(fail2);
527 fail1:
528 EFSYS_PROBE1(fail1, efx_rc_t, rc);
529
530 return (rc);
531 }
532
533 __checkReturn efx_rc_t
534 efx_mcdi_get_vector_cfg(
535 __in efx_nic_t *enp,
536 __out_opt uint32_t *vec_basep,
537 __out_opt uint32_t *pf_nvecp,
538 __out_opt uint32_t *vf_nvecp)
539 {
540 efx_mcdi_req_t req;
541 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN,
542 MC_CMD_GET_VECTOR_CFG_OUT_LEN);
543 efx_rc_t rc;
544
545 req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
546 req.emr_in_buf = payload;
547 req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
548 req.emr_out_buf = payload;
549 req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
550
551 efx_mcdi_execute(enp, &req);
552
553 if (req.emr_rc != 0) {
554 rc = req.emr_rc;
555 goto fail1;
556 }
557
558 if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
559 rc = EMSGSIZE;
560 goto fail2;
561 }
562
563 if (vec_basep != NULL)
564 *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
565 if (pf_nvecp != NULL)
566 *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
567 if (vf_nvecp != NULL)
568 *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
569
570 return (0);
571
572 fail2:
573 EFSYS_PROBE(fail2);
574 fail1:
575 EFSYS_PROBE1(fail1, efx_rc_t, rc);
576
577 return (rc);
578 }
579
580 __checkReturn efx_rc_t
581 efx_mcdi_alloc_vis(
582 __in efx_nic_t *enp,
583 __in uint32_t min_vi_count,
584 __in uint32_t max_vi_count,
585 __out uint32_t *vi_basep,
586 __out uint32_t *vi_countp,
587 __out uint32_t *vi_shiftp)
588 {
589 efx_mcdi_req_t req;
590 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN,
591 MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
592 efx_rc_t rc;
593
594 if (vi_countp == NULL) {
595 rc = EINVAL;
596 goto fail1;
597 }
598
599 req.emr_cmd = MC_CMD_ALLOC_VIS;
600 req.emr_in_buf = payload;
601 req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
602 req.emr_out_buf = payload;
603 req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
604
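/*
 * Ask the firmware for between min_vi_count and max_vi_count VIs;
 * the number actually allocated is reported back in vi_countp.
 */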
605 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
606 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
607
608 efx_mcdi_execute(enp, &req);
609
610 if (req.emr_rc != 0) {
611 rc = req.emr_rc;
612 goto fail2;
613 }
614
615 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
616 rc = EMSGSIZE;
617 goto fail3;
618 }
619
620 *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
621 *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
622
623 /* Report VI_SHIFT if available (always zero for Huntington) */
624 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
625 *vi_shiftp = 0;
626 else
627 *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
628
629 return (0);
630
631 fail3:
632 EFSYS_PROBE(fail3);
633 fail2:
634 EFSYS_PROBE(fail2);
635 fail1:
636 EFSYS_PROBE1(fail1, efx_rc_t, rc);
637
638 return (rc);
639 }
640
641
642 __checkReturn efx_rc_t
643 efx_mcdi_free_vis(
644 __in efx_nic_t *enp)
645 {
646 efx_mcdi_req_t req;
647 efx_rc_t rc;
648
649 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
650 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
651
652 req.emr_cmd = MC_CMD_FREE_VIS;
653 req.emr_in_buf = NULL;
654 req.emr_in_length = 0;
655 req.emr_out_buf = NULL;
656 req.emr_out_length = 0;
657
658 efx_mcdi_execute_quiet(enp, &req);
659
660 /* Ignore EALREADY (no allocated VIs, so nothing to free) */
661 if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
662 rc = req.emr_rc;
663 goto fail1;
664 }
665
666 return (0);
667
668 fail1:
669 EFSYS_PROBE1(fail1, efx_rc_t, rc);
670
671 return (rc);
672 }
673
674 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
675
676 #if EFX_OPTS_EF10()
677
678 static __checkReturn efx_rc_t
679 efx_mcdi_alloc_piobuf(
680 __in efx_nic_t *enp,
681 __out efx_piobuf_handle_t *handlep)
682 {
683 efx_mcdi_req_t req;
684 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN,
685 MC_CMD_ALLOC_PIOBUF_OUT_LEN);
686 efx_rc_t rc;
687
688 if (handlep == NULL) {
689 rc = EINVAL;
690 goto fail1;
691 }
692
693 req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
694 req.emr_in_buf = payload;
695 req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
696 req.emr_out_buf = payload;
697 req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
698
699 efx_mcdi_execute_quiet(enp, &req);
700
701 if (req.emr_rc != 0) {
702 rc = req.emr_rc;
703 goto fail2;
704 }
705
706 if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
707 rc = EMSGSIZE;
708 goto fail3;
709 }
710
711 *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
712
713 return (0);
714
715 fail3:
716 EFSYS_PROBE(fail3);
717 fail2:
718 EFSYS_PROBE(fail2);
719 fail1:
720 EFSYS_PROBE1(fail1, efx_rc_t, rc);
721
722 return (rc);
723 }
724
725 static __checkReturn efx_rc_t
726 efx_mcdi_free_piobuf(
727 __in efx_nic_t *enp,
728 __in efx_piobuf_handle_t handle)
729 {
730 efx_mcdi_req_t req;
731 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN,
732 MC_CMD_FREE_PIOBUF_OUT_LEN);
733 efx_rc_t rc;
734
735 req.emr_cmd = MC_CMD_FREE_PIOBUF;
736 req.emr_in_buf = payload;
737 req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
738 req.emr_out_buf = payload;
739 req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
740
741 MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
742
743 efx_mcdi_execute_quiet(enp, &req);
744
745 if (req.emr_rc != 0) {
746 rc = req.emr_rc;
747 goto fail1;
748 }
749
750 return (0);
751
752 fail1:
753 EFSYS_PROBE1(fail1, efx_rc_t, rc);
754
755 return (rc);
756 }
757
758 static __checkReturn efx_rc_t
759 efx_mcdi_link_piobuf(
760 __in efx_nic_t *enp,
761 __in uint32_t vi_index,
762 __in efx_piobuf_handle_t handle)
763 {
764 efx_mcdi_req_t req;
765 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN,
766 MC_CMD_LINK_PIOBUF_OUT_LEN);
767 efx_rc_t rc;
768
769 req.emr_cmd = MC_CMD_LINK_PIOBUF;
770 req.emr_in_buf = payload;
771 req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
772 req.emr_out_buf = payload;
773 req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
774
775 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
776 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
777
778 efx_mcdi_execute(enp, &req);
779
780 if (req.emr_rc != 0) {
781 rc = req.emr_rc;
782 goto fail1;
783 }
784
785 return (0);
786
787 fail1:
788 EFSYS_PROBE1(fail1, efx_rc_t, rc);
789
790 return (rc);
791 }
792
793 static __checkReturn efx_rc_t
794 efx_mcdi_unlink_piobuf(
795 __in efx_nic_t *enp,
796 __in uint32_t vi_index)
797 {
798 efx_mcdi_req_t req;
799 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN,
800 MC_CMD_UNLINK_PIOBUF_OUT_LEN);
801 efx_rc_t rc;
802
803 req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
804 req.emr_in_buf = payload;
805 req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
806 req.emr_out_buf = payload;
807 req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
808
809 MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
810
811 efx_mcdi_execute_quiet(enp, &req);
812
813 if (req.emr_rc != 0) {
814 rc = req.emr_rc;
815 goto fail1;
816 }
817
818 return (0);
819
820 fail1:
821 EFSYS_PROBE1(fail1, efx_rc_t, rc);
822
823 return (rc);
824 }
825
826 static void
827 ef10_nic_alloc_piobufs(
828 __in efx_nic_t *enp,
829 __in uint32_t max_piobuf_count)
830 {
831 efx_piobuf_handle_t *handlep;
832 unsigned int i;
833
834 EFSYS_ASSERT3U(max_piobuf_count, <=,
835 EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
836
837 enp->en_arch.ef10.ena_piobuf_count = 0;
838
839 for (i = 0; i < max_piobuf_count; i++) {
840 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
841
842 if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
843 goto fail1;
844
845 enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
846 enp->en_arch.ef10.ena_piobuf_count++;
847 }
848
849 return;
850
851 fail1:
852 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
853 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
854
855 (void) efx_mcdi_free_piobuf(enp, *handlep);
856 *handlep = EFX_PIOBUF_HANDLE_INVALID;
857 }
858 enp->en_arch.ef10.ena_piobuf_count = 0;
859 }
860
861
862 static void
863 ef10_nic_free_piobufs(
864 __in efx_nic_t *enp)
865 {
866 efx_piobuf_handle_t *handlep;
867 unsigned int i;
868
869 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
870 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
871
872 (void) efx_mcdi_free_piobuf(enp, *handlep);
873 *handlep = EFX_PIOBUF_HANDLE_INVALID;
874 }
875 enp->en_arch.ef10.ena_piobuf_count = 0;
876 }
877
878 /* Sub-allocate a block from a piobuf */
879 __checkReturn efx_rc_t
880 ef10_nic_pio_alloc(
881 __inout efx_nic_t *enp,
882 __out uint32_t *bufnump,
883 __out efx_piobuf_handle_t *handlep,
884 __out uint32_t *blknump,
885 __out uint32_t *offsetp,
886 __out size_t *sizep)
887 {
888 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
889 efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
890 uint32_t blk_per_buf;
891 uint32_t buf, blk;
892 efx_rc_t rc;
893
894 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
895 EFSYS_ASSERT(bufnump);
896 EFSYS_ASSERT(handlep);
897 EFSYS_ASSERT(blknump);
898 EFSYS_ASSERT(offsetp);
899 EFSYS_ASSERT(sizep);
900
901 if ((edcp->edc_pio_alloc_size == 0) ||
902 (enp->en_arch.ef10.ena_piobuf_count == 0)) {
903 rc = ENOMEM;
904 goto fail1;
905 }
906 blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
907
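/*
 * Each piobuf has a 32-bit allocation map in which a set bit N means
 * block N is in use; scan for the first piobuf with a clear bit and
 * claim that block.
 */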
908 for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
909 uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
910
911 if (~(*map) == 0)
912 continue;
913
914 EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
915 for (blk = 0; blk < blk_per_buf; blk++) {
916 if ((*map & (1u << blk)) == 0) {
917 *map |= (1u << blk);
918 goto done;
919 }
920 }
921 }
922 rc = ENOMEM;
923 goto fail2;
924
925 done:
926 *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
927 *bufnump = buf;
928 *blknump = blk;
929 *sizep = edcp->edc_pio_alloc_size;
930 *offsetp = blk * (*sizep);
931
932 return (0);
933
934 fail2:
935 EFSYS_PROBE(fail2);
936 fail1:
937 EFSYS_PROBE1(fail1, efx_rc_t, rc);
938
939 return (rc);
940 }
941
942 /* Free a piobuf sub-allocated block */
943 __checkReturn efx_rc_t
944 ef10_nic_pio_free(
945 __inout efx_nic_t *enp,
946 __in uint32_t bufnum,
947 __in uint32_t blknum)
948 {
949 uint32_t *map;
950 efx_rc_t rc;
951
952 if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
953 (blknum >= (8 * sizeof (*map)))) {
954 rc = EINVAL;
955 goto fail1;
956 }
957
958 map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
959 if ((*map & (1u << blknum)) == 0) {
960 rc = ENOENT;
961 goto fail2;
962 }
963 *map &= ~(1u << blknum);
964
965 return (0);
966
967 fail2:
968 EFSYS_PROBE(fail2);
969 fail1:
970 EFSYS_PROBE1(fail1, efx_rc_t, rc);
971
972 return (rc);
973 }
974
975 __checkReturn efx_rc_t
976 ef10_nic_pio_link(
977 __inout efx_nic_t *enp,
978 __in uint32_t vi_index,
979 __in efx_piobuf_handle_t handle)
980 {
981 return (efx_mcdi_link_piobuf(enp, vi_index, handle));
982 }
983
984 __checkReturn efx_rc_t
985 ef10_nic_pio_unlink(
986 __inout efx_nic_t *enp,
987 __in uint32_t vi_index)
988 {
989 return (efx_mcdi_unlink_piobuf(enp, vi_index));
990 }
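
/*
 * Illustrative (hypothetical) usage of the PIO helpers above, assuming
 * the caller already owns a VI with index vi_index:
 *
 *	efx_piobuf_handle_t handle;
 *	uint32_t bufnum, blknum, offset;
 *	size_t size;
 *
 *	if (ef10_nic_pio_alloc(enp, &bufnum, &handle, &blknum,
 *	    &offset, &size) == 0) {
 *		(void) ef10_nic_pio_link(enp, vi_index, handle);
 *		(write packets through the PIO aperture)
 *		(void) ef10_nic_pio_unlink(enp, vi_index);
 *		(void) ef10_nic_pio_free(enp, bufnum, blknum);
 *	}
 */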
991
992 #endif /* EFX_OPTS_EF10() */
993
994 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
995
996 static __checkReturn efx_rc_t
997 ef10_mcdi_get_pf_count(
998 __in efx_nic_t *enp,
999 __out uint32_t *pf_countp)
1000 {
1001 efx_mcdi_req_t req;
1002 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN,
1003 MC_CMD_GET_PF_COUNT_OUT_LEN);
1004 efx_rc_t rc;
1005
1006 req.emr_cmd = MC_CMD_GET_PF_COUNT;
1007 req.emr_in_buf = payload;
1008 req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
1009 req.emr_out_buf = payload;
1010 req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
1011
1012 efx_mcdi_execute(enp, &req);
1013
1014 if (req.emr_rc != 0) {
1015 rc = req.emr_rc;
1016 goto fail1;
1017 }
1018
1019 if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
1020 rc = EMSGSIZE;
1021 goto fail2;
1022 }
1023
1024 *pf_countp = *MCDI_OUT(req, uint8_t,
1025 MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
1026
1027 EFSYS_ASSERT(*pf_countp != 0);
1028
1029 return (0);
1030
1031 fail2:
1032 EFSYS_PROBE(fail2);
1033 fail1:
1034 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1035
1036 return (rc);
1037 }
1038
1039 static __checkReturn efx_rc_t
1040 ef10_get_datapath_caps(
1041 __in efx_nic_t *enp)
1042 {
1043 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1044 efx_mcdi_req_t req;
1045 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
1046 MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
1047 efx_rc_t rc;
1048
1049 req.emr_cmd = MC_CMD_GET_CAPABILITIES;
1050 req.emr_in_buf = payload;
1051 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
1052 req.emr_out_buf = payload;
1053 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V7_OUT_LEN;
1054
1055 efx_mcdi_execute_quiet(enp, &req);
1056
1057 if (req.emr_rc != 0) {
1058 rc = req.emr_rc;
1059 goto fail1;
1060 }
1061
1062 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
1063 rc = EMSGSIZE;
1064 goto fail2;
1065 }
1066
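/*
 * Capability flag words were added incrementally: FLAGS1 is present in
 * the baseline GET_CAPABILITIES response, whereas FLAGS2 and FLAGS3
 * exist only in the V2 and V7 responses respectively. The helper
 * macros below therefore check emr_out_length_used before reading
 * them, since older firmware simply returns a shorter response.
 */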
1067 #define CAP_FLAGS1(_req, _flag) \
1068 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \
1069 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
1070
1071 #define CAP_FLAGS2(_req, _flag) \
1072 (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
1073 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
1074 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
1075
1076 #define CAP_FLAGS3(_req, _flag) \
1077 (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V7_OUT_LEN) && \
1078 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V7_OUT_FLAGS3) & \
1079 (1u << (MC_CMD_GET_CAPABILITIES_V7_OUT_ ## _flag ## _LBN))))
1080
1081 /* Check if RXDP firmware inserts 14 byte prefix */
1082 if (CAP_FLAGS1(req, RX_PREFIX_LEN_14))
1083 encp->enc_rx_prefix_size = 14;
1084 else
1085 encp->enc_rx_prefix_size = 0;
1086
1087 #if EFSYS_OPT_RX_SCALE
1088 /* Check if the firmware supports additional RSS modes */
1089 if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
1090 encp->enc_rx_scale_additional_modes_supported = B_TRUE;
1091 else
1092 encp->enc_rx_scale_additional_modes_supported = B_FALSE;
1093 #endif /* EFSYS_OPT_RX_SCALE */
1094
1095 /* Check if the firmware supports TSO */
1096 if (CAP_FLAGS1(req, TX_TSO))
1097 encp->enc_fw_assisted_tso_enabled = B_TRUE;
1098 else
1099 encp->enc_fw_assisted_tso_enabled = B_FALSE;
1100
1101 /* Check if the firmware supports FATSOv2 */
1102 if (CAP_FLAGS2(req, TX_TSO_V2)) {
1103 encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
1104 encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
1105 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
1106 } else {
1107 encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
1108 encp->enc_fw_assisted_tso_v2_n_contexts = 0;
1109 }
1110
1111 /* Check if the firmware supports FATSOv2 encap */
1112 if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
1113 encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
1114 else
1115 encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
1116
1117 /* Check if TSOv3 is supported */
1118 if (CAP_FLAGS2(req, TX_TSO_V3))
1119 encp->enc_tso_v3_enabled = B_TRUE;
1120 else
1121 encp->enc_tso_v3_enabled = B_FALSE;
1122
1123 /* Check if the firmware has vadapter/vport/vswitch support */
1124 if (CAP_FLAGS1(req, EVB))
1125 encp->enc_datapath_cap_evb = B_TRUE;
1126 else
1127 encp->enc_datapath_cap_evb = B_FALSE;
1128
1129 /* Check if the firmware supports vport reconfiguration */
1130 if (CAP_FLAGS1(req, VPORT_RECONFIGURE))
1131 encp->enc_vport_reconfigure_supported = B_TRUE;
1132 else
1133 encp->enc_vport_reconfigure_supported = B_FALSE;
1134
1135 /* Check if the firmware supports VLAN insertion */
1136 if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
1137 encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
1138 else
1139 encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
1140
1141 /* Check if the firmware supports RX event batching */
1142 if (CAP_FLAGS1(req, RX_BATCHING))
1143 encp->enc_rx_batching_enabled = B_TRUE;
1144 else
1145 encp->enc_rx_batching_enabled = B_FALSE;
1146
1147 /*
1148 * Even if batching isn't reported as supported, we may still get
1149 * batched events (see bug61153).
1150 */
1151 encp->enc_rx_batch_max = 16;
1152
1153 /* Check if the firmware supports disabling scatter on RXQs */
1154 if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
1155 encp->enc_rx_disable_scatter_supported = B_TRUE;
1156 else
1157 encp->enc_rx_disable_scatter_supported = B_FALSE;
1158
1159 /* No limit on maximum number of Rx scatter elements per packet. */
1160 encp->enc_rx_scatter_max = -1;
1161
1162 /* Check if the firmware supports packed stream mode */
1163 if (CAP_FLAGS1(req, RX_PACKED_STREAM))
1164 encp->enc_rx_packed_stream_supported = B_TRUE;
1165 else
1166 encp->enc_rx_packed_stream_supported = B_FALSE;
1167
1168 /*
1169 * Check if the firmware supports configurable buffer sizes
1170 * for packed stream mode (otherwise buffer size is 1Mbyte)
1171 */
1172 if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
1173 encp->enc_rx_var_packed_stream_supported = B_TRUE;
1174 else
1175 encp->enc_rx_var_packed_stream_supported = B_FALSE;
1176
1177 /* Check if the firmware supports equal stride super-buffer mode */
1178 if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
1179 encp->enc_rx_es_super_buffer_supported = B_TRUE;
1180 else
1181 encp->enc_rx_es_super_buffer_supported = B_FALSE;
1182
1183 /* Check if the firmware supports FW subvariant w/o Tx checksumming */
1184 if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
1185 encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
1186 else
1187 encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
1188
1189 /* Check if the firmware supports set mac with running filters */
1190 if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
1191 encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
1192 else
1193 encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
1194
1195 /*
1196 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
1197 * specifying which parameters to configure.
1198 */
1199 if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
1200 encp->enc_enhanced_set_mac_supported = B_TRUE;
1201 else
1202 encp->enc_enhanced_set_mac_supported = B_FALSE;
1203
1204 /*
1205 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
1206 * us to let the firmware choose the settings to use on an EVQ.
1207 */
1208 if (CAP_FLAGS2(req, INIT_EVQ_V2))
1209 encp->enc_init_evq_v2_supported = B_TRUE;
1210 else
1211 encp->enc_init_evq_v2_supported = B_FALSE;
1212
1213 /*
1214 * Check if firmware supports extended width event queues, which have
1215 * a different event descriptor layout.
1216 */
1217 if (CAP_FLAGS3(req, EXTENDED_WIDTH_EVQS_SUPPORTED))
1218 encp->enc_init_evq_extended_width_supported = B_TRUE;
1219 else
1220 encp->enc_init_evq_extended_width_supported = B_FALSE;
1221
1222 /*
1223 * Check if the NO_CONT_EV mode for RX events is supported.
1224 */
1225 if (CAP_FLAGS2(req, INIT_RXQ_NO_CONT_EV))
1226 encp->enc_no_cont_ev_mode_supported = B_TRUE;
1227 else
1228 encp->enc_no_cont_ev_mode_supported = B_FALSE;
1229
1230 /*
1231 * Check if the buffer size may (and must) be specified on INIT_RXQ.
1232 * It may always be specified to efx_rx_qcreate(), but is simply kept
1233 * internal to libefx if MCDI does not support it.
1234 */
1235 if (CAP_FLAGS2(req, INIT_RXQ_WITH_BUFFER_SIZE))
1236 encp->enc_init_rxq_with_buffer_size = B_TRUE;
1237 else
1238 encp->enc_init_rxq_with_buffer_size = B_FALSE;
1239
1240 /*
1241 * Check if firmware-verified NVRAM updates must be used.
1242 *
1243 * The firmware trusted installer requires all NVRAM updates to use
1244 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
1245 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
1246 * partition and report the result).
1247 */
1248 if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
1249 encp->enc_nvram_update_verify_result_supported = B_TRUE;
1250 else
1251 encp->enc_nvram_update_verify_result_supported = B_FALSE;
1252
1253 if (CAP_FLAGS2(req, NVRAM_UPDATE_POLL_VERIFY_RESULT))
1254 encp->enc_nvram_update_poll_verify_result_supported = B_TRUE;
1255 else
1256 encp->enc_nvram_update_poll_verify_result_supported = B_FALSE;
1257
1258 /*
1259 * Check if firmware update via the BUNDLE partition is supported
1260 */
1261 if (CAP_FLAGS2(req, BUNDLE_UPDATE))
1262 encp->enc_nvram_bundle_update_supported = B_TRUE;
1263 else
1264 encp->enc_nvram_bundle_update_supported = B_FALSE;
1265
1266 /*
1267 * Check if firmware provides packet memory and Rx datapath
1268 * counters.
1269 */
1270 if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
1271 encp->enc_pm_and_rxdp_counters = B_TRUE;
1272 else
1273 encp->enc_pm_and_rxdp_counters = B_FALSE;
1274
1275 /*
1276 * Check if the 40G MAC hardware is capable of reporting
1277 * statistics for Tx size bins.
1278 */
1279 if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
1280 encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
1281 else
1282 encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
1283
1284 /*
1285 * Check if firmware supports VXLAN and NVGRE tunnels.
1286 * The capability indicates Geneve protocol support as well.
1287 */
1288 if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
1289 encp->enc_tunnel_encapsulations_supported =
1290 (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
1291 (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
1292 (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
1293
1294 EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
1295 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
1296 encp->enc_tunnel_config_udp_entries_max =
1297 EFX_TUNNEL_MAXNENTRIES;
1298 } else {
1299 encp->enc_tunnel_config_udp_entries_max = 0;
1300 }
1301
1302 /*
1303 * Check if firmware reports the VI window mode.
1304 * Medford2 has a variable VI window size (8K, 16K or 64K).
1305 * Medford and Huntington have a fixed 8K VI window size.
1306 */
1307 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
1308 uint8_t mode =
1309 MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
1310
1311 switch (mode) {
1312 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
1313 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1314 break;
1315 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
1316 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
1317 break;
1318 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
1319 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
1320 break;
1321 default:
1322 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1323 break;
1324 }
1325 } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
1326 (enp->en_family == EFX_FAMILY_MEDFORD)) {
1327 /* Huntington and Medford have fixed 8K window size */
1328 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1329 } else {
1330 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1331 }
1332
1333 /* Check if firmware supports extended MAC stats. */
1334 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
1335 /* Extended stats buffer supported */
1336 encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
1337 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
1338 } else {
1339 /* Use Siena-compatible legacy MAC stats */
1340 encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
1341 }
1342
1343 if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
1344 encp->enc_fec_counters = B_TRUE;
1345 else
1346 encp->enc_fec_counters = B_FALSE;
1347
1348 /* Check if the firmware provides head-of-line blocking counters */
1349 if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
1350 encp->enc_hlb_counters = B_TRUE;
1351 else
1352 encp->enc_hlb_counters = B_FALSE;
1353
1354 #if EFSYS_OPT_RX_SCALE
1355 if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
1356 /* Only one exclusive RSS context is available per port. */
1357 encp->enc_rx_scale_max_exclusive_contexts = 1;
1358
1359 switch (enp->en_family) {
1360 case EFX_FAMILY_MEDFORD2:
1361 encp->enc_rx_scale_hash_alg_mask =
1362 (1U << EFX_RX_HASHALG_TOEPLITZ);
1363 break;
1364
1365 case EFX_FAMILY_MEDFORD:
1366 case EFX_FAMILY_HUNTINGTON:
1367 /*
1368 * The packed stream firmware variant uses a
1369 * non-standard hash computation: it XORs together
1370 * the source and destination IP addresses (or the
1371 * last four bytes in the case of IPv6) and uses
1372 * the resulting value as the input to a Toeplitz
1373 * hash.
1374 */
1375 encp->enc_rx_scale_hash_alg_mask =
1376 (1U << EFX_RX_HASHALG_PACKED_STREAM);
1377 break;
1378
1379 default:
1380 rc = EINVAL;
1381 goto fail3;
1382 }
1383
1384 /* Port numbers cannot contribute to the hash value */
1385 encp->enc_rx_scale_l4_hash_supported = B_FALSE;
1386 } else {
1387 /*
1388 * Maximum number of exclusive RSS contexts.
1389 * EF10 hardware supports 64 in total, but 6 are reserved
1390 * for shared contexts. They are a global resource so
1391 * not all may be available.
1392 */
1393 encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
1394
1395 encp->enc_rx_scale_hash_alg_mask =
1396 (1U << EFX_RX_HASHALG_TOEPLITZ);
1397
1398 /*
1399 * It is possible to use port numbers as
1400 * the input data for hash computation.
1401 */
1402 encp->enc_rx_scale_l4_hash_supported = B_TRUE;
1403 }
1404 #endif /* EFSYS_OPT_RX_SCALE */
1405
1406 /* Check if the firmware supports "FLAG" and "MARK" filter actions */
1407 if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
1408 encp->enc_filter_action_flag_supported = B_TRUE;
1409 else
1410 encp->enc_filter_action_flag_supported = B_FALSE;
1411
1412 if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
1413 encp->enc_filter_action_mark_supported = B_TRUE;
1414 else
1415 encp->enc_filter_action_mark_supported = B_FALSE;
1416
1417 /* Get maximum supported value for "MARK" filter action */
1418 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
1419 encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
1420 GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
1421 else
1422 encp->enc_filter_action_mark_max = 0;
1423
1424 #if EFSYS_OPT_MAE
1425 /*
1426 * Indicate support for MAE.
1427 * MAE is supported by Riverhead boards starting with R2, and also
1428 * requires firmware built with MAE support.
1429 */
1430 if (CAP_FLAGS3(req, MAE_SUPPORTED))
1431 encp->enc_mae_supported = B_TRUE;
1432 else
1433 encp->enc_mae_supported = B_FALSE;
1434 #else
1435 encp->enc_mae_supported = B_FALSE;
1436 #endif /* EFSYS_OPT_MAE */
1437
1438 #undef CAP_FLAGS1
1439 #undef CAP_FLAGS2
1440 #undef CAP_FLAGS3
1441
1442 return (0);
1443
1444 #if EFSYS_OPT_RX_SCALE
1445 fail3:
1446 EFSYS_PROBE(fail3);
1447 #endif /* EFSYS_OPT_RX_SCALE */
1448 fail2:
1449 EFSYS_PROBE(fail2);
1450 fail1:
1451 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1452
1453 return (rc);
1454 }
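
/*
 * The capability flags gathered above are consumed via the NIC config
 * rather than by re-issuing MCDI. For example (illustrative only), a
 * caller might gate TSO offload as follows:
 *
 *	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
 *
 *	if (encp->enc_fw_assisted_tso_v2_enabled)
 *		(set up FATSOv2 contexts)
 */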
1455
1456
1457 #define EF10_LEGACY_PF_PRIVILEGE_MASK \
1458 (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
1459 MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
1460 MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
1461 MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
1462 MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
1463 MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
1464 MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
1465 MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
1466 MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
1467 MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
1468 MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
1469
1470 #define EF10_LEGACY_VF_PRIVILEGE_MASK 0
1471
1472
1473 __checkReturn efx_rc_t
1474 ef10_get_privilege_mask(
1475 __in efx_nic_t *enp,
1476 __out uint32_t *maskp)
1477 {
1478 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1479 uint32_t mask;
1480 efx_rc_t rc;
1481
1482 if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
1483 &mask)) != 0) {
1484 if (rc != ENOTSUP)
1485 goto fail1;
1486
1487 /* Fallback for old firmware without privilege mask support */
1488 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1489 /* Assume PF has admin privilege */
1490 mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
1491 } else {
1492 /* VF is always unprivileged by default */
1493 mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
1494 }
1495 }
1496
1497 *maskp = mask;
1498
1499 return (0);
1500
1501 fail1:
1502 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1503
1504 return (rc);
1505 }
1506
1507
1508 #define EFX_EXT_PORT_MAX 4
1509 #define EFX_EXT_PORT_NA 0xFF
1510
1511 /*
1512 * Table of mapping schemes from port number to external number.
1513 *
1514 * Each port number ultimately corresponds to a connector: either as part of
1515 * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
1516 * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
1517 * "Salina"). In general:
1518 *
1519 * Port number (0-based)
1520 * |
1521 * port mapping (n:1)
1522 * |
1523 * v
1524 * External port number (1-based)
1525 * |
1526 * fixed (1:1) or cable assembly (1:m)
1527 * |
1528 * v
1529 * Connector
1530 *
1531 * The external numbering refers to the cages or magjacks on the board,
1532 * as visibly annotated on the board or back panel. This table describes
1533 * how to determine which external cage/magjack corresponds to the port
1534 * numbers used by the driver.
1535 *
1536 * The count of consecutive port numbers that map to each external number
1537 * is determined by the chip family and the current port mode.
1538 *
1539 * For the Huntington family, the current port mode cannot be discovered,
1540 * but a single mapping is used by all modes for a given chip variant,
1541 * so the mapping used is instead the last match in the table to the full
1542 * set of port modes to which the NIC can be configured. Therefore the
1543 * ordering of entries in the mapping table is significant.
1544 */
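/*
 * In each mapping entry below, base_port[i] is the lowest (zero-based)
 * port number carried by external port i + 1, and EFX_EXT_PORT_NA
 * marks an external port that is not used in those modes.
 */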
1545 static struct ef10_external_port_map_s {
1546 efx_family_t family;
1547 uint32_t modes_mask;
1548 uint8_t base_port[EFX_EXT_PORT_MAX];
1549 } __ef10_external_port_mappings[] = {
1550 /*
1551 * Modes used by Huntington family controllers where each port
1552 * number maps to a separate cage.
1553 * SFN7x22F (Torino):
1554 * port 0 -> cage 1
1555 * port 1 -> cage 2
1556 * SFN7xx4F (Pavia):
1557 * port 0 -> cage 1
1558 * port 1 -> cage 2
1559 * port 2 -> cage 3
1560 * port 3 -> cage 4
1561 */
1562 {
1563 EFX_FAMILY_HUNTINGTON,
1564 (1U << TLV_PORT_MODE_10G) | /* mode 0 */
1565 (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */
1566 (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */
1567 { 0, 1, 2, 3 }
1568 },
1569 /*
1570 * Modes which for Huntington identify a chip variant where 2
1571 * adjacent port numbers map to each cage.
1572 * SFN7x42Q (Monza):
1573 * port 0 -> cage 1
1574 * port 1 -> cage 1
1575 * port 2 -> cage 2
1576 * port 3 -> cage 2
1577 */
1578 {
1579 EFX_FAMILY_HUNTINGTON,
1580 (1U << TLV_PORT_MODE_40G) | /* mode 1 */
1581 (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
1582 (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
1583 (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */
1584 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1585 },
1586 /*
1587 * Modes that on Medford allocate each port number to a separate
1588 * cage.
1589 * port 0 -> cage 1
1590 * port 1 -> cage 2
1591 * port 2 -> cage 3
1592 * port 3 -> cage 4
1593 */
1594 {
1595 EFX_FAMILY_MEDFORD,
1596 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
1597 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
1598 (1U << TLV_PORT_MODE_1x1_1x1), /* mode 2 */
1599 { 0, 1, 2, 3 }
1600 },
1601 /*
1602 * Modes that on Medford allocate 2 adjacent port numbers to each
1603 * cage.
1604 * port 0 -> cage 1
1605 * port 1 -> cage 1
1606 * port 2 -> cage 2
1607 * port 3 -> cage 2
1608 */
1609 {
1610 EFX_FAMILY_MEDFORD,
1611 (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
1612 (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 5 */
1613 (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */
1614 (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
1615 /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
1616 (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */
1617 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1618 },
1619 /*
1620 * Modes that on Medford allocate 4 adjacent port numbers to
1621 * cage 1.
1622 * port 0 -> cage 1
1623 * port 1 -> cage 1
1624 * port 2 -> cage 1
1625 * port 3 -> cage 1
1626 */
1627 {
1628 EFX_FAMILY_MEDFORD,
1629 /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
1630 (1U << TLV_PORT_MODE_4x1_NA), /* mode 4 */
1631 { 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1632 },
1633 /*
1634 * Modes that on Medford allocate 4 adjacent port numbers to
1635 * cage 2.
1636 * port 0 -> cage 2
1637 * port 1 -> cage 2
1638 * port 2 -> cage 2
1639 * port 3 -> cage 2
1640 */
1641 {
1642 EFX_FAMILY_MEDFORD,
1643 (1U << TLV_PORT_MODE_NA_4x1), /* mode 8 */
1644 { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1645 },
1646 /*
1647 * Modes that on Medford2 allocate each port number to a separate
1648 * cage.
1649 * port 0 -> cage 1
1650 * port 1 -> cage 2
1651 * port 2 -> cage 3
1652 * port 3 -> cage 4
1653 */
1654 {
1655 EFX_FAMILY_MEDFORD2,
1656 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
1657 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
1658 (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */
1659 (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
1660 (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */
1661 (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */
1662 (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */
1663 (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */
1664 { 0, 1, 2, 3 }
1665 },
1666 /*
1667 * Modes that on Medford2 allocate 1 port to cage 1 and the rest
1668 * to cage 2.
1669 * port 0 -> cage 1
1670 * port 1 -> cage 2
1671 * port 2 -> cage 2
1672 */
1673 {
1674 EFX_FAMILY_MEDFORD2,
1675 (1U << TLV_PORT_MODE_1x2_2x1) | /* mode 17 */
1676 (1U << TLV_PORT_MODE_1x4_2x1), /* mode 6 */
1677 { 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1678 },
1679 /*
1680 * Modes that on Medford2 allocate 2 adjacent port numbers to cage 1
1681 * and the rest to cage 2.
1682 * port 0 -> cage 1
1683 * port 1 -> cage 1
1684 * port 2 -> cage 2
1685 * port 3 -> cage 2
1686 */
1687 {
1688 EFX_FAMILY_MEDFORD2,
1689 (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */
1690 (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
1691 (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */
1692 (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */
1693 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1694 },
1695 /*
1696 * Modes that on Medford2 allocate up to 4 adjacent port numbers
1697 * to cage 1.
1698 * port 0 -> cage 1
1699 * port 1 -> cage 1
1700 * port 2 -> cage 1
1701 * port 3 -> cage 1
1702 */
1703 {
1704 EFX_FAMILY_MEDFORD2,
1705 (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */
1706 { 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1707 },
1708 /*
1709 * Modes that on Medford2 allocate up to 4 adjacent port numbers
1710 * to cage 2.
1711 * port 0 -> cage 2
1712 * port 1 -> cage 2
1713 * port 2 -> cage 2
1714 * port 3 -> cage 2
1715 */
1716 {
1717 EFX_FAMILY_MEDFORD2,
1718 (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */
1719 (1U << TLV_PORT_MODE_NA_1x2) | /* mode 11 */
1720 (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */
1721 { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1722 },
1723 /*
1724 * Modes that on Riverhead allocate each port number to a separate
1725 * cage.
1726 * port 0 -> cage 1
1727 * port 1 -> cage 2
1728 */
1729 {
1730 EFX_FAMILY_RIVERHEAD,
1731 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
1732 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
1733 (1U << TLV_PORT_MODE_1x1_1x1), /* mode 2 */
1734 { 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1735 },
1736 };
1737
1738 static __checkReturn efx_rc_t
1739 ef10_external_port_mapping(
1740 __in efx_nic_t *enp,
1741 __in uint32_t port,
1742 __out uint8_t *external_portp)
1743 {
1744 efx_rc_t rc;
1745 int i;
1746 uint32_t port_modes;
1747 uint32_t matches;
1748 uint32_t current;
1749 struct ef10_external_port_map_s *mapp = NULL;
1750 int ext_index = port; /* Default 1-1 mapping */
1751
1752 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current,
1753 NULL)) != 0) {
1754 /*
1755 * No current port mode information (i.e. Huntington)
1756 * - infer mapping from available modes
1757 */
1758 if ((rc = efx_mcdi_get_port_modes(enp,
1759 &port_modes, NULL, NULL)) != 0) {
1760 /*
1761 * No port mode information available
1762 * - use default mapping
1763 */
1764 goto out;
1765 }
1766 } else {
1767 /* Only need to scan the current mode */
1768 port_modes = 1 << current;
1769 }
1770
1771 /*
1772 * Infer the internal port -> external number mapping from
1773 * the possible port modes for this NIC.
1774 */
1775 for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
1776 struct ef10_external_port_map_s *eepmp =
1777 &__ef10_external_port_mappings[i];
1778 if (eepmp->family != enp->en_family)
1779 continue;
1780 matches = (eepmp->modes_mask & port_modes);
1781 if (matches != 0) {
1782 /*
1783 * Some modes match. For some Huntington boards
1784 * there will be multiple matches. The mapping on the
1785 * last match is used.
1786 */
1787 mapp = eepmp;
1788 port_modes &= ~matches;
1789 }
1790 }
1791
1792 if (port_modes != 0) {
1793 /* Some advertised modes are not supported */
1794 rc = ENOTSUP;
1795 goto fail1;
1796 }
1797
1798 out:
1799 if (mapp != NULL) {
1800 /*
1801 * External ports are assigned a sequence of consecutive
1802 * port numbers, so find the one with the closest base_port.
1803 */
1804 uint32_t delta = EFX_EXT_PORT_NA;
1805
1806 for (i = 0; i < EFX_EXT_PORT_MAX; i++) {
1807 uint32_t base = mapp->base_port[i];
1808 if ((base != EFX_EXT_PORT_NA) && (base <= port)) {
1809 if ((port - base) < delta) {
1810 delta = (port - base);
1811 ext_index = i;
1812 }
1813 }
1814 }
1815 }
1816 *external_portp = (uint8_t)(ext_index + 1);
1817
1818 return (0);
1819
1820 fail1:
1821 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1822
1823 return (rc);
1824 }
1825
1826 __checkReturn efx_rc_t
1827 efx_mcdi_nic_board_cfg(
1828 __in efx_nic_t *enp)
1829 {
1830 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
1831 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1832 ef10_link_state_t els;
1833 efx_port_t *epp = &(enp->en_port);
1834 uint32_t board_type = 0;
1835 uint32_t base, nvec;
1836 uint32_t port;
1837 uint32_t mask;
1838 uint32_t pf;
1839 uint32_t vf;
1840 uint8_t mac_addr[6] = { 0 };
1841 efx_rc_t rc;
1842
1843 /* Get the (zero-based) MCDI port number */
1844 if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
1845 goto fail1;
1846
1847 /* EFX MCDI interface uses one-based port numbers */
1848 emip->emi_port = port + 1;
1849
1850 encp->enc_assigned_port = port;
1851
1852 if ((rc = ef10_external_port_mapping(enp, port,
1853 &encp->enc_external_port)) != 0)
1854 goto fail2;
1855
1856 /*
1857 * Get PCIe function number from firmware (used for
1858 * per-function privilege and dynamic config info).
1859 * - PCIe PF: pf = PF number, vf = 0xffff.
1860 * - PCIe VF: pf = parent PF, vf = VF number.
1861 */
1862 if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
1863 goto fail3;
1864
1865 encp->enc_pf = pf;
1866 encp->enc_vf = vf;
1867
1868 if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
1869 goto fail4;
1870
1871 /* MAC address for this function */
1872 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1873 rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
1874 #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
1875 /*
1876 * Disable static config checking, ONLY for manufacturing test
1877 * and setup at the factory, to allow the static config to be
1878 * installed.
1879 */
1880 #else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
1881 if ((rc == 0) && (mac_addr[0] & 0x02)) {
1882 /*
1883 * If the static config does not include a global MAC
1884 * address pool then the board may return a locally
1885 * administered MAC address (this should only happen on
1886 * incorrectly programmed boards).
1887 */
1888 rc = EINVAL;
1889 }
1890 #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
1891 } else {
1892 rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
1893 }
1894 if (rc != 0)
1895 goto fail5;
1896
1897 EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
1898
1899 /* Board configuration (legacy) */
1900 rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
1901 if (rc != 0) {
1902 /* Unprivileged functions may not be able to read board cfg */
1903 if (rc == EACCES)
1904 board_type = 0;
1905 else
1906 goto fail6;
1907 }
1908
1909 encp->enc_board_type = board_type;
1910
1911 /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
1912 if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
1913 goto fail7;
1914
1915 /*
1916 * Firmware with support for *_FEC capability bits does not
1917 * report that the corresponding *_FEC_REQUESTED bits are supported.
1918 * Add them here so that drivers understand that they are supported.
1919 */
1920 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC))
1921 epp->ep_phy_cap_mask |=
1922 (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED);
1923 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC))
1924 epp->ep_phy_cap_mask |=
1925 (1u << EFX_PHY_CAP_RS_FEC_REQUESTED);
1926 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC))
1927 epp->ep_phy_cap_mask |=
1928 (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);
1929
1930 /* Obtain the default PHY advertised capabilities */
1931 if ((rc = ef10_phy_get_link(enp, &els)) != 0)
1932 goto fail8;
1933 epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask;
1934 epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask;
1935
1936 /* Check capabilities of running datapath firmware */
1937 if ((rc = ef10_get_datapath_caps(enp)) != 0)
1938 goto fail9;
1939
1940 /* Get interrupt vector limits */
1941 if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
1942 if (EFX_PCI_FUNCTION_IS_PF(encp))
1943 goto fail10;
1944
1945 /* Ignore error (cannot query vector limits from a VF). */
1946 base = 0;
1947 nvec = 1024;
1948 }
1949 encp->enc_intr_vec_base = base;
1950 encp->enc_intr_limit = nvec;
1951
1952 /*
1953 * Get the current privilege mask. Note that this may be modified
1954 * dynamically, so this value is informational only. DO NOT use
1955 * the privilege mask to check for sufficient privileges, as that
1956 * can result in time-of-check/time-of-use bugs.
1957 */
1958 if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
1959 goto fail11;
1960 encp->enc_privilege_mask = mask;
1961
1962 return (0);
1963
1964 fail11:
1965 EFSYS_PROBE(fail11);
1966 fail10:
1967 EFSYS_PROBE(fail10);
1968 fail9:
1969 EFSYS_PROBE(fail9);
1970 fail8:
1971 EFSYS_PROBE(fail8);
1972 fail7:
1973 EFSYS_PROBE(fail7);
1974 fail6:
1975 EFSYS_PROBE(fail6);
1976 fail5:
1977 EFSYS_PROBE(fail5);
1978 fail4:
1979 EFSYS_PROBE(fail4);
1980 fail3:
1981 EFSYS_PROBE(fail3);
1982 fail2:
1983 EFSYS_PROBE(fail2);
1984 fail1:
1985 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1986
1987 return (rc);
1988 }
1989
1990 __checkReturn efx_rc_t
1991 efx_mcdi_entity_reset(
1992 __in efx_nic_t *enp)
1993 {
1994 efx_mcdi_req_t req;
1995 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
1996 MC_CMD_ENTITY_RESET_OUT_LEN);
1997 efx_rc_t rc;
1998
1999 req.emr_cmd = MC_CMD_ENTITY_RESET;
2000 req.emr_in_buf = payload;
2001 req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
2002 req.emr_out_buf = payload;
2003 req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
2004
2005 MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
2006 ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
2007
2008 efx_mcdi_execute(enp, &req);
2009
2010 if (req.emr_rc != 0) {
2011 rc = req.emr_rc;
2012 goto fail1;
2013 }
2014
2015 return (0);
2016
2017 fail1:
2018 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2019
2020 return (rc);
2021 }
2022
2023 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
2024
2025 #if EFX_OPTS_EF10()
2026
2027 static __checkReturn efx_rc_t
2028 ef10_set_workaround_bug26807(
2029 __in efx_nic_t *enp)
2030 {
2031 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2032 uint32_t flags;
2033 efx_rc_t rc;
2034
2035 /*
2036 * If the bug26807 workaround is enabled, then firmware has enabled
2037 * support for chained multicast filters. Firmware will reset (FLR)
2038 * functions which have filters in the hardware filter table when the
2039 * workaround is enabled/disabled.
2040 *
2041 * We must recheck if the workaround is enabled after inserting the
2042 * first hardware filter, in case it has been changed since this check.
2043 */
2044 rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
2045 B_TRUE, &flags);
2046 if (rc == 0) {
2047 encp->enc_bug26807_workaround = B_TRUE;
2048 if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
2049 /*
2050 * Other functions had installed filters before the
2051 * workaround was enabled, and they have been reset
2052 * by firmware.
2053 */
2054 EFSYS_PROBE(bug26807_workaround_flr_done);
2055 /* FIXME: bump MC warm boot count ? */
2056 }
2057 } else if (rc == EACCES) {
2058 /*
2059 * Unprivileged functions cannot enable the workaround in older
2060 * firmware.
2061 */
2062 encp->enc_bug26807_workaround = B_FALSE;
2063 } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
2064 encp->enc_bug26807_workaround = B_FALSE;
2065 } else {
2066 goto fail1;
2067 }
2068
2069 return (0);
2070
2071 fail1:
2072 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2073
2074 return (rc);
2075 }
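
/*
 * Illustrative sketch (not part of the driver): how a client might recheck
 * the workaround state after inserting its first hardware filter, as the
 * comment above requires. This assumes the libefx helper
 * efx_mcdi_get_workarounds() and the MC_CMD_GET_WORKAROUNDS_OUT_BUG26807
 * flag; verify the exact names against efx.h and the MCDI headers.
 *
 *	uint32_t implemented, enabled;
 *
 *	if (efx_mcdi_get_workarounds(enp, &implemented, &enabled) == 0) {
 *		encp->enc_bug26807_workaround =
 *		    (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807) ?
 *		    B_TRUE : B_FALSE;
 *	}
 */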
2076
2077 static __checkReturn efx_rc_t
2078 ef10_nic_board_cfg(
2079 __in efx_nic_t *enp)
2080 {
2081 const efx_nic_ops_t *enop = enp->en_enop;
2082 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2083 efx_rc_t rc;
2084
2085 if ((rc = efx_mcdi_nic_board_cfg(enp)) != 0)
2086 goto fail1;
2087
2088 /*
2089 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
2090 * We only support the 14 byte prefix here.
2091 */
2092 if (encp->enc_rx_prefix_size != 14) {
2093 rc = ENOTSUP;
2094 goto fail2;
2095 }
2096
2097 encp->enc_clk_mult = 1; /* not used for EF10 */
2098
2099 /* Alignment for WPTR updates */
2100 encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
2101
2102 encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
2103 /* No boundary crossing limits */
2104 encp->enc_tx_dma_desc_boundary = 0;
2105
2106 /*
2107 	 * Maximum offset into the frame at which the TCP header can start
2108 	 * for firmware-assisted TSO to work.
2109 */
2110 encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
2111
2112 	/* The EF10 TSO engine demands that the packet header be contiguous. */
2113 encp->enc_tx_tso_max_header_ndescs = 1;
2114
2115 /* The overall TSO header length is not limited. */
2116 encp->enc_tx_tso_max_header_length = UINT32_MAX;
2117
2118 /*
2119 * There are no specific limitations on the number of
2120 * TSO payload descriptors.
2121 */
2122 encp->enc_tx_tso_max_payload_ndescs = UINT32_MAX;
2123
2124 /* TSO superframe payload length is not limited. */
2125 encp->enc_tx_tso_max_payload_length = UINT32_MAX;
2126
2127 /*
2128 * Limitation on the maximum number of outgoing packets per
2129 * TSO transaction described in SF-108452-SW.
2130 */
2131 encp->enc_tx_tso_max_nframes = 32767;
2132
2133 /*
2134 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
2135 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
2136 * resources (allocated to this PCIe function), which is zero until
2137 * after we have allocated VIs.
2138 */
2139 encp->enc_evq_limit = 1024;
2140 encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
2141 encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
2142
2143 encp->enc_buftbl_limit = UINT32_MAX;
2144
2145 if ((rc = ef10_set_workaround_bug26807(enp)) != 0)
2146 goto fail3;
2147
2148 /* Get remaining controller-specific board config */
2149 if ((rc = enop->eno_board_cfg(enp)) != 0)
2150 if (rc != EACCES)
2151 goto fail4;
2152
2153 return (0);
2154
2155 fail4:
2156 EFSYS_PROBE(fail4);
2157 fail3:
2158 EFSYS_PROBE(fail3);
2159 fail2:
2160 EFSYS_PROBE(fail2);
2161 fail1:
2162 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2163
2164 return (rc);
2165 }
2166
2167 __checkReturn efx_rc_t
2168 ef10_nic_probe(
2169 __in efx_nic_t *enp)
2170 {
2171 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2172 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2173 efx_rc_t rc;
2174
2175 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2176
2177 /* Read and clear any assertion state */
2178 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
2179 goto fail1;
2180
2181 /* Exit the assertion handler */
2182 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
2183 if (rc != EACCES)
2184 goto fail2;
2185
2186 if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
2187 goto fail3;
2188
2189 if ((rc = ef10_nic_board_cfg(enp)) != 0)
2190 goto fail4;
2191
2192 /*
2193 * Set default driver config limits (based on board config).
2194 *
2195 * FIXME: For now allocate a fixed number of VIs which is likely to be
2196 * sufficient and small enough to allow multiple functions on the same
2197 * port.
2198 */
2199 edcp->edc_min_vi_count = edcp->edc_max_vi_count =
2200 MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
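	/*
	 * For example, if enc_rxq_limit and enc_txq_limit are both at least
	 * 128, this fixes both the minimum and maximum VI count at 128.
	 */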
2201
2202 /* The client driver must configure and enable PIO buffer support */
2203 edcp->edc_max_piobuf_count = 0;
2204 edcp->edc_pio_alloc_size = 0;
2205
2206 #if EFSYS_OPT_MAC_STATS
2207 /* Wipe the MAC statistics */
2208 if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
2209 goto fail5;
2210 #endif
2211
2212 #if EFSYS_OPT_LOOPBACK
2213 if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
2214 goto fail6;
2215 #endif
2216
2217 #if EFSYS_OPT_MON_STATS
2218 if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
2219 /* Unprivileged functions do not have access to sensors */
2220 if (rc != EACCES)
2221 goto fail7;
2222 }
2223 #endif
2224
2225 return (0);
2226
2227 #if EFSYS_OPT_MON_STATS
2228 fail7:
2229 EFSYS_PROBE(fail7);
2230 #endif
2231 #if EFSYS_OPT_LOOPBACK
2232 fail6:
2233 EFSYS_PROBE(fail6);
2234 #endif
2235 #if EFSYS_OPT_MAC_STATS
2236 fail5:
2237 EFSYS_PROBE(fail5);
2238 #endif
2239 fail4:
2240 EFSYS_PROBE(fail4);
2241 fail3:
2242 EFSYS_PROBE(fail3);
2243 fail2:
2244 EFSYS_PROBE(fail2);
2245 fail1:
2246 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2247
2248 return (rc);
2249 }
2250
2251 __checkReturn efx_rc_t
2252 ef10_nic_set_drv_limits(
2253 __inout efx_nic_t *enp,
2254 __in efx_drv_limits_t *edlp)
2255 {
2256 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2257 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2258 uint32_t min_evq_count, max_evq_count;
2259 uint32_t min_rxq_count, max_rxq_count;
2260 uint32_t min_txq_count, max_txq_count;
2261 efx_rc_t rc;
2262
2263 if (edlp == NULL) {
2264 rc = EINVAL;
2265 goto fail1;
2266 }
2267
2268 /* Get minimum required and maximum usable VI limits */
2269 min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
2270 min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
2271 min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
2272
2273 edcp->edc_min_vi_count =
2274 MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
2275
2276 max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
2277 max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
2278 max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
2279
2280 edcp->edc_max_vi_count =
2281 MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
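	/*
	 * Illustrative example: with driver limits of 4/8 EVQs, 2/4 RXQs and
	 * 2/4 TXQs (all below the hardware limits), this yields
	 * edc_min_vi_count = 4 and edc_max_vi_count = 8.
	 */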
2282
2283 /*
2284 * Check limits for sub-allocated piobuf blocks.
2285 * PIO is optional, so don't fail if the limits are incorrect.
2286 */
2287 if ((encp->enc_piobuf_size == 0) ||
2288 (encp->enc_piobuf_limit == 0) ||
2289 (edlp->edl_min_pio_alloc_size == 0) ||
2290 (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
2291 /* Disable PIO */
2292 edcp->edc_max_piobuf_count = 0;
2293 edcp->edc_pio_alloc_size = 0;
2294 } else {
2295 uint32_t blk_size, blk_count, blks_per_piobuf;
2296
2297 blk_size =
2298 MAX(edlp->edl_min_pio_alloc_size,
2299 encp->enc_piobuf_min_alloc_size);
2300
2301 blks_per_piobuf = encp->enc_piobuf_size / blk_size;
2302 EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
2303
2304 blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
2305
2306 /* A zero max pio alloc count means unlimited */
2307 if ((edlp->edl_max_pio_alloc_count > 0) &&
2308 (edlp->edl_max_pio_alloc_count < blk_count)) {
2309 blk_count = edlp->edl_max_pio_alloc_count;
2310 }
2311
2312 edcp->edc_pio_alloc_size = blk_size;
2313 edcp->edc_max_piobuf_count =
2314 (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
2315 }
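	/*
	 * Worked example (illustrative values only): with enc_piobuf_size =
	 * 2048, blk_size = 64 and enc_piobuf_limit = 16, blks_per_piobuf = 32
	 * and the block pool is 512; capping edl_max_pio_alloc_count at 100
	 * rounds up to (100 + 31) / 32 = 4 piobufs.
	 */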
2316
2317 return (0);
2318
2319 fail1:
2320 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2321
2322 return (rc);
2323 }
2324
2326 __checkReturn efx_rc_t
2327 ef10_nic_reset(
2328 __in efx_nic_t *enp)
2329 {
2330 efx_rc_t rc;
2331
2332 /* ef10_nic_reset() is called to recover from BADASSERT failures. */
2333 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
2334 goto fail1;
2335 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
2336 goto fail2;
2337
2338 if ((rc = efx_mcdi_entity_reset(enp)) != 0)
2339 goto fail3;
2340
2341 /* Clear RX/TX DMA queue errors */
2342 enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
2343
2344 return (0);
2345
2346 fail3:
2347 EFSYS_PROBE(fail3);
2348 fail2:
2349 EFSYS_PROBE(fail2);
2350 fail1:
2351 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2352
2353 return (rc);
2354 }
2355
2356 #endif /* EFX_OPTS_EF10() */
2357
2358 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
2359
2360 __checkReturn efx_rc_t
2361 ef10_upstream_port_vadaptor_alloc(
2362 __in efx_nic_t *enp)
2363 {
2364 uint32_t retry;
2365 uint32_t delay_us;
2366 efx_rc_t rc;
2367
2368 /*
2369 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
2370 * driver has yet to bring up the EVB port. See bug 56147. In this case,
2371 * retry the request several times after waiting a while. The wait time
2372 * between retries starts small (10ms) and exponentially increases.
2373 * Total wait time is a little over two seconds. Retry logic in the
2374 * client driver may mean this whole loop is repeated if it continues to
2375 * fail.
2376 */
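	/*
	 * With the parameters below this gives sleeps of 10, 40, 160, 640,
	 * 640 and 640 ms between up to seven allocation attempts, i.e. about
	 * 2.1 seconds in total.
	 */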
2377 retry = 0;
2378 delay_us = 10000;
2379 while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
2380 if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
2381 (rc != ENOENT)) {
2382 /*
2383 * Do not retry alloc for PF, or for other errors on
2384 * a VF.
2385 */
2386 goto fail1;
2387 }
2388
2389 /* VF startup before PF is ready. Retry allocation. */
2390 if (retry > 5) {
2391 /* Too many attempts */
2392 rc = EINVAL;
2393 goto fail2;
2394 }
2395 EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
2396 EFSYS_SLEEP(delay_us);
2397 retry++;
2398 if (delay_us < 500000)
2399 delay_us <<= 2;
2400 }
2401
2402 return (0);
2403
2404 fail2:
2405 EFSYS_PROBE(fail2);
2406 fail1:
2407 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2408
2409 return (rc);
2410 }
2411
2412 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
2413
2414 #if EFX_OPTS_EF10()
2415
2416 __checkReturn efx_rc_t
2417 ef10_nic_init(
2418 __in efx_nic_t *enp)
2419 {
2420 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2421 uint32_t min_vi_count, max_vi_count;
2422 uint32_t vi_count, vi_base, vi_shift;
2423 uint32_t i;
2424 uint32_t vi_window_size;
2425 efx_rc_t rc;
2426 boolean_t alloc_vadaptor = B_TRUE;
2427
2428 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2429
2430 /* Enable reporting of some events (e.g. link change) */
2431 if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
2432 goto fail1;
2433
2434 /* Allocate (optional) on-chip PIO buffers */
2435 ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
2436
2437 /*
2438 * For best performance, PIO writes should use a write-combined
2439 * (WC) memory mapping. Using a separate WC mapping for the PIO
2440 * aperture of each VI would be a burden to drivers (and not
2441 * possible if the host page size is >4Kbyte).
2442 *
2443 * To avoid this we use a single uncached (UC) mapping for VI
2444 * register access, and a single WC mapping for extra VIs used
2445 * for PIO writes.
2446 *
2447 * Each piobuf must be linked to a VI in the WC mapping, and to
2448 * each VI that is using a sub-allocated block from the piobuf.
2449 */
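	/*
	 * The resulting BAR layout (offsets and sizes are computed below) is:
	 *
	 *   UC mapping: VIs [0 .. ena_pio_write_vi_base - 1], one VI window
	 *               (1 << enc_vi_window_shift bytes) per VI.
	 *   WC mapping: VIs [ena_pio_write_vi_base .. vi_count - 1], one
	 *               extra VI per allocated piobuf.
	 *
	 * If no piobufs were allocated, the UC mapping covers all VIs and the
	 * WC mapping is empty.
	 */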
2450 min_vi_count = edcp->edc_min_vi_count;
2451 max_vi_count =
2452 edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
2453
2454 /* Ensure that the previously attached driver's VIs are freed */
2455 if ((rc = efx_mcdi_free_vis(enp)) != 0)
2456 goto fail2;
2457
2458 /*
2459 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
2460 * fails then retrying the request for fewer VI resources may succeed.
2461 */
2462 vi_count = 0;
2463 if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
2464 &vi_base, &vi_count, &vi_shift)) != 0)
2465 goto fail3;
2466
2467 EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
2468
2469 if (vi_count < min_vi_count) {
2470 rc = ENOMEM;
2471 goto fail4;
2472 }
2473
2474 enp->en_arch.ef10.ena_vi_base = vi_base;
2475 enp->en_arch.ef10.ena_vi_count = vi_count;
2476 enp->en_arch.ef10.ena_vi_shift = vi_shift;
2477
2478 if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
2479 /* Not enough extra VIs to map piobufs */
2480 ef10_nic_free_piobufs(enp);
2481 }
2482
2483 enp->en_arch.ef10.ena_pio_write_vi_base =
2484 vi_count - enp->en_arch.ef10.ena_piobuf_count;
2485
2486 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
2487 EFX_VI_WINDOW_SHIFT_INVALID);
2488 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
2489 EFX_VI_WINDOW_SHIFT_64K);
2490 vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
2491
2492 /* Save UC memory mapping details */
2493 enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
2494 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2495 enp->en_arch.ef10.ena_uc_mem_map_size =
2496 (vi_window_size *
2497 enp->en_arch.ef10.ena_pio_write_vi_base);
2498 } else {
2499 enp->en_arch.ef10.ena_uc_mem_map_size =
2500 (vi_window_size *
2501 enp->en_arch.ef10.ena_vi_count);
2502 }
2503
2504 /* Save WC memory mapping details */
2505 enp->en_arch.ef10.ena_wc_mem_map_offset =
2506 enp->en_arch.ef10.ena_uc_mem_map_offset +
2507 enp->en_arch.ef10.ena_uc_mem_map_size;
2508
2509 enp->en_arch.ef10.ena_wc_mem_map_size =
2510 (vi_window_size *
2511 enp->en_arch.ef10.ena_piobuf_count);
2512
2513 /* Link piobufs to extra VIs in WC mapping */
2514 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2515 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2516 rc = efx_mcdi_link_piobuf(enp,
2517 enp->en_arch.ef10.ena_pio_write_vi_base + i,
2518 enp->en_arch.ef10.ena_piobuf_handle[i]);
2519 if (rc != 0)
2520 break;
2521 }
2522 }
2523
2524 /*
2525 	 * For the SR-IOV use case, the vAdaptor is allocated for the PF and
2526 	 * its associated VFs during NIC initialization, when the vSwitch is
2527 	 * created and vPorts are allocated. Hence, skip vAdaptor allocation
2528 	 * for EVB and record the vPort ID allocated for the PF in the NIC structure.
2529 */
2530
2531 enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
2532 #if EFSYS_OPT_EVB
2533 if ((enp->en_vswitchp != NULL) && (enp->en_vswitchp->ev_evcp != NULL)) {
2534 /* For EVB use vport allocated on vswitch */
2535 enp->en_vport_id = enp->en_vswitchp->ev_evcp->evc_vport_id;
2536 alloc_vadaptor = B_FALSE;
2537 }
2538 #endif
2539 if (alloc_vadaptor != B_FALSE) {
2540 /* Allocate a vAdaptor attached to our upstream vPort/pPort */
2541 if ((rc = ef10_upstream_port_vadaptor_alloc(enp)) != 0)
2542 goto fail5;
2543 }
2544 enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
2545
2546 return (0);
2547
2548 fail5:
2549 EFSYS_PROBE(fail5);
2550 fail4:
2551 EFSYS_PROBE(fail4);
2552 fail3:
2553 EFSYS_PROBE(fail3);
2554 fail2:
2555 EFSYS_PROBE(fail2);
2556
2557 ef10_nic_free_piobufs(enp);
2558
2559 fail1:
2560 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2561
2562 return (rc);
2563 }
2564
2565 __checkReturn efx_rc_t
2566 ef10_nic_get_vi_pool(
2567 __in efx_nic_t *enp,
2568 __out uint32_t *vi_countp)
2569 {
2570 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2571
2572 /*
2573 * Report VIs that the client driver can use.
2574 * Do not include VIs used for PIO buffer writes.
2575 */
2576 *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
2577
2578 return (0);
2579 }
2580
2581 __checkReturn efx_rc_t
2582 ef10_nic_get_bar_region(
2583 __in efx_nic_t *enp,
2584 __in efx_nic_region_t region,
2585 __out uint32_t *offsetp,
2586 __out size_t *sizep)
2587 {
2588 efx_rc_t rc;
2589
2590 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2591
2592 /*
2593 * TODO: Specify host memory mapping alignment and granularity
2594 * in efx_drv_limits_t so that they can be taken into account
2595 * when allocating extra VIs for PIO writes.
2596 */
2597 switch (region) {
2598 case EFX_REGION_VI:
2599 /* UC mapped memory BAR region for VI registers */
2600 *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
2601 *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
2602 break;
2603
2604 case EFX_REGION_PIO_WRITE_VI:
2605 /* WC mapped memory BAR region for piobuf writes */
2606 *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
2607 *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
2608 break;
2609
2610 default:
2611 rc = EINVAL;
2612 goto fail1;
2613 }
2614
2615 return (0);
2616
2617 fail1:
2618 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2619
2620 return (rc);
2621 }
2622
2623 __checkReturn boolean_t
2624 ef10_nic_hw_unavailable(
2625 __in efx_nic_t *enp)
2626 {
2627 efx_dword_t dword;
2628
2629 if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
2630 return (B_TRUE);
2631
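	/*
	 * A read of all-ones from this register is treated as "hardware
	 * unavailable": failed PCIe reads (e.g. after surprise removal or
	 * while the device is in reset) typically complete with all bits set.
	 */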
2632 EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE);
2633 if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
2634 goto unavail;
2635
2636 return (B_FALSE);
2637
2638 unavail:
2639 ef10_nic_set_hw_unavailable(enp);
2640
2641 return (B_TRUE);
2642 }
2643
2644 void
2645 ef10_nic_set_hw_unavailable(
2646 __in efx_nic_t *enp)
2647 {
2648 EFSYS_PROBE(hw_unavail);
2649 enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
2650 }
2651
2653 void
2654 ef10_nic_fini(
2655 __in efx_nic_t *enp)
2656 {
2657 uint32_t i;
2658 efx_rc_t rc;
2659 boolean_t do_vadaptor_free = B_TRUE;
2660
2661 #if EFSYS_OPT_EVB
2662 if (enp->en_vswitchp != NULL) {
2663 /*
2664 * For SR-IOV the vAdaptor is freed with the vswitch,
2665 * so do not free it here.
2666 */
2667 do_vadaptor_free = B_FALSE;
2668 }
2669 #endif
2670 if (do_vadaptor_free != B_FALSE) {
2671 (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
2672 enp->en_vport_id = EVB_PORT_ID_NULL;
2673 }
2674
2675 /* Unlink piobufs from extra VIs in WC mapping */
2676 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2677 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2678 rc = efx_mcdi_unlink_piobuf(enp,
2679 enp->en_arch.ef10.ena_pio_write_vi_base + i);
2680 if (rc != 0)
2681 break;
2682 }
2683 }
2684
2685 ef10_nic_free_piobufs(enp);
2686
2687 (void) efx_mcdi_free_vis(enp);
2688 enp->en_arch.ef10.ena_vi_count = 0;
2689 }
2690
2691 void
2692 ef10_nic_unprobe(
2693 __in efx_nic_t *enp)
2694 {
2695 #if EFSYS_OPT_MON_STATS
2696 mcdi_mon_cfg_free(enp);
2697 #endif /* EFSYS_OPT_MON_STATS */
2698 (void) efx_mcdi_drv_attach(enp, B_FALSE);
2699 }
2700
2701 #if EFSYS_OPT_DIAG
2702
2703 __checkReturn efx_rc_t
2704 ef10_nic_register_test(
2705 __in efx_nic_t *enp)
2706 {
2707 efx_rc_t rc;
2708
2709 /* FIXME */
2710 _NOTE(ARGUNUSED(enp))
2711 _NOTE(CONSTANTCONDITION)
2712 if (B_FALSE) {
2713 rc = ENOTSUP;
2714 goto fail1;
2715 }
2716 /* FIXME */
2717
2718 return (0);
2719
2720 fail1:
2721 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2722
2723 return (rc);
2724 }
2725
2726 #endif /* EFSYS_OPT_DIAG */
2727
2728 #if EFSYS_OPT_FW_SUBVARIANT_AWARE
2729
2730 __checkReturn efx_rc_t
2731 efx_mcdi_get_nic_global(
2732 __in efx_nic_t *enp,
2733 __in uint32_t key,
2734 __out uint32_t *valuep)
2735 {
2736 efx_mcdi_req_t req;
2737 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN,
2738 MC_CMD_GET_NIC_GLOBAL_OUT_LEN);
2739 efx_rc_t rc;
2740
2741 req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
2742 req.emr_in_buf = payload;
2743 req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
2744 req.emr_out_buf = payload;
2745 req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;
2746
2747 MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);
2748
2749 efx_mcdi_execute(enp, &req);
2750
2751 if (req.emr_rc != 0) {
2752 rc = req.emr_rc;
2753 goto fail1;
2754 }
2755
2756 if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {
2757 rc = EMSGSIZE;
2758 goto fail2;
2759 }
2760
2761 *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);
2762
2763 return (0);
2764
2765 fail2:
2766 EFSYS_PROBE(fail2);
2767 fail1:
2768 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2769
2770 return (rc);
2771 }
2772
2773 __checkReturn efx_rc_t
2774 efx_mcdi_set_nic_global(
2775 __in efx_nic_t *enp,
2776 __in uint32_t key,
2777 __in uint32_t value)
2778 {
2779 efx_mcdi_req_t req;
2780 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0);
2781 efx_rc_t rc;
2782
2783 req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
2784 req.emr_in_buf = payload;
2785 req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
2786 req.emr_out_buf = NULL;
2787 req.emr_out_length = 0;
2788
2789 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
2790 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);
2791
2792 efx_mcdi_execute(enp, &req);
2793
2794 if (req.emr_rc != 0) {
2795 rc = req.emr_rc;
2796 goto fail1;
2797 }
2798
2799 return (0);
2800
2801 fail1:
2802 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2803
2804 return (rc);
2805 }
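
/*
 * One possible usage pattern (sketch only; "KEY" and "FLAG" are placeholders,
 * not real MCDI definitions) is a read-modify-write of a single global:
 *
 *	uint32_t value;
 *
 *	if (efx_mcdi_get_nic_global(enp, KEY, &value) == 0)
 *		(void) efx_mcdi_set_nic_global(enp, KEY, value | FLAG);
 */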
2806
2807 #endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
2808
2809 #endif /* EFX_OPTS_EF10() */
2810