1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright (c) 2024, Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include "ice_common.h"
33 #include "ice_sched.h"
34 #include "ice_adminq_cmd.h"
35 #include "ice_flow.h"
36 #include "ice_switch.h"
37
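/* Number of polling iterations used below while waiting for a PF or global
 * reset to finish; see ice_check_reset() and ice_pf_reset().
 */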
38 #define ICE_PF_RESET_WAIT_COUNT 500
39
40 static const char * const ice_link_mode_str_low[] = {
41 ice_arr_elem_idx(0, "100BASE_TX"),
42 ice_arr_elem_idx(1, "100M_SGMII"),
43 ice_arr_elem_idx(2, "1000BASE_T"),
44 ice_arr_elem_idx(3, "1000BASE_SX"),
45 ice_arr_elem_idx(4, "1000BASE_LX"),
46 ice_arr_elem_idx(5, "1000BASE_KX"),
47 ice_arr_elem_idx(6, "1G_SGMII"),
48 ice_arr_elem_idx(7, "2500BASE_T"),
49 ice_arr_elem_idx(8, "2500BASE_X"),
50 ice_arr_elem_idx(9, "2500BASE_KX"),
51 ice_arr_elem_idx(10, "5GBASE_T"),
52 ice_arr_elem_idx(11, "5GBASE_KR"),
53 ice_arr_elem_idx(12, "10GBASE_T"),
54 ice_arr_elem_idx(13, "10G_SFI_DA"),
55 ice_arr_elem_idx(14, "10GBASE_SR"),
56 ice_arr_elem_idx(15, "10GBASE_LR"),
57 ice_arr_elem_idx(16, "10GBASE_KR_CR1"),
58 ice_arr_elem_idx(17, "10G_SFI_AOC_ACC"),
59 ice_arr_elem_idx(18, "10G_SFI_C2C"),
60 ice_arr_elem_idx(19, "25GBASE_T"),
61 ice_arr_elem_idx(20, "25GBASE_CR"),
62 ice_arr_elem_idx(21, "25GBASE_CR_S"),
63 ice_arr_elem_idx(22, "25GBASE_CR1"),
64 ice_arr_elem_idx(23, "25GBASE_SR"),
65 ice_arr_elem_idx(24, "25GBASE_LR"),
66 ice_arr_elem_idx(25, "25GBASE_KR"),
67 ice_arr_elem_idx(26, "25GBASE_KR_S"),
68 ice_arr_elem_idx(27, "25GBASE_KR1"),
69 ice_arr_elem_idx(28, "25G_AUI_AOC_ACC"),
70 ice_arr_elem_idx(29, "25G_AUI_C2C"),
71 ice_arr_elem_idx(30, "40GBASE_CR4"),
72 ice_arr_elem_idx(31, "40GBASE_SR4"),
73 ice_arr_elem_idx(32, "40GBASE_LR4"),
74 ice_arr_elem_idx(33, "40GBASE_KR4"),
75 ice_arr_elem_idx(34, "40G_XLAUI_AOC_ACC"),
76 ice_arr_elem_idx(35, "40G_XLAUI"),
77 ice_arr_elem_idx(36, "50GBASE_CR2"),
78 ice_arr_elem_idx(37, "50GBASE_SR2"),
79 ice_arr_elem_idx(38, "50GBASE_LR2"),
80 ice_arr_elem_idx(39, "50GBASE_KR2"),
81 ice_arr_elem_idx(40, "50G_LAUI2_AOC_ACC"),
82 ice_arr_elem_idx(41, "50G_LAUI2"),
83 ice_arr_elem_idx(42, "50G_AUI2_AOC_ACC"),
84 ice_arr_elem_idx(43, "50G_AUI2"),
85 ice_arr_elem_idx(44, "50GBASE_CP"),
86 ice_arr_elem_idx(45, "50GBASE_SR"),
87 ice_arr_elem_idx(46, "50GBASE_FR"),
88 ice_arr_elem_idx(47, "50GBASE_LR"),
89 ice_arr_elem_idx(48, "50GBASE_KR_PAM4"),
90 ice_arr_elem_idx(49, "50G_AUI1_AOC_ACC"),
91 ice_arr_elem_idx(50, "50G_AUI1"),
92 ice_arr_elem_idx(51, "100GBASE_CR4"),
93 ice_arr_elem_idx(52, "100GBASE_SR4"),
94 ice_arr_elem_idx(53, "100GBASE_LR4"),
95 ice_arr_elem_idx(54, "100GBASE_KR4"),
96 ice_arr_elem_idx(55, "100G_CAUI4_AOC_ACC"),
97 ice_arr_elem_idx(56, "100G_CAUI4"),
98 ice_arr_elem_idx(57, "100G_AUI4_AOC_ACC"),
99 ice_arr_elem_idx(58, "100G_AUI4"),
100 ice_arr_elem_idx(59, "100GBASE_CR_PAM4"),
101 ice_arr_elem_idx(60, "100GBASE_KR_PAM4"),
102 ice_arr_elem_idx(61, "100GBASE_CP2"),
103 ice_arr_elem_idx(62, "100GBASE_SR2"),
104 ice_arr_elem_idx(63, "100GBASE_DR"),
105 };
106
107 static const char * const ice_link_mode_str_high[] = {
108 ice_arr_elem_idx(0, "100GBASE_KR2_PAM4"),
109 ice_arr_elem_idx(1, "100G_CAUI2_AOC_ACC"),
110 ice_arr_elem_idx(2, "100G_CAUI2"),
111 ice_arr_elem_idx(3, "100G_AUI2_AOC_ACC"),
112 ice_arr_elem_idx(4, "100G_AUI2"),
113 ice_arr_elem_idx(5, "200G_CR4_PAM4"),
114 ice_arr_elem_idx(6, "200G_SR4"),
115 ice_arr_elem_idx(7, "200G_FR4"),
116 ice_arr_elem_idx(8, "200G_LR4"),
117 ice_arr_elem_idx(9, "200G_DR4"),
118 ice_arr_elem_idx(10, "200G_KR4_PAM4"),
119 ice_arr_elem_idx(11, "200G_AUI4_AOC_ACC"),
120 ice_arr_elem_idx(12, "200G_AUI4"),
121 ice_arr_elem_idx(13, "200G_AUI8_AOC_ACC"),
122 ice_arr_elem_idx(14, "200G_AUI8"),
123 ice_arr_elem_idx(15, "400GBASE_FR8"),
124 };
125
126 /**
127 * ice_dump_phy_type - helper function to dump phy_type
128 * @hw: pointer to the HW structure
129 * @low: 64 bit value for phy_type_low
130 * @high: 64 bit value for phy_type_high
131 * @prefix: prefix string to differentiate multiple dumps
132 */
133 static void
134 ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
135 {
136 u32 i;
137
138 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
139 (unsigned long long)low);
140
141 for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) {
142 if (low & BIT_ULL(i))
143 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
144 prefix, i, ice_link_mode_str_low[i]);
145 }
146
147 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
148 (unsigned long long)high);
149
150 for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) {
151 if (high & BIT_ULL(i))
152 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
153 prefix, i, ice_link_mode_str_high[i]);
154 }
155 }
156
157 /**
158 * ice_set_mac_type - Sets MAC type
159 * @hw: pointer to the HW structure
160 *
161 * This function sets the MAC type of the adapter based on the
162 * vendor ID and device ID stored in the HW structure.
163 */
164 int ice_set_mac_type(struct ice_hw *hw)
165 {
166 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
167
168 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
169 return ICE_ERR_DEVICE_NOT_SUPPORTED;
170
171 switch (hw->device_id) {
172 case ICE_DEV_ID_E810C_BACKPLANE:
173 case ICE_DEV_ID_E810C_QSFP:
174 case ICE_DEV_ID_E810C_SFP:
175 case ICE_DEV_ID_E810_XXV_BACKPLANE:
176 case ICE_DEV_ID_E810_XXV_QSFP:
177 case ICE_DEV_ID_E810_XXV_SFP:
178 hw->mac_type = ICE_MAC_E810;
179 break;
180 case ICE_DEV_ID_E822C_10G_BASE_T:
181 case ICE_DEV_ID_E822C_BACKPLANE:
182 case ICE_DEV_ID_E822C_QSFP:
183 case ICE_DEV_ID_E822C_SFP:
184 case ICE_DEV_ID_E822C_SGMII:
185 case ICE_DEV_ID_E822L_10G_BASE_T:
186 case ICE_DEV_ID_E822L_BACKPLANE:
187 case ICE_DEV_ID_E822L_SFP:
188 case ICE_DEV_ID_E822L_SGMII:
189 case ICE_DEV_ID_E823L_10G_BASE_T:
190 case ICE_DEV_ID_E823L_1GBE:
191 case ICE_DEV_ID_E823L_BACKPLANE:
192 case ICE_DEV_ID_E823L_QSFP:
193 case ICE_DEV_ID_E823L_SFP:
194 case ICE_DEV_ID_E823C_10G_BASE_T:
195 case ICE_DEV_ID_E823C_BACKPLANE:
196 case ICE_DEV_ID_E823C_QSFP:
197 case ICE_DEV_ID_E823C_SFP:
198 case ICE_DEV_ID_E823C_SGMII:
199 hw->mac_type = ICE_MAC_GENERIC;
200 break;
201 case ICE_DEV_ID_E825C_BACKPLANE:
202 case ICE_DEV_ID_E825C_QSFP:
203 case ICE_DEV_ID_E825C_SFP:
204 case ICE_DEV_ID_E825C_SGMII:
205 hw->mac_type = ICE_MAC_GENERIC_3K_E825;
206 break;
207 case ICE_DEV_ID_E830_BACKPLANE:
208 case ICE_DEV_ID_E830_QSFP56:
209 case ICE_DEV_ID_E830_SFP:
210 case ICE_DEV_ID_E830C_BACKPLANE:
211 case ICE_DEV_ID_E830_L_BACKPLANE:
212 case ICE_DEV_ID_E830C_QSFP:
213 case ICE_DEV_ID_E830_L_QSFP:
214 case ICE_DEV_ID_E830C_SFP:
215 case ICE_DEV_ID_E830_L_SFP:
216 hw->mac_type = ICE_MAC_E830;
217 break;
218 default:
219 hw->mac_type = ICE_MAC_UNKNOWN;
220 break;
221 }
222
223 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
224 return 0;
225 }
226
227 /**
228 * ice_is_generic_mac
229 * @hw: pointer to the hardware structure
230 *
231  * returns true if mac_type is one of the generic MAC types, false if not
232 */
233 bool ice_is_generic_mac(struct ice_hw *hw)
234 {
235 return (hw->mac_type == ICE_MAC_GENERIC ||
236 hw->mac_type == ICE_MAC_GENERIC_3K ||
237 hw->mac_type == ICE_MAC_GENERIC_3K_E825);
238 }
239
240 /**
241 * ice_is_e810
242 * @hw: pointer to the hardware structure
243 *
244 * returns true if the device is E810 based, false if not.
245 */
246 bool ice_is_e810(struct ice_hw *hw)
247 {
248 return hw->mac_type == ICE_MAC_E810;
249 }
250
251 /**
252 * ice_is_e810t
253 * @hw: pointer to the hardware structure
254 *
255 * returns true if the device is E810T based, false if not.
256 */
257 bool ice_is_e810t(struct ice_hw *hw)
258 {
259 switch (hw->device_id) {
260 case ICE_DEV_ID_E810C_SFP:
261 switch (hw->subsystem_device_id) {
262 case ICE_SUBDEV_ID_E810T:
263 case ICE_SUBDEV_ID_E810T2:
264 case ICE_SUBDEV_ID_E810T3:
265 case ICE_SUBDEV_ID_E810T4:
266 case ICE_SUBDEV_ID_E810T6:
267 case ICE_SUBDEV_ID_E810T7:
268 return true;
269 }
270 break;
271 case ICE_DEV_ID_E810C_QSFP:
272 switch (hw->subsystem_device_id) {
273 case ICE_SUBDEV_ID_E810T2:
274 case ICE_SUBDEV_ID_E810T3:
275 case ICE_SUBDEV_ID_E810T5:
276 return true;
277 }
278 break;
279 default:
280 break;
281 }
282
283 return false;
284 }
285
286 /**
287 * ice_is_e830
288 * @hw: pointer to the hardware structure
289 *
290 * returns true if the device is E830 based, false if not.
291 */
292 bool ice_is_e830(struct ice_hw *hw)
293 {
294 return hw->mac_type == ICE_MAC_E830;
295 }
296
297 /**
298 * ice_is_e823
299 * @hw: pointer to the hardware structure
300 *
301 * returns true if the device is E823-L or E823-C based, false if not.
302 */
303 bool ice_is_e823(struct ice_hw *hw)
304 {
305 switch (hw->device_id) {
306 case ICE_DEV_ID_E823L_BACKPLANE:
307 case ICE_DEV_ID_E823L_SFP:
308 case ICE_DEV_ID_E823L_10G_BASE_T:
309 case ICE_DEV_ID_E823L_1GBE:
310 case ICE_DEV_ID_E823L_QSFP:
311 case ICE_DEV_ID_E823C_BACKPLANE:
312 case ICE_DEV_ID_E823C_QSFP:
313 case ICE_DEV_ID_E823C_SFP:
314 case ICE_DEV_ID_E823C_10G_BASE_T:
315 case ICE_DEV_ID_E823C_SGMII:
316 return true;
317 default:
318 return false;
319 }
320 }
321
322 /**
323 * ice_is_e825c
324 * @hw: pointer to the hardware structure
325 *
326 * returns true if the device is E825-C based, false if not.
327 */
328 bool ice_is_e825c(struct ice_hw *hw)
329 {
330 switch (hw->device_id) {
331 case ICE_DEV_ID_E825C_BACKPLANE:
332 case ICE_DEV_ID_E825C_QSFP:
333 case ICE_DEV_ID_E825C_SFP:
334 case ICE_DEV_ID_E825C_SGMII:
335 return true;
336 default:
337 return false;
338 }
339 }
340
341 /**
342 * ice_clear_pf_cfg - Clear PF configuration
343 * @hw: pointer to the hardware structure
344 *
345 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
346 * configuration, flow director filters, etc.).
347 */
348 int ice_clear_pf_cfg(struct ice_hw *hw)
349 {
350 struct ice_aq_desc desc;
351
352 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
353
354 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
355 }
356
357 /**
358 * ice_aq_manage_mac_read - manage MAC address read command
359 * @hw: pointer to the HW struct
360 * @buf: a virtual buffer to hold the manage MAC read response
361 * @buf_size: Size of the virtual buffer
362 * @cd: pointer to command details structure or NULL
363 *
364 * This function is used to return per PF station MAC address (0x0107).
365 * NOTE: Upon successful completion of this command, MAC address information
366  * is returned in the user-specified buffer. Please interpret the
367  * user-specified buffer as a "manage_mac_read" response.
368  * Responses such as the various MAC addresses are stored in the HW struct (port.mac).
369 * ice_discover_dev_caps is expected to be called before this function is
370 * called.
371 */
372 int
373 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
374 struct ice_sq_cd *cd)
375 {
376 struct ice_aqc_manage_mac_read_resp *resp;
377 struct ice_aqc_manage_mac_read *cmd;
378 struct ice_aq_desc desc;
379 int status;
380 u16 flags;
381 u8 i;
382
383 cmd = &desc.params.mac_read;
384
385 if (buf_size < sizeof(*resp))
386 return ICE_ERR_BUF_TOO_SHORT;
387
388 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
389
390 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
391 if (status)
392 return status;
393
394 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
395 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
396
397 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
398 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
399 return ICE_ERR_CFG;
400 }
401
402 /* A single port can report up to two (LAN and WoL) addresses */
403 for (i = 0; i < cmd->num_addr; i++)
404 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
405 ice_memcpy(hw->port_info->mac.lan_addr,
406 resp[i].mac_addr, ETH_ALEN,
407 ICE_NONDMA_TO_NONDMA);
408 ice_memcpy(hw->port_info->mac.perm_addr,
409 resp[i].mac_addr,
410 ETH_ALEN, ICE_NONDMA_TO_NONDMA);
411 break;
412 }
413 return 0;
414 }
415
416 /**
417 * ice_phy_maps_to_media
418 * @phy_type_low: PHY type low bits
419 * @phy_type_high: PHY type high bits
420 * @media_mask_low: media type PHY type low bitmask
421 * @media_mask_high: media type PHY type high bitmask
422 *
423 * Return true if PHY type [low|high] bits are only of media type PHY types
424 * [low|high] bitmask.
425 */
426 static bool
427 ice_phy_maps_to_media(u64 phy_type_low, u64 phy_type_high,
428 u64 media_mask_low, u64 media_mask_high)
429 {
430 	/* check if a PHY type exists for the media type */
431 if (!(phy_type_low & media_mask_low ||
432 phy_type_high & media_mask_high))
433 return false;
434
435 /* check that PHY types are only of media type */
436 if (!(phy_type_low & ~media_mask_low) &&
437 !(phy_type_high & ~media_mask_high))
438 return true;
439
440 return false;
441 }
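/* Illustration (informal, not from the upstream sources): if every set bit of
 * phy_type_low falls inside ICE_MEDIA_BASET_PHY_TYPE_LOW_M and phy_type_high
 * is zero, the helper returns true for the BASE-T masks; mixing in a
 * backplane bit makes it return false. ice_set_media_type() below relies on
 * this to pick exactly one media type.
 */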
442
443 /**
444 * ice_set_media_type - Sets media type
445 * @pi: port information structure
446 *
447 * Set ice_port_info PHY media type based on PHY type. This should be called
448 * from Get PHY caps with media.
449 */
450 static void ice_set_media_type(struct ice_port_info *pi)
451 {
452 enum ice_media_type *media_type;
453 u64 phy_type_high, phy_type_low;
454
455 phy_type_high = pi->phy.phy_type_high;
456 phy_type_low = pi->phy.phy_type_low;
457 media_type = &pi->phy.media_type;
458
459 /* if no media, then media type is NONE */
460 if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
461 *media_type = ICE_MEDIA_NONE;
462 /* else if PHY types are only BASE-T, then media type is BASET */
463 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
464 ICE_MEDIA_BASET_PHY_TYPE_LOW_M, 0))
465 *media_type = ICE_MEDIA_BASET;
466 /* else if any PHY type is BACKPLANE, then media type is BACKPLANE */
467 else if (phy_type_low & ICE_MEDIA_BP_PHY_TYPE_LOW_M ||
468 phy_type_high & ICE_MEDIA_BP_PHY_TYPE_HIGH_M)
469 *media_type = ICE_MEDIA_BACKPLANE;
470 /* else if PHY types are only optical, or optical and C2M, then media
471 * type is FIBER
472 */
473 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
474 ICE_MEDIA_OPT_PHY_TYPE_LOW_M,
475 ICE_MEDIA_OPT_PHY_TYPE_HIGH_M) ||
476 ((phy_type_low & ICE_MEDIA_OPT_PHY_TYPE_LOW_M ||
477 phy_type_high & ICE_MEDIA_OPT_PHY_TYPE_HIGH_M) &&
478 (phy_type_low & ICE_MEDIA_C2M_PHY_TYPE_LOW_M ||
479 phy_type_high & ICE_MEDIA_C2M_PHY_TYPE_HIGH_M)))
480 *media_type = ICE_MEDIA_FIBER;
481 /* else if PHY types are only DA, or DA and C2C, then media type DA */
482 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
483 ICE_MEDIA_DAC_PHY_TYPE_LOW_M,
484 ICE_MEDIA_DAC_PHY_TYPE_HIGH_M) ||
485 ((phy_type_low & ICE_MEDIA_DAC_PHY_TYPE_LOW_M ||
486 phy_type_high & ICE_MEDIA_DAC_PHY_TYPE_HIGH_M) &&
487 (phy_type_low & ICE_MEDIA_C2C_PHY_TYPE_LOW_M ||
488 phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
489 *media_type = ICE_MEDIA_DA;
490 /* else if PHY types are only C2M or only C2C, then media is AUI */
491 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
492 ICE_MEDIA_C2M_PHY_TYPE_LOW_M,
493 ICE_MEDIA_C2M_PHY_TYPE_HIGH_M) ||
494 ice_phy_maps_to_media(phy_type_low, phy_type_high,
495 ICE_MEDIA_C2C_PHY_TYPE_LOW_M,
496 ICE_MEDIA_C2C_PHY_TYPE_HIGH_M))
497 *media_type = ICE_MEDIA_AUI;
498
499 else
500 *media_type = ICE_MEDIA_UNKNOWN;
501 }
502
503 /**
504 * ice_aq_get_phy_caps - returns PHY capabilities
505 * @pi: port information structure
506 * @qual_mods: report qualified modules
507 * @report_mode: report mode capabilities
508 * @pcaps: structure for PHY capabilities to be filled
509 * @cd: pointer to command details structure or NULL
510 *
511 * Returns the various PHY capabilities supported on the Port (0x0600)
512 */
513 int
514 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
515 struct ice_aqc_get_phy_caps_data *pcaps,
516 struct ice_sq_cd *cd)
517 {
518 struct ice_aqc_get_phy_caps *cmd;
519 u16 pcaps_size = sizeof(*pcaps);
520 struct ice_aq_desc desc;
521 const char *prefix;
522 struct ice_hw *hw;
523 int status;
524
525 cmd = &desc.params.get_phy;
526
527 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
528 return ICE_ERR_PARAM;
529 hw = pi->hw;
530
531 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
532 !ice_fw_supports_report_dflt_cfg(hw))
533 return ICE_ERR_PARAM;
534
535 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
536
537 if (qual_mods)
538 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
539
540 cmd->param0 |= CPU_TO_LE16(report_mode);
541
542 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
543
544 ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
545
546 switch (report_mode) {
547 case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
548 prefix = "phy_caps_media";
549 break;
550 case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
551 prefix = "phy_caps_no_media";
552 break;
553 case ICE_AQC_REPORT_ACTIVE_CFG:
554 prefix = "phy_caps_active";
555 break;
556 case ICE_AQC_REPORT_DFLT_CFG:
557 prefix = "phy_caps_default";
558 break;
559 default:
560 prefix = "phy_caps_invalid";
561 }
562
563 ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low),
564 LE64_TO_CPU(pcaps->phy_type_high), prefix);
565
566 ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
567 prefix, report_mode);
568 ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
569 ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
570 pcaps->low_power_ctrl_an);
571 ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
572 pcaps->eee_cap);
573 ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
574 pcaps->eeer_value);
575 ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
576 pcaps->link_fec_options);
577 ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
578 prefix, pcaps->module_compliance_enforcement);
579 ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
580 prefix, pcaps->extended_compliance_code);
581 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
582 pcaps->module_type[0]);
583 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
584 pcaps->module_type[1]);
585 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
586 pcaps->module_type[2]);
587
588 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
589 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
590 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
591 ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
592 sizeof(pi->phy.link_info.module_type),
593 ICE_NONDMA_TO_NONDMA);
594 ice_set_media_type(pi);
595 ice_debug(hw, ICE_DBG_LINK, "%s: media_type = 0x%x\n", prefix,
596 pi->phy.media_type);
597 }
598
599 return status;
600 }
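/* Usage sketch (illustrative only; error handling trimmed): callers allocate
 * a response structure, request a single report mode and then inspect the
 * returned capabilities, e.g.
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *
 *	if (!ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				 &pcaps, NULL))
 *		inspect pcaps.caps / pcaps.link_fec_options;
 *
 * ice_init_hw() below shows the ICE_AQC_REPORT_TOPO_CAP_MEDIA variant.
 */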
601
602 /**
603 * ice_aq_get_phy_equalization - function to read serdes equalizer value from
604 * firmware using admin queue command.
605 * @hw: pointer to the HW struct
606 * @data_in: represents the serdes equalization parameter requested
607  * @op_code: represents the serdes number and a flag selecting Tx or Rx
608 * @serdes_num: represents the serdes number
609  * @output: pointer to the caller-supplied buffer to return the serdes equalizer value
610 *
611 * Returns 0 on success,
612 * non-zero status on error
613 */
614 int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
615 u8 serdes_num, int *output)
616 {
617 struct ice_aqc_dnl_call_command *cmd;
618 struct ice_aqc_dnl_call buf;
619 struct ice_aq_desc desc;
620 int err = 0;
621
622 if (!hw || !output)
623 return (ICE_ERR_PARAM);
624
625 memset(&buf, 0, sizeof(buf));
626 buf.sto.txrx_equa_reqs.data_in = CPU_TO_LE16(data_in);
627 buf.sto.txrx_equa_reqs.op_code_serdes_sel =
628 CPU_TO_LE16(op_code | (serdes_num & 0xF));
629
630 cmd = &desc.params.dnl_call;
631 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
632 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_RD |
633 ICE_AQ_FLAG_SI);
634 desc.datalen = CPU_TO_LE16(sizeof(struct ice_aqc_dnl_call));
635 cmd->activity_id = CPU_TO_LE16(ICE_AQC_ACT_ID_DNL);
636 cmd->ctx = 0;
637
638 err = ice_aq_send_cmd(hw, &desc, &buf,
639 sizeof(struct ice_aqc_dnl_call), NULL);
640 if (!err)
641 *output = buf.sto.txrx_equa_resp.val;
642
643 return err;
644 }
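/* Usage sketch (illustrative only): the caller picks the equalization
 * parameter via data_in, selects Tx/Rx and the lane via op_code/serdes_num,
 * and reads the result through *output, e.g.
 *
 *	int val;
 *
 *	if (!ice_aq_get_phy_equalization(hw, data_in, op_code, serdes, &val))
 *		report "val" for that lane;
 *
 * The data_in/op_code encodings come from the admin queue definitions and are
 * not spelled out here.
 */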
645
646 #define ice_get_link_status_data_ver(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
647 ICE_GET_LINK_STATUS_DATA_V2 : ICE_GET_LINK_STATUS_DATA_V1)
648
649 /**
650 * ice_get_link_status_datalen
651 * @hw: pointer to the HW struct
652 *
653  * Returns the Get Link Status data length for this device
654 */
655 static u16 ice_get_link_status_datalen(struct ice_hw *hw)
656 {
657 return (ice_get_link_status_data_ver(hw) ==
658 ICE_GET_LINK_STATUS_DATA_V1) ? ICE_GET_LINK_STATUS_DATALEN_V1 :
659 ICE_GET_LINK_STATUS_DATALEN_V2;
660 }
661
662 /**
663 * ice_aq_get_link_info
664 * @pi: port information structure
665 * @ena_lse: enable/disable LinkStatusEvent reporting
666 * @link: pointer to link status structure - optional
667 * @cd: pointer to command details structure or NULL
668 *
669 * Get Link Status (0x607). Returns the link status of the adapter.
670 */
671 int
672 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
673 struct ice_link_status *link, struct ice_sq_cd *cd)
674 {
675 struct ice_aqc_get_link_status_data link_data = { 0 };
676 struct ice_aqc_get_link_status *resp;
677 struct ice_link_status *li_old, *li;
678 struct ice_fc_info *hw_fc_info;
679 bool tx_pause, rx_pause;
680 struct ice_aq_desc desc;
681 struct ice_hw *hw;
682 u16 cmd_flags;
683 int status;
684
685 if (!pi)
686 return ICE_ERR_PARAM;
687 hw = pi->hw;
688
689 li_old = &pi->phy.link_info_old;
690 li = &pi->phy.link_info;
691 hw_fc_info = &pi->fc;
692
693 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
694 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
695 resp = &desc.params.get_link_status;
696 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
697 resp->lport_num = pi->lport;
698
699 status = ice_aq_send_cmd(hw, &desc, &link_data,
700 ice_get_link_status_datalen(hw), cd);
701 if (status)
702 return status;
703
704 /* save off old link status information */
705 *li_old = *li;
706
707 /* update current link status information */
708 li->link_speed = LE16_TO_CPU(link_data.link_speed);
709 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
710 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
711 li->link_info = link_data.link_info;
712 li->link_cfg_err = link_data.link_cfg_err;
713 li->an_info = link_data.an_info;
714 li->ext_info = link_data.ext_info;
715 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
716 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
717 li->topo_media_conflict = link_data.topo_media_conflict;
718 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
719 ICE_AQ_CFG_PACING_TYPE_M);
720
721 /* update fc info */
722 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
723 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
724 if (tx_pause && rx_pause)
725 hw_fc_info->current_mode = ICE_FC_FULL;
726 else if (tx_pause)
727 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
728 else if (rx_pause)
729 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
730 else
731 hw_fc_info->current_mode = ICE_FC_NONE;
732
733 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
734
735 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
736 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
737 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
738 (unsigned long long)li->phy_type_low);
739 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
740 (unsigned long long)li->phy_type_high);
741 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
742 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
743 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
744 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
745 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
746 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
747 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
748 li->max_frame_size);
749 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
750
751 /* save link status information */
752 if (link)
753 *link = *li;
754
755 /* flag cleared so calling functions don't call AQ again */
756 pi->phy.get_link_info = false;
757
758 return 0;
759 }
760
761 /**
762 * ice_fill_tx_timer_and_fc_thresh
763 * @hw: pointer to the HW struct
764 * @cmd: pointer to MAC cfg structure
765 *
766 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
767 * descriptor
768 */
769 static void
770 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
771 struct ice_aqc_set_mac_cfg *cmd)
772 {
773 u16 fc_thres_val, tx_timer_val;
774 u32 val;
775
776 /* We read back the transmit timer and fc threshold value of
777 * LFC. Thus, we will use index =
778 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
779 *
780 * Also, because we are operating on transmit timer and fc
781 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
782 */
783 #define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
784
785 if ((hw)->mac_type == ICE_MAC_E830) {
786 /* Retrieve the transmit timer */
787 val = rd32(hw, E830_PRTMAC_CL01_PAUSE_QUANTA);
788 tx_timer_val = val & E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M;
789 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
790
791 /* Retrieve the fc threshold */
792 val = rd32(hw, E830_PRTMAC_CL01_QUANTA_THRESH);
793 fc_thres_val = val & E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M;
794 } else {
795 /* Retrieve the transmit timer */
796 val = rd32(hw, E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(E800_IDX_OF_LFC));
797 tx_timer_val = val &
798 E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
799 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
800
801 /* Retrieve the fc threshold */
802 val = rd32(hw, E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(E800_IDX_OF_LFC));
803 fc_thres_val = val & E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
804 }
805
806 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
807 }
808
809 /**
810 * ice_aq_set_mac_cfg
811 * @hw: pointer to the HW struct
812 * @max_frame_size: Maximum Frame Size to be supported
813 * @auto_drop: Tell HW to drop packets if TC queue is blocked
814 * @cd: pointer to command details structure or NULL
815 *
816 * Set MAC configuration (0x0603)
817 */
818 int
819 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
820 struct ice_sq_cd *cd)
821 {
822 struct ice_aqc_set_mac_cfg *cmd;
823 struct ice_aq_desc desc;
824
825 cmd = &desc.params.set_mac_cfg;
826
827 if (max_frame_size == 0)
828 return ICE_ERR_PARAM;
829
830 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
831
832 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
833
834 if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
835 cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
836 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
837
838 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
839 }
840
841 /**
842 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
843 * @hw: pointer to the HW struct
844 */
845 int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
846 {
847 struct ice_switch_info *sw;
848 int status;
849
850 hw->switch_info = (struct ice_switch_info *)
851 ice_malloc(hw, sizeof(*hw->switch_info));
852
853 sw = hw->switch_info;
854
855 if (!sw)
856 return ICE_ERR_NO_MEMORY;
857
858 INIT_LIST_HEAD(&sw->vsi_list_map_head);
859 sw->prof_res_bm_init = 0;
860
861 status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
862 if (status) {
863 ice_free(hw, hw->switch_info);
864 return status;
865 }
866 return 0;
867 }
868
869 /**
870  * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
871 * @hw: pointer to the HW struct
872 * @sw: pointer to switch info struct for which function clears filters
873 */
874 static void
875 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
876 {
877 struct ice_vsi_list_map_info *v_pos_map;
878 struct ice_vsi_list_map_info *v_tmp_map;
879 struct ice_sw_recipe *recps;
880 u8 i;
881
882 if (!sw)
883 return;
884
885 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
886 ice_vsi_list_map_info, list_entry) {
887 LIST_DEL(&v_pos_map->list_entry);
888 ice_free(hw, v_pos_map);
889 }
890 recps = sw->recp_list;
891 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
892 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
893
894 recps[i].root_rid = i;
895 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
896 &recps[i].rg_list, ice_recp_grp_entry,
897 l_entry) {
898 LIST_DEL(&rg_entry->l_entry);
899 ice_free(hw, rg_entry);
900 }
901
902 if (recps[i].adv_rule) {
903 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
904 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
905
906 ice_destroy_lock(&recps[i].filt_rule_lock);
907 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
908 &recps[i].filt_rules,
909 ice_adv_fltr_mgmt_list_entry,
910 list_entry) {
911 LIST_DEL(&lst_itr->list_entry);
912 ice_free(hw, lst_itr->lkups);
913 ice_free(hw, lst_itr);
914 }
915 } else {
916 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
917
918 ice_destroy_lock(&recps[i].filt_rule_lock);
919 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
920 &recps[i].filt_rules,
921 ice_fltr_mgmt_list_entry,
922 list_entry) {
923 LIST_DEL(&lst_itr->list_entry);
924 ice_free(hw, lst_itr);
925 }
926 }
927 if (recps[i].root_buf)
928 ice_free(hw, recps[i].root_buf);
929 }
930 ice_rm_sw_replay_rule_info(hw, sw);
931 ice_free(hw, sw->recp_list);
932 ice_free(hw, sw);
933 }
934
935 /**
936 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
937 * @hw: pointer to the HW struct
938 */
939 void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
940 {
941 ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
942 }
943
944 /**
945 * ice_get_itr_intrl_gran
946 * @hw: pointer to the HW struct
947 *
948 * Determines the ITR/INTRL granularities based on the maximum aggregate
949 * bandwidth according to the device's configuration during power-on.
950 */
951 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
952 {
953 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
954 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
955 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
956
957 switch (max_agg_bw) {
958 case ICE_MAX_AGG_BW_200G:
959 case ICE_MAX_AGG_BW_100G:
960 case ICE_MAX_AGG_BW_50G:
961 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
962 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
963 break;
964 case ICE_MAX_AGG_BW_25G:
965 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
966 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
967 break;
968 }
969 }
970
971 /**
972 * ice_print_rollback_msg - print FW rollback message
973 * @hw: pointer to the hardware structure
974 */
975 void ice_print_rollback_msg(struct ice_hw *hw)
976 {
977 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
978 struct ice_orom_info *orom;
979 struct ice_nvm_info *nvm;
980
981 orom = &hw->flash.orom;
982 nvm = &hw->flash.nvm;
983
984 (void)SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
985 nvm->major, nvm->minor, nvm->eetrack, orom->major,
986 orom->build, orom->patch);
987 ice_warn(hw,
988 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
989 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
990 }
991
992 /**
993 * ice_set_umac_shared
994 * @hw: pointer to the hw struct
995 *
996 * Set boolean flag to allow unicast MAC sharing
997 */
998 void ice_set_umac_shared(struct ice_hw *hw)
999 {
1000 hw->umac_shared = true;
1001 }
1002
1003 /**
1004 * ice_init_hw - main hardware initialization routine
1005 * @hw: pointer to the hardware structure
1006 */
1007 int ice_init_hw(struct ice_hw *hw)
1008 {
1009 struct ice_aqc_get_phy_caps_data *pcaps;
1010 u16 mac_buf_len;
1011 void *mac_buf;
1012 int status;
1013
1014 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1015
1016 /* Set MAC type based on DeviceID */
1017 status = ice_set_mac_type(hw);
1018 if (status)
1019 return status;
1020
1021 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
1022 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
1023 PF_FUNC_RID_FUNCTION_NUMBER_S;
1024
1025 status = ice_reset(hw, ICE_RESET_PFR);
1026 if (status)
1027 return status;
1028 ice_get_itr_intrl_gran(hw);
1029
1030 hw->fw_vsi_num = ICE_DFLT_VSI_INVAL;
1031
1032 status = ice_create_all_ctrlq(hw);
1033 if (status)
1034 goto err_unroll_cqinit;
1035
1036 ice_fwlog_set_support_ena(hw);
1037 status = ice_fwlog_set(hw, &hw->fwlog_cfg);
1038 if (status) {
1039 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
1040 status);
1041 } else {
1042 if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
1043 status = ice_fwlog_register(hw);
1044 if (status)
1045 ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
1046 status);
1047 } else {
1048 status = ice_fwlog_unregister(hw);
1049 if (status)
1050 ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
1051 status);
1052 }
1053 }
1054
1055 status = ice_init_nvm(hw);
1056 if (status)
1057 goto err_unroll_cqinit;
1058
1059 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
1060 ice_print_rollback_msg(hw);
1061
1062 if (!hw->skip_clear_pf) {
1063 status = ice_clear_pf_cfg(hw);
1064 if (status)
1065 goto err_unroll_cqinit;
1066 }
1067
1068 ice_clear_pxe_mode(hw);
1069
1070 status = ice_get_caps(hw);
1071 if (status)
1072 goto err_unroll_cqinit;
1073
1074 if (!hw->port_info)
1075 hw->port_info = (struct ice_port_info *)
1076 ice_malloc(hw, sizeof(*hw->port_info));
1077 if (!hw->port_info) {
1078 status = ICE_ERR_NO_MEMORY;
1079 goto err_unroll_cqinit;
1080 }
1081
1082 hw->port_info->loopback_mode = ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_NORMAL;
1083
1084 /* set the back pointer to HW */
1085 hw->port_info->hw = hw;
1086
1087 /* Initialize port_info struct with switch configuration data */
1088 status = ice_get_initial_sw_cfg(hw);
1089 if (status)
1090 goto err_unroll_alloc;
1091
1092 hw->evb_veb = true;
1093 /* Query the allocated resources for Tx scheduler */
1094 status = ice_sched_query_res_alloc(hw);
1095 if (status) {
1096 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
1097 goto err_unroll_alloc;
1098 }
1099 ice_sched_get_psm_clk_freq(hw);
1100
1101 /* Initialize port_info struct with scheduler data */
1102 status = ice_sched_init_port(hw->port_info);
1103 if (status)
1104 goto err_unroll_sched;
1105 pcaps = (struct ice_aqc_get_phy_caps_data *)
1106 ice_malloc(hw, sizeof(*pcaps));
1107 if (!pcaps) {
1108 status = ICE_ERR_NO_MEMORY;
1109 goto err_unroll_sched;
1110 }
1111
1112 /* Initialize port_info struct with PHY capabilities */
1113 status = ice_aq_get_phy_caps(hw->port_info, false,
1114 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
1115 ice_free(hw, pcaps);
1116 if (status)
1117 ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
1118 status);
1119
1120 /* Initialize port_info struct with link information */
1121 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
1122 if (status)
1123 goto err_unroll_sched;
1124 /* need a valid SW entry point to build a Tx tree */
1125 if (!hw->sw_entry_point_layer) {
1126 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
1127 status = ICE_ERR_CFG;
1128 goto err_unroll_sched;
1129 }
1130 INIT_LIST_HEAD(&hw->agg_list);
1131 /* Initialize max burst size */
1132 if (!hw->max_burst_size)
1133 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1134 status = ice_init_fltr_mgmt_struct(hw);
1135 if (status)
1136 goto err_unroll_sched;
1137
1138 /* Get MAC information */
1139
1140 /* A single port can report up to two (LAN and WoL) addresses */
1141 mac_buf = ice_calloc(hw, 2,
1142 sizeof(struct ice_aqc_manage_mac_read_resp));
1143 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
1144
1145 if (!mac_buf) {
1146 status = ICE_ERR_NO_MEMORY;
1147 goto err_unroll_fltr_mgmt_struct;
1148 }
1149
1150 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1151 ice_free(hw, mac_buf);
1152
1153 if (status)
1154 goto err_unroll_fltr_mgmt_struct;
1155
1156 /* enable jumbo frame support at MAC level */
1157 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
1158 NULL);
1159 if (status)
1160 goto err_unroll_fltr_mgmt_struct;
1161
1162 status = ice_init_hw_tbls(hw);
1163 if (status)
1164 goto err_unroll_fltr_mgmt_struct;
1165 ice_init_lock(&hw->tnl_lock);
1166
1167 return 0;
1168
1169 err_unroll_fltr_mgmt_struct:
1170 ice_cleanup_fltr_mgmt_struct(hw);
1171 err_unroll_sched:
1172 ice_sched_cleanup_all(hw);
1173 err_unroll_alloc:
1174 ice_free(hw, hw->port_info);
1175 hw->port_info = NULL;
1176 err_unroll_cqinit:
1177 ice_destroy_all_ctrlq(hw);
1178 return status;
1179 }
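/* Note on the unwind path above: the error labels release resources in the
 * reverse order of initialization (filter management, scheduler, port_info,
 * control queues), so a failure at any step only tears down what was already
 * set up before returning the error.
 */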
1180
1181 /**
1182 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1183 * @hw: pointer to the hardware structure
1184 *
1185 * This should be called only during nominal operation, not as a result of
1186  * ice_init_hw() failing, since ice_init_hw() will take care of unrolling
1187 * applicable initializations if it fails for any reason.
1188 */
1189 void ice_deinit_hw(struct ice_hw *hw)
1190 {
1191 ice_cleanup_fltr_mgmt_struct(hw);
1192
1193 ice_sched_cleanup_all(hw);
1194 ice_sched_clear_agg(hw);
1195 ice_free_seg(hw);
1196 ice_free_hw_tbls(hw);
1197 ice_destroy_lock(&hw->tnl_lock);
1198
1199 if (hw->port_info) {
1200 ice_free(hw, hw->port_info);
1201 hw->port_info = NULL;
1202 }
1203
1204 ice_destroy_all_ctrlq(hw);
1205
1206 /* Clear VSI contexts if not already cleared */
1207 ice_clear_all_vsi_ctx(hw);
1208 }
1209
1210 /**
1211 * ice_check_reset - Check to see if a global reset is complete
1212 * @hw: pointer to the hardware structure
1213 */
1214 int ice_check_reset(struct ice_hw *hw)
1215 {
1216 u32 cnt, reg = 0, grst_timeout, uld_mask, reset_wait_cnt;
1217
1218 /* Poll for Device Active state in case a recent CORER, GLOBR,
1219 * or EMPR has occurred. The grst delay value is in 100ms units.
1220 * Add 1sec for outstanding AQ commands that can take a long time.
1221 */
1222 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1223 GLGEN_RSTCTL_GRSTDEL_S) + 10;
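	/* the "+ 10" above adds ten extra 100 ms polls, i.e. the ~1 second
	 * margin for outstanding AQ commands mentioned in the comment above
	 */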
1224
1225 for (cnt = 0; cnt < grst_timeout; cnt++) {
1226 ice_msec_delay(100, true);
1227 reg = rd32(hw, GLGEN_RSTAT);
1228 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1229 break;
1230 }
1231
1232 if (cnt == grst_timeout) {
1233 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1234 return ICE_ERR_RESET_FAILED;
1235 }
1236
1237 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1238 GLNVM_ULD_PCIER_DONE_1_M |\
1239 GLNVM_ULD_CORER_DONE_M |\
1240 GLNVM_ULD_GLOBR_DONE_M |\
1241 GLNVM_ULD_POR_DONE_M |\
1242 GLNVM_ULD_POR_DONE_1_M |\
1243 GLNVM_ULD_PCIER_DONE_2_M)
1244
1245 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ?
1246 GLNVM_ULD_PE_DONE_M : 0);
1247
1248 reset_wait_cnt = ICE_PF_RESET_WAIT_COUNT;
1249
1250 /* Device is Active; check Global Reset processes are done */
1251 for (cnt = 0; cnt < reset_wait_cnt; cnt++) {
1252 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1253 if (reg == uld_mask) {
1254 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1255 break;
1256 }
1257 ice_msec_delay(10, true);
1258 }
1259
1260 if (cnt == reset_wait_cnt) {
1261 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1262 reg);
1263 return ICE_ERR_RESET_FAILED;
1264 }
1265
1266 return 0;
1267 }
1268
1269 /**
1270 * ice_pf_reset - Reset the PF
1271 * @hw: pointer to the hardware structure
1272 *
1273 * If a global reset has been triggered, this function checks
1274 * for its completion and then issues the PF reset
1275 */
1276 static int ice_pf_reset(struct ice_hw *hw)
1277 {
1278 u32 cnt, reg, reset_wait_cnt, cfg_lock_timeout;
1279
1280 /* If at function entry a global reset was already in progress, i.e.
1281 * state is not 'device active' or any of the reset done bits are not
1282 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1283 * global reset is done.
1284 */
1285 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1286 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1287 /* poll on global reset currently in progress until done */
1288 if (ice_check_reset(hw))
1289 return ICE_ERR_RESET_FAILED;
1290
1291 return 0;
1292 }
1293
1294 /* Reset the PF */
1295 reg = rd32(hw, PFGEN_CTRL);
1296
1297 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1298
1299 /* Wait for the PFR to complete. The wait time is the global config lock
1300 * timeout plus the PFR timeout which will account for a possible reset
1301 * that is occurring during a download package operation.
1302 */
1303 reset_wait_cnt = ICE_PF_RESET_WAIT_COUNT;
1304 cfg_lock_timeout = ICE_GLOBAL_CFG_LOCK_TIMEOUT;
1305
1306 for (cnt = 0; cnt < cfg_lock_timeout + reset_wait_cnt; cnt++) {
1307 reg = rd32(hw, PFGEN_CTRL);
1308 if (!(reg & PFGEN_CTRL_PFSWR_M))
1309 break;
1310
1311 ice_msec_delay(1, true);
1312 }
1313
1314 if (cnt == cfg_lock_timeout + reset_wait_cnt) {
1315 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1316 return ICE_ERR_RESET_FAILED;
1317 }
1318
1319 return 0;
1320 }
1321
1322 /**
1323 * ice_reset - Perform different types of reset
1324 * @hw: pointer to the hardware structure
1325 * @req: reset request
1326 *
1327 * This function triggers a reset as specified by the req parameter.
1328 *
1329 * Note:
1330 * If anything other than a PF reset is triggered, PXE mode is restored.
1331 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1332 * interface has been restored in the rebuild flow.
1333 */
1334 int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1335 {
1336 u32 val = 0;
1337
1338 switch (req) {
1339 case ICE_RESET_PFR:
1340 return ice_pf_reset(hw);
1341 case ICE_RESET_CORER:
1342 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1343 val = GLGEN_RTRIG_CORER_M;
1344 break;
1345 case ICE_RESET_GLOBR:
1346 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1347 val = GLGEN_RTRIG_GLOBR_M;
1348 break;
1349 default:
1350 return ICE_ERR_PARAM;
1351 }
1352
1353 val |= rd32(hw, GLGEN_RTRIG);
1354 wr32(hw, GLGEN_RTRIG, val);
1355 ice_flush(hw);
1356
1357 /* wait for the FW to be ready */
1358 return ice_check_reset(hw);
1359 }
1360
1361 /**
1362 * ice_copy_rxq_ctx_to_hw
1363 * @hw: pointer to the hardware structure
1364 * @ice_rxq_ctx: pointer to the rxq context
1365 * @rxq_index: the index of the Rx queue
1366 *
1367 * Copies rxq context from dense structure to HW register space
1368 */
1369 static int
1370 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1371 {
1372 u8 i;
1373
1374 if (!ice_rxq_ctx)
1375 return ICE_ERR_BAD_PTR;
1376
1377 if (rxq_index > QRX_CTRL_MAX_INDEX)
1378 return ICE_ERR_PARAM;
1379
1380 /* Copy each dword separately to HW */
1381 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1382 wr32(hw, QRX_CONTEXT(i, rxq_index),
1383 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1384
1385 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1386 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1387 }
1388
1389 return 0;
1390 }
1391
1392 /**
1393 * ice_copy_rxq_ctx_from_hw - Copy rxq context register from HW
1394 * @hw: pointer to the hardware structure
1395 * @ice_rxq_ctx: pointer to the rxq context
1396 * @rxq_index: the index of the Rx queue
1397 *
1398 * Copies rxq context from HW register space to dense structure
1399 */
1400 static int
1401 ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1402 {
1403 u8 i;
1404
1405 if (!ice_rxq_ctx)
1406 return ICE_ERR_BAD_PTR;
1407
1408 if (rxq_index > QRX_CTRL_MAX_INDEX)
1409 return ICE_ERR_PARAM;
1410
1411 /* Copy each dword separately from HW */
1412 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1413 u32 *ctx = (u32 *)(ice_rxq_ctx + (i * sizeof(u32)));
1414
1415 *ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
1416
1417 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
1418 }
1419
1420 return 0;
1421 }
1422
1423 /* LAN Rx Queue Context */
1424 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1425 /* Field Width LSB */
1426 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1427 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1428 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1429 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1430 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1431 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1432 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1433 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1434 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1435 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1436 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1437 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1438 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1439 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1440 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1441 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1442 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1443 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1444 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1445 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1446 { 0 }
1447 };
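/* Each ICE_CTX_STORE() entry above gives a struct ice_rlan_ctx field, its
 * width in bits and its LSB position within the dense ICE_RXQ_CTX_SZ-byte
 * context image; ice_set_ctx()/ice_get_ctx() use this table to pack and
 * unpack that image. For example, "qlen" occupies the 13 bits starting at
 * bit 89 of the dense buffer.
 */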
1448
1449 /**
1450 * ice_write_rxq_ctx
1451 * @hw: pointer to the hardware structure
1452 * @rlan_ctx: pointer to the rxq context
1453 * @rxq_index: the index of the Rx queue
1454 *
1455 * Converts rxq context from sparse to dense structure and then writes
1456 * it to HW register space and enables the hardware to prefetch descriptors
1457 * instead of only fetching them on demand
1458 */
1459 int
1460 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1461 u32 rxq_index)
1462 {
1463 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1464
1465 if (!rlan_ctx)
1466 return ICE_ERR_BAD_PTR;
1467
1468 rlan_ctx->prefena = 1;
1469
1470 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1471 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1472 }
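/* Usage sketch (illustrative only; the ">> 7" scaling of base/dbuf to
 * 128-byte units is an assumption borrowed from the usual ring setup, not
 * something this file defines):
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_phys_addr >> 7;
 *	rlan_ctx.qlen = ring_len;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;
 *	err = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 *
 * prefena is forced to 1 by ice_write_rxq_ctx() itself, so callers don't need
 * to set it.
 */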
1473
1474 /**
1475 * ice_read_rxq_ctx - Read rxq context from HW
1476 * @hw: pointer to the hardware structure
1477 * @rlan_ctx: pointer to the rxq context
1478 * @rxq_index: the index of the Rx queue
1479 *
1480  * Reads rxq context from HW register space and then converts it from dense
1481 * structure to sparse
1482 */
1483 int
1484 ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1485 u32 rxq_index)
1486 {
1487 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1488 int status;
1489
1490 if (!rlan_ctx)
1491 return ICE_ERR_BAD_PTR;
1492
1493 status = ice_copy_rxq_ctx_from_hw(hw, ctx_buf, rxq_index);
1494 if (status)
1495 return status;
1496
1497 return ice_get_ctx(ctx_buf, (u8 *)rlan_ctx, ice_rlan_ctx_info);
1498 }
1499
1500 /**
1501 * ice_clear_rxq_ctx
1502 * @hw: pointer to the hardware structure
1503 * @rxq_index: the index of the Rx queue to clear
1504 *
1505 * Clears rxq context in HW register space
1506 */
1507 int ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1508 {
1509 u8 i;
1510
1511 if (rxq_index > QRX_CTRL_MAX_INDEX)
1512 return ICE_ERR_PARAM;
1513
1514 /* Clear each dword register separately */
1515 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1516 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1517
1518 return 0;
1519 }
1520
1521 /* LAN Tx Queue Context used for set Tx config by ice_aqc_opc_add_txqs,
1522 * Bit[0-175] is valid
1523 */
1524 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1525 /* Field Width LSB */
1526 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1527 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1528 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1529 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1530 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1531 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1532 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1533 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1534 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1535 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1536 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1537 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1538 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1539 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1540 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1541 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1542 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1543 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1544 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1545 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1546 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1547 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1548 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1549 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1550 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1551 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1552 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1553 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1554 { 0 }
1555 };
1556
1557 /**
1558 * ice_copy_tx_cmpltnq_ctx_to_hw
1559 * @hw: pointer to the hardware structure
1560 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1561 * @tx_cmpltnq_index: the index of the completion queue
1562 *
1563 * Copies Tx completion queue context from dense structure to HW register space
1564 */
1565 static int
1566 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1567 u32 tx_cmpltnq_index)
1568 {
1569 u8 i;
1570
1571 if (!ice_tx_cmpltnq_ctx)
1572 return ICE_ERR_BAD_PTR;
1573
1574 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1575 return ICE_ERR_PARAM;
1576
1577 /* Copy each dword separately to HW */
1578 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1579 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1580 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1581
1582 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1583 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1584 }
1585
1586 return 0;
1587 }
1588
1589 /* LAN Tx Completion Queue Context */
1590 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1591 /* Field Width LSB */
1592 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1593 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1594 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1595 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1596 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1597 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1598 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1599 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1600 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1601 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1602 { 0 }
1603 };
1604
1605 /**
1606 * ice_write_tx_cmpltnq_ctx
1607 * @hw: pointer to the hardware structure
1608 * @tx_cmpltnq_ctx: pointer to the completion queue context
1609 * @tx_cmpltnq_index: the index of the completion queue
1610 *
1611 * Converts completion queue context from sparse to dense structure and then
1612 * writes it to HW register space
1613 */
1614 int
1615 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1616 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1617 u32 tx_cmpltnq_index)
1618 {
1619 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1620
1621 ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1622 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1623 }
1624
1625 /**
1626 * ice_clear_tx_cmpltnq_ctx
1627 * @hw: pointer to the hardware structure
1628 * @tx_cmpltnq_index: the index of the completion queue to clear
1629 *
1630 * Clears Tx completion queue context in HW register space
1631 */
1632 int
1633 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1634 {
1635 u8 i;
1636
1637 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1638 return ICE_ERR_PARAM;
1639
1640 /* Clear each dword register separately */
1641 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1642 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1643
1644 return 0;
1645 }
1646
1647 /**
1648 * ice_copy_tx_drbell_q_ctx_to_hw
1649 * @hw: pointer to the hardware structure
1650 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1651 * @tx_drbell_q_index: the index of the doorbell queue
1652 *
1653 * Copies doorbell queue context from dense structure to HW register space
1654 */
1655 static int
1656 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1657 u32 tx_drbell_q_index)
1658 {
1659 u8 i;
1660
1661 if (!ice_tx_drbell_q_ctx)
1662 return ICE_ERR_BAD_PTR;
1663
1664 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1665 return ICE_ERR_PARAM;
1666
1667 /* Copy each dword separately to HW */
1668 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1669 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1670 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1671
1672 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1673 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1674 }
1675
1676 return 0;
1677 }
1678
1679 /* LAN Tx Doorbell Queue Context info */
1680 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1681 /* Field Width LSB */
1682 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1683 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1684 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1685 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1686 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1687 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1688 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1689 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1690 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1691 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1692 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1693 { 0 }
1694 };
1695
1696 /**
1697 * ice_write_tx_drbell_q_ctx
1698 * @hw: pointer to the hardware structure
1699 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1700 * @tx_drbell_q_index: the index of the doorbell queue
1701 *
1702 * Converts doorbell queue context from sparse to dense structure and then
1703 * writes it to HW register space
1704 */
1705 int
1706 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1707 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1708 u32 tx_drbell_q_index)
1709 {
1710 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1711
1712 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1713 ice_tx_drbell_q_ctx_info);
1714 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1715 }
1716
1717 /**
1718 * ice_clear_tx_drbell_q_ctx
1719 * @hw: pointer to the hardware structure
1720 * @tx_drbell_q_index: the index of the doorbell queue to clear
1721 *
1722 * Clears doorbell queue context in HW register space
1723 */
1724 int
1725 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1726 {
1727 u8 i;
1728
1729 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1730 return ICE_ERR_PARAM;
1731
1732 /* Clear each dword register separately */
1733 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1734 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1735
1736 return 0;
1737 }
1738
1739 /* Sideband Queue command wrappers */
1740
1741 /**
1742 * ice_get_sbq - returns the right control queue to use for sideband
1743 * @hw: pointer to the hardware structure
1744 */
1745 static struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
1746 {
1747 if (!ice_is_generic_mac(hw))
1748 return &hw->adminq;
1749 return &hw->sbq;
1750 }
1751
1752 /**
1753 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
1754 * @hw: pointer to the HW struct
1755 * @desc: descriptor describing the command
1756 * @buf: buffer to use for indirect commands (NULL for direct commands)
1757 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1758 * @cd: pointer to command details structure
1759 */
1760 static int
1761 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1762 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1763 {
1764 return ice_sq_send_cmd(hw, ice_get_sbq(hw), (struct ice_aq_desc *)desc,
1765 buf, buf_size, cd);
1766 }
1767
1768 /**
1769 * ice_sbq_send_cmd_nolock - send Sideband Queue command to Sideband Queue
1770 * but do not lock sq_lock
1771 * @hw: pointer to the HW struct
1772 * @desc: descriptor describing the command
1773 * @buf: buffer to use for indirect commands (NULL for direct commands)
1774 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1775 * @cd: pointer to command details structure
1776 */
1777 static int
1778 ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1779 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1780 {
1781 return ice_sq_send_cmd_nolock(hw, ice_get_sbq(hw),
1782 (struct ice_aq_desc *)desc, buf,
1783 buf_size, cd);
1784 }
1785
1786 /**
1787 * ice_sbq_rw_reg_lp - Fill Sideband Queue command, with lock parameter
1788 * @hw: pointer to the HW struct
1789 * @in: message info to be filled in descriptor
1790 * @flag: flag to fill desc structure
1791 * @lock: true to lock the sq_lock (the usual case); false if the sq_lock has
1792 * already been locked at a higher level
1793 */
1794 int ice_sbq_rw_reg_lp(struct ice_hw *hw, struct ice_sbq_msg_input *in,
1795 u16 flag, bool lock)
1796 {
1797 struct ice_sbq_cmd_desc desc = {0};
1798 struct ice_sbq_msg_req msg = {0};
1799 u16 msg_len;
1800 int status;
1801
1802 msg_len = sizeof(msg);
1803
1804 msg.dest_dev = in->dest_dev;
1805 msg.opcode = in->opcode;
1806 msg.flags = ICE_SBQ_MSG_FLAGS;
1807 msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1808 msg.msg_addr_low = CPU_TO_LE16(in->msg_addr_low);
1809 msg.msg_addr_high = CPU_TO_LE32(in->msg_addr_high);
1810
1811 if (in->opcode)
1812 msg.data = CPU_TO_LE32(in->data);
1813 else
1814 /* data read comes back in completion, so shorten the struct by
1815 * sizeof(msg.data)
1816 */
1817 msg_len -= sizeof(msg.data);
1818
1819 desc.flags = CPU_TO_LE16(flag);
1820 desc.opcode = CPU_TO_LE16(ice_sbq_opc_neigh_dev_req);
1821 desc.param0.cmd_len = CPU_TO_LE16(msg_len);
1822 if (lock)
1823 status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1824 else
1825 status = ice_sbq_send_cmd_nolock(hw, &desc, &msg, msg_len,
1826 NULL);
1827 if (!status && !in->opcode)
1828 in->data = LE32_TO_CPU
1829 (((struct ice_sbq_msg_cmpl *)&msg)->data);
1830 return status;
1831 }
1832
1833 /**
1834 * ice_sbq_rw_reg - Fill Sideband Queue command
1835 * @hw: pointer to the HW struct
1836 * @in: message info to be filled in descriptor
1837 * @flag: flag to fill desc structure
1838 */
1839 int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag)
1840 {
1841 return ice_sbq_rw_reg_lp(hw, in, flag, true);
1842 }
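
/*
 * Illustrative usage sketch (not part of the driver): reading a register on
 * a sideband-attached device. An opcode of 0 takes the read path above, so
 * the result is returned in msg.data. The destination device, address split
 * and descriptor flag are assumptions for this example.
 *
 *	struct ice_sbq_msg_input msg = { 0 };
 *	int err;
 *
 *	msg.dest_dev = dest;		// target device on the sideband
 *	msg.opcode = 0;			// 0 selects a read in this flow
 *	msg.msg_addr_low = addr_low;	// low 16 bits of register address
 *	msg.msg_addr_high = addr_high;	// remaining address bits
 *
 *	err = ice_sbq_rw_reg(hw, &msg, flag);
 *	if (!err)
 *		val = msg.data;		// read completed, data is valid
 */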
1843
1844 /**
1845 * ice_sbq_lock - Lock the sideband queue's sq_lock
1846 * @hw: pointer to the HW struct
1847 */
1848 void ice_sbq_lock(struct ice_hw *hw)
1849 {
1850 ice_acquire_lock(&ice_get_sbq(hw)->sq_lock);
1851 }
1852
1853 /**
1854 * ice_sbq_unlock - Unlock the sideband queue's sq_lock
1855 * @hw: pointer to the HW struct
1856 */
1857 void ice_sbq_unlock(struct ice_hw *hw)
1858 {
1859 ice_release_lock(&ice_get_sbq(hw)->sq_lock);
1860 }
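
/*
 * Illustrative usage sketch (not part of the driver): when several sideband
 * accesses must be issued back to back without another thread interleaving
 * on the queue, the lock can be taken once and the no-lock variant used
 * inside. The two message structures and the flag are assumptions here.
 *
 *	ice_sbq_lock(hw);
 *	err = ice_sbq_rw_reg_lp(hw, &first_msg, flag, false);
 *	if (!err)
 *		err = ice_sbq_rw_reg_lp(hw, &second_msg, flag, false);
 *	ice_sbq_unlock(hw);
 */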
1861
1862 /* FW Admin Queue command wrappers */
1863
1864 /**
1865 * ice_should_retry_sq_send_cmd
1866 * @opcode: AQ opcode
1867 *
1868 * Decide if we should retry the send command routine for the ATQ, depending
1869 * on the opcode.
1870 */
1871 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1872 {
1873 switch (opcode) {
1874 case ice_aqc_opc_dnl_get_status:
1875 case ice_aqc_opc_dnl_run:
1876 case ice_aqc_opc_dnl_call:
1877 case ice_aqc_opc_dnl_read_sto:
1878 case ice_aqc_opc_dnl_write_sto:
1879 case ice_aqc_opc_dnl_set_breakpoints:
1880 case ice_aqc_opc_dnl_read_log:
1881 case ice_aqc_opc_get_link_topo:
1882 case ice_aqc_opc_done_alt_write:
1883 case ice_aqc_opc_lldp_stop:
1884 case ice_aqc_opc_lldp_start:
1885 case ice_aqc_opc_lldp_filter_ctrl:
1886 return true;
1887 }
1888
1889 return false;
1890 }
1891
1892 /**
1893 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1894 * @hw: pointer to the HW struct
1895 * @cq: pointer to the specific Control queue
1896 * @desc: prefilled descriptor describing the command
1897 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1898 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1899 * @cd: pointer to command details structure
1900 *
1901 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1902 * Queue if the EBUSY AQ error is returned.
1903 */
1904 static int
1905 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1906 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1907 struct ice_sq_cd *cd)
1908 {
1909 struct ice_aq_desc desc_cpy;
1910 bool is_cmd_for_retry;
1911 u8 *buf_cpy = NULL;
1912 u8 idx = 0;
1913 u16 opcode;
1914 int status;
1915
1916 opcode = LE16_TO_CPU(desc->opcode);
1917 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1918 ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1919
1920 if (is_cmd_for_retry) {
1921 if (buf) {
1922 buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1923 if (!buf_cpy)
1924 return ICE_ERR_NO_MEMORY;
1925 }
1926
1927 ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1928 ICE_NONDMA_TO_NONDMA);
1929 }
1930
1931 do {
1932 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1933
1934 if (!is_cmd_for_retry || !status ||
1935 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1936 break;
1937
1938 if (buf_cpy)
1939 ice_memcpy(buf, buf_cpy, buf_size,
1940 ICE_NONDMA_TO_NONDMA);
1941
1942 ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1943 ICE_NONDMA_TO_NONDMA);
1944
1945 ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1946
1947 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1948
1949 if (buf_cpy)
1950 ice_free(hw, buf_cpy);
1951
1952 return status;
1953 }
1954
1955 /**
1956 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1957 * @hw: pointer to the HW struct
1958 * @desc: descriptor describing the command
1959 * @buf: buffer to use for indirect commands (NULL for direct commands)
1960 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1961 * @cd: pointer to command details structure
1962 *
1963 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1964 */
1965 int
1966 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1967 u16 buf_size, struct ice_sq_cd *cd)
1968 {
1969 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1970 }
1971
1972 /**
1973 * ice_aq_get_fw_ver
1974 * @hw: pointer to the HW struct
1975 * @cd: pointer to command details structure or NULL
1976 *
1977 * Get the firmware version (0x0001) from the admin queue commands
1978 */
1979 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1980 {
1981 struct ice_aqc_get_ver *resp;
1982 struct ice_aq_desc desc;
1983 int status;
1984
1985 resp = &desc.params.get_ver;
1986
1987 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1988
1989 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1990
1991 if (!status) {
1992 hw->fw_branch = resp->fw_branch;
1993 hw->fw_maj_ver = resp->fw_major;
1994 hw->fw_min_ver = resp->fw_minor;
1995 hw->fw_patch = resp->fw_patch;
1996 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1997 hw->api_branch = resp->api_branch;
1998 hw->api_maj_ver = resp->api_major;
1999 hw->api_min_ver = resp->api_minor;
2000 hw->api_patch = resp->api_patch;
2001 }
2002
2003 return status;
2004 }
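
/*
 * Illustrative usage sketch (not part of the driver): after a successful
 * ice_aq_get_fw_ver() the cached version fields can gate optional features.
 * The MIN_API_MAJ/MIN_API_MIN constants are placeholders for this example.
 *
 *	if (!ice_aq_get_fw_ver(hw, NULL) &&
 *	    (hw->api_maj_ver > MIN_API_MAJ ||
 *	     (hw->api_maj_ver == MIN_API_MAJ &&
 *	      hw->api_min_ver >= MIN_API_MIN))) {
 *		// AQ API is new enough for the optional feature
 *	}
 */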
2005
2006 /**
2007 * ice_aq_send_driver_ver
2008 * @hw: pointer to the HW struct
2009 * @dv: driver's major, minor version
2010 * @cd: pointer to command details structure or NULL
2011 *
2012 * Send the driver version (0x0002) to the firmware
2013 */
2014 int
2015 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
2016 struct ice_sq_cd *cd)
2017 {
2018 struct ice_aqc_driver_ver *cmd;
2019 struct ice_aq_desc desc;
2020 u16 len;
2021
2022 cmd = &desc.params.driver_ver;
2023
2024 if (!dv)
2025 return ICE_ERR_PARAM;
2026
2027 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
2028
2029 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2030 cmd->major_ver = dv->major_ver;
2031 cmd->minor_ver = dv->minor_ver;
2032 cmd->build_ver = dv->build_ver;
2033 cmd->subbuild_ver = dv->subbuild_ver;
2034
2035 len = 0;
2036 while (len < sizeof(dv->driver_string) &&
2037 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
2038 len++;
2039
2040 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
2041 }
2042
2043 /**
2044 * ice_aq_q_shutdown
2045 * @hw: pointer to the HW struct
2046 * @unloading: is the driver unloading itself
2047 *
2048 * Tell the Firmware that we're shutting down the AdminQ and whether
2049 * or not the driver is unloading as well (0x0003).
2050 */
2051 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
2052 {
2053 struct ice_aqc_q_shutdown *cmd;
2054 struct ice_aq_desc desc;
2055
2056 cmd = &desc.params.q_shutdown;
2057
2058 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
2059
2060 if (unloading)
2061 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
2062
2063 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2064 }
2065
2066 /**
2067 * ice_aq_req_res
2068 * @hw: pointer to the HW struct
2069 * @res: resource ID
2070 * @access: access type
2071 * @sdp_number: resource number
2072 * @timeout: the maximum time in ms that the driver may hold the resource
2073 * @cd: pointer to command details structure or NULL
2074 *
2075 * Requests common resource using the admin queue commands (0x0008).
2076 * When attempting to acquire the Global Config Lock, the driver can
2077 * learn of three states:
2078 * 1) 0 - acquired lock, and can perform download package
2079 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
2080 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
2081 * successfully downloaded the package; the driver does
2082 * not have to download the package and can continue
2083 * loading
2084 *
2085 * Note that if the caller is in an acquire lock, perform action, release lock
2086 * phase of operation, it is possible that the FW may detect a timeout and issue
2087 * a CORER. In this case, the driver will receive a CORER interrupt and will
2088 * have to determine its cause. The calling thread that is handling this flow
2089 * will likely get an error propagated back to it indicating the Download
2090 * Package, Update Package or the Release Resource AQ commands timed out.
2091 */
2092 static int
2093 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2094 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
2095 struct ice_sq_cd *cd)
2096 {
2097 struct ice_aqc_req_res *cmd_resp;
2098 struct ice_aq_desc desc;
2099 int status;
2100
2101 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2102
2103 cmd_resp = &desc.params.res_owner;
2104
2105 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
2106
2107 cmd_resp->res_id = CPU_TO_LE16(res);
2108 cmd_resp->access_type = CPU_TO_LE16(access);
2109 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
2110 cmd_resp->timeout = CPU_TO_LE32(*timeout);
2111 *timeout = 0;
2112
2113 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2114
2115 /* The completion specifies the maximum time in ms that the driver
2116 * may hold the resource in the Timeout field.
2117 */
2118
2119 /* Global config lock response utilizes an additional status field.
2120 *
2121 * If the Global config lock resource is held by some other driver, the
2122 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
2123 * and the timeout field indicates the maximum time the current owner
2124 * of the resource has to free it.
2125 */
2126 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
2127 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
2128 *timeout = LE32_TO_CPU(cmd_resp->timeout);
2129 return 0;
2130 } else if (LE16_TO_CPU(cmd_resp->status) ==
2131 ICE_AQ_RES_GLBL_IN_PROG) {
2132 *timeout = LE32_TO_CPU(cmd_resp->timeout);
2133 return ICE_ERR_AQ_ERROR;
2134 } else if (LE16_TO_CPU(cmd_resp->status) ==
2135 ICE_AQ_RES_GLBL_DONE) {
2136 return ICE_ERR_AQ_NO_WORK;
2137 }
2138
2139 /* invalid FW response, force a timeout immediately */
2140 *timeout = 0;
2141 return ICE_ERR_AQ_ERROR;
2142 }
2143
2144 /* If the resource is held by some other driver, the command completes
2145 * with a busy return value and the timeout field indicates the maximum
2146 * time the current owner of the resource has to free it.
2147 */
2148 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
2149 *timeout = LE32_TO_CPU(cmd_resp->timeout);
2150
2151 return status;
2152 }
2153
2154 /**
2155 * ice_aq_release_res
2156 * @hw: pointer to the HW struct
2157 * @res: resource ID
2158 * @sdp_number: resource number
2159 * @cd: pointer to command details structure or NULL
2160 *
2161 * release common resource using the admin queue commands (0x0009)
2162 */
2163 static int
2164 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
2165 struct ice_sq_cd *cd)
2166 {
2167 struct ice_aqc_req_res *cmd;
2168 struct ice_aq_desc desc;
2169
2170 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2171
2172 cmd = &desc.params.res_owner;
2173
2174 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
2175
2176 cmd->res_id = CPU_TO_LE16(res);
2177 cmd->res_number = CPU_TO_LE32(sdp_number);
2178
2179 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2180 }
2181
2182 /**
2183 * ice_acquire_res
2184 * @hw: pointer to the HW structure
2185 * @res: resource ID
2186 * @access: access type (read or write)
2187 * @timeout: timeout in milliseconds
2188 *
2189 * This function will attempt to acquire the ownership of a resource.
2190 */
2191 int
2192 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2193 enum ice_aq_res_access_type access, u32 timeout)
2194 {
2195 #define ICE_RES_POLLING_DELAY_MS 10
2196 u32 delay = ICE_RES_POLLING_DELAY_MS;
2197 u32 time_left = timeout;
2198 int status;
2199
2200 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2201
2202 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2203
2204 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
2205 * previously acquired the resource and performed any necessary updates;
2206 * in this case the caller does not obtain the resource and has no
2207 * further work to do.
2208 */
2209 if (status == ICE_ERR_AQ_NO_WORK)
2210 goto ice_acquire_res_exit;
2211
2212 if (status)
2213 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
2214
2215 /* If necessary, poll until the current lock owner times out */
2216 timeout = time_left;
2217 while (status && timeout && time_left) {
2218 ice_msec_delay(delay, true);
2219 timeout = (timeout > delay) ? timeout - delay : 0;
2220 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2221
2222 if (status == ICE_ERR_AQ_NO_WORK)
2223 /* lock free, but no work to do */
2224 break;
2225
2226 if (!status)
2227 /* lock acquired */
2228 break;
2229 }
2230 if (status && status != ICE_ERR_AQ_NO_WORK)
2231 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2232
2233 ice_acquire_res_exit:
2234 if (status == ICE_ERR_AQ_NO_WORK) {
2235 if (access == ICE_RES_WRITE)
2236 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2237 else
2238 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
2239 }
2240 return status;
2241 }
2242
2243 /**
2244 * ice_release_res
2245 * @hw: pointer to the HW structure
2246 * @res: resource ID
2247 *
2248 * This function will release a resource using the proper Admin Command.
2249 */
2250 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2251 {
2252 u32 total_delay = 0;
2253 int status;
2254
2255 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2256
2257 status = ice_aq_release_res(hw, res, 0, NULL);
2258
2259 /* there are some rare cases when trying to release the resource
2260 * results in an admin queue timeout, so handle them correctly
2261 */
2262 while ((status == ICE_ERR_AQ_TIMEOUT) &&
2263 (total_delay < hw->adminq.sq_cmd_timeout)) {
2264 ice_msec_delay(1, true);
2265 status = ice_aq_release_res(hw, res, 0, NULL);
2266 total_delay++;
2267 }
2268 }
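
/*
 * Illustrative usage sketch (not part of the driver): the typical pattern is
 * acquire, operate, release. ICE_ERR_AQ_NO_WORK means another function
 * already completed the work, so the caller skips its own update. The
 * resource ID, access type and timeout value are assumptions here.
 *
 *	err = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, timeout_ms);
 *	if (err == ICE_ERR_AQ_NO_WORK)
 *		return 0;		// nothing left for this caller to do
 *	if (err)
 *		return err;		// ownership not obtained in time
 *
 *	// ... access the protected resource ...
 *
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */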
2269
2270 /**
2271 * ice_aq_alloc_free_res - command to allocate/free resources
2272 * @hw: pointer to the HW struct
2273 * @num_entries: number of resource entries in buffer
2274 * @buf: Indirect buffer to hold data parameters and response
2275 * @buf_size: size of buffer for indirect commands
2276 * @opc: pass in the command opcode
2277 * @cd: pointer to command details structure or NULL
2278 *
2279 * Helper function to allocate/free resources using the admin queue commands
2280 */
2281 int
2282 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
2283 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2284 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2285 {
2286 struct ice_aqc_alloc_free_res_cmd *cmd;
2287 struct ice_aq_desc desc;
2288
2289 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2290
2291 cmd = &desc.params.sw_res_ctrl;
2292
2293 if (!buf)
2294 return ICE_ERR_PARAM;
2295
2296 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
2297 return ICE_ERR_PARAM;
2298
2299 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2300
2301 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2302
2303 cmd->num_entries = CPU_TO_LE16(num_entries);
2304
2305 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2306 }
2307
2308 /**
2309 * ice_alloc_hw_res - allocate resource
2310 * @hw: pointer to the HW struct
2311 * @type: type of resource
2312 * @num: number of resources to allocate
2313 * @btm: allocate from bottom
2314 * @res: pointer to array that will receive the resources
2315 */
2316 int
2317 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2318 {
2319 struct ice_aqc_alloc_free_res_elem *buf;
2320 u16 buf_len;
2321 int status;
2322
2323 buf_len = ice_struct_size(buf, elem, num);
2324 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2325 if (!buf)
2326 return ICE_ERR_NO_MEMORY;
2327
2328 /* Prepare buffer to allocate resource. */
2329 buf->num_elems = CPU_TO_LE16(num);
2330 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2331 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2332 if (btm)
2333 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2334
2335 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2336 ice_aqc_opc_alloc_res, NULL);
2337 if (status)
2338 goto ice_alloc_res_exit;
2339
2340 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
2341 ICE_NONDMA_TO_NONDMA);
2342
2343 ice_alloc_res_exit:
2344 ice_free(hw, buf);
2345 return status;
2346 }
2347
2348 /**
2349 * ice_free_hw_res - free allocated HW resource
2350 * @hw: pointer to the HW struct
2351 * @type: type of resource to free
2352 * @num: number of resources
2353 * @res: pointer to array that contains the resources to free
2354 */
2355 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2356 {
2357 struct ice_aqc_alloc_free_res_elem *buf;
2358 u16 buf_len;
2359 int status;
2360
2361 buf_len = ice_struct_size(buf, elem, num);
2362 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2363 if (!buf)
2364 return ICE_ERR_NO_MEMORY;
2365
2366 /* Prepare buffer to free resource. */
2367 buf->num_elems = CPU_TO_LE16(num);
2368 buf->res_type = CPU_TO_LE16(type);
2369 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2370 ICE_NONDMA_TO_NONDMA);
2371
2372 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2373 ice_aqc_opc_free_res, NULL);
2374 if (status)
2375 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2376
2377 ice_free(hw, buf);
2378 return status;
2379 }
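
/*
 * Illustrative usage sketch (not part of the driver): allocating a block of
 * HW resources of one type and releasing it again. The resource type and
 * ICE_EXAMPLE_NUM count are placeholders; on success, res[] holds the
 * indices firmware handed out.
 *
 *	u16 res[ICE_EXAMPLE_NUM];
 *	int err;
 *
 *	err = ice_alloc_hw_res(hw, res_type, ICE_EXAMPLE_NUM, false, res);
 *	if (err)
 *		return err;
 *
 *	// ... use the allocated resources ...
 *
 *	ice_free_hw_res(hw, res_type, ICE_EXAMPLE_NUM, res);
 */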
2380
2381 /**
2382 * ice_get_num_per_func - determine number of resources per PF
2383 * @hw: pointer to the HW structure
2384 * @max: value to be evenly split between each PF
2385 *
2386 * Determine the number of valid functions by going through the bitmap returned
2387 * from parsing capabilities and use this to calculate the number of resources
2388 * per PF based on the max value passed in.
2389 */
2390 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2391 {
2392 u8 funcs;
2393
2394 #define ICE_CAPS_VALID_FUNCS_M 0xFF
2395 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2396 ICE_CAPS_VALID_FUNCS_M);
2397
2398 if (!funcs)
2399 return 0;
2400
2401 return max / funcs;
2402 }
2403
2404 /**
2405 * ice_print_led_caps - print LED capabilities
2406 * @hw: pointer to the ice_hw instance
2407 * @caps: pointer to common caps instance
2408 * @prefix: string to prefix when printing
2409 * @dbg: set to indicate debug print
2410 */
2411 static void
2412 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2413 char const *prefix, bool dbg)
2414 {
2415 u8 i;
2416
2417 if (dbg)
2418 ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %u\n", prefix,
2419 caps->led_pin_num);
2420 else
2421 ice_info(hw, "%s: led_pin_num = %u\n", prefix,
2422 caps->led_pin_num);
2423
2424 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
2425 if (!caps->led[i])
2426 continue;
2427
2428 if (dbg)
2429 ice_debug(hw, ICE_DBG_INIT, "%s: led[%u] = %u\n",
2430 prefix, i, caps->led[i]);
2431 else
2432 ice_info(hw, "%s: led[%u] = %u\n", prefix, i,
2433 caps->led[i]);
2434 }
2435 }
2436
2437 /**
2438 * ice_print_sdp_caps - print SDP capabilities
2439 * @hw: pointer to the ice_hw instance
2440 * @caps: pointer to common caps instance
2441 * @prefix: string to prefix when printing
2442 * @dbg: set to indicate debug print
2443 */
2444 static void
2445 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2446 char const *prefix, bool dbg)
2447 {
2448 u8 i;
2449
2450 if (dbg)
2451 ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %u\n", prefix,
2452 caps->sdp_pin_num);
2453 else
2454 ice_info(hw, "%s: sdp_pin_num = %u\n", prefix,
2455 caps->sdp_pin_num);
2456
2457 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
2458 if (!caps->sdp[i])
2459 continue;
2460
2461 if (dbg)
2462 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%u] = %u\n",
2463 prefix, i, caps->sdp[i]);
2464 else
2465 ice_info(hw, "%s: sdp[%u] = %u\n", prefix,
2466 i, caps->sdp[i]);
2467 }
2468 }
2469
2470 /**
2471 * ice_parse_common_caps - parse common device/function capabilities
2472 * @hw: pointer to the HW struct
2473 * @caps: pointer to common capabilities structure
2474 * @elem: the capability element to parse
2475 * @prefix: message prefix for tracing capabilities
2476 *
2477 * Given a capability element, extract relevant details into the common
2478 * capability structure.
2479 *
2480 * Returns: true if the capability matches one of the common capability ids,
2481 * false otherwise.
2482 */
2483 static bool
2484 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2485 struct ice_aqc_list_caps_elem *elem, const char *prefix)
2486 {
2487 u32 logical_id = LE32_TO_CPU(elem->logical_id);
2488 u32 phys_id = LE32_TO_CPU(elem->phys_id);
2489 u32 number = LE32_TO_CPU(elem->number);
2490 u16 cap = LE16_TO_CPU(elem->cap);
2491 bool found = true;
2492
2493 switch (cap) {
2494 case ICE_AQC_CAPS_SWITCHING_MODE:
2495 caps->switching_mode = number;
2496 ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %u\n", prefix,
2497 caps->switching_mode);
2498 break;
2499 case ICE_AQC_CAPS_MANAGEABILITY_MODE:
2500 caps->mgmt_mode = number;
2501 caps->mgmt_protocols_mctp = logical_id;
2502 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %u\n", prefix,
2503 caps->mgmt_mode);
2504 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %u\n", prefix,
2505 caps->mgmt_protocols_mctp);
2506 break;
2507 case ICE_AQC_CAPS_OS2BMC:
2508 caps->os2bmc = number;
2509 ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %u\n", prefix, caps->os2bmc);
2510 break;
2511 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2512 caps->valid_functions = number;
2513 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = 0x%x\n", prefix,
2514 caps->valid_functions);
2515 break;
2516 case ICE_AQC_CAPS_SRIOV:
2517 caps->sr_iov_1_1 = (number == 1);
2518 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %u\n", prefix,
2519 caps->sr_iov_1_1);
2520 break;
2521 case ICE_AQC_CAPS_VMDQ:
2522 caps->vmdq = (number == 1);
2523 ice_debug(hw, ICE_DBG_INIT, "%s: vmdq = %u\n", prefix, caps->vmdq);
2524 break;
2525 case ICE_AQC_CAPS_802_1QBG:
2526 caps->evb_802_1_qbg = (number == 1);
2527 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %u\n", prefix, number);
2528 break;
2529 case ICE_AQC_CAPS_802_1BR:
2530 caps->evb_802_1_qbh = (number == 1);
2531 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %u\n", prefix, number);
2532 break;
2533 case ICE_AQC_CAPS_DCB:
2534 caps->dcb = (number == 1);
2535 caps->active_tc_bitmap = logical_id;
2536 caps->maxtc = phys_id;
2537 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %u\n", prefix, caps->dcb);
2538 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = 0x%x\n", prefix,
2539 caps->active_tc_bitmap);
2540 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %u\n", prefix, caps->maxtc);
2541 break;
2542 case ICE_AQC_CAPS_ISCSI:
2543 caps->iscsi = (number == 1);
2544 ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %u\n", prefix, caps->iscsi);
2545 break;
2546 case ICE_AQC_CAPS_RSS:
2547 caps->rss_table_size = number;
2548 caps->rss_table_entry_width = logical_id;
2549 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %u\n", prefix,
2550 caps->rss_table_size);
2551 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %u\n", prefix,
2552 caps->rss_table_entry_width);
2553 break;
2554 case ICE_AQC_CAPS_RXQS:
2555 caps->num_rxq = number;
2556 caps->rxq_first_id = phys_id;
2557 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %u\n", prefix,
2558 caps->num_rxq);
2559 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %u\n", prefix,
2560 caps->rxq_first_id);
2561 break;
2562 case ICE_AQC_CAPS_TXQS:
2563 caps->num_txq = number;
2564 caps->txq_first_id = phys_id;
2565 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %u\n", prefix,
2566 caps->num_txq);
2567 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %u\n", prefix,
2568 caps->txq_first_id);
2569 break;
2570 case ICE_AQC_CAPS_MSIX:
2571 caps->num_msix_vectors = number;
2572 caps->msix_vector_first_id = phys_id;
2573 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %u\n", prefix,
2574 caps->num_msix_vectors);
2575 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %u\n", prefix,
2576 caps->msix_vector_first_id);
2577 break;
2578 case ICE_AQC_CAPS_NVM_MGMT:
2579 caps->sec_rev_disabled =
2580 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2581 true : false;
2582 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2583 caps->sec_rev_disabled);
2584 caps->update_disabled =
2585 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2586 true : false;
2587 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2588 caps->update_disabled);
2589 caps->nvm_unified_update =
2590 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2591 true : false;
2592 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2593 caps->nvm_unified_update);
2594 caps->netlist_auth =
2595 (number & ICE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
2596 true : false;
2597 ice_debug(hw, ICE_DBG_INIT, "%s: netlist_auth = %d\n", prefix,
2598 caps->netlist_auth);
2599 break;
2600 case ICE_AQC_CAPS_CEM:
2601 caps->mgmt_cem = (number == 1);
2602 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %u\n", prefix,
2603 caps->mgmt_cem);
2604 break;
2605 case ICE_AQC_CAPS_IWARP:
2606 caps->iwarp = (number == 1);
2607 ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %u\n", prefix, caps->iwarp);
2608 break;
2609 case ICE_AQC_CAPS_ROCEV2_LAG:
2610 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
2611 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
2612 prefix, caps->roce_lag);
2613 break;
2614 case ICE_AQC_CAPS_LED:
2615 if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
2616 caps->led[phys_id] = true;
2617 caps->led_pin_num++;
2618 ice_debug(hw, ICE_DBG_INIT, "%s: led[%u] = 1\n", prefix, phys_id);
2619 }
2620 break;
2621 case ICE_AQC_CAPS_SDP:
2622 if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2623 caps->sdp[phys_id] = true;
2624 caps->sdp_pin_num++;
2625 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%u] = 1\n", prefix, phys_id);
2626 }
2627 break;
2628 case ICE_AQC_CAPS_WR_CSR_PROT:
2629 caps->wr_csr_prot = number;
2630 caps->wr_csr_prot |= (u64)logical_id << 32;
2631 ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2632 (unsigned long long)caps->wr_csr_prot);
2633 break;
2634 case ICE_AQC_CAPS_WOL_PROXY:
2635 caps->num_wol_proxy_fltr = number;
2636 caps->wol_proxy_vsi_seid = logical_id;
2637 caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2638 caps->acpi_prog_mthd = !!(phys_id &
2639 ICE_ACPI_PROG_MTHD_M);
2640 caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2641 ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %u\n", prefix,
2642 caps->num_wol_proxy_fltr);
2643 ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %u\n", prefix,
2644 caps->wol_proxy_vsi_seid);
2645 ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %u\n",
2646 prefix, caps->apm_wol_support);
2647 break;
2648 case ICE_AQC_CAPS_MAX_MTU:
2649 caps->max_mtu = number;
2650 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %u\n",
2651 prefix, caps->max_mtu);
2652 break;
2653 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2654 caps->pcie_reset_avoidance = (number > 0);
2655 ice_debug(hw, ICE_DBG_INIT,
2656 "%s: pcie_reset_avoidance = %d\n", prefix,
2657 caps->pcie_reset_avoidance);
2658 break;
2659 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2660 caps->reset_restrict_support = (number == 1);
2661 ice_debug(hw, ICE_DBG_INIT,
2662 "%s: reset_restrict_support = %d\n", prefix,
2663 caps->reset_restrict_support);
2664 break;
2665 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2666 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2667 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2668 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2669 {
2670 u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0);
2671
2672 caps->ext_topo_dev_img_ver_high[index] = number;
2673 caps->ext_topo_dev_img_ver_low[index] = logical_id;
2674 caps->ext_topo_dev_img_part_num[index] =
2675 (phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2676 ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2677 caps->ext_topo_dev_img_load_en[index] =
2678 (phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2679 caps->ext_topo_dev_img_prog_en[index] =
2680 (phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2681 caps->ext_topo_dev_img_ver_schema[index] =
2682 (phys_id & ICE_EXT_TOPO_DEV_IMG_VER_SCHEMA) != 0;
2683 ice_debug(hw, ICE_DBG_INIT,
2684 "%s: ext_topo_dev_img_ver_high[%d] = %u\n",
2685 prefix, index,
2686 caps->ext_topo_dev_img_ver_high[index]);
2687 ice_debug(hw, ICE_DBG_INIT,
2688 "%s: ext_topo_dev_img_ver_low[%d] = %u\n",
2689 prefix, index,
2690 caps->ext_topo_dev_img_ver_low[index]);
2691 ice_debug(hw, ICE_DBG_INIT,
2692 "%s: ext_topo_dev_img_part_num[%d] = %u\n",
2693 prefix, index,
2694 caps->ext_topo_dev_img_part_num[index]);
2695 ice_debug(hw, ICE_DBG_INIT,
2696 "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2697 prefix, index,
2698 caps->ext_topo_dev_img_load_en[index]);
2699 ice_debug(hw, ICE_DBG_INIT,
2700 "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2701 prefix, index,
2702 caps->ext_topo_dev_img_prog_en[index]);
2703 ice_debug(hw, ICE_DBG_INIT,
2704 "%s: ext_topo_dev_img_ver_schema[%d] = %d\n",
2705 prefix, index,
2706 caps->ext_topo_dev_img_ver_schema[index]);
2707 break;
2708 }
2709 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
2710 caps->tx_sched_topo_comp_mode_en = (number == 1);
2711 break;
2712 case ICE_AQC_CAPS_DYN_FLATTENING:
2713 caps->dyn_flattening_en = (number == 1);
2714 ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n",
2715 prefix, caps->dyn_flattening_en);
2716 break;
2717 case ICE_AQC_CAPS_OROM_RECOVERY_UPDATE:
2718 caps->orom_recovery_update = (number == 1);
2719 ice_debug(hw, ICE_DBG_INIT, "%s: orom_recovery_update = %d\n",
2720 prefix, caps->orom_recovery_update);
2721 break;
2722 case ICE_AQC_CAPS_NEXT_CLUSTER_ID:
2723 caps->next_cluster_id_support = (number == 1);
2724 ice_debug(hw, ICE_DBG_INIT, "%s: next_cluster_id_support = %d\n",
2725 prefix, caps->next_cluster_id_support);
2726 break;
2727 default:
2728 /* Not one of the recognized common capabilities */
2729 found = false;
2730 }
2731
2732 return found;
2733 }
2734
2735 /**
2736 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2737 * @hw: pointer to the HW structure
2738 * @caps: pointer to capabilities structure to fix
2739 *
2740 * Re-calculate the capabilities that are dependent on the number of physical
2741 * ports; i.e. some features are not supported or function differently on
2742 * devices with more than 4 ports.
2743 */
2744 static void
2745 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2746 {
2747 /* This assumes device capabilities are always scanned before function
2748 * capabilities during the initialization flow.
2749 */
2750 if (hw->dev_caps.num_funcs > 4) {
2751 /* Max 4 TCs per port */
2752 caps->maxtc = 4;
2753 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %u (based on #ports)\n",
2754 caps->maxtc);
2755 if (caps->iwarp) {
2756 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2757 caps->iwarp = 0;
2758 }
2759
2760 /* print message only when processing device capabilities
2761 * during initialization.
2762 */
2763 if (caps == &hw->dev_caps.common_cap)
2764 ice_info(hw, "RDMA functionality is not available with the current device configuration.\n");
2765 }
2766 }
2767
2768 /**
2769 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2770 * @hw: pointer to the HW struct
2771 * @func_p: pointer to function capabilities structure
2772 * @cap: pointer to the capability element to parse
2773 *
2774 * Extract function capabilities for ICE_AQC_CAPS_VF.
2775 */
2776 static void
2777 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2778 struct ice_aqc_list_caps_elem *cap)
2779 {
2780 u32 number = LE32_TO_CPU(cap->number);
2781 u32 logical_id = LE32_TO_CPU(cap->logical_id);
2782
2783 func_p->num_allocd_vfs = number;
2784 func_p->vf_base_id = logical_id;
2785 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %u\n",
2786 func_p->num_allocd_vfs);
2787 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %u\n",
2788 func_p->vf_base_id);
2789 }
2790
2791 /**
2792 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2793 * @hw: pointer to the HW struct
2794 * @func_p: pointer to function capabilities structure
2795 * @cap: pointer to the capability element to parse
2796 *
2797 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2798 */
2799 static void
2800 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2801 struct ice_aqc_list_caps_elem *cap)
2802 {
2803 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2804 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %u\n",
2805 LE32_TO_CPU(cap->number));
2806 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %u\n",
2807 func_p->guar_num_vsi);
2808 }
2809
2810 /**
2811 * ice_parse_func_caps - Parse function capabilities
2812 * @hw: pointer to the HW struct
2813 * @func_p: pointer to function capabilities structure
2814 * @buf: buffer containing the function capability records
2815 * @cap_count: the number of capabilities
2816 *
2817 * Helper function to parse the function (0x000A) capabilities list. For
2818 * capabilities shared between device and function, this relies on
2819 * ice_parse_common_caps.
2820 *
2821 * Loop through the list of provided capabilities and extract the relevant
2822 * data into the function capabilities structure.
2823 */
2824 static void
2825 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2826 void *buf, u32 cap_count)
2827 {
2828 struct ice_aqc_list_caps_elem *cap_resp;
2829 u32 i;
2830
2831 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2832
2833 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2834
2835 for (i = 0; i < cap_count; i++) {
2836 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2837 bool found;
2838
2839 found = ice_parse_common_caps(hw, &func_p->common_cap,
2840 &cap_resp[i], "func caps");
2841
2842 switch (cap) {
2843 case ICE_AQC_CAPS_VF:
2844 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2845 break;
2846 case ICE_AQC_CAPS_VSI:
2847 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2848 break;
2849 default:
2850 /* Don't list common capabilities as unknown */
2851 if (!found)
2852 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2853 i, cap);
2854 break;
2855 }
2856 }
2857
2858 ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2859 ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2860
2861 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2862 }
2863
2864 /**
2865 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2866 * @hw: pointer to the HW struct
2867 * @dev_p: pointer to device capabilities structure
2868 * @cap: capability element to parse
2869 *
2870 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2871 */
2872 static void
2873 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2874 struct ice_aqc_list_caps_elem *cap)
2875 {
2876 u32 number = LE32_TO_CPU(cap->number);
2877
2878 dev_p->num_funcs = ice_hweight32(number);
2879 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %u\n",
2880 dev_p->num_funcs);
2881
2882 }
2883
2884 /**
2885 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2886 * @hw: pointer to the HW struct
2887 * @dev_p: pointer to device capabilities structure
2888 * @cap: capability element to parse
2889 *
2890 * Parse ICE_AQC_CAPS_VF for device capabilities.
2891 */
2892 static void
2893 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2894 struct ice_aqc_list_caps_elem *cap)
2895 {
2896 u32 number = LE32_TO_CPU(cap->number);
2897
2898 dev_p->num_vfs_exposed = number;
2899 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %u\n",
2900 dev_p->num_vfs_exposed);
2901 }
2902
2903 /**
2904 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2905 * @hw: pointer to the HW struct
2906 * @dev_p: pointer to device capabilities structure
2907 * @cap: capability element to parse
2908 *
2909 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2910 */
2911 static void
2912 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2913 struct ice_aqc_list_caps_elem *cap)
2914 {
2915 u32 number = LE32_TO_CPU(cap->number);
2916
2917 dev_p->num_vsi_allocd_to_host = number;
2918 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %u\n",
2919 dev_p->num_vsi_allocd_to_host);
2920 }
2921
2922 /**
2923 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
2924 * @hw: pointer to the HW struct
2925 * @dev_p: pointer to device capabilities structure
2926 * @cap: capability element to parse
2927 *
2928 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
2929 */
2930 static void
2931 ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2932 struct ice_aqc_list_caps_elem *cap)
2933 {
2934 dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
2935 dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
2936
2937 ice_info(hw, "PF is configured in %s mode with IP instance ID %u\n",
2938 (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
2939 "primary" : "secondary", dev_p->nac_topo.id);
2940
2941 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
2942 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
2943 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
2944 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
2945 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %u\n",
2946 dev_p->nac_topo.id);
2947 }
2948
2949 /**
2950 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
2951 * @hw: pointer to the HW struct
2952 * @dev_p: pointer to device capabilities structure
2953 * @cap: capability element to parse
2954 *
2955 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
2956 * enabled sensors.
2957 */
2958 static void
2959 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2960 struct ice_aqc_list_caps_elem *cap)
2961 {
2962 dev_p->supported_sensors = LE32_TO_CPU(cap->number);
2963
2964 ice_debug(hw, ICE_DBG_INIT,
2965 "dev caps: supported sensors (bitmap) = 0x%x\n",
2966 dev_p->supported_sensors);
2967 }
2968
2969 /**
2970 * ice_parse_dev_caps - Parse device capabilities
2971 * @hw: pointer to the HW struct
2972 * @dev_p: pointer to device capabilities structure
2973 * @buf: buffer containing the device capability records
2974 * @cap_count: the number of capabilities
2975 *
2976 * Helper function to parse the device (0x000B) capabilities list. For
2977 * capabilities shared between device and function, this relies on
2978 * ice_parse_common_caps.
2979 *
2980 * Loop through the list of provided capabilities and extract the relevant
2981 * data into the device capabilities structure.
2982 */
2983 static void
2984 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2985 void *buf, u32 cap_count)
2986 {
2987 struct ice_aqc_list_caps_elem *cap_resp;
2988 u32 i;
2989
2990 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2991
2992 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2993
2994 for (i = 0; i < cap_count; i++) {
2995 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2996 bool found;
2997
2998 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2999 &cap_resp[i], "dev caps");
3000
3001 switch (cap) {
3002 case ICE_AQC_CAPS_VALID_FUNCTIONS:
3003 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
3004 break;
3005 case ICE_AQC_CAPS_VF:
3006 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
3007 break;
3008 case ICE_AQC_CAPS_VSI:
3009 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
3010 break;
3011 case ICE_AQC_CAPS_NAC_TOPOLOGY:
3012 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
3013 break;
3014 case ICE_AQC_CAPS_SENSOR_READING:
3015 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
3016 break;
3017 default:
3018 /* Don't list common capabilities as unknown */
3019 if (!found)
3020 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%u]: 0x%x\n",
3021 i, cap);
3022 break;
3023 }
3024 }
3025
3026 ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
3027 ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
3028
3029 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
3030 }
3031
3032 /**
3033 * ice_aq_get_netlist_node
3034 * @hw: pointer to the hw struct
3035 * @cmd: get_link_topo AQ structure
3036 * @node_part_number: output node part number if node found
3037 * @node_handle: output node handle parameter if node found
3038 */
3039 int
3040 ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
3041 u8 *node_part_number, u16 *node_handle)
3042 {
3043 struct ice_aq_desc desc;
3044
3045 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
3046 desc.params.get_link_topo = *cmd;
3047
3048 if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
3049 return ICE_ERR_NOT_SUPPORTED;
3050
3051 if (node_handle)
3052 *node_handle =
3053 LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
3054 if (node_part_number)
3055 *node_part_number = desc.params.get_link_topo.node_part_num;
3056
3057 return 0;
3058 }
3059
3060 #define MAX_NETLIST_SIZE 10
3061 /**
3062 * ice_find_netlist_node
3063 * @hw: pointer to the hw struct
3064 * @node_type_ctx: type of netlist node to look for
3065 * @node_part_number: node part number to look for
3066 * @node_handle: output parameter if node found - optional
3067 *
3068 * Scan the netlist for a node handle of the given node type and part number.
3069 *
3070 * If node_handle is non-NULL it will be modified on function exit. It is only
3071 * valid if the function returns zero, and should be ignored on any non-zero
3072 * return value.
3073 *
3074 * Returns: 0 if the node is found, ICE_ERR_DOES_NOT_EXIST if no handle was
3075 * found, and an error code on failure to access the AQ.
3076 */
3077 int
3078 ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
3079 u16 *node_handle)
3080 {
3081 u8 idx;
3082
3083 for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
3084 struct ice_aqc_get_link_topo cmd;
3085 u8 rec_node_part_number;
3086 int status;
3087
3088 memset(&cmd, 0, sizeof(cmd));
3089
3090 cmd.addr.topo_params.node_type_ctx =
3091 (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
3092 cmd.addr.topo_params.index = idx;
3093
3094 status = ice_aq_get_netlist_node(hw, &cmd,
3095 &rec_node_part_number,
3096 node_handle);
3097 if (status)
3098 return status;
3099
3100 if (rec_node_part_number == node_part_number)
3101 return 0;
3102 }
3103
3104 return ICE_ERR_DOES_NOT_EXIST;
3105 }
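
/*
 * Illustrative usage sketch (not part of the driver): probing whether a
 * particular external device is present in the netlist. NODE_TYPE and
 * NODE_PART_NUM are placeholders; real callers pass constants from
 * ice_adminq_cmd.h.
 *
 *	u16 handle;
 *	int err;
 *
 *	err = ice_find_netlist_node(hw, NODE_TYPE, NODE_PART_NUM, &handle);
 *	if (!err) {
 *		// device found, handle identifies it for follow-up commands
 *	} else if (err == ICE_ERR_DOES_NOT_EXIST) {
 *		// device not present in the netlist
 *	}
 */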
3106
3107 /**
3108 * ice_aq_list_caps - query function/device capabilities
3109 * @hw: pointer to the HW struct
3110 * @buf: a buffer to hold the capabilities
3111 * @buf_size: size of the buffer
3112 * @cap_count: if not NULL, set to the number of capabilities reported
3113 * @opc: capabilities type to discover, device or function
3114 * @cd: pointer to command details structure or NULL
3115 *
3116 * Get the function (0x000A) or device (0x000B) capabilities description from
3117 * firmware and store it in the buffer.
3118 *
3119 * If the cap_count pointer is not NULL, then it is set to the number of
3120 * capabilities firmware will report. Note that if the buffer size is too
3121 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
3122 * cap_count will still be updated in this case. It is recommended that the
3123 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
3124 * firmware could return) to avoid this.
3125 */
3126 static int
3127 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
3128 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3129 {
3130 struct ice_aqc_list_caps *cmd;
3131 struct ice_aq_desc desc;
3132 int status;
3133
3134 cmd = &desc.params.get_cap;
3135
3136 if (opc != ice_aqc_opc_list_func_caps &&
3137 opc != ice_aqc_opc_list_dev_caps)
3138 return ICE_ERR_PARAM;
3139
3140 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3141 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3142
3143 if (cap_count)
3144 *cap_count = LE32_TO_CPU(cmd->count);
3145
3146 return status;
3147 }
3148
3149 /**
3150 * ice_discover_dev_caps - Read and extract device capabilities
3151 * @hw: pointer to the hardware structure
3152 * @dev_caps: pointer to device capabilities structure
3153 *
3154 * Read the device capabilities and extract them into the dev_caps structure
3155 * for later use.
3156 */
3157 static int
3158 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
3159 {
3160 u32 cap_count = 0;
3161 void *cbuf;
3162 int status;
3163
3164 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
3165 if (!cbuf)
3166 return ICE_ERR_NO_MEMORY;
3167
3168 /* Although the driver doesn't know the number of capabilities the
3169 * device will return, we can simply send a 4KB buffer, the maximum
3170 * possible size that firmware can return.
3171 */
3172 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
3173
3174 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
3175 ice_aqc_opc_list_dev_caps, NULL);
3176 if (!status)
3177 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
3178 ice_free(hw, cbuf);
3179
3180 return status;
3181 }
3182
3183 /**
3184 * ice_discover_func_caps - Read and extract function capabilities
3185 * @hw: pointer to the hardware structure
3186 * @func_caps: pointer to function capabilities structure
3187 *
3188 * Read the function capabilities and extract them into the func_caps structure
3189 * for later use.
3190 */
3191 static int
3192 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
3193 {
3194 u32 cap_count = 0;
3195 void *cbuf;
3196 int status;
3197
3198 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
3199 if (!cbuf)
3200 return ICE_ERR_NO_MEMORY;
3201
3202 /* Although the driver doesn't know the number of capabilities the
3203 * device will return, we can simply send a 4KB buffer, the maximum
3204 * possible size that firmware can return.
3205 */
3206 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
3207
3208 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
3209 ice_aqc_opc_list_func_caps, NULL);
3210 if (!status)
3211 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
3212 ice_free(hw, cbuf);
3213
3214 return status;
3215 }
3216
3217 /**
3218 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
3219 * @hw: pointer to the hardware structure
3220 */
3221 void ice_set_safe_mode_caps(struct ice_hw *hw)
3222 {
3223 struct ice_hw_func_caps *func_caps = &hw->func_caps;
3224 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
3225 struct ice_hw_common_caps cached_caps;
3226 u32 num_funcs;
3227
3228 /* cache some func_caps values that should be restored after memset */
3229 cached_caps = func_caps->common_cap;
3230
3231 /* unset func capabilities */
3232 memset(func_caps, 0, sizeof(*func_caps));
3233
3234 #define ICE_RESTORE_FUNC_CAP(name) \
3235 func_caps->common_cap.name = cached_caps.name
3236
3237 /* restore cached values */
3238 ICE_RESTORE_FUNC_CAP(valid_functions);
3239 ICE_RESTORE_FUNC_CAP(txq_first_id);
3240 ICE_RESTORE_FUNC_CAP(rxq_first_id);
3241 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
3242 ICE_RESTORE_FUNC_CAP(max_mtu);
3243 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
3244
3245 /* one Tx and one Rx queue in safe mode */
3246 func_caps->common_cap.num_rxq = 1;
3247 func_caps->common_cap.num_txq = 1;
3248
3249 /* two MSIX vectors, one for traffic and one for misc causes */
3250 func_caps->common_cap.num_msix_vectors = 2;
3251 func_caps->guar_num_vsi = 1;
3252
3253 /* cache some dev_caps values that should be restored after memset */
3254 cached_caps = dev_caps->common_cap;
3255 num_funcs = dev_caps->num_funcs;
3256
3257 /* unset dev capabilities */
3258 memset(dev_caps, 0, sizeof(*dev_caps));
3259
3260 #define ICE_RESTORE_DEV_CAP(name) \
3261 dev_caps->common_cap.name = cached_caps.name
3262
3263 /* restore cached values */
3264 ICE_RESTORE_DEV_CAP(valid_functions);
3265 ICE_RESTORE_DEV_CAP(txq_first_id);
3266 ICE_RESTORE_DEV_CAP(rxq_first_id);
3267 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
3268 ICE_RESTORE_DEV_CAP(max_mtu);
3269 ICE_RESTORE_DEV_CAP(nvm_unified_update);
3270 dev_caps->num_funcs = num_funcs;
3271
3272 /* one Tx and one Rx queue per function in safe mode */
3273 dev_caps->common_cap.num_rxq = num_funcs;
3274 dev_caps->common_cap.num_txq = num_funcs;
3275
3276 /* two MSIX vectors per function */
3277 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
3278 }
3279
3280 /**
3281 * ice_get_caps - get info about the HW
3282 * @hw: pointer to the hardware structure
3283 */
3284 int ice_get_caps(struct ice_hw *hw)
3285 {
3286 int status;
3287
3288 status = ice_discover_dev_caps(hw, &hw->dev_caps);
3289 if (status)
3290 return status;
3291
3292 return ice_discover_func_caps(hw, &hw->func_caps);
3293 }
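
/* Illustrative sketch, not part of the driver: a typical init-time flow
 * discovers capabilities with ice_get_caps() and, if the caller later
 * decides to run without the DDP package, overrides them with the minimal
 * safe mode values. The pkg_load_failed flag is a hypothetical stand-in
 * for whatever condition triggers safe mode.
 */
static int example_init_caps(struct ice_hw *hw, bool pkg_load_failed)
{
	int status;

	status = ice_get_caps(hw);
	if (status)
		return status;

	/* fall back to one Tx/Rx queue and two MSI-X vectors per PF */
	if (pkg_load_failed)
		ice_set_safe_mode_caps(hw);

	return 0;
}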
3294
3295 /**
3296 * ice_aq_manage_mac_write - manage MAC address write command
3297 * @hw: pointer to the HW struct
3298 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
3299 * @flags: flags to control write behavior
3300 * @cd: pointer to command details structure or NULL
3301 *
3302 * This function is used to write MAC address to the NVM (0x0108).
3303 */
3304 int
3305 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
3306 struct ice_sq_cd *cd)
3307 {
3308 struct ice_aqc_manage_mac_write *cmd;
3309 struct ice_aq_desc desc;
3310
3311 cmd = &desc.params.mac_write;
3312 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
3313
3314 cmd->flags = flags;
3315 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
3316
3317 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3318 }
3319
3320 /**
3321 * ice_aq_clear_pxe_mode
3322 * @hw: pointer to the HW struct
3323 *
3324 * Tell the firmware that the driver is taking over from PXE (0x0110).
3325 */
3326 static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
3327 {
3328 struct ice_aq_desc desc;
3329
3330 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
3331 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
3332
3333 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3334 }
3335
3336 /**
3337 * ice_clear_pxe_mode - clear pxe operations mode
3338 * @hw: pointer to the HW struct
3339 *
3340 * Make sure all PXE mode settings are cleared, including things
3341 * like descriptor fetch/write-back mode.
3342 */
3343 void ice_clear_pxe_mode(struct ice_hw *hw)
3344 {
3345 if (ice_check_sq_alive(hw, &hw->adminq))
3346 ice_aq_clear_pxe_mode(hw);
3347 }
3348
3349 /**
3350 * ice_aq_set_port_params - set physical port parameters
3351 * @pi: pointer to the port info struct
3352 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
3353 * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
3354 * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
3355 * @double_vlan: if set, double VLAN is enabled
3356 * @cd: pointer to command details structure or NULL
3357 *
3358 * Set Physical port parameters (0x0203)
3359 */
3360 int
3361 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
3362 bool save_bad_pac, bool pad_short_pac, bool double_vlan,
3363 struct ice_sq_cd *cd)
3364 {
3365 struct ice_aqc_set_port_params *cmd;
3366 struct ice_hw *hw = pi->hw;
3367 struct ice_aq_desc desc;
3368 u16 cmd_flags = 0;
3369
3370 cmd = &desc.params.set_port_params;
3371
3372 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
3373 cmd->lb_mode = pi->loopback_mode |
3374 ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_VALID;
3375 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
3376 if (save_bad_pac)
3377 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
3378 if (pad_short_pac)
3379 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
3380 if (double_vlan)
3381 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
3382 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
3383
3384 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3385 }
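
/* Illustrative sketch, not part of the driver: enable double VLAN (QinQ)
 * on a port while leaving bad-frame forwarding and short-packet padding
 * disabled. VSI 0 is a hypothetical bad_frame_vsi; the value is not used
 * when save_bad_pac is false.
 */
static int example_enable_double_vlan(struct ice_port_info *pi)
{
	return ice_aq_set_port_params(pi, 0, false, false, true, NULL);
}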
3386
3387 /**
3388 * ice_is_100m_speed_supported
3389 * @hw: pointer to the HW struct
3390 *
3391 * returns true if 100M speeds are supported by the device,
3392 * false otherwise.
3393 */
3394 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3395 {
3396 switch (hw->device_id) {
3397 case ICE_DEV_ID_E822C_SGMII:
3398 case ICE_DEV_ID_E822L_SGMII:
3399 case ICE_DEV_ID_E823L_1GBE:
3400 case ICE_DEV_ID_E823C_SGMII:
3401 return true;
3402 default:
3403 return false;
3404 }
3405 }
3406
3407 /**
3408 * ice_get_link_speed_based_on_phy_type - returns link speed
3409 * @phy_type_low: lower part of phy_type
3410 * @phy_type_high: higher part of phy_type
3411 *
3412 * This helper function will convert an entry in PHY type structure
3413 * [phy_type_low, phy_type_high] to its corresponding link speed.
3414 * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
3415 * should be set, as this function converts a single PHY type to its
3416 * speed.
3417 * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
3418 * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
3419 */
3420 static u16
3421 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3422 {
3423 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3424 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3425
3426 switch (phy_type_low) {
3427 case ICE_PHY_TYPE_LOW_100BASE_TX:
3428 case ICE_PHY_TYPE_LOW_100M_SGMII:
3429 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3430 break;
3431 case ICE_PHY_TYPE_LOW_1000BASE_T:
3432 case ICE_PHY_TYPE_LOW_1000BASE_SX:
3433 case ICE_PHY_TYPE_LOW_1000BASE_LX:
3434 case ICE_PHY_TYPE_LOW_1000BASE_KX:
3435 case ICE_PHY_TYPE_LOW_1G_SGMII:
3436 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3437 break;
3438 case ICE_PHY_TYPE_LOW_2500BASE_T:
3439 case ICE_PHY_TYPE_LOW_2500BASE_X:
3440 case ICE_PHY_TYPE_LOW_2500BASE_KX:
3441 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3442 break;
3443 case ICE_PHY_TYPE_LOW_5GBASE_T:
3444 case ICE_PHY_TYPE_LOW_5GBASE_KR:
3445 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3446 break;
3447 case ICE_PHY_TYPE_LOW_10GBASE_T:
3448 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3449 case ICE_PHY_TYPE_LOW_10GBASE_SR:
3450 case ICE_PHY_TYPE_LOW_10GBASE_LR:
3451 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3452 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3453 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3454 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3455 break;
3456 case ICE_PHY_TYPE_LOW_25GBASE_T:
3457 case ICE_PHY_TYPE_LOW_25GBASE_CR:
3458 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3459 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3460 case ICE_PHY_TYPE_LOW_25GBASE_SR:
3461 case ICE_PHY_TYPE_LOW_25GBASE_LR:
3462 case ICE_PHY_TYPE_LOW_25GBASE_KR:
3463 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3464 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3465 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3466 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3467 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3468 break;
3469 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3470 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3471 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3472 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3473 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3474 case ICE_PHY_TYPE_LOW_40G_XLAUI:
3475 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3476 break;
3477 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3478 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3479 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3480 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3481 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3482 case ICE_PHY_TYPE_LOW_50G_LAUI2:
3483 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3484 case ICE_PHY_TYPE_LOW_50G_AUI2:
3485 case ICE_PHY_TYPE_LOW_50GBASE_CP:
3486 case ICE_PHY_TYPE_LOW_50GBASE_SR:
3487 case ICE_PHY_TYPE_LOW_50GBASE_FR:
3488 case ICE_PHY_TYPE_LOW_50GBASE_LR:
3489 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3490 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3491 case ICE_PHY_TYPE_LOW_50G_AUI1:
3492 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3493 break;
3494 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3495 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3496 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3497 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3498 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3499 case ICE_PHY_TYPE_LOW_100G_CAUI4:
3500 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3501 case ICE_PHY_TYPE_LOW_100G_AUI4:
3502 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3503 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3504 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3505 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3506 case ICE_PHY_TYPE_LOW_100GBASE_DR:
3507 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3508 break;
3509 default:
3510 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3511 break;
3512 }
3513
3514 switch (phy_type_high) {
3515 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3516 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3517 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3518 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3519 case ICE_PHY_TYPE_HIGH_100G_AUI2:
3520 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3521 break;
3522 case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
3523 case ICE_PHY_TYPE_HIGH_200G_SR4:
3524 case ICE_PHY_TYPE_HIGH_200G_FR4:
3525 case ICE_PHY_TYPE_HIGH_200G_LR4:
3526 case ICE_PHY_TYPE_HIGH_200G_DR4:
3527 case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
3528 case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
3529 case ICE_PHY_TYPE_HIGH_200G_AUI4:
3530 case ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC:
3531 case ICE_PHY_TYPE_HIGH_200G_AUI8:
3532 speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
3533 break;
3534 default:
3535 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3536 break;
3537 }
3538
3539 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3540 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3541 return ICE_AQ_LINK_SPEED_UNKNOWN;
3542 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3543 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3544 return ICE_AQ_LINK_SPEED_UNKNOWN;
3545 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3546 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3547 return speed_phy_type_low;
3548 else
3549 return speed_phy_type_high;
3550 }
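
/* Illustrative sketch, not part of the driver: the helper above expects
 * exactly one PHY type bit across both words. A single bit in phy_type_low
 * yields its speed; bits in both words are ambiguous and report unknown.
 */
static void example_phy_type_to_speed(void)
{
	u16 speed;

	/* one bit set in phy_type_low -> ICE_AQ_LINK_SPEED_25GB */
	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR,
						     0);

	/* bits set in both words -> ICE_AQ_LINK_SPEED_UNKNOWN */
	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR,
						     ICE_PHY_TYPE_HIGH_100G_CAUI2);
	(void)speed;
}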
3551
3552 /**
3553 * ice_update_phy_type
3554 * @phy_type_low: pointer to the lower part of phy_type
3555 * @phy_type_high: pointer to the higher part of phy_type
3556 * @link_speeds_bitmap: targeted link speeds bitmap
3557 *
3558 * Note: For the format of link_speeds_bitmap, see
3559 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
3560 * link_speeds_bitmap that includes multiple speeds.
3561 *
3562 * Each entry in the [phy_type_low, phy_type_high] structure represents
3563 * a certain link speed. This helper function turns on bits in
3564 * [phy_type_low, phy_type_high] based on the value of the
3565 * link_speeds_bitmap input parameter.
3566 */
3567 void
3568 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3569 u16 link_speeds_bitmap)
3570 {
3571 u64 pt_high;
3572 u64 pt_low;
3573 int index;
3574 u16 speed;
3575
3576 /* We first check with low part of phy_type */
3577 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3578 pt_low = BIT_ULL(index);
3579 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3580
3581 if (link_speeds_bitmap & speed)
3582 *phy_type_low |= BIT_ULL(index);
3583 }
3584
3585 /* We then check with high part of phy_type */
3586 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3587 pt_high = BIT_ULL(index);
3588 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3589
3590 if (link_speeds_bitmap & speed)
3591 *phy_type_high |= BIT_ULL(index);
3592 }
3593 }
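
/* Illustrative sketch, not part of the driver: build the PHY type masks
 * for an advertised-speed request covering both 10G and 25G. Every PHY
 * type whose speed matches a bit in the bitmap is turned on.
 */
static void example_build_phy_type_masks(void)
{
	u64 phy_type_low = 0, phy_type_high = 0;

	ice_update_phy_type(&phy_type_low, &phy_type_high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
	/* phy_type_low now has all 10G and 25G PHY type bits set */
	(void)phy_type_low;
	(void)phy_type_high;
}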
3594
3595 /**
3596 * ice_aq_set_phy_cfg
3597 * @hw: pointer to the HW struct
3598 * @pi: port info structure of the interested logical port
3599 * @cfg: structure with PHY configuration data to be set
3600 * @cd: pointer to command details structure or NULL
3601 *
3602 * Set the various PHY configuration parameters supported on the Port.
3603 * One or more of the Set PHY config parameters may be ignored in an MFP
3604 * mode as the PF may not have the privilege to set some of the PHY Config
3605 * parameters. This status will be indicated by the command response (0x0601).
3606 */
3607 int
3608 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3609 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3610 {
3611 struct ice_aq_desc desc;
3612 int status;
3613
3614 if (!cfg)
3615 return ICE_ERR_PARAM;
3616
3617 /* Ensure that only valid bits of cfg->caps can be turned on. */
3618 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3619 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3620 cfg->caps);
3621
3622 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3623 }
3624
3625 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3626 desc.params.set_phy.lport_num = pi->lport;
3627 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3628
3629 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3630 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
3631 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
3632 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
3633 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
3634 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
3635 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
3636 cfg->low_power_ctrl_an);
3637 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
3638 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
3639 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
3640 cfg->link_fec_opt);
3641
3642 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3643
3644 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3645 status = 0;
3646
3647 if (!status)
3648 pi->phy.curr_user_phy_cfg = *cfg;
3649
3650 return status;
3651 }
3652
3653 /**
3654 * ice_update_link_info - update status of the HW network link
3655 * @pi: port info structure of the interested logical port
3656 */
3657 int ice_update_link_info(struct ice_port_info *pi)
3658 {
3659 struct ice_link_status *li;
3660 int status;
3661
3662 if (!pi)
3663 return ICE_ERR_PARAM;
3664
3665 li = &pi->phy.link_info;
3666
3667 status = ice_aq_get_link_info(pi, true, NULL, NULL);
3668 if (status)
3669 return status;
3670
3671 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3672 struct ice_aqc_get_phy_caps_data *pcaps;
3673 struct ice_hw *hw;
3674
3675 hw = pi->hw;
3676 pcaps = (struct ice_aqc_get_phy_caps_data *)
3677 ice_malloc(hw, sizeof(*pcaps));
3678 if (!pcaps)
3679 return ICE_ERR_NO_MEMORY;
3680
3681 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3682 pcaps, NULL);
3683
3684 if (!status)
3685 ice_memcpy(li->module_type, &pcaps->module_type,
3686 sizeof(li->module_type),
3687 ICE_NONDMA_TO_NONDMA);
3688
3689 ice_free(hw, pcaps);
3690 }
3691
3692 return status;
3693 }
3694
3695 /**
3696 * ice_cache_phy_user_req
3697 * @pi: port information structure
3698 * @cache_data: PHY logging data
3699 * @cache_mode: PHY logging mode
3700 *
3701 * Log the user request on (FC, FEC, SPEED) for later use.
3702 */
3703 static void
3704 ice_cache_phy_user_req(struct ice_port_info *pi,
3705 struct ice_phy_cache_mode_data cache_data,
3706 enum ice_phy_cache_mode cache_mode)
3707 {
3708 if (!pi)
3709 return;
3710
3711 switch (cache_mode) {
3712 case ICE_FC_MODE:
3713 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3714 break;
3715 case ICE_SPEED_MODE:
3716 pi->phy.curr_user_speed_req =
3717 cache_data.data.curr_user_speed_req;
3718 break;
3719 case ICE_FEC_MODE:
3720 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3721 break;
3722 default:
3723 break;
3724 }
3725 }
3726
3727 /**
3728 * ice_caps_to_fc_mode
3729 * @caps: PHY capabilities
3730 *
3731 * Convert PHY FC capabilities to ice FC mode
3732 */
3733 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3734 {
3735 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3736 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3737 return ICE_FC_FULL;
3738
3739 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3740 return ICE_FC_TX_PAUSE;
3741
3742 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3743 return ICE_FC_RX_PAUSE;
3744
3745 return ICE_FC_NONE;
3746 }
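
/* Illustrative sketch, not part of the driver: with both pause bits set in
 * the PHY capabilities byte, the helper above reports full flow control.
 */
static void example_fc_mode(void)
{
	u8 caps = ICE_AQC_PHY_EN_TX_LINK_PAUSE | ICE_AQC_PHY_EN_RX_LINK_PAUSE;

	/* evaluates to ICE_FC_FULL */
	(void)ice_caps_to_fc_mode(caps);
}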
3747
3748 /**
3749 * ice_caps_to_fec_mode
3750 * @caps: PHY capabilities
3751 * @fec_options: Link FEC options
3752 *
3753 * Convert PHY FEC capabilities to ice FEC mode
3754 */
3755 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3756 {
3757 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) {
3758 if (fec_options & ICE_AQC_PHY_FEC_DIS)
3759 return ICE_FEC_DIS_AUTO;
3760 else
3761 return ICE_FEC_AUTO;
3762 }
3763
3764 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3765 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3766 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3767 ICE_AQC_PHY_FEC_25G_KR_REQ))
3768 return ICE_FEC_BASER;
3769
3770 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3771 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3772 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3773 return ICE_FEC_RS;
3774
3775 return ICE_FEC_NONE;
3776 }
3777
3778 /**
3779 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3780 * @pi: port information structure
3781 * @cfg: PHY configuration data to set FC mode
3782 * @req_mode: FC mode to configure
3783 */
3784 static int
3785 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3786 enum ice_fc_mode req_mode)
3787 {
3788 struct ice_phy_cache_mode_data cache_data;
3789 u8 pause_mask = 0x0;
3790
3791 if (!pi || !cfg)
3792 return ICE_ERR_BAD_PTR;
3793 switch (req_mode) {
3794 case ICE_FC_AUTO:
3795 {
3796 struct ice_aqc_get_phy_caps_data *pcaps;
3797 int status;
3798
3799 pcaps = (struct ice_aqc_get_phy_caps_data *)
3800 ice_malloc(pi->hw, sizeof(*pcaps));
3801 if (!pcaps)
3802 return ICE_ERR_NO_MEMORY;
3803 /* Query the value of FC that both the NIC and attached media
3804 * can do.
3805 */
3806 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3807 pcaps, NULL);
3808 if (status) {
3809 ice_free(pi->hw, pcaps);
3810 return status;
3811 }
3812
3813 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3814 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3815
3816 ice_free(pi->hw, pcaps);
3817 break;
3818 }
3819 case ICE_FC_FULL:
3820 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3821 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3822 break;
3823 case ICE_FC_RX_PAUSE:
3824 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3825 break;
3826 case ICE_FC_TX_PAUSE:
3827 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3828 break;
3829 default:
3830 break;
3831 }
3832
3833 /* clear the old pause settings */
3834 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3835 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3836
3837 /* set the new capabilities */
3838 cfg->caps |= pause_mask;
3839
3840 /* Cache user FC request */
3841 cache_data.data.curr_user_fc_req = req_mode;
3842 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3843
3844 return 0;
3845 }
3846
3847 /**
3848 * ice_set_fc
3849 * @pi: port information structure
3850 * @aq_failures: pointer to status code, specific to ice_set_fc routine
3851 * @ena_auto_link_update: enable automatic link update
3852 *
3853 * Set the requested flow control mode.
3854 */
3855 int
3856 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3857 {
3858 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3859 struct ice_aqc_get_phy_caps_data *pcaps;
3860 struct ice_hw *hw;
3861 int status;
3862
3863 if (!pi || !aq_failures)
3864 return ICE_ERR_BAD_PTR;
3865
3866 *aq_failures = 0;
3867 hw = pi->hw;
3868
3869 pcaps = (struct ice_aqc_get_phy_caps_data *)
3870 ice_malloc(hw, sizeof(*pcaps));
3871 if (!pcaps)
3872 return ICE_ERR_NO_MEMORY;
3873
3874 /* Get the current PHY config */
3875 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3876 pcaps, NULL);
3877
3878 if (status) {
3879 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3880 goto out;
3881 }
3882
3883 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3884
3885 /* Configure the set PHY data */
3886 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3887 if (status) {
3888 if (status != ICE_ERR_BAD_PTR)
3889 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3890
3891 goto out;
3892 }
3893
3894 /* If the capabilities have changed, then set the new config */
3895 if (cfg.caps != pcaps->caps) {
3896 int retry_count, retry_max = 10;
3897
3898 /* Auto restart link so settings take effect */
3899 if (ena_auto_link_update)
3900 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3901
3902 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3903 if (status) {
3904 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3905 goto out;
3906 }
3907
3908 /* Update the link info
3909 * It sometimes takes a really long time for link to
3910 * come back from the atomic reset. Thus, we wait a
3911 * little bit.
3912 */
3913 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3914 status = ice_update_link_info(pi);
3915
3916 if (!status)
3917 break;
3918
3919 ice_msec_delay(100, true);
3920 }
3921
3922 if (status)
3923 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3924 }
3925
3926 out:
3927 ice_free(hw, pcaps);
3928 return status;
3929 }
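
/* Illustrative sketch, not part of the driver: request full flow control
 * with automatic link restart and log which stage failed, if any. Any
 * locking the surrounding driver requires is assumed to be held.
 */
static int example_request_full_fc(struct ice_port_info *pi)
{
	u8 aq_failures = 0;
	int status;

	pi->fc.req_mode = ICE_FC_FULL;
	status = ice_set_fc(pi, &aq_failures, true);
	if (status)
		ice_debug(pi->hw, ICE_DBG_LINK, "set fc failed, stage 0x%x\n",
			  aq_failures);
	return status;
}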
3930
3931 /**
3932 * ice_phy_caps_equals_cfg
3933 * @phy_caps: PHY capabilities
3934 * @phy_cfg: PHY configuration
3935 *
3936 * Helper function to determine if PHY capabilities matches PHY
3937 * configuration
3938 */
3939 bool
3940 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3941 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3942 {
3943 u8 caps_mask, cfg_mask;
3944
3945 if (!phy_caps || !phy_cfg)
3946 return false;
3947
3948 /* These bits are not common between capabilities and configuration.
3949 * Do not use them to determine equality.
3950 */
3951 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3952 ICE_AQC_PHY_EN_MOD_QUAL);
3953 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3954
3955 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3956 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3957 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3958 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3959 phy_caps->eee_cap != phy_cfg->eee_cap ||
3960 phy_caps->eeer_value != phy_cfg->eeer_value ||
3961 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3962 return false;
3963
3964 return true;
3965 }
3966
3967 /**
3968 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3969 * @pi: port information structure
3970 * @caps: PHY ability structure to copy data from
3971 * @cfg: PHY configuration structure to copy data to
3972 *
3973 * Helper function to copy AQC PHY get ability data to PHY set configuration
3974 * data structure
3975 */
3976 void
3977 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3978 struct ice_aqc_get_phy_caps_data *caps,
3979 struct ice_aqc_set_phy_cfg_data *cfg)
3980 {
3981 if (!pi || !caps || !cfg)
3982 return;
3983
3984 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3985 cfg->phy_type_low = caps->phy_type_low;
3986 cfg->phy_type_high = caps->phy_type_high;
3987 cfg->caps = caps->caps;
3988 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3989 cfg->eee_cap = caps->eee_cap;
3990 cfg->eeer_value = caps->eeer_value;
3991 cfg->link_fec_opt = caps->link_fec_options;
3992 cfg->module_compliance_enforcement =
3993 caps->module_compliance_enforcement;
3994 }
3995
3996 /**
3997 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3998 * @pi: port information structure
3999 * @cfg: PHY configuration data to set FEC mode
4000 * @fec: FEC mode to configure
4001 */
4002 int
4003 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
4004 enum ice_fec_mode fec)
4005 {
4006 struct ice_aqc_get_phy_caps_data *pcaps;
4007 struct ice_hw *hw;
4008 int status = 0;
4009
4010 if (!pi || !cfg)
4011 return ICE_ERR_BAD_PTR;
4012
4013 hw = pi->hw;
4014
4015 pcaps = (struct ice_aqc_get_phy_caps_data *)
4016 ice_malloc(hw, sizeof(*pcaps));
4017 if (!pcaps)
4018 return ICE_ERR_NO_MEMORY;
4019
4020 status = ice_aq_get_phy_caps(pi, false,
4021 (ice_fw_supports_report_dflt_cfg(hw) ?
4022 ICE_AQC_REPORT_DFLT_CFG :
4023 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
4024
4025 if (status)
4026 goto out;
4027
4028 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
4029 cfg->link_fec_opt = pcaps->link_fec_options;
4030
4031 switch (fec) {
4032 case ICE_FEC_BASER:
4033 /* Clear RS bits, and AND BASE-R ability
4034 * bits and OR request bits.
4035 */
4036 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
4037 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
4038 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
4039 ICE_AQC_PHY_FEC_25G_KR_REQ;
4040 break;
4041 case ICE_FEC_RS:
4042 /* Clear BASE-R bits, and AND RS ability
4043 * bits and OR request bits.
4044 */
4045 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
4046 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
4047 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
4048 break;
4049 case ICE_FEC_NONE:
4050 /* Clear all FEC option bits. */
4051 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
4052 break;
4053 case ICE_FEC_DIS_AUTO:
4054 /* Set No FEC and auto FEC */
4055 if (!ice_fw_supports_fec_dis_auto(hw)) {
4056 status = ICE_ERR_NOT_SUPPORTED;
4057 goto out;
4058 }
4059 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
4060 /* fall-through */
4061 case ICE_FEC_AUTO:
4062 /* AND auto FEC bit, and all caps bits. */
4063 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
4064 cfg->link_fec_opt |= pcaps->link_fec_options;
4065 break;
4066 default:
4067 status = ICE_ERR_PARAM;
4068 break;
4069 }
4070
4071 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
4072 !ice_fw_supports_report_dflt_cfg(pi->hw)) {
4073 struct ice_link_default_override_tlv tlv;
4074
4075 if (ice_get_link_default_override(&tlv, pi))
4076 goto out;
4077
4078 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
4079 (tlv.options & ICE_LINK_OVERRIDE_EN))
4080 cfg->link_fec_opt = tlv.fec_options;
4081 }
4082
4083 out:
4084 ice_free(hw, pcaps);
4085
4086 return status;
4087 }
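
/* Illustrative sketch, not part of the driver: the usual FEC change flow
 * reads the active PHY configuration, copies it into a set-config request,
 * patches only the FEC fields, and writes the result back.
 */
static int example_set_fec_rs(struct ice_port_info *pi)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw = pi->hw;
	int status;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (!status) {
		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
		status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
	}
	if (!status)
		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);

	ice_free(hw, pcaps);
	return status;
}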
4088
4089 /**
4090 * ice_get_link_status - get status of the HW network link
4091 * @pi: port information structure
4092 * @link_up: pointer to bool (true/false = linkup/linkdown)
4093 *
4094 * The variable link_up is true if the link is up, false if it is down.
4095 * The value of link_up is invalid if status is non-zero. As a
4096 * result of this call, link status reporting becomes enabled.
4097 */
4098 int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
4099 {
4100 struct ice_phy_info *phy_info;
4101 int status = 0;
4102
4103 if (!pi || !link_up)
4104 return ICE_ERR_PARAM;
4105
4106 phy_info = &pi->phy;
4107
4108 if (phy_info->get_link_info) {
4109 status = ice_update_link_info(pi);
4110
4111 if (status)
4112 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
4113 status);
4114 }
4115
4116 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
4117
4118 return status;
4119 }
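
/* Illustrative sketch, not part of the driver: poll the link state with
 * the helper above. Setting get_link_info first forces a refresh from
 * firmware instead of returning the cached state.
 */
static bool example_poll_link(struct ice_port_info *pi)
{
	bool link_up = false;

	pi->phy.get_link_info = true;
	if (ice_get_link_status(pi, &link_up))
		return false;

	return link_up;
}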
4120
4121 /**
4122 * ice_aq_set_link_restart_an
4123 * @pi: pointer to the port information structure
4124 * @ena_link: if true: enable link, if false: disable link
4125 * @cd: pointer to command details structure or NULL
4126 *
4127 * Sets up the link and restarts the Auto-Negotiation over the link.
4128 */
4129 int
4130 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
4131 struct ice_sq_cd *cd)
4132 {
4133 int status = ICE_ERR_AQ_ERROR;
4134 struct ice_aqc_restart_an *cmd;
4135 struct ice_aq_desc desc;
4136
4137 cmd = &desc.params.restart_an;
4138
4139 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
4140
4141 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
4142 cmd->lport_num = pi->lport;
4143 if (ena_link)
4144 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
4145 else
4146 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
4147
4148 status = ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
4149 if (status)
4150 return status;
4151
4152 if (ena_link)
4153 pi->phy.curr_user_phy_cfg.caps |= ICE_AQC_PHY_EN_LINK;
4154 else
4155 pi->phy.curr_user_phy_cfg.caps &= ~ICE_AQC_PHY_EN_LINK;
4156
4157 return 0;
4158 }
4159
4160 /**
4161 * ice_aq_set_event_mask
4162 * @hw: pointer to the HW struct
4163 * @port_num: port number of the physical function
4164 * @mask: event mask to be set
4165 * @cd: pointer to command details structure or NULL
4166 *
4167 * Set event mask (0x0613)
4168 */
4169 int
4170 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
4171 struct ice_sq_cd *cd)
4172 {
4173 struct ice_aqc_set_event_mask *cmd;
4174 struct ice_aq_desc desc;
4175
4176 cmd = &desc.params.set_event_mask;
4177
4178 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
4179
4180 cmd->lport_num = port_num;
4181
4182 cmd->event_mask = CPU_TO_LE16(mask);
4183 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4184 }
4185
4186 /**
4187 * ice_aq_set_mac_loopback
4188 * @hw: pointer to the HW struct
4189 * @ena_lpbk: Enable or Disable loopback
4190 * @cd: pointer to command details structure or NULL
4191 *
4192 * Enable/disable loopback on a given port
4193 */
4194 int
4195 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
4196 {
4197 struct ice_aqc_set_mac_lb *cmd;
4198 struct ice_aq_desc desc;
4199
4200 cmd = &desc.params.set_mac_lb;
4201
4202 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
4203 if (ena_lpbk)
4204 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
4205
4206 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4207 }
4208
4209 /**
4210 * ice_aq_set_port_id_led
4211 * @pi: pointer to the port information
4212 * @is_orig_mode: is this LED set to original mode (by the net-list)
4213 * @cd: pointer to command details structure or NULL
4214 *
4215 * Set LED value for the given port (0x06e9)
4216 */
4217 int
4218 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
4219 struct ice_sq_cd *cd)
4220 {
4221 struct ice_aqc_set_port_id_led *cmd;
4222 struct ice_hw *hw = pi->hw;
4223 struct ice_aq_desc desc;
4224
4225 cmd = &desc.params.set_port_id_led;
4226
4227 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
4228
4229 if (is_orig_mode)
4230 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
4231 else
4232 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
4233
4234 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4235 }
4236
4237 /**
4238 * ice_aq_sff_eeprom
4239 * @hw: pointer to the HW struct
4240 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
4241 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
4242 * @mem_addr: I2C offset; lower 8 bits hold the address, upper 8 bits are zero padding.
4243 * @page: QSFP page
4244 * @set_page: set or ignore the page
4245 * @data: pointer to data buffer to be read/written to the I2C device.
4246 * @length: 1-16 bytes for a read, 1 byte for a write.
4247 * @write: false to read, true to write.
4248 * @cd: pointer to command details structure or NULL
4249 *
4250 * Read/Write SFF EEPROM (0x06EE)
4251 */
4252 int
4253 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
4254 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
4255 bool write, struct ice_sq_cd *cd)
4256 {
4257 struct ice_aqc_sff_eeprom *cmd;
4258 struct ice_aq_desc desc;
4259 int status;
4260
4261 if (!data || (mem_addr & 0xff00))
4262 return ICE_ERR_PARAM;
4263
4264 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
4265 cmd = &desc.params.read_write_sff_param;
4266 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
4267 cmd->lport_num = (u8)(lport & 0xff);
4268 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
4269 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
4270 ICE_AQC_SFF_I2CBUS_7BIT_M) |
4271 ((set_page <<
4272 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
4273 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
4274 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
4275 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
4276 if (write)
4277 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
4278
4279 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
4280 return status;
4281 }
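
/* Illustrative sketch, not part of the driver: read the SFF-8024
 * identifier byte at offset 0 of the module memory map, on the typical
 * 0xA0 I2C address. Per the @lport encoding above, 0x100 selects logical
 * port 0 with the valid bit set.
 */
static int example_read_module_id(struct ice_hw *hw, u8 *id)
{
	return ice_aq_sff_eeprom(hw, 0x100, 0xA0, 0, 0, 0, id, 1, false,
				 NULL);
}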
4282
4283 /**
4284 * ice_aq_prog_topo_dev_nvm
4285 * @hw: pointer to the hardware structure
4286 * @topo_params: pointer to structure storing topology parameters for a device
4287 * @cd: pointer to command details structure or NULL
4288 *
4289 * Program Topology Device NVM (0x06F2)
4290 *
4291 */
4292 int
4293 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
4294 struct ice_aqc_link_topo_params *topo_params,
4295 struct ice_sq_cd *cd)
4296 {
4297 struct ice_aqc_prog_topo_dev_nvm *cmd;
4298 struct ice_aq_desc desc;
4299
4300 cmd = &desc.params.prog_topo_dev_nvm;
4301
4302 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
4303
4304 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4305 ICE_NONDMA_TO_NONDMA);
4306
4307 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4308 }
4309
4310 /**
4311 * ice_aq_read_topo_dev_nvm
4312 * @hw: pointer to the hardware structure
4313 * @topo_params: pointer to structure storing topology parameters for a device
4314 * @start_address: byte offset in the topology device NVM
4315 * @data: pointer to data buffer
4316 * @data_size: number of bytes to be read from the topology device NVM
4317 * @cd: pointer to command details structure or NULL
4318 * Read Topology Device NVM (0x06F3)
4319 *
4320 */
4321 int
4322 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
4323 struct ice_aqc_link_topo_params *topo_params,
4324 u32 start_address, u8 *data, u8 data_size,
4325 struct ice_sq_cd *cd)
4326 {
4327 struct ice_aqc_read_topo_dev_nvm *cmd;
4328 struct ice_aq_desc desc;
4329 int status;
4330
4331 if (!data || data_size == 0 ||
4332 data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
4333 return ICE_ERR_PARAM;
4334
4335 cmd = &desc.params.read_topo_dev_nvm;
4336
4337 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
4338
4339 desc.datalen = CPU_TO_LE16(data_size);
4340 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4341 ICE_NONDMA_TO_NONDMA);
4342 cmd->start_address = CPU_TO_LE32(start_address);
4343
4344 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4345 if (status)
4346 return status;
4347
4348 ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
4349
4350 return 0;
4351 }
4352
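/**
 * ice_lut_type_to_size - convert an RSS LUT type to its size in entries
 * @lut_type: RSS LUT type (VSI, global, PF or small PF)
 *
 * Returns 0 for an unrecognized type.
 */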
4353 static u16 ice_lut_type_to_size(u16 lut_type)
4354 {
4355 switch (lut_type) {
4356 case ICE_LUT_VSI:
4357 return ICE_LUT_VSI_SIZE;
4358 case ICE_LUT_GLOBAL:
4359 return ICE_LUT_GLOBAL_SIZE;
4360 case ICE_LUT_PF:
4361 return ICE_LUT_PF_SIZE;
4362 case ICE_LUT_PF_SMALL:
4363 return ICE_LUT_PF_SMALL_SIZE;
4364 default:
4365 return 0;
4366 }
4367 }
4368
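/**
 * ice_lut_size_to_flag - convert an RSS LUT size to AQ table-size flags
 * @lut_size: RSS LUT size in entries
 *
 * Returns the flag value shifted into the
 * ICE_AQC_GSET_RSS_LUT_TABLE_SIZE field position.
 */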
4369 static u16 ice_lut_size_to_flag(u16 lut_size)
4370 {
4371 u16 f = 0;
4372
4373 switch (lut_size) {
4374 case ICE_LUT_GLOBAL_SIZE:
4375 f = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG;
4376 break;
4377 case ICE_LUT_PF_SIZE:
4378 f = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG;
4379 break;
4380 default:
4381 break;
4382 }
4383 return f << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S;
4384 }
4385
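/**
 * ice_lut_size_to_type - convert an RSS LUT size back to its type
 * @lut_size: RSS LUT size in entries
 *
 * Returns -1 if the size does not match a known LUT type.
 */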
4386 int ice_lut_size_to_type(int lut_size)
4387 {
4388 switch (lut_size) {
4389 case ICE_LUT_VSI_SIZE:
4390 return ICE_LUT_VSI;
4391 case ICE_LUT_GLOBAL_SIZE:
4392 return ICE_LUT_GLOBAL;
4393 case ICE_LUT_PF_SIZE:
4394 return ICE_LUT_PF;
4395 case ICE_LUT_PF_SMALL_SIZE:
4396 return ICE_LUT_PF_SMALL;
4397 default:
4398 return -1;
4399 }
4400 }
4401
4402 /**
4403 * __ice_aq_get_set_rss_lut
4404 * @hw: pointer to the hardware structure
4405 * @params: RSS LUT parameters
4406 * @set: set true to set the table, false to get the table
4407 *
4408 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
4409 */
4410 static int
4411 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
4412 {
4413 u16 flags, vsi_id, lut_type, lut_size, glob_lut_idx = 0, vsi_handle;
4414 struct ice_aqc_get_set_rss_lut *cmd_resp;
4415 struct ice_aq_desc desc;
4416 int status;
4417 u8 *lut;
4418
4419 if (!params)
4420 return ICE_ERR_PARAM;
4421
4422 vsi_handle = params->vsi_handle;
4423 lut = params->lut;
4424 lut_size = ice_lut_type_to_size(params->lut_type);
4425 lut_type = params->lut_type & ICE_LUT_TYPE_MASK;
4426 cmd_resp = &desc.params.get_set_rss_lut;
4427 if (lut_type == ICE_LUT_GLOBAL)
4428 glob_lut_idx = params->global_lut_id;
4429
4430 if (!lut || !lut_size || !ice_is_vsi_valid(hw, vsi_handle))
4431 return ICE_ERR_PARAM;
4432
4433 if (lut_size > params->lut_size)
4434 return ICE_ERR_INVAL_SIZE;
4435
4436 if (set && lut_size != params->lut_size)
4437 return ICE_ERR_PARAM;
4438
4439 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4440
4441 if (set) {
4442 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
4443 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4444 } else {
4445 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
4446 }
4447
4448 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4449 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
4450 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
4451 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
4452
4453 flags = ice_lut_size_to_flag(lut_size) |
4454 ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
4455 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M) |
4456 ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
4457 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
4458
4459 cmd_resp->flags = CPU_TO_LE16(flags);
4460 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4461 params->lut_size = LE16_TO_CPU(desc.datalen);
4462 return status;
4463 }
4464
4465 /**
4466 * ice_aq_get_rss_lut
4467 * @hw: pointer to the hardware structure
4468 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4469 *
4470 * get the RSS lookup table, PF or VSI type
4471 */
4472 int
4473 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4474 {
4475 return __ice_aq_get_set_rss_lut(hw, get_params, false);
4476 }
4477
4478 /**
4479 * ice_aq_set_rss_lut
4480 * @hw: pointer to the hardware structure
4481 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4482 *
4483 * set the RSS lookup table, PF or VSI type
4484 */
4485 int
4486 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4487 {
4488 return __ice_aq_get_set_rss_lut(hw, set_params, true);
4489 }
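
/* Illustrative sketch, not part of the driver: spread a VSI's RSS LUT
 * round-robin across num_rxq queues and program it. The parameter struct
 * members follow their use in __ice_aq_get_set_rss_lut() above.
 */
static int example_fill_vsi_lut(struct ice_hw *hw, u16 vsi_handle, u16 num_rxq)
{
	struct ice_aq_get_set_rss_lut_params params = { 0 };
	u8 lut[ICE_LUT_VSI_SIZE];
	u16 i;

	if (!num_rxq)
		return ICE_ERR_PARAM;

	for (i = 0; i < ICE_LUT_VSI_SIZE; i++)
		lut[i] = (u8)(i % num_rxq);

	params.vsi_handle = vsi_handle;
	params.lut_type = ICE_LUT_VSI;
	params.lut = lut;
	params.lut_size = ICE_LUT_VSI_SIZE;

	return ice_aq_set_rss_lut(hw, &params);
}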
4490
4491 /**
4492 * __ice_aq_get_set_rss_key
4493 * @hw: pointer to the HW struct
4494 * @vsi_id: VSI FW index
4495 * @key: pointer to key info struct
4496 * @set: set true to set the key, false to get the key
4497 *
4498 * get (0x0B04) or set (0x0B02) the RSS key per VSI
4499 */
4500 static int __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4501 struct ice_aqc_get_set_rss_keys *key,
4502 bool set)
4503 {
4504 struct ice_aqc_get_set_rss_key *cmd_resp;
4505 u16 key_size = sizeof(*key);
4506 struct ice_aq_desc desc;
4507
4508 cmd_resp = &desc.params.get_set_rss_key;
4509
4510 if (set) {
4511 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4512 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4513 } else {
4514 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4515 }
4516
4517 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4518 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
4519 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
4520 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
4521
4522 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4523 }
4524
4525 /**
4526 * ice_aq_get_rss_key
4527 * @hw: pointer to the HW struct
4528 * @vsi_handle: software VSI handle
4529 * @key: pointer to key info struct
4530 *
4531 * get the RSS key per VSI
4532 */
4533 int
4534 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4535 struct ice_aqc_get_set_rss_keys *key)
4536 {
4537 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4538 return ICE_ERR_PARAM;
4539
4540 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4541 key, false);
4542 }
4543
4544 /**
4545 * ice_aq_set_rss_key
4546 * @hw: pointer to the HW struct
4547 * @vsi_handle: software VSI handle
4548 * @keys: pointer to key info struct
4549 *
4550 * set the RSS key per VSI
4551 */
4552 int
4553 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4554 struct ice_aqc_get_set_rss_keys *keys)
4555 {
4556 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4557 return ICE_ERR_PARAM;
4558
4559 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4560 keys, true);
4561 }
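
/* Illustrative sketch, not part of the driver: program a caller-supplied
 * hash key for a VSI, assuming the standard_rss_key member declared for
 * struct ice_aqc_get_set_rss_keys in ice_adminq_cmd.h. The seed buffer is
 * assumed to be at least as large as that member.
 */
static int
example_set_rss_key(struct ice_hw *hw, u16 vsi_handle, const u8 *seed)
{
	struct ice_aqc_get_set_rss_keys keys;

	ice_memset(&keys, 0, sizeof(keys), ICE_NONDMA_MEM);
	ice_memcpy(keys.standard_rss_key, seed,
		   sizeof(keys.standard_rss_key), ICE_NONDMA_TO_NONDMA);

	return ice_aq_set_rss_key(hw, vsi_handle, &keys);
}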
4562
4563 /**
4564 * ice_aq_add_lan_txq
4565 * @hw: pointer to the hardware structure
4566 * @num_qgrps: Number of added queue groups
4567 * @qg_list: list of queue groups to be added
4568 * @buf_size: size of buffer for indirect command
4569 * @cd: pointer to command details structure or NULL
4570 *
4571 * Add Tx LAN queue (0x0C30)
4572 *
4573 * NOTE:
4574 * Prior to calling add Tx LAN queue:
4575 * Initialize the following as part of the Tx queue context:
4576 * Completion queue ID if the queue uses Completion queue, Quanta profile,
4577 * Cache profile and Packet shaper profile.
4578 *
4579 * After add Tx LAN queue AQ command is completed:
4580 * Interrupts should be associated with specific queues,
4581 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
4582 * flow.
4583 */
4584 int
4585 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4586 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4587 struct ice_sq_cd *cd)
4588 {
4589 struct ice_aqc_add_tx_qgrp *list;
4590 struct ice_aqc_add_txqs *cmd;
4591 struct ice_aq_desc desc;
4592 u16 i, sum_size = 0;
4593
4594 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4595
4596 cmd = &desc.params.add_txqs;
4597
4598 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4599
4600 if (!qg_list)
4601 return ICE_ERR_PARAM;
4602
4603 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4604 return ICE_ERR_PARAM;
4605
4606 for (i = 0, list = qg_list; i < num_qgrps; i++) {
4607 sum_size += ice_struct_size(list, txqs, list->num_txqs);
4608 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4609 list->num_txqs);
4610 }
4611
4612 if (buf_size != sum_size)
4613 return ICE_ERR_PARAM;
4614
4615 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4616
4617 cmd->num_qgrps = num_qgrps;
4618
4619 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4620 }
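
/* Illustrative sketch, not part of the driver: the qg_list buffer is a
 * packed sequence of variable-length groups, so buf_size must be computed
 * with ice_struct_size() per group, mirroring the validation loop above.
 * For a single group with num_txqs queues:
 */
static u16 example_txq_buf_size(struct ice_aqc_add_tx_qgrp *qg, u8 num_txqs)
{
	qg->num_txqs = num_txqs;

	/* header plus num_txqs queue entries */
	return (u16)ice_struct_size(qg, txqs, num_txqs);
}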
4621
4622 /**
4623 * ice_aq_dis_lan_txq
4624 * @hw: pointer to the hardware structure
4625 * @num_qgrps: number of groups in the list
4626 * @qg_list: the list of groups to disable
4627 * @buf_size: the total size of the qg_list buffer in bytes
4628 * @rst_src: if called due to reset, specifies the reset source
4629 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4630 * @cd: pointer to command details structure or NULL
4631 *
4632 * Disable LAN Tx queue (0x0C31)
4633 */
4634 static int
4635 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4636 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4637 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4638 struct ice_sq_cd *cd)
4639 {
4640 struct ice_aqc_dis_txq_item *item;
4641 struct ice_aqc_dis_txqs *cmd;
4642 struct ice_aq_desc desc;
4643 int status;
4644 u16 i, sz = 0;
4645
4646 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4647 cmd = &desc.params.dis_txqs;
4648 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4649
4650 /* qg_list can be NULL only in VM/VF reset flow */
4651 if (!qg_list && !rst_src)
4652 return ICE_ERR_PARAM;
4653
4654 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4655 return ICE_ERR_PARAM;
4656
4657 cmd->num_entries = num_qgrps;
4658
4659 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4660 ICE_AQC_Q_DIS_TIMEOUT_M);
4661
4662 switch (rst_src) {
4663 case ICE_VM_RESET:
4664 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4665 cmd->vmvf_and_timeout |=
4666 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
4667 break;
4668 case ICE_VF_RESET:
4669 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
4670 /* In this case, FW expects vmvf_num to be absolute VF ID */
4671 cmd->vmvf_and_timeout |=
4672 CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
4673 ICE_AQC_Q_DIS_VMVF_NUM_M);
4674 break;
4675 case ICE_NO_RESET:
4676 default:
4677 break;
4678 }
4679
4680 /* flush pipe on time out */
4681 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4682 /* If no queue group info, we are in a reset flow. Issue the AQ */
4683 if (!qg_list)
4684 goto do_aq;
4685
4686 /* set RD bit to indicate that command buffer is provided by the driver
4687 * and it needs to be read by the firmware
4688 */
4689 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4690
4691 for (i = 0, item = qg_list; i < num_qgrps; i++) {
4692 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
4693
4694 /* If the num of queues is even, add 2 bytes of padding */
4695 if ((item->num_qs % 2) == 0)
4696 item_size += 2;
4697
4698 sz += item_size;
4699
4700 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4701 }
4702
4703 if (buf_size != sz)
4704 return ICE_ERR_PARAM;
4705
4706 do_aq:
4707 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4708 if (status) {
4709 if (!qg_list)
4710 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4711 vmvf_num, hw->adminq.sq_last_status);
4712 else
4713 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4714 LE16_TO_CPU(qg_list[0].q_id[0]),
4715 hw->adminq.sq_last_status);
4716 }
4717 return status;
4718 }
4719
4720 /**
4721 * ice_aq_move_recfg_lan_txq
4722 * @hw: pointer to the hardware structure
4723 * @num_qs: number of queues to move/reconfigure
4724 * @is_move: true if this operation involves node movement
4725 * @is_tc_change: true if this operation involves a TC change
4726 * @subseq_call: true if this operation is a subsequent call
4727 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
4728 * @timeout: timeout in units of 100 usec (valid values 0-50)
4729 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
4730 * @buf: struct containing src/dest TEID and per-queue info
4731 * @buf_size: size of buffer for indirect command
4732 * @txqs_moved: out param, number of queues successfully moved
4733 * @cd: pointer to command details structure or NULL
4734 *
4735 * Move / Reconfigure Tx LAN queues (0x0C32)
4736 */
4737 int
4738 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
4739 bool is_tc_change, bool subseq_call, bool flush_pipe,
4740 u8 timeout, u32 *blocked_cgds,
4741 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
4742 u8 *txqs_moved, struct ice_sq_cd *cd)
4743 {
4744 struct ice_aqc_move_txqs *cmd;
4745 struct ice_aq_desc desc;
4746 int status;
4747
4748 cmd = &desc.params.move_txqs;
4749 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
4750
4751 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
4752 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
4753 return ICE_ERR_PARAM;
4754
4755 if (is_tc_change && !flush_pipe && !blocked_cgds)
4756 return ICE_ERR_PARAM;
4757
4758 if (!is_move && !is_tc_change)
4759 return ICE_ERR_PARAM;
4760
4761 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4762
4763 if (is_move)
4764 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
4765
4766 if (is_tc_change)
4767 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
4768
4769 if (subseq_call)
4770 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
4771
4772 if (flush_pipe)
4773 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
4774
4775 cmd->num_qs = num_qs;
4776 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
4777 ICE_AQC_Q_CMD_TIMEOUT_M);
4778
4779 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4780
4781 if (!status && txqs_moved)
4782 *txqs_moved = cmd->num_qs;
4783
4784 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
4785 is_tc_change && !flush_pipe)
4786 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
4787
4788 return status;
4789 }
4790
4791 /**
4792 * ice_aq_add_rdma_qsets
4793 * @hw: pointer to the hardware structure
4794 * @num_qset_grps: Number of RDMA Qset groups
4795 * @qset_list: list of qset groups to be added
4796 * @buf_size: size of buffer for indirect command
4797 * @cd: pointer to command details structure or NULL
4798 *
4799 * Add Tx RDMA Qsets (0x0C33)
4800 */
4801 int
4802 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
4803 struct ice_aqc_add_rdma_qset_data *qset_list,
4804 u16 buf_size, struct ice_sq_cd *cd)
4805 {
4806 struct ice_aqc_add_rdma_qset_data *list;
4807 struct ice_aqc_add_rdma_qset *cmd;
4808 struct ice_aq_desc desc;
4809 u16 i, sum_size = 0;
4810
4811 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4812
4813 cmd = &desc.params.add_rdma_qset;
4814
4815 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
4816
4817 if (!qset_list)
4818 return ICE_ERR_PARAM;
4819
4820 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
4821 return ICE_ERR_PARAM;
4822
4823 for (i = 0, list = qset_list; i < num_qset_grps; i++) {
4824 u16 num_qsets = LE16_TO_CPU(list->num_qsets);
4825
4826 sum_size += ice_struct_size(list, rdma_qsets, num_qsets);
4827 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
4828 num_qsets);
4829 }
4830
4831 if (buf_size != sum_size)
4832 return ICE_ERR_PARAM;
4833
4834 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4835
4836 cmd->num_qset_grps = num_qset_grps;
4837
4838 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
4839 }
4840
4841 /* End of FW Admin Queue command wrappers */
4842
4843 /**
4844 * ice_write_byte - write a byte to a packed context structure
4845 * @src_ctx: the context structure to read from
4846 * @dest_ctx: the context to be written to
4847 * @ce_info: a description of the struct to be filled
4848 */
4849 static void
4850 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4851 {
4852 u8 src_byte, dest_byte, mask;
4853 u8 *from, *dest;
4854 u16 shift_width;
4855
4856 /* copy from the next struct field */
4857 from = src_ctx + ce_info->offset;
4858
4859 /* prepare the bits and mask */
4860 shift_width = ce_info->lsb % 8;
4861 mask = (u8)(BIT(ce_info->width) - 1);
4862
4863 src_byte = *from;
4864 src_byte &= mask;
4865
4866 /* shift to correct alignment */
4867 mask <<= shift_width;
4868 src_byte <<= shift_width;
4869
4870 /* get the current bits from the target bit string */
4871 dest = dest_ctx + (ce_info->lsb / 8);
4872
4873 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
4874
4875 dest_byte &= ~mask; /* get the bits not changing */
4876 dest_byte |= src_byte; /* add in the new bits */
4877
4878 /* put it all back */
4879 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
4880 }
4881
4882 /**
4883 * ice_write_word - write a word to a packed context structure
4884 * @src_ctx: the context structure to read from
4885 * @dest_ctx: the context to be written to
4886 * @ce_info: a description of the struct to be filled
4887 */
4888 static void
4889 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4890 {
4891 u16 src_word, mask;
4892 __le16 dest_word;
4893 u8 *from, *dest;
4894 u16 shift_width;
4895
4896 /* copy from the next struct field */
4897 from = src_ctx + ce_info->offset;
4898
4899 /* prepare the bits and mask */
4900 shift_width = ce_info->lsb % 8;
4901 mask = BIT(ce_info->width) - 1;
4902
4903 /* don't swizzle the bits until after the mask because the mask bits
4904 * will be in a different bit position on big endian machines
4905 */
4906 src_word = *(u16 *)from;
4907 src_word &= mask;
4908
4909 /* shift to correct alignment */
4910 mask <<= shift_width;
4911 src_word <<= shift_width;
4912
4913 /* get the current bits from the target bit string */
4914 dest = dest_ctx + (ce_info->lsb / 8);
4915
4916 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
4917
4918 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
4919 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
4920
4921 /* put it all back */
4922 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
4923 }
4924
4925 /**
4926 * ice_write_dword - write a dword to a packed context structure
4927 * @src_ctx: the context structure to read from
4928 * @dest_ctx: the context to be written to
4929 * @ce_info: a description of the struct to be filled
4930 */
4931 static void
4932 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4933 {
4934 u32 src_dword, mask;
4935 __le32 dest_dword;
4936 u8 *from, *dest;
4937 u16 shift_width;
4938
4939 /* copy from the next struct field */
4940 from = src_ctx + ce_info->offset;
4941
4942 /* prepare the bits and mask */
4943 shift_width = ce_info->lsb % 8;
4944
4945 /* if the field width is exactly 32 on an x86 machine, then the shift
4946 * operation will not work because the SHL instruction's count is masked
4947 * to 5 bits, so the shift will do nothing
4948 */
4949 if (ce_info->width < 32)
4950 mask = BIT(ce_info->width) - 1;
4951 else
4952 mask = (u32)~0;
4953
4954 /* don't swizzle the bits until after the mask because the mask bits
4955 * will be in a different bit position on big endian machines
4956 */
4957 src_dword = *(u32 *)from;
4958 src_dword &= mask;
4959
4960 /* shift to correct alignment */
4961 mask <<= shift_width;
4962 src_dword <<= shift_width;
4963
4964 /* get the current bits from the target bit string */
4965 dest = dest_ctx + (ce_info->lsb / 8);
4966
4967 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
4968
4969 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
4970 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
4971
4972 /* put it all back */
4973 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
4974 }
4975
4976 /**
4977 * ice_write_qword - write a qword to a packed context structure
4978 * @src_ctx: the context structure to read from
4979 * @dest_ctx: the context to be written to
4980 * @ce_info: a description of the struct to be filled
4981 */
4982 static void
4983 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4984 {
4985 u64 src_qword, mask;
4986 __le64 dest_qword;
4987 u8 *from, *dest;
4988 u16 shift_width;
4989
4990 /* copy from the next struct field */
4991 from = src_ctx + ce_info->offset;
4992
4993 /* prepare the bits and mask */
4994 shift_width = ce_info->lsb % 8;
4995
4996 /* if the field width is exactly 64 on an x86 machine, then the shift
4997 * operation will not work because the SHL instruction's count is masked
4998 * to 6 bits, so the shift will do nothing
4999 */
5000 if (ce_info->width < 64)
5001 mask = BIT_ULL(ce_info->width) - 1;
5002 else
5003 mask = (u64)~0;
5004
5005 /* don't swizzle the bits until after the mask because the mask bits
5006 * will be in a different bit position on big endian machines
5007 */
5008 src_qword = *(u64 *)from;
5009 src_qword &= mask;
5010
5011 /* shift to correct alignment */
5012 mask <<= shift_width;
5013 src_qword <<= shift_width;
5014
5015 /* get the current bits from the target bit string */
5016 dest = dest_ctx + (ce_info->lsb / 8);
5017
5018 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
5019
5020 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
5021 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
5022
5023 /* put it all back */
5024 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
5025 }
5026
5027 /**
5028 * ice_set_ctx - set context bits in packed structure
5029 * @hw: pointer to the hardware structure
5030 * @src_ctx: pointer to a generic non-packed context structure
5031 * @dest_ctx: pointer to memory for the packed structure
5032 * @ce_info: a description of the structure to be transformed
5033 */
5034 int
5035 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
5036 const struct ice_ctx_ele *ce_info)
5037 {
5038 int f;
5039
5040 for (f = 0; ce_info[f].width; f++) {
5041 /* We have to deal with each element of the FW response
5042 * using the correct size so that we are correct regardless
5043 * of the endianness of the machine.
5044 */
5045 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
5046 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
5047 f, ce_info[f].width, ce_info[f].size_of);
5048 continue;
5049 }
5050 switch (ce_info[f].size_of) {
5051 case sizeof(u8):
5052 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
5053 break;
5054 case sizeof(u16):
5055 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
5056 break;
5057 case sizeof(u32):
5058 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
5059 break;
5060 case sizeof(u64):
5061 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
5062 break;
5063 default:
5064 return ICE_ERR_INVAL_SIZE;
5065 }
5066 }
5067
5068 return 0;
5069 }
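/* Illustrative sketch (not part of the driver): how a ce_info descriptor
 * drives the packing helpers above. The struct, field layout, and
 * ice_example_* names are hypothetical; only the ice_ctx_ele members used
 * by ice_set_ctx() (offset, size_of, lsb, width) and offsetof() are assumed
 * to be available here.
 */
#if 0 /* example only; excluded from the build */
struct ice_example_ctx {
	u16 head;	/* hypothetical 13-bit field */
	u8 ena;		/* hypothetical 1-bit flag */
};

/* Each entry says where the field lives in the host-order struct
 * (offset/size_of) and where its bits land in the packed buffer (lsb/width).
 * A 2-byte source field is dispatched to ice_write_word(), a 1-byte field to
 * ice_write_byte(); a zero width terminates the table.
 */
static const struct ice_ctx_ele ice_example_ctx_info[] = {
	{ .offset = offsetof(struct ice_example_ctx, head),
	  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
	{ .offset = offsetof(struct ice_example_ctx, ena),
	  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
	{ 0 }
};

static int ice_example_pack(struct ice_hw *hw, u8 *packed_buf)
{
	struct ice_example_ctx ctx = { .head = 0x12, .ena = 1 };

	/* "head" ends up in bits 0-12 of the packed buffer, "ena" in bit 13 */
	return ice_set_ctx(hw, (u8 *)&ctx, packed_buf, ice_example_ctx_info);
}
#endif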
5070
5071 /**
5072 * ice_aq_get_internal_data
5073 * @hw: pointer to the hardware structure
5074 * @cluster_id: specific cluster to dump
5075 * @table_id: table ID within cluster
5076 * @start: index of line in the block to read
5077 * @buf: dump buffer
5078 * @buf_size: dump buffer size
5079 * @ret_buf_size: return buffer size (returned by FW)
5080 * @ret_next_cluster: next cluster to read (returned by FW)
5081 * @ret_next_table: next block to read (returned by FW)
5082 * @ret_next_index: next index to read (returned by FW)
5083 * @cd: pointer to command details structure
5084 *
5085 * Get internal FW/HW data (0xFF08) for debug purposes.
5086 */
5087 int
5088 ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
5089 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
5090 u16 *ret_next_cluster, u16 *ret_next_table,
5091 u32 *ret_next_index, struct ice_sq_cd *cd)
5092 {
5093 struct ice_aqc_debug_dump_internals *cmd;
5094 struct ice_aq_desc desc;
5095 int status;
5096
5097 cmd = &desc.params.debug_dump;
5098
5099 if (buf_size == 0 || !buf)
5100 return ICE_ERR_PARAM;
5101
5102 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
5103
5104 cmd->cluster_id = CPU_TO_LE16(cluster_id);
5105 cmd->table_id = CPU_TO_LE16(table_id);
5106 cmd->idx = CPU_TO_LE32(start);
5107
5108 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5109
5110 if (!status) {
5111 if (ret_buf_size)
5112 *ret_buf_size = LE16_TO_CPU(desc.datalen);
5113 if (ret_next_cluster)
5114 *ret_next_cluster = LE16_TO_CPU(cmd->cluster_id);
5115 if (ret_next_table)
5116 *ret_next_table = LE16_TO_CPU(cmd->table_id);
5117 if (ret_next_index)
5118 *ret_next_index = LE32_TO_CPU(cmd->idx);
5119 }
5120
5121 return status;
5122 }
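/* Illustrative sketch (not part of the driver): chunked dump loop built on
 * ice_aq_get_internal_data(). The ice_example_* name and the stop condition
 * are assumptions made for illustration; how firmware signals the end of a
 * table is not specified here, so this simply stops when less data than
 * requested comes back.
 */
#if 0 /* example only; excluded from the build */
static int ice_example_dump_table(struct ice_hw *hw, u16 cluster, u16 table,
				  void *buf, u16 buf_size)
{
	u16 ret_len, next_cluster, next_table;
	u32 idx = 0;
	int status;

	do {
		/* FW reports how much it wrote and where to resume */
		status = ice_aq_get_internal_data(hw, cluster, table, idx,
						  buf, buf_size, &ret_len,
						  &next_cluster, &next_table,
						  &idx, NULL);
		if (status)
			return status;
		/* consume ret_len bytes of buf here; next_cluster/next_table
		 * could be used to continue across tables
		 */
	} while (ret_len == buf_size);

	return 0;
}
#endif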
5123
5124 /**
5125 * ice_read_byte - read context byte into struct
5126 * @src_ctx: the context structure to read from
5127 * @dest_ctx: the context to be written to
5128 * @ce_info: a description of the struct to be filled
5129 */
5130 static void
5131 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5132 {
5133 u8 dest_byte, mask;
5134 u8 *src, *target;
5135 u16 shift_width;
5136
5137 /* prepare the bits and mask */
5138 shift_width = ce_info->lsb % 8;
5139 mask = (u8)(BIT(ce_info->width) - 1);
5140
5141 /* shift to correct alignment */
5142 mask <<= shift_width;
5143
5144 /* get the current bits from the src bit string */
5145 src = src_ctx + (ce_info->lsb / 8);
5146
5147 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
5148
5149 dest_byte &= mask;
5150
5151 dest_byte >>= shift_width;
5152
5153 /* get the address from the struct field */
5154 target = dest_ctx + ce_info->offset;
5155
5156 /* put it back in the struct */
5157 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
5158 }
5159
5160 /**
5161 * ice_read_word - read context word into struct
5162 * @src_ctx: the context structure to read from
5163 * @dest_ctx: the context to be written to
5164 * @ce_info: a description of the struct to be filled
5165 */
5166 static void
5167 ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5168 {
5169 u16 dest_word, mask;
5170 u8 *src, *target;
5171 __le16 src_word;
5172 u16 shift_width;
5173
5174 /* prepare the bits and mask */
5175 shift_width = ce_info->lsb % 8;
5176 mask = BIT(ce_info->width) - 1;
5177
5178 /* shift to correct alignment */
5179 mask <<= shift_width;
5180
5181 /* get the current bits from the src bit string */
5182 src = src_ctx + (ce_info->lsb / 8);
5183
5184 ice_memcpy(&src_word, src, sizeof(src_word), ICE_NONDMA_TO_NONDMA);
5185
5186 /* the data in the memory is stored as little endian so mask it
5187 * correctly
5188 */
5189 src_word &= CPU_TO_LE16(mask);
5190
5191 /* get the data back into host order before shifting */
5192 dest_word = LE16_TO_CPU(src_word);
5193
5194 dest_word >>= shift_width;
5195
5196 /* get the address from the struct field */
5197 target = dest_ctx + ce_info->offset;
5198
5199 /* put it back in the struct */
5200 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
5201 }
5202
5203 /**
5204 * ice_read_dword - read context dword into struct
5205 * @src_ctx: the context structure to read from
5206 * @dest_ctx: the context to be written to
5207 * @ce_info: a description of the struct to be filled
5208 */
5209 static void
5210 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5211 {
5212 u32 dest_dword, mask;
5213 __le32 src_dword;
5214 u8 *src, *target;
5215 u16 shift_width;
5216
5217 /* prepare the bits and mask */
5218 shift_width = ce_info->lsb % 8;
5219
5220 /* if the field width is exactly 32 on an x86 machine, then the shift
5221 * operation will not work because the SHL instruction's count is masked
5222 * to 5 bits, so the shift will do nothing
5223 */
5224 if (ce_info->width < 32)
5225 mask = BIT(ce_info->width) - 1;
5226 else
5227 mask = (u32)~0;
5228
5229 /* shift to correct alignment */
5230 mask <<= shift_width;
5231
5232 /* get the current bits from the src bit string */
5233 src = src_ctx + (ce_info->lsb / 8);
5234
5235 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_NONDMA_TO_NONDMA);
5236
5237 /* the data in the memory is stored as little endian so mask it
5238 * correctly
5239 */
5240 src_dword &= CPU_TO_LE32(mask);
5241
5242 /* get the data back into host order before shifting */
5243 dest_dword = LE32_TO_CPU(src_dword);
5244
5245 dest_dword >>= shift_width;
5246
5247 /* get the address from the struct field */
5248 target = dest_ctx + ce_info->offset;
5249
5250 /* put it back in the struct */
5251 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
5252 }
5253
5254 /**
5255 * ice_read_qword - read context qword into struct
5256 * @src_ctx: the context structure to read from
5257 * @dest_ctx: the context to be written to
5258 * @ce_info: a description of the struct to be filled
5259 */
5260 static void
5261 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5262 {
5263 u64 dest_qword, mask;
5264 __le64 src_qword;
5265 u8 *src, *target;
5266 u16 shift_width;
5267
5268 /* prepare the bits and mask */
5269 shift_width = ce_info->lsb % 8;
5270
5271 /* if the field width is exactly 64 on an x86 machine, then the shift
5272 * operation will not work because the SHL instruction's count is masked
5273 * to 6 bits, so the shift will do nothing
5274 */
5275 if (ce_info->width < 64)
5276 mask = BIT_ULL(ce_info->width) - 1;
5277 else
5278 mask = (u64)~0;
5279
5280 /* shift to correct alignment */
5281 mask <<= shift_width;
5282
5283 /* get the current bits from the src bit string */
5284 src = src_ctx + (ce_info->lsb / 8);
5285
5286 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_NONDMA_TO_NONDMA);
5287
5288 /* the data in the memory is stored as little endian so mask it
5289 * correctly
5290 */
5291 src_qword &= CPU_TO_LE64(mask);
5292
5293 /* get the data back into host order before shifting */
5294 dest_qword = LE64_TO_CPU(src_qword);
5295
5296 dest_qword >>= shift_width;
5297
5298 /* get the address from the struct field */
5299 target = dest_ctx + ce_info->offset;
5300
5301 /* put it back in the struct */
5302 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
5303 }
5304
5305 /**
5306 * ice_get_ctx - extract context bits from a packed structure
5307 * @src_ctx: pointer to a generic packed context structure
5308 * @dest_ctx: pointer to a generic non-packed context structure
5309 * @ce_info: a description of the structure to be read from
5310 */
5311 int
5312 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5313 {
5314 int f;
5315
5316 for (f = 0; ce_info[f].width; f++) {
5317 switch (ce_info[f].size_of) {
5318 case 1:
5319 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
5320 break;
5321 case 2:
5322 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
5323 break;
5324 case 4:
5325 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
5326 break;
5327 case 8:
5328 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
5329 break;
5330 default:
5331 /* nothing to do, just keep going */
5332 break;
5333 }
5334 }
5335
5336 return 0;
5337 }
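/* Illustrative sketch (not part of the driver): ice_get_ctx() is the inverse
 * of ice_set_ctx(); reusing the hypothetical descriptor from the sketch after
 * ice_set_ctx() above recovers the original host-order fields.
 */
#if 0 /* example only; excluded from the build */
static void ice_example_unpack(u8 *packed_buf)
{
	struct ice_example_ctx ctx = { 0 };

	ice_get_ctx(packed_buf, (u8 *)&ctx, ice_example_ctx_info);
	/* ctx.head and ctx.ena now hold the values packed earlier */
}
#endif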
5338
5339 /**
5340 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
5341 * @hw: pointer to the HW struct
5342 * @vsi_handle: software VSI handle
5343 * @tc: TC number
5344 * @q_handle: software queue handle
5345 */
5346 struct ice_q_ctx *
5347 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
5348 {
5349 struct ice_vsi_ctx *vsi;
5350 struct ice_q_ctx *q_ctx;
5351
5352 vsi = ice_get_vsi_ctx(hw, vsi_handle);
5353 if (!vsi)
5354 return NULL;
5355 if (q_handle >= vsi->num_lan_q_entries[tc])
5356 return NULL;
5357 if (!vsi->lan_q_ctx[tc])
5358 return NULL;
5359 q_ctx = vsi->lan_q_ctx[tc];
5360 return &q_ctx[q_handle];
5361 }
5362
5363 /**
5364 * ice_ena_vsi_txq
5365 * @pi: port information structure
5366 * @vsi_handle: software VSI handle
5367 * @tc: TC number
5368 * @q_handle: software queue handle
5369 * @num_qgrps: Number of added queue groups
5370 * @buf: list of queue groups to be added
5371 * @buf_size: size of buffer for indirect command
5372 * @cd: pointer to command details structure or NULL
5373 *
5374 * This function adds one LAN queue
5375 */
5376 int
5377 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
5378 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
5379 struct ice_sq_cd *cd)
5380 {
5381 struct ice_aqc_txsched_elem_data node = { 0 };
5382 struct ice_sched_node *parent;
5383 struct ice_q_ctx *q_ctx;
5384 struct ice_hw *hw;
5385 int status;
5386
5387 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5388 return ICE_ERR_CFG;
5389
5390 if (num_qgrps > 1 || buf->num_txqs > 1)
5391 return ICE_ERR_MAX_LIMIT;
5392
5393 hw = pi->hw;
5394
5395 if (!ice_is_vsi_valid(hw, vsi_handle))
5396 return ICE_ERR_PARAM;
5397
5398 ice_acquire_lock(&pi->sched_lock);
5399
5400 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
5401 if (!q_ctx) {
5402 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
5403 q_handle);
5404 status = ICE_ERR_PARAM;
5405 goto ena_txq_exit;
5406 }
5407
5408 /* find a parent node */
5409 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5410 ICE_SCHED_NODE_OWNER_LAN);
5411 if (!parent) {
5412 status = ICE_ERR_PARAM;
5413 goto ena_txq_exit;
5414 }
5415
5416 buf->parent_teid = parent->info.node_teid;
5417 node.parent_teid = parent->info.node_teid;
5418 /* Mark the values in the "generic" section as valid. The default
5419 * value in the "generic" section is zero. This means that:
5420 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
5421 * - 0 priority among siblings, indicated by Bits 1-3.
5422 * - WFQ, indicated by Bit 4.
5423 * - 0 Adjustment value is used in PSM credit update flow, indicated by
5424 * Bits 5-6.
5425 * - Bit 7 is reserved.
5426 * Without setting the generic section as valid in valid_sections, the
5427 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
5428 */
5429 buf->txqs[0].info.valid_sections =
5430 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5431 ICE_AQC_ELEM_VALID_EIR;
5432 buf->txqs[0].info.generic = 0;
5433 buf->txqs[0].info.cir_bw.bw_profile_idx =
5434 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5435 buf->txqs[0].info.cir_bw.bw_alloc =
5436 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5437 buf->txqs[0].info.eir_bw.bw_profile_idx =
5438 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5439 buf->txqs[0].info.eir_bw.bw_alloc =
5440 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5441
5442 /* add the LAN queue */
5443 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
5444 if (status) {
5445 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
5446 LE16_TO_CPU(buf->txqs[0].txq_id),
5447 hw->adminq.sq_last_status);
5448 goto ena_txq_exit;
5449 }
5450
5451 node.node_teid = buf->txqs[0].q_teid;
5452 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5453 q_ctx->q_handle = q_handle;
5454 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
5455
5456 /* add a leaf node into scheduler tree queue layer */
5457 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
5458 if (!status)
5459 status = ice_sched_replay_q_bw(pi, q_ctx);
5460
5461 ena_txq_exit:
5462 ice_release_lock(&pi->sched_lock);
5463 return status;
5464 }
5465
5466 /**
5467 * ice_dis_vsi_txq
5468 * @pi: port information structure
5469 * @vsi_handle: software VSI handle
5470 * @tc: TC number
5471 * @num_queues: number of queues
5472 * @q_handles: pointer to software queue handle array
5473 * @q_ids: pointer to the q_id array
5474 * @q_teids: pointer to queue node teids
5475 * @rst_src: if called due to reset, specifies the reset source
5476 * @vmvf_num: the relative VM or VF number that is undergoing the reset
5477 * @cd: pointer to command details structure or NULL
5478 *
5479 * This function removes queues and their corresponding nodes in SW DB
5480 */
5481 int
5482 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
5483 u16 *q_handles, u16 *q_ids, u32 *q_teids,
5484 enum ice_disq_rst_src rst_src, u16 vmvf_num,
5485 struct ice_sq_cd *cd)
5486 {
5487 struct ice_aqc_dis_txq_item *qg_list;
5488 struct ice_q_ctx *q_ctx;
5489 int status = ICE_ERR_DOES_NOT_EXIST;
5490 struct ice_hw *hw;
5491 u16 i, buf_size;
5492
5493 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5494 return ICE_ERR_CFG;
5495
5496 hw = pi->hw;
5497
5498 if (!num_queues) {
5499 /* if the queue is already disabled but the disable queue command
5500 * still has to be sent to complete the VF reset, then call
5501 * ice_aq_dis_lan_txq without any queue information
5502 */
5503 if (rst_src)
5504 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5505 vmvf_num, NULL);
5506 return ICE_ERR_CFG;
5507 }
5508
5509 buf_size = ice_struct_size(qg_list, q_id, 1);
5510 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
5511 if (!qg_list)
5512 return ICE_ERR_NO_MEMORY;
5513
5514 ice_acquire_lock(&pi->sched_lock);
5515
5516 for (i = 0; i < num_queues; i++) {
5517 struct ice_sched_node *node;
5518
5519 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5520 if (!node)
5521 continue;
5522 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5523 if (!q_ctx) {
5524 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
5525 q_handles[i]);
5526 continue;
5527 }
5528 if (q_ctx->q_handle != q_handles[i]) {
5529 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
5530 q_ctx->q_handle, q_handles[i]);
5531 continue;
5532 }
5533 qg_list->parent_teid = node->info.parent_teid;
5534 qg_list->num_qs = 1;
5535 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
5536 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
5537 vmvf_num, cd);
5538
5539 if (status)
5540 break;
5541 ice_free_sched_node(pi, node);
5542 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
5543 }
5544 ice_release_lock(&pi->sched_lock);
5545 ice_free(hw, qg_list);
5546 return status;
5547 }
5548
5549 /**
5550 * ice_cfg_vsi_qs - configure the new/existing VSI queues
5551 * @pi: port information structure
5552 * @vsi_handle: software VSI handle
5553 * @tc_bitmap: TC bitmap
5554 * @maxqs: max queues array per TC
5555 * @owner: LAN or RDMA
5556 *
5557 * This function adds/updates the VSI queues per TC.
5558 */
5559 static int
5560 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5561 u16 *maxqs, u8 owner)
5562 {
5563 int status = 0;
5564 u8 i;
5565
5566 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5567 return ICE_ERR_CFG;
5568
5569 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5570 return ICE_ERR_PARAM;
5571
5572 ice_acquire_lock(&pi->sched_lock);
5573
5574 ice_for_each_traffic_class(i) {
5575 /* configuration is possible only if TC node is present */
5576 if (!ice_sched_get_tc_node(pi, i))
5577 continue;
5578
5579 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
5580 ice_is_tc_ena(tc_bitmap, i));
5581 if (status)
5582 break;
5583 }
5584
5585 ice_release_lock(&pi->sched_lock);
5586 return status;
5587 }
5588
5589 /**
5590 * ice_cfg_vsi_lan - configure VSI LAN queues
5591 * @pi: port information structure
5592 * @vsi_handle: software VSI handle
5593 * @tc_bitmap: TC bitmap
5594 * @max_lanqs: max LAN queues array per TC
5595 *
5596 * This function adds/updates the VSI LAN queues per TC.
5597 */
5598 int
5599 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5600 u16 *max_lanqs)
5601 {
5602 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
5603 ICE_SCHED_NODE_OWNER_LAN);
5604 }
5605
5606 /**
5607 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
5608 * @pi: port information structure
5609 * @vsi_handle: software VSI handle
5610 * @tc_bitmap: TC bitmap
5611 * @max_rdmaqs: max RDMA queues array per TC
5612 *
5613 * This function adds/updates the VSI RDMA queues per TC.
5614 */
5615 int
5616 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5617 u16 *max_rdmaqs)
5618 {
5619 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
5620 ICE_SCHED_NODE_OWNER_RDMA);
5621 }
5622
5623 /**
5624 * ice_ena_vsi_rdma_qset
5625 * @pi: port information structure
5626 * @vsi_handle: software VSI handle
5627 * @tc: TC number
5628 * @rdma_qset: pointer to RDMA qset
5629 * @num_qsets: number of RDMA qsets
5630 * @qset_teid: pointer to qset node teids
5631 *
5632 * This function adds RDMA qset
5633 */
5634 int
5635 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
5636 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
5637 {
5638 struct ice_aqc_txsched_elem_data node = { 0 };
5639 struct ice_aqc_add_rdma_qset_data *buf;
5640 struct ice_sched_node *parent;
5641 struct ice_hw *hw;
5642 u16 i, buf_size;
5643 int status;
5644
5645 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5646 return ICE_ERR_CFG;
5647 hw = pi->hw;
5648
5649 if (!ice_is_vsi_valid(hw, vsi_handle))
5650 return ICE_ERR_PARAM;
5651
5652 buf_size = ice_struct_size(buf, rdma_qsets, num_qsets);
5653 buf = (struct ice_aqc_add_rdma_qset_data *)ice_malloc(hw, buf_size);
5654 if (!buf)
5655 return ICE_ERR_NO_MEMORY;
5656 ice_acquire_lock(&pi->sched_lock);
5657
5658 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5659 ICE_SCHED_NODE_OWNER_RDMA);
5660 if (!parent) {
5661 status = ICE_ERR_PARAM;
5662 goto rdma_error_exit;
5663 }
5664 buf->parent_teid = parent->info.node_teid;
5665 node.parent_teid = parent->info.node_teid;
5666
5667 buf->num_qsets = CPU_TO_LE16(num_qsets);
5668 for (i = 0; i < num_qsets; i++) {
5669 buf->rdma_qsets[i].tx_qset_id = CPU_TO_LE16(rdma_qset[i]);
5670 buf->rdma_qsets[i].info.valid_sections =
5671 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5672 ICE_AQC_ELEM_VALID_EIR;
5673 buf->rdma_qsets[i].info.generic = 0;
5674 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
5675 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5676 buf->rdma_qsets[i].info.cir_bw.bw_alloc =
5677 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5678 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
5679 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5680 buf->rdma_qsets[i].info.eir_bw.bw_alloc =
5681 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5682 }
5683 status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
5684 if (status) {
5685 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
5686 goto rdma_error_exit;
5687 }
5688 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5689 for (i = 0; i < num_qsets; i++) {
5690 node.node_teid = buf->rdma_qsets[i].qset_teid;
5691 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
5692 &node, NULL);
5693 if (status)
5694 break;
5695 qset_teid[i] = LE32_TO_CPU(node.node_teid);
5696 }
5697 rdma_error_exit:
5698 ice_release_lock(&pi->sched_lock);
5699 ice_free(hw, buf);
5700 return status;
5701 }
5702
5703 /**
5704 * ice_dis_vsi_rdma_qset - free RDMA resources
5705 * @pi: port_info struct
5706 * @count: number of RDMA qsets to free
5707 * @qset_teid: TEID of qset node
5708 * @q_id: list of queue IDs being disabled
5709 */
5710 int
5711 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
5712 u16 *q_id)
5713 {
5714 struct ice_aqc_dis_txq_item *qg_list;
5715 struct ice_hw *hw;
5716 int status = 0;
5717 u16 qg_size;
5718 int i;
5719
5720 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5721 return ICE_ERR_CFG;
5722
5723 hw = pi->hw;
5724
5725 qg_size = ice_struct_size(qg_list, q_id, 1);
5726 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, qg_size);
5727 if (!qg_list)
5728 return ICE_ERR_NO_MEMORY;
5729
5730 ice_acquire_lock(&pi->sched_lock);
5731
5732 for (i = 0; i < count; i++) {
5733 struct ice_sched_node *node;
5734
5735 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
5736 if (!node)
5737 continue;
5738
5739 qg_list->parent_teid = node->info.parent_teid;
5740 qg_list->num_qs = 1;
5741 qg_list->q_id[0] =
5742 CPU_TO_LE16(q_id[i] |
5743 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
5744
5745 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
5746 ICE_NO_RESET, 0, NULL);
5747 if (status)
5748 break;
5749
5750 ice_free_sched_node(pi, node);
5751 }
5752
5753 ice_release_lock(&pi->sched_lock);
5754 ice_free(hw, qg_list);
5755 return status;
5756 }
5757
5758 /**
5759 * ice_aq_get_sensor_reading
5760 * @hw: pointer to the HW struct
5761 * @sensor: sensor type
5762 * @format: requested response format
5763 * @data: pointer to data to be read from the sensor
5764 * @cd: pointer to command details structure or NULL
5765 *
5766 * Get sensor reading (0x0632)
5767 */
5768 int
5769 ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
5770 struct ice_aqc_get_sensor_reading_resp *data,
5771 struct ice_sq_cd *cd)
5772 {
5773 struct ice_aqc_get_sensor_reading *cmd;
5774 struct ice_aq_desc desc;
5775 int status;
5776
5777 if (!data)
5778 return ICE_ERR_PARAM;
5779
5780 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
5781 cmd = &desc.params.get_sensor_reading;
5782 cmd->sensor = sensor;
5783 cmd->format = format;
5784
5785 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5786
5787 if (!status)
5788 ice_memcpy(data, &desc.params.get_sensor_reading_resp,
5789 sizeof(*data), ICE_NONDMA_TO_NONDMA);
5790
5791 return status;
5792 }
5793
5794 /**
5795 * ice_is_main_vsi - checks whether the VSI is main VSI
5796 * @hw: pointer to the HW struct
5797 * @vsi_handle: VSI handle
5798 *
5799 * Checks whether the VSI is the main VSI (the first PF VSI created on
5800 * given PF).
5801 */
5802 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
5803 {
5804 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
5805 }
5806
5807 /**
5808 * ice_replay_pre_init - replay pre initialization
5809 * @hw: pointer to the HW struct
5810 * @sw: pointer to switch info struct for which function initializes filters
5811 *
5812 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5813 */
5814 int
5815 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
5816 {
5817 int status;
5818 u8 i;
5819
5820 /* Delete old entries from replay filter list head if there is any */
5821 ice_rm_sw_replay_rule_info(hw, sw);
5822 /* At the start of replay, move entries into the replay_rules list; this
5823 * allows rule entries to be added back to the filt_rules list,
5824 * which is the operational list.
5825 */
5826 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5827 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
5828 &sw->recp_list[i].filt_replay_rules);
5829 ice_sched_replay_agg_vsi_preinit(hw);
5830
5831 status = ice_sched_replay_root_node_bw(hw->port_info);
5832 if (status)
5833 return status;
5834
5835 return ice_sched_replay_tc_node_bw(hw->port_info);
5836 }
5837
5838 /**
5839 * ice_replay_vsi - replay VSI configuration
5840 * @hw: pointer to the HW struct
5841 * @vsi_handle: driver VSI handle
5842 *
5843 * Restore all VSI configuration after reset. It is required to call this
5844 * function with main VSI first.
5845 */
5846 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5847 {
5848 struct ice_switch_info *sw = hw->switch_info;
5849 struct ice_port_info *pi = hw->port_info;
5850 int status;
5851
5852 if (!ice_is_vsi_valid(hw, vsi_handle))
5853 return ICE_ERR_PARAM;
5854
5855 /* Replay pre-initialization if there is any */
5856 if (ice_is_main_vsi(hw, vsi_handle)) {
5857 status = ice_replay_pre_init(hw, sw);
5858 if (status)
5859 return status;
5860 }
5861 /* Replay per VSI all RSS configurations */
5862 status = ice_replay_rss_cfg(hw, vsi_handle);
5863 if (status)
5864 return status;
5865 /* Replay per VSI all filters */
5866 status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
5867 if (!status)
5868 status = ice_replay_vsi_agg(hw, vsi_handle);
5869 return status;
5870 }
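/* Illustrative sketch (not part of the driver): replay ordering implied by
 * the comment above. The main VSI (ICE_MAIN_VSI_HANDLE) is replayed first so
 * the pre-init in ice_replay_pre_init() runs once, then the remaining valid
 * VSIs, then ice_replay_post(). The handle range iterated here and the
 * ice_example_* name are placeholders.
 */
#if 0 /* example only; excluded from the build */
static int ice_example_replay_all(struct ice_hw *hw, u16 num_vsi_handles)
{
	int status;
	u16 i;

	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
	if (status)
		return status;

	for (i = 0; i < num_vsi_handles; i++) {
		if (i == ICE_MAIN_VSI_HANDLE || !ice_is_vsi_valid(hw, i))
			continue;
		status = ice_replay_vsi(hw, i);
		if (status)
			return status;
	}

	ice_replay_post(hw);
	return 0;
}
#endif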
5871
5872 /**
5873 * ice_replay_post - post replay configuration cleanup
5874 * @hw: pointer to the HW struct
5875 *
5876 * Post replay cleanup.
5877 */
5878 void ice_replay_post(struct ice_hw *hw)
5879 {
5880 /* Delete old entries from replay filter list head */
5881 ice_rm_all_sw_replay_rule_info(hw);
5882 ice_sched_replay_agg(hw);
5883 }
5884
5885 /**
5886 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5887 * @hw: ptr to the hardware info
5888 * @reg: offset of 64 bit HW register to read from
5889 * @prev_stat_loaded: bool to specify if previous stats are loaded
5890 * @prev_stat: ptr to previous loaded stat value
5891 * @cur_stat: ptr to current stat value
5892 */
5893 void
5894 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5895 u64 *prev_stat, u64 *cur_stat)
5896 {
5897 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5898
5899 /* device stats are not reset at PFR, so they likely will not be zeroed
5900 * when the driver starts. Thus, save the value from the first read
5901 * without adding to the statistic value so that we report stats which
5902 * count up from zero.
5903 */
5904 if (!prev_stat_loaded) {
5905 *prev_stat = new_data;
5906 return;
5907 }
5908
5909 /* Calculate the difference between the new and old values, and then
5910 * add it to the software stat value.
5911 */
5912 if (new_data >= *prev_stat)
5913 *cur_stat += new_data - *prev_stat;
5914 else
5915 /* to manage the potential roll-over */
5916 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5917
5918 /* Update the previously stored value to prepare for next read */
5919 *prev_stat = new_data;
5920 }
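/* Worked example of the roll-over branch above (illustrative numbers only):
 * if *prev_stat was 0xFFFFFFFFF0 on the previous read and the 40-bit counter
 * has since wrapped so that new_data = 0x10, then new_data < *prev_stat and
 * the increment is (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, the true
 * delta across the wrap.
 */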
5921
5922 /**
5923 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5924 * @hw: ptr to the hardware info
5925 * @reg: offset of HW register to read from
5926 * @prev_stat_loaded: bool to specify if previous stats are loaded
5927 * @prev_stat: ptr to previous loaded stat value
5928 * @cur_stat: ptr to current stat value
5929 */
5930 void
5931 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5932 u64 *prev_stat, u64 *cur_stat)
5933 {
5934 u32 new_data;
5935
5936 new_data = rd32(hw, reg);
5937
5938 /* device stats are not reset at PFR, so they likely will not be zeroed
5939 * when the driver starts. Thus, save the value from the first read
5940 * without adding to the statistic value so that we report stats which
5941 * count up from zero.
5942 */
5943 if (!prev_stat_loaded) {
5944 *prev_stat = new_data;
5945 return;
5946 }
5947
5948 /* Calculate the difference between the new and old values, and then
5949 * add it to the software stat value.
5950 */
5951 if (new_data >= *prev_stat)
5952 *cur_stat += new_data - *prev_stat;
5953 else
5954 /* to manage the potential roll-over */
5955 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5956
5957 /* Update the previously stored value to prepare for next read */
5958 *prev_stat = new_data;
5959 }
5960
5961 /**
5962 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
5963 * @hw: ptr to the hardware info
5964 * @vsi_handle: VSI handle
5965 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
5966 * @cur_stats: ptr to current stats structure
5967 *
5968 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
5969 * thus cannot be read using the normal ice_stat_update32 function.
5970 *
5971 * Read the GLV_REPC register associated with the given VSI, and update the
5972 * rx_no_desc and rx_error values in the ice_eth_stats structure.
5973 *
5974 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
5975 * cleared each time it's read.
5976 *
5977 * Note that the GLV_RDPC register also counts the causes that would trigger
5978 * GLV_REPC. However, it does not give the finer grained detail about why the
5979 * packets are being dropped. The GLV_REPC values can be used to distinguish
5980 * whether Rx packets are dropped due to errors or due to no available
5981 * descriptors.
5982 */
5983 void
5984 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
5985 struct ice_eth_stats *cur_stats)
5986 {
5987 u16 vsi_num, no_desc, error_cnt;
5988 u32 repc;
5989
5990 if (!ice_is_vsi_valid(hw, vsi_handle))
5991 return;
5992
5993 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
5994
5995 /* If we haven't loaded stats yet, just clear the current value */
5996 if (!prev_stat_loaded) {
5997 wr32(hw, GLV_REPC(vsi_num), 0);
5998 return;
5999 }
6000
6001 repc = rd32(hw, GLV_REPC(vsi_num));
6002 no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
6003 error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
6004
6005 /* Clear the count by writing to the stats register */
6006 wr32(hw, GLV_REPC(vsi_num), 0);
6007
6008 cur_stats->rx_no_desc += no_desc;
6009 cur_stats->rx_errors += error_cnt;
6010 }
6011
6012 /**
6013 * ice_aq_alternate_write
6014 * @hw: pointer to the hardware structure
6015 * @reg_addr0: address of first dword to be written
6016 * @reg_val0: value to be written under 'reg_addr0'
6017 * @reg_addr1: address of second dword to be written
6018 * @reg_val1: value to be written under 'reg_addr1'
6019 *
6020 * Write one or two dwords to the alternate structure. Fields are indicated
6021 * by 'reg_addr0' and 'reg_addr1' register numbers.
6022 */
6023 int
6024 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
6025 u32 reg_addr1, u32 reg_val1)
6026 {
6027 struct ice_aqc_read_write_alt_direct *cmd;
6028 struct ice_aq_desc desc;
6029 int status;
6030
6031 cmd = &desc.params.read_write_alt_direct;
6032
6033 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
6034 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
6035 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
6036 cmd->dword0_value = CPU_TO_LE32(reg_val0);
6037 cmd->dword1_value = CPU_TO_LE32(reg_val1);
6038
6039 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6040
6041 return status;
6042 }
6043
6044 /**
6045 * ice_aq_alternate_read
6046 * @hw: pointer to the hardware structure
6047 * @reg_addr0: address of first dword to be read
6048 * @reg_val0: pointer for data read from 'reg_addr0'
6049 * @reg_addr1: address of second dword to be read
6050 * @reg_val1: pointer for data read from 'reg_addr1'
6051 *
6052 * Read one or two dwords from the alternate structure. Fields are indicated
6053 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
6054 * is not passed then only register at 'reg_addr0' is read.
6055 */
6056 int
6057 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
6058 u32 reg_addr1, u32 *reg_val1)
6059 {
6060 struct ice_aqc_read_write_alt_direct *cmd;
6061 struct ice_aq_desc desc;
6062 int status;
6063
6064 cmd = &desc.params.read_write_alt_direct;
6065
6066 if (!reg_val0)
6067 return ICE_ERR_PARAM;
6068
6069 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
6070 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
6071 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
6072
6073 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6074
6075 if (!status) {
6076 *reg_val0 = LE32_TO_CPU(cmd->dword0_value);
6077
6078 if (reg_val1)
6079 *reg_val1 = LE32_TO_CPU(cmd->dword1_value);
6080 }
6081
6082 return status;
6083 }
6084
6085 /**
6086 * ice_aq_alternate_write_done
6087 * @hw: pointer to the HW structure.
6088 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
6089 * @reset_needed: indicates the SW should trigger GLOBAL reset
6090 *
6091 * Indicates to the FW that alternate structures have been changed.
6092 */
6093 int
6094 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
6095 {
6096 struct ice_aqc_done_alt_write *cmd;
6097 struct ice_aq_desc desc;
6098 int status;
6099
6100 cmd = &desc.params.done_alt_write;
6101
6102 if (!reset_needed)
6103 return ICE_ERR_PARAM;
6104
6105 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
6106 cmd->flags = bios_mode;
6107
6108 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6109 if (!status)
6110 *reset_needed = (LE16_TO_CPU(cmd->flags) &
6111 ICE_AQC_RESP_RESET_NEEDED) != 0;
6112
6113 return status;
6114 }
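/* Illustrative sketch (not part of the driver): typical ordering of the
 * alternate-structure commands defined here - write the dwords, tell FW the
 * write is done, then honor the reset_needed indication. The addresses,
 * values, bios_mode, and the ice_example_* name are placeholders; actually
 * issuing the GLOBAL reset is left to the caller.
 */
#if 0 /* example only; excluded from the build */
static int ice_example_alt_update(struct ice_hw *hw, u32 addr0, u32 val0,
				  u32 addr1, u32 val1, u8 bios_mode)
{
	bool reset_needed = false;
	int status;

	status = ice_aq_alternate_write(hw, addr0, val0, addr1, val1);
	if (status)
		return status;

	status = ice_aq_alternate_write_done(hw, bios_mode, &reset_needed);
	if (status)
		return status;

	if (reset_needed) {
		/* caller would trigger a GLOBAL reset here */
	}
	return 0;
}
#endif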
6115
6116 /**
6117 * ice_aq_alternate_clear
6118 * @hw: pointer to the HW structure.
6119 *
6120 * Clear the alternate structures of the port from which the function
6121 * is called.
6122 */
6123 int ice_aq_alternate_clear(struct ice_hw *hw)
6124 {
6125 struct ice_aq_desc desc;
6126 int status;
6127
6128 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
6129
6130 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6131
6132 return status;
6133 }
6134
6135 /**
6136 * ice_sched_query_elem - query element information from HW
6137 * @hw: pointer to the HW struct
6138 * @node_teid: node TEID to be queried
6139 * @buf: buffer to element information
6140 *
6141 * This function queries HW element information
6142 */
6143 int
6144 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
6145 struct ice_aqc_txsched_elem_data *buf)
6146 {
6147 u16 buf_size, num_elem_ret = 0;
6148 int status;
6149
6150 buf_size = sizeof(*buf);
6151 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
6152 buf->node_teid = CPU_TO_LE32(node_teid);
6153 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
6154 NULL);
6155 if (status || num_elem_ret != 1)
6156 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
6157 return status;
6158 }
6159
6160 /**
6161 * ice_get_fw_mode - returns FW mode
6162 * @hw: pointer to the HW struct
6163 */
6164 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
6165 {
6166 #define ICE_FW_MODE_DBG_M BIT(0)
6167 #define ICE_FW_MODE_REC_M BIT(1)
6168 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
6169 u32 fw_mode;
6170
6171 /* check the current FW mode */
6172 fw_mode = rd32(hw, GL_MNG_FWSM) & E800_GL_MNG_FWSM_FW_MODES_M;
6173 if (fw_mode & ICE_FW_MODE_DBG_M)
6174 return ICE_FW_MODE_DBG;
6175 else if (fw_mode & ICE_FW_MODE_REC_M)
6176 return ICE_FW_MODE_REC;
6177 else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
6178 return ICE_FW_MODE_ROLLBACK;
6179 else
6180 return ICE_FW_MODE_NORMAL;
6181 }
6182
6183 /**
6184 * ice_get_cur_lldp_persist_status
6185 * @hw: pointer to the HW struct
6186 * @lldp_status: return value of LLDP persistent status
6187 *
6188 * Get the current LLDP persistent status
6189 */
6190 int
6191 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
6192 {
6193 struct ice_port_info *pi = hw->port_info;
6194 __le32 raw_data;
6195 u32 data, mask;
6196 int ret;
6197
6198 if (!lldp_status)
6199 return ICE_ERR_BAD_PTR;
6200
6201 ret = ice_acquire_nvm(hw, ICE_RES_READ);
6202 if (ret)
6203 return ret;
6204
6205 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
6206 ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
6207 ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
6208 false, true, NULL);
6209 if (!ret) {
6210 data = LE32_TO_CPU(raw_data);
6211 mask = ICE_AQC_NVM_LLDP_STATUS_M <<
6212 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6213 data = data & mask;
6214 *lldp_status = data >>
6215 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6216 }
6217
6218 ice_release_nvm(hw);
6219
6220 return ret;
6221 }
6222
6223 /**
6224 * ice_get_dflt_lldp_persist_status
6225 * @hw: pointer to the HW struct
6226 * @lldp_status: return value of LLDP persistent status
6227 *
6228 * Get the default LLDP persistent status
6229 */
6230 int
6231 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
6232 {
6233 struct ice_port_info *pi = hw->port_info;
6234 u32 data, mask, loc_data, loc_data_tmp;
6235 __le16 loc_raw_data;
6236 __le32 raw_data;
6237 int ret;
6238
6239 if (!lldp_status)
6240 return ICE_ERR_BAD_PTR;
6241
6242 ret = ice_acquire_nvm(hw, ICE_RES_READ);
6243 if (ret)
6244 return ret;
6245
6246 /* Read the offset of EMP_SR_PTR */
6247 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
6248 ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
6249 ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
6250 &loc_raw_data, false, true, NULL);
6251 if (ret)
6252 goto exit;
6253
6254 loc_data = LE16_TO_CPU(loc_raw_data);
6255 if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
6256 loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
6257 loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
6258 } else {
6259 loc_data *= ICE_AQC_NVM_WORD_UNIT;
6260 }
6261
6262 /* Read the offset of LLDP configuration pointer */
6263 loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
6264 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
6265 ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
6266 false, true, NULL);
6267 if (ret)
6268 goto exit;
6269
6270 loc_data_tmp = LE16_TO_CPU(loc_raw_data);
6271 loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
6272 loc_data += loc_data_tmp;
6273
6274 /* We need to skip LLDP configuration section length (2 bytes) */
6275 loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
6276
6277 /* Read the LLDP Default Configure */
6278 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
6279 ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
6280 true, NULL);
6281 if (!ret) {
6282 data = LE32_TO_CPU(raw_data);
6283 mask = ICE_AQC_NVM_LLDP_STATUS_M <<
6284 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6285 data = data & mask;
6286 *lldp_status = data >>
6287 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6288 }
6289
6290 exit:
6291 ice_release_nvm(hw);
6292
6293 return ret;
6294 }
6295
6296 /**
6297 * ice_aq_read_i2c
6298 * @hw: pointer to the hw struct
6299 * @topo_addr: topology address for a device to communicate with
6300 * @bus_addr: 7-bit I2C bus address
6301 * @addr: I2C memory address (I2C offset) with up to 16 bits
6302 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
6303 * bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
6304 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
6305 * @cd: pointer to command details structure or NULL
6306 *
6307 * Read I2C (0x06E2)
6308 */
6309 int
6310 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
6311 u16 bus_addr, __le16 addr, u8 params, u8 *data,
6312 struct ice_sq_cd *cd)
6313 {
6314 struct ice_aq_desc desc = { 0 };
6315 struct ice_aqc_i2c *cmd;
6316 u8 data_size;
6317 int status;
6318
6319 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
6320 cmd = &desc.params.read_write_i2c;
6321
6322 if (!data)
6323 return ICE_ERR_PARAM;
6324
6325 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
6326
6327 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
6328 cmd->topo_addr = topo_addr;
6329 cmd->i2c_params = params;
6330 cmd->i2c_addr = addr;
6331
6332 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6333 if (!status) {
6334 struct ice_aqc_read_i2c_resp *resp;
6335 u8 i;
6336
6337 resp = &desc.params.read_i2c_resp;
6338 for (i = 0; i < data_size; i++) {
6339 *data = resp->i2c_data[i];
6340 data++;
6341 }
6342 }
6343
6344 return status;
6345 }
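/* Illustrative sketch (not part of the driver): composing the "params" byte
 * for a small read. Only the data-size bits are set, using the same
 * ICE_AQC_I2C_DATA_SIZE_S/_M pair the function above decodes with; repeated
 * start, offset size, and address type are left at their zero defaults.
 * The ice_example_* name is hypothetical.
 */
#if 0 /* example only; excluded from the build */
static int ice_example_i2c_read2(struct ice_hw *hw,
				 struct ice_aqc_link_topo_addr topo_addr,
				 u16 bus_addr, u16 offset, u8 *out2)
{
	/* request a 2-byte transfer in bits [3:0] of params */
	u8 params = (2 << ICE_AQC_I2C_DATA_SIZE_S) & ICE_AQC_I2C_DATA_SIZE_M;

	return ice_aq_read_i2c(hw, topo_addr, bus_addr, CPU_TO_LE16(offset),
			       params, out2, NULL);
}
#endif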
6346
6347 /**
6348 * ice_aq_write_i2c
6349 * @hw: pointer to the hw struct
6350 * @topo_addr: topology address for a device to communicate with
6351 * @bus_addr: 7-bit I2C bus address
6352 * @addr: I2C memory address (I2C offset) with up to 16 bits
6353 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
6354 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
6355 * @cd: pointer to command details structure or NULL
6356 *
6357 * Write I2C (0x06E3)
6358 */
6359 int
6360 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
6361 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
6362 struct ice_sq_cd *cd)
6363 {
6364 struct ice_aq_desc desc = { 0 };
6365 struct ice_aqc_i2c *cmd;
6366 u8 i, data_size;
6367
6368 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
6369 cmd = &desc.params.read_write_i2c;
6370
6371 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
6372
6373 /* data_size limited to 4 */
6374 if (data_size > 4)
6375 return ICE_ERR_PARAM;
6376
6377 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
6378 cmd->topo_addr = topo_addr;
6379 cmd->i2c_params = params;
6380 cmd->i2c_addr = addr;
6381
6382 for (i = 0; i < data_size; i++) {
6383 cmd->i2c_data[i] = *data;
6384 data++;
6385 }
6386
6387 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6388 }
6389
6390 /**
6391 * ice_aq_set_gpio
6392 * @hw: pointer to the hw struct
6393 * @gpio_ctrl_handle: GPIO controller node handle
6394 * @pin_idx: IO Number of the GPIO that needs to be set
6395 * @value: SW provided IO value to set in the LSB
6396 * @cd: pointer to command details structure or NULL
6397 *
6398 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
6399 */
6400 int
6401 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
6402 struct ice_sq_cd *cd)
6403 {
6404 struct ice_aqc_gpio *cmd;
6405 struct ice_aq_desc desc;
6406
6407 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
6408 cmd = &desc.params.read_write_gpio;
6409 cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6410 cmd->gpio_num = pin_idx;
6411 cmd->gpio_val = value ? 1 : 0;
6412
6413 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6414 }
6415
6416 /**
6417 * ice_aq_get_gpio
6418 * @hw: pointer to the hw struct
6419 * @gpio_ctrl_handle: GPIO controller node handle
6420 * @pin_idx: IO Number of the GPIO that needs to be read
6421 * @value: IO value read
6422 * @cd: pointer to command details structure or NULL
6423 *
6424 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
6425 * the topology
6426 */
6427 int
6428 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
6429 bool *value, struct ice_sq_cd *cd)
6430 {
6431 struct ice_aqc_gpio *cmd;
6432 struct ice_aq_desc desc;
6433 int status;
6434
6435 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
6436 cmd = &desc.params.read_write_gpio;
6437 cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6438 cmd->gpio_num = pin_idx;
6439
6440 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6441 if (status)
6442 return status;
6443
6444 *value = !!cmd->gpio_val;
6445 return 0;
6446 }
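/* Illustrative sketch (not part of the driver): driving a topology GPIO and
 * reading it back with the two wrappers above. The controller handle and pin
 * index are placeholders normally discovered through the link topology; the
 * ice_example_* name is hypothetical.
 */
#if 0 /* example only; excluded from the build */
static int ice_example_gpio_set_and_check(struct ice_hw *hw, u16 gpio_handle,
					  u8 pin_idx)
{
	bool level = false;
	int status;

	status = ice_aq_set_gpio(hw, gpio_handle, pin_idx, true, NULL);
	if (status)
		return status;

	status = ice_aq_get_gpio(hw, gpio_handle, pin_idx, &level, NULL);
	/* on success, "level" reflects the pin state just written */
	return status;
}
#endif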
6447
6448 /**
6449 * ice_is_fw_api_min_ver
6450 * @hw: pointer to the hardware structure
6451 * @maj: major version
6452 * @min: minor version
6453 * @patch: patch version
6454 *
6455 * Checks if the firmware API is at least the given version
6456 */
6457 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
6458 {
6459 if (hw->api_maj_ver == maj) {
6460 if (hw->api_min_ver > min)
6461 return true;
6462 if (hw->api_min_ver == min && hw->api_patch >= patch)
6463 return true;
6464 } else if (hw->api_maj_ver > maj) {
6465 return true;
6466 }
6467
6468 return false;
6469 }
6470
6471 /**
6472 * ice_is_fw_min_ver
6473 * @hw: pointer to the hardware structure
6474 * @branch: branch version
6475 * @maj: major version
6476 * @min: minor version
6477 * @patch: patch version
6478 *
6479 * Checks if the firmware is at least the given version
6480 */
6481 static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
6482 u8 patch)
6483 {
6484 if (hw->fw_branch == branch) {
6485 if (hw->fw_maj_ver > maj)
6486 return true;
6487 if (hw->fw_maj_ver == maj) {
6488 if (hw->fw_min_ver > min)
6489 return true;
6490 if (hw->fw_min_ver == min && hw->fw_patch >= patch)
6491 return true;
6492 }
6493 }
6494
6495 return false;
6496 }
6497
6498 /**
6499 * ice_fw_supports_link_override
6500 * @hw: pointer to the hardware structure
6501 *
6502 * Checks if the firmware supports link override
6503 */
6504 bool ice_fw_supports_link_override(struct ice_hw *hw)
6505 {
6506 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
6507 ICE_FW_API_LINK_OVERRIDE_MIN,
6508 ICE_FW_API_LINK_OVERRIDE_PATCH);
6509 }
6510
6511 /**
6512 * ice_get_link_default_override
6513 * @ldo: pointer to the link default override struct
6514 * @pi: pointer to the port info struct
6515 *
6516 * Gets the link default override for a port
6517 */
6518 int
6519 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
6520 struct ice_port_info *pi)
6521 {
6522 u16 i, tlv, tlv_len, tlv_start, buf, offset;
6523 struct ice_hw *hw = pi->hw;
6524 int status;
6525
6526 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
6527 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
6528 if (status) {
6529 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
6530 return status;
6531 }
6532
6533 /* Each port has its own config; calculate for our port */
6534 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
6535 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
6536
6537 /* link options first */
6538 status = ice_read_sr_word(hw, tlv_start, &buf);
6539 if (status) {
6540 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6541 return status;
6542 }
6543 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
6544 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
6545 ICE_LINK_OVERRIDE_PHY_CFG_S;
6546
6547 /* link PHY config */
6548 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
6549 status = ice_read_sr_word(hw, offset, &buf);
6550 if (status) {
6551 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
6552 return status;
6553 }
6554 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
6555
6556 /* PHY types low */
6557 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
6558 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6559 status = ice_read_sr_word(hw, (offset + i), &buf);
6560 if (status) {
6561 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6562 return status;
6563 }
6564 /* shift 16 bits at a time to fill 64 bits */
6565 ldo->phy_type_low |= ((u64)buf << (i * 16));
6566 }
6567
6568 /* PHY types high */
6569 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
6570 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
6571 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6572 status = ice_read_sr_word(hw, (offset + i), &buf);
6573 if (status) {
6574 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6575 return status;
6576 }
6577 /* shift 16 bits at a time to fill 64 bits */
6578 ldo->phy_type_high |= ((u64)buf << (i * 16));
6579 }
6580
6581 return status;
6582 }
6583
6584 /**
6585 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
6586 * @caps: get PHY capability data
6587 */
6588 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
6589 {
6590 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
6591 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
6592 ICE_AQC_PHY_AN_EN_CLAUSE73 |
6593 ICE_AQC_PHY_AN_EN_CLAUSE37))
6594 return true;
6595
6596 return false;
6597 }
6598
6599 /**
6600 * ice_is_fw_health_report_supported
6601 * @hw: pointer to the hardware structure
6602 *
6603 * Return true if firmware supports health status reports,
6604 * false otherwise
6605 */
6606 bool ice_is_fw_health_report_supported(struct ice_hw *hw)
6607 {
6608 if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
6609 return true;
6610
6611 if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
6612 if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
6613 return true;
6614 if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
6615 hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
6616 return true;
6617 }
6618
6619 return false;
6620 }
6621
6622 /**
6623 * ice_aq_set_health_status_config - Configure FW health events
6624 * @hw: pointer to the HW struct
6625 * @event_source: type of diagnostic events to enable
6626 * @cd: pointer to command details structure or NULL
6627 *
6628 * Configure the health status event types that the firmware will send to this
6629 * PF. The supported event types are: PF-specific, all PFs, and global
6630 */
6631 int
6632 ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
6633 struct ice_sq_cd *cd)
6634 {
6635 struct ice_aqc_set_health_status_config *cmd;
6636 struct ice_aq_desc desc;
6637
6638 cmd = &desc.params.set_health_status_config;
6639
6640 ice_fill_dflt_direct_cmd_desc(&desc,
6641 ice_aqc_opc_set_health_status_config);
6642
6643 cmd->event_source = event_source;
6644
6645 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6646 }
6647
6648 /**
6649 * ice_aq_get_port_options
6650 * @hw: pointer to the hw struct
6651 * @options: buffer for the resultant port options
6652 * @option_count: input - size of the buffer in port options structures,
6653 * output - number of returned port options
6654 * @lport: logical port to call the command with (optional)
6655 * @lport_valid: when false, FW uses the port owned by the PF instead of lport;
6656 * it must be true when the PF owns more than 1 port
6657 * @active_option_idx: index of active port option in returned buffer
6658 * @active_option_valid: active option in returned buffer is valid
6659 * @pending_option_idx: index of pending port option in returned buffer
6660 * @pending_option_valid: pending option in returned buffer is valid
6661 *
6662 * Calls Get Port Options AQC (0x06ea) and verifies result.
6663 */
6664 int
ice_aq_get_port_options(struct ice_hw * hw,struct ice_aqc_get_port_options_elem * options,u8 * option_count,u8 lport,bool lport_valid,u8 * active_option_idx,bool * active_option_valid,u8 * pending_option_idx,bool * pending_option_valid)6665 ice_aq_get_port_options(struct ice_hw *hw,
6666 struct ice_aqc_get_port_options_elem *options,
6667 u8 *option_count, u8 lport, bool lport_valid,
6668 u8 *active_option_idx, bool *active_option_valid,
6669 u8 *pending_option_idx, bool *pending_option_valid)
6670 {
6671 struct ice_aqc_get_port_options *cmd;
6672 struct ice_aq_desc desc;
6673 int status;
6674 u8 i;
6675
6676 /* options buffer shall be able to hold max returned options */
6677 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
6678 return ICE_ERR_PARAM;
6679
6680 cmd = &desc.params.get_port_options;
6681 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
6682
6683 cmd->lport_num = lport;
6684 cmd->lport_num_valid = lport_valid;
6685
6686 status = ice_aq_send_cmd(hw, &desc, options,
6687 *option_count * sizeof(*options), NULL);
6688 if (status)
6689 return status;
6690
6691 /* verify direct FW response & set output parameters */
6692 *option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
6693 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
6694 *active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
6695 if (*active_option_valid) {
6696 *active_option_idx = cmd->port_options &
6697 ICE_AQC_PORT_OPT_ACTIVE_M;
6698 if (*active_option_idx > (*option_count - 1))
6699 return ICE_ERR_OUT_OF_RANGE;
6700 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
6701 *active_option_idx);
6702 }
6703
6704 *pending_option_valid = cmd->pending_port_option_status &
6705 ICE_AQC_PENDING_PORT_OPT_VALID;
6706 if (*pending_option_valid) {
6707 *pending_option_idx = cmd->pending_port_option_status &
6708 ICE_AQC_PENDING_PORT_OPT_IDX_M;
6709 if (*pending_option_idx > (*option_count - 1))
6710 return ICE_ERR_OUT_OF_RANGE;
6711 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
6712 *pending_option_idx);
6713 }
6714
6715 /* mask output options fields */
6716 for (i = 0; i < *option_count; i++) {
6717 options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
6718 options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
6719 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
6720 options[i].pmd, options[i].max_lane_speed);
6721 }
6722
6723 return 0;
6724 }
6725
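/* Illustrative usage sketch (assumed caller): read the port options for the
 * PF-owned port and inspect the active one. ICE_AQC_PORT_OPT_MAX is assumed
 * to be the firmware's maximum option count from ice_adminq_cmd.h; the
 * buffer must hold at least ICE_AQC_PORT_OPT_COUNT_M entries or the call
 * fails with ICE_ERR_PARAM:
 *
 *	struct ice_aqc_get_port_options_elem opts[ICE_AQC_PORT_OPT_MAX];
 *	bool active_valid, pending_valid;
 *	u8 count = ICE_AQC_PORT_OPT_MAX;
 *	u8 active, pending;
 *
 *	if (!ice_aq_get_port_options(hw, opts, &count, 0, false,
 *				     &active, &active_valid,
 *				     &pending, &pending_valid) &&
 *	    active_valid)
 *		ice_debug(hw, ICE_DBG_PHY, "active option: %u\n", active);
 */
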
/**
 * ice_aq_set_port_option
 * @hw: pointer to the hw struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 * lport; must be true when the PF owns more than one port
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
 */
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	if (new_option >= ICE_AQC_PORT_OPT_COUNT_M)
		return ICE_ERR_PARAM;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

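/* Illustrative pairing sketch (assumed behavior): a newly written option is
 * typically reported as pending until the hardware is reset/reloaded, so a
 * caller could confirm the write via ice_aq_get_port_options():
 *
 *	if (!ice_aq_set_port_option(hw, 0, false, 2) &&
 *	    !ice_aq_get_port_options(hw, opts, &count, 0, false,
 *				     &active, &active_valid,
 *				     &pending, &pending_valid))
 *		; // expect pending_valid && pending == 2
 */
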
/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block to write
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = CPU_TO_LE16(buf_size);

	cmd->type = mib_type;
	cmd->length = CPU_TO_LE16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

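/* Note: ICE_AQ_FLAG_RD marks the attached buffer as one firmware reads from
 * (the MIB to apply), as opposed to a response buffer it writes into.
 * Illustrative call sketch; the mib_type value is assumed from the
 * companion ice_adminq_cmd.h definitions and the TLV blob is hypothetical:
 *
 *	u8 mib[64];	// LLDP TLVs built elsewhere (hypothetical)
 *	u16 len = sizeof(mib);
 *
 *	ice_aq_set_lldp_mib(hw, SET_LOCAL_MIB_TYPE_LOCAL_MIB, mib, len,
 *			    NULL);
 */
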
/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add the filter, false to remove it
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = CPU_TO_LE16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

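/* Illustrative pairing sketch: callers are expected to gate this on the
 * capability check above, since older firmware rejects the opcode:
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw))
 *		ice_lldp_fltr_add_remove(hw, vsi->vsi_num, true);
 *
 * vsi->vsi_num is an assumed caller-side field holding the absolute HW VSI
 * index.
 */
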
/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* Each index into the following array matches the speed of a return value
 * from the list of AQ returned speeds in the range
 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_200GB, excluding
 * ICE_AQ_LINK_SPEED_UNKNOWN, which is BIT(15). The link_speed returned by
 * the firmware is a 16-bit bitmask; the array is indexed by
 * [fls(speed) - 1], and out-of-range indexes map to ICE_LINK_SPEED_UNKNOWN
 * in ice_get_link_speed().
 */
static const u32 ice_aq_to_link_speed[] = {
	ICE_LINK_SPEED_10MBPS,	/* BIT(0) */
	ICE_LINK_SPEED_100MBPS,
	ICE_LINK_SPEED_1000MBPS,
	ICE_LINK_SPEED_2500MBPS,
	ICE_LINK_SPEED_5000MBPS,
	ICE_LINK_SPEED_10000MBPS,
	ICE_LINK_SPEED_20000MBPS,
	ICE_LINK_SPEED_25000MBPS,
	ICE_LINK_SPEED_40000MBPS,
	ICE_LINK_SPEED_50000MBPS,
	ICE_LINK_SPEED_100000MBPS,	/* BIT(10) */
	ICE_LINK_SPEED_200000MBPS,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return ICE_LINK_SPEED_UNKNOWN;

	return ice_aq_to_link_speed[index];
}

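/* Illustrative sketch: converting a firmware-reported speed bit to Mb/s.
 * Assuming ICE_AQ_LINK_SPEED_25GB is BIT(7), fls() returns 8, so index 7
 * selects ICE_LINK_SPEED_25000MBPS (the exact fls helper name is
 * environment-specific; the doc comment above uses fls()):
 *
 *	u16 aq_speed = ICE_AQ_LINK_SPEED_25GB;
 *	u32 mbps = ice_get_link_speed(fls(aq_speed) - 1);
 */
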
/**
 * ice_fw_supports_fec_dis_auto
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports FEC disable in Auto FEC mode
 */
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
{
	if (ice_is_e830(hw))
		return true;
	return ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E810,
				 ICE_FW_FEC_DIS_AUTO_MAJ,
				 ICE_FW_FEC_DIS_AUTO_MIN,
				 ICE_FW_FEC_DIS_AUTO_PATCH) ||
	       ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E82X,
				 ICE_FW_FEC_DIS_AUTO_MAJ_E82X,
				 ICE_FW_FEC_DIS_AUTO_MIN_E82X,
				 ICE_FW_FEC_DIS_AUTO_PATCH_E82X);
}

/**
 * ice_is_fw_auto_drop_supported
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports auto drop feature
 */
bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
{
	if (hw->api_maj_ver >= ICE_FW_API_AUTO_DROP_MAJ &&
	    hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)
		return true;
	return false;
}
