/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT 300

/**
 * dump_phy_type - helper function that prints PHY type strings
 * @hw: pointer to the HW structure
 * @phy: 64 bit PHY type to decipher
 * @i: bit index within phy
 * @phy_string: string corresponding to bit i in phy
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
dump_phy_type(struct ice_hw *hw, u64 phy, u8 i, const char *phy_string,
              const char *prefix)
{
        if (phy & BIT_ULL(i))
                ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", prefix, i,
                          phy_string);
}
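
/* For reference, each set bit yields one debug line of the form below
 * (illustrative output, assuming ICE_DBG_PHY messages are enabled):
 *
 *      phy_caps_media: bit(2): 1000BASE_T
 */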

/**
 * ice_dump_phy_type_low - helper function to dump phy_type_low
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type_low(struct ice_hw *hw, u64 low, const char *prefix)
{
        ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
                  (unsigned long long)low);

        dump_phy_type(hw, low, 0, "100BASE_TX", prefix);
        dump_phy_type(hw, low, 1, "100M_SGMII", prefix);
        dump_phy_type(hw, low, 2, "1000BASE_T", prefix);
        dump_phy_type(hw, low, 3, "1000BASE_SX", prefix);
        dump_phy_type(hw, low, 4, "1000BASE_LX", prefix);
        dump_phy_type(hw, low, 5, "1000BASE_KX", prefix);
        dump_phy_type(hw, low, 6, "1G_SGMII", prefix);
        dump_phy_type(hw, low, 7, "2500BASE_T", prefix);
        dump_phy_type(hw, low, 8, "2500BASE_X", prefix);
        dump_phy_type(hw, low, 9, "2500BASE_KX", prefix);
        dump_phy_type(hw, low, 10, "5GBASE_T", prefix);
        dump_phy_type(hw, low, 11, "5GBASE_KR", prefix);
        dump_phy_type(hw, low, 12, "10GBASE_T", prefix);
        dump_phy_type(hw, low, 13, "10G_SFI_DA", prefix);
        dump_phy_type(hw, low, 14, "10GBASE_SR", prefix);
        dump_phy_type(hw, low, 15, "10GBASE_LR", prefix);
        dump_phy_type(hw, low, 16, "10GBASE_KR_CR1", prefix);
        dump_phy_type(hw, low, 17, "10G_SFI_AOC_ACC", prefix);
        dump_phy_type(hw, low, 18, "10G_SFI_C2C", prefix);
        dump_phy_type(hw, low, 19, "25GBASE_T", prefix);
        dump_phy_type(hw, low, 20, "25GBASE_CR", prefix);
        dump_phy_type(hw, low, 21, "25GBASE_CR_S", prefix);
        dump_phy_type(hw, low, 22, "25GBASE_CR1", prefix);
        dump_phy_type(hw, low, 23, "25GBASE_SR", prefix);
        dump_phy_type(hw, low, 24, "25GBASE_LR", prefix);
        dump_phy_type(hw, low, 25, "25GBASE_KR", prefix);
        dump_phy_type(hw, low, 26, "25GBASE_KR_S", prefix);
        dump_phy_type(hw, low, 27, "25GBASE_KR1", prefix);
        dump_phy_type(hw, low, 28, "25G_AUI_AOC_ACC", prefix);
        dump_phy_type(hw, low, 29, "25G_AUI_C2C", prefix);
        dump_phy_type(hw, low, 30, "40GBASE_CR4", prefix);
        dump_phy_type(hw, low, 31, "40GBASE_SR4", prefix);
        dump_phy_type(hw, low, 32, "40GBASE_LR4", prefix);
        dump_phy_type(hw, low, 33, "40GBASE_KR4", prefix);
        dump_phy_type(hw, low, 34, "40G_XLAUI_AOC_ACC", prefix);
        dump_phy_type(hw, low, 35, "40G_XLAUI", prefix);
        dump_phy_type(hw, low, 36, "50GBASE_CR2", prefix);
        dump_phy_type(hw, low, 37, "50GBASE_SR2", prefix);
        dump_phy_type(hw, low, 38, "50GBASE_LR2", prefix);
        dump_phy_type(hw, low, 39, "50GBASE_KR2", prefix);
        dump_phy_type(hw, low, 40, "50G_LAUI2_AOC_ACC", prefix);
        dump_phy_type(hw, low, 41, "50G_LAUI2", prefix);
        dump_phy_type(hw, low, 42, "50G_AUI2_AOC_ACC", prefix);
        dump_phy_type(hw, low, 43, "50G_AUI2", prefix);
        dump_phy_type(hw, low, 44, "50GBASE_CP", prefix);
        dump_phy_type(hw, low, 45, "50GBASE_SR", prefix);
        dump_phy_type(hw, low, 46, "50GBASE_FR", prefix);
        dump_phy_type(hw, low, 47, "50GBASE_LR", prefix);
        dump_phy_type(hw, low, 48, "50GBASE_KR_PAM4", prefix);
        dump_phy_type(hw, low, 49, "50G_AUI1_AOC_ACC", prefix);
        dump_phy_type(hw, low, 50, "50G_AUI1", prefix);
        dump_phy_type(hw, low, 51, "100GBASE_CR4", prefix);
        dump_phy_type(hw, low, 52, "100GBASE_SR4", prefix);
        dump_phy_type(hw, low, 53, "100GBASE_LR4", prefix);
        dump_phy_type(hw, low, 54, "100GBASE_KR4", prefix);
        dump_phy_type(hw, low, 55, "100G_CAUI4_AOC_ACC", prefix);
        dump_phy_type(hw, low, 56, "100G_CAUI4", prefix);
        dump_phy_type(hw, low, 57, "100G_AUI4_AOC_ACC", prefix);
        dump_phy_type(hw, low, 58, "100G_AUI4", prefix);
        dump_phy_type(hw, low, 59, "100GBASE_CR_PAM4", prefix);
        dump_phy_type(hw, low, 60, "100GBASE_KR_PAM4", prefix);
        dump_phy_type(hw, low, 61, "100GBASE_CP2", prefix);
        dump_phy_type(hw, low, 62, "100GBASE_SR2", prefix);
        dump_phy_type(hw, low, 63, "100GBASE_DR", prefix);
}

/**
 * ice_dump_phy_type_high - helper function to dump phy_type_high
 * @hw: pointer to the HW structure
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type_high(struct ice_hw *hw, u64 high, const char *prefix)
{
        ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
                  (unsigned long long)high);

        dump_phy_type(hw, high, 0, "100GBASE_KR2_PAM4", prefix);
        dump_phy_type(hw, high, 1, "100G_CAUI2_AOC_ACC", prefix);
        dump_phy_type(hw, high, 2, "100G_CAUI2", prefix);
        dump_phy_type(hw, high, 3, "100G_AUI2_AOC_ACC", prefix);
        dump_phy_type(hw, high, 4, "100G_AUI2", prefix);
}

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
                return ICE_ERR_DEVICE_NOT_SUPPORTED;

        switch (hw->device_id) {
        case ICE_DEV_ID_E810C_BACKPLANE:
        case ICE_DEV_ID_E810C_QSFP:
        case ICE_DEV_ID_E810C_SFP:
        case ICE_DEV_ID_E810_XXV_BACKPLANE:
        case ICE_DEV_ID_E810_XXV_QSFP:
        case ICE_DEV_ID_E810_XXV_SFP:
                hw->mac_type = ICE_MAC_E810;
                break;
        case ICE_DEV_ID_E822C_10G_BASE_T:
        case ICE_DEV_ID_E822C_BACKPLANE:
        case ICE_DEV_ID_E822C_QSFP:
        case ICE_DEV_ID_E822C_SFP:
        case ICE_DEV_ID_E822C_SGMII:
        case ICE_DEV_ID_E822L_10G_BASE_T:
        case ICE_DEV_ID_E822L_BACKPLANE:
        case ICE_DEV_ID_E822L_SFP:
        case ICE_DEV_ID_E822L_SGMII:
        case ICE_DEV_ID_E823L_10G_BASE_T:
        case ICE_DEV_ID_E823L_1GBE:
        case ICE_DEV_ID_E823L_BACKPLANE:
        case ICE_DEV_ID_E823L_QSFP:
        case ICE_DEV_ID_E823L_SFP:
        case ICE_DEV_ID_E823C_10G_BASE_T:
        case ICE_DEV_ID_E823C_BACKPLANE:
        case ICE_DEV_ID_E823C_QSFP:
        case ICE_DEV_ID_E823C_SFP:
        case ICE_DEV_ID_E823C_SGMII:
                hw->mac_type = ICE_MAC_GENERIC;
                break;
        default:
                hw->mac_type = ICE_MAC_UNKNOWN;
                break;
        }

        ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
        return ICE_SUCCESS;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
        return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
        switch (hw->device_id) {
        case ICE_DEV_ID_E810C_SFP:
                if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
                    hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
                        return true;
                break;
        case ICE_DEV_ID_E810C_QSFP:
                if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
                        return true;
                break;
        default:
                break;
        }

        return false;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
        struct ice_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

        return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The returned addresses are also stored in the
 * HW struct (port.mac). ice_discover_dev_caps is expected to be called
 * before this function.
 */
enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
                       struct ice_sq_cd *cd)
{
        struct ice_aqc_manage_mac_read_resp *resp;
        struct ice_aqc_manage_mac_read *cmd;
        struct ice_aq_desc desc;
        enum ice_status status;
        u16 flags;
        u8 i;

        cmd = &desc.params.mac_read;

        if (buf_size < sizeof(*resp))
                return ICE_ERR_BUF_TOO_SHORT;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

        status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
        if (status)
                return status;

        resp = (struct ice_aqc_manage_mac_read_resp *)buf;
        flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

        if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
                ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
                return ICE_ERR_CFG;
        }

        /* A single port can report up to two (LAN and WoL) addresses */
        for (i = 0; i < cmd->num_addr; i++)
                if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
                        ice_memcpy(hw->port_info->mac.lan_addr,
                                   resp[i].mac_addr, ETH_ALEN,
                                   ICE_DMA_TO_NONDMA);
                        ice_memcpy(hw->port_info->mac.perm_addr,
                                   resp[i].mac_addr,
                                   ETH_ALEN, ICE_DMA_TO_NONDMA);
                        break;
                }
        return ICE_SUCCESS;
}
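
/* Illustrative usage sketch (mirrors the call made later in ice_init_hw()):
 * the caller sizes the buffer for the up-to-two (LAN and WoL) response
 * entries a single port may report.
 *
 *      buf = ice_calloc(hw, 2, sizeof(struct ice_aqc_manage_mac_read_resp));
 *      buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *      if (buf)
 *              status = ice_aq_manage_mac_read(hw, buf, buf_len, NULL);
 */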

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
                    struct ice_aqc_get_phy_caps_data *pcaps,
                    struct ice_sq_cd *cd)
{
        struct ice_aqc_get_phy_caps *cmd;
        u16 pcaps_size = sizeof(*pcaps);
        struct ice_aq_desc desc;
        enum ice_status status;
        const char *prefix;
        struct ice_hw *hw;

        cmd = &desc.params.get_phy;

        if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
                return ICE_ERR_PARAM;
        hw = pi->hw;

        if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
            !ice_fw_supports_report_dflt_cfg(hw))
                return ICE_ERR_PARAM;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

        if (qual_mods)
                cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

        cmd->param0 |= CPU_TO_LE16(report_mode);
        status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

        ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

        if (report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA)
                prefix = "phy_caps_media";
        else if (report_mode == ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA)
                prefix = "phy_caps_no_media";
        else if (report_mode == ICE_AQC_REPORT_ACTIVE_CFG)
                prefix = "phy_caps_active";
        else if (report_mode == ICE_AQC_REPORT_DFLT_CFG)
                prefix = "phy_caps_default";
        else
                prefix = "phy_caps_invalid";

        ice_dump_phy_type_low(hw, LE64_TO_CPU(pcaps->phy_type_low), prefix);
        ice_dump_phy_type_high(hw, LE64_TO_CPU(pcaps->phy_type_high), prefix);

        ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
                  prefix, report_mode);
        ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
        ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
                  pcaps->low_power_ctrl_an);
        ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
                  pcaps->eee_cap);
        ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
                  pcaps->eeer_value);
        ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
                  pcaps->link_fec_options);
        ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
                  prefix, pcaps->module_compliance_enforcement);
        ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
                  prefix, pcaps->extended_compliance_code);
        ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
                  pcaps->module_type[0]);
        ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
                  pcaps->module_type[1]);
        ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
                  pcaps->module_type[2]);

        if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
                pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
                pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
                ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
                           sizeof(pi->phy.link_info.module_type),
                           ICE_NONDMA_TO_NONDMA);
        }

        return status;
}

/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 */
enum ice_status
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
                        u8 *node_part_number, u16 *node_handle)
{
        struct ice_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
        desc.params.get_link_topo = *cmd;

        if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
                return ICE_ERR_NOT_SUPPORTED;

        if (node_handle)
                *node_handle =
                        LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
        if (node_part_number)
                *node_part_number = desc.params.get_link_topo.node_part_num;

        return ICE_SUCCESS;
}

#define MAX_NETLIST_SIZE 10
/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Find and return the node handle for a given node type and part number in
 * the netlist. Returns ICE_SUCCESS if the node is found and
 * ICE_ERR_DOES_NOT_EXIST otherwise. If @node_handle is provided, it is set
 * to the handle of the node that was found.
 */
enum ice_status
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
                      u16 *node_handle)
{
        struct ice_aqc_get_link_topo cmd;
        u8 rec_node_part_number;
        enum ice_status status;
        u16 rec_node_handle;
        u8 idx;

        for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
                memset(&cmd, 0, sizeof(cmd));

                cmd.addr.topo_params.node_type_ctx =
                        (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
                cmd.addr.topo_params.index = idx;

                status = ice_aq_get_netlist_node(hw, &cmd,
                                                 &rec_node_part_number,
                                                 &rec_node_handle);
                if (status)
                        return status;

                if (rec_node_part_number == node_part_number) {
                        if (node_handle)
                                *node_handle = rec_node_handle;
                        return ICE_SUCCESS;
                }
        }

        return ICE_ERR_DOES_NOT_EXIST;
}
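
/* Minimal usage sketch; "cage_part_number" is a hypothetical value, while
 * the node type constant is the one used elsewhere in this file:
 *
 *      u16 handle;
 *
 *      status = ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
 *                                     cage_part_number, &handle);
 *      if (status == ICE_SUCCESS)
 *              ... a matching node exists and "handle" identifies it ...
 */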

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if a media cage is present, else false. If there is no cage,
 * then the media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
        struct ice_aqc_get_link_topo *cmd;
        struct ice_aq_desc desc;

        cmd = &desc.params.get_link_topo;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

        cmd->addr.topo_params.node_type_ctx =
                (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
                 ICE_AQC_LINK_TOPO_NODE_CTX_S);

        /* set node type */
        cmd->addr.topo_params.node_type_ctx |=
                (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
                 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE);

        /* Node type cage can be used to determine if a cage is present. If
         * the AQC returns an error (ENOENT), then no cage is present; in that
         * case the connection type is backplane or BASE-T. The AQ wrapper
         * returns ICE_SUCCESS (zero) when the node is found, hence the
         * negation to produce the boolean result.
         */
        return !ice_aq_get_netlist_node(pi->hw, cmd, NULL, NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
        struct ice_link_status *hw_link_info;

        if (!pi)
                return ICE_MEDIA_UNKNOWN;

        hw_link_info = &pi->phy.link_info;
        if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
                /* If more than one media type is selected, report unknown */
                return ICE_MEDIA_UNKNOWN;

        if (hw_link_info->phy_type_low) {
                /* 1G SGMII is a special case where some DA cable PHYs
                 * may show this as an option when it really shouldn't
                 * be, since SGMII is meant to be between a MAC and a PHY
                 * in a backplane. Try to detect this case and handle it.
                 */
                if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
                    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
                     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
                     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
                     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
                        return ICE_MEDIA_DA;

                switch (hw_link_info->phy_type_low) {
                case ICE_PHY_TYPE_LOW_1000BASE_SX:
                case ICE_PHY_TYPE_LOW_1000BASE_LX:
                case ICE_PHY_TYPE_LOW_10GBASE_SR:
                case ICE_PHY_TYPE_LOW_10GBASE_LR:
                case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
                case ICE_PHY_TYPE_LOW_25GBASE_SR:
                case ICE_PHY_TYPE_LOW_25GBASE_LR:
                case ICE_PHY_TYPE_LOW_40GBASE_SR4:
                case ICE_PHY_TYPE_LOW_40GBASE_LR4:
                case ICE_PHY_TYPE_LOW_50GBASE_SR2:
                case ICE_PHY_TYPE_LOW_50GBASE_LR2:
                case ICE_PHY_TYPE_LOW_50GBASE_SR:
                case ICE_PHY_TYPE_LOW_50GBASE_FR:
                case ICE_PHY_TYPE_LOW_50GBASE_LR:
                case ICE_PHY_TYPE_LOW_100GBASE_SR4:
                case ICE_PHY_TYPE_LOW_100GBASE_LR4:
                case ICE_PHY_TYPE_LOW_100GBASE_SR2:
                case ICE_PHY_TYPE_LOW_100GBASE_DR:
                        return ICE_MEDIA_FIBER;
                case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
                case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
                case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
                case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
                case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
                case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
                case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
                case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
                        return ICE_MEDIA_FIBER;
                case ICE_PHY_TYPE_LOW_100BASE_TX:
                case ICE_PHY_TYPE_LOW_1000BASE_T:
                case ICE_PHY_TYPE_LOW_2500BASE_T:
                case ICE_PHY_TYPE_LOW_5GBASE_T:
                case ICE_PHY_TYPE_LOW_10GBASE_T:
                case ICE_PHY_TYPE_LOW_25GBASE_T:
                        return ICE_MEDIA_BASET;
                case ICE_PHY_TYPE_LOW_10G_SFI_DA:
                case ICE_PHY_TYPE_LOW_25GBASE_CR:
                case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
                case ICE_PHY_TYPE_LOW_25GBASE_CR1:
                case ICE_PHY_TYPE_LOW_40GBASE_CR4:
                case ICE_PHY_TYPE_LOW_50GBASE_CR2:
                case ICE_PHY_TYPE_LOW_50GBASE_CP:
                case ICE_PHY_TYPE_LOW_100GBASE_CR4:
                case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
                case ICE_PHY_TYPE_LOW_100GBASE_CP2:
                        return ICE_MEDIA_DA;
                case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
                case ICE_PHY_TYPE_LOW_40G_XLAUI:
                case ICE_PHY_TYPE_LOW_50G_LAUI2:
                case ICE_PHY_TYPE_LOW_50G_AUI2:
                case ICE_PHY_TYPE_LOW_50G_AUI1:
                case ICE_PHY_TYPE_LOW_100G_AUI4:
                case ICE_PHY_TYPE_LOW_100G_CAUI4:
                        if (ice_is_media_cage_present(pi))
                                return ICE_MEDIA_AUI;
                        /* fall-through */
                case ICE_PHY_TYPE_LOW_1000BASE_KX:
                case ICE_PHY_TYPE_LOW_2500BASE_KX:
                case ICE_PHY_TYPE_LOW_2500BASE_X:
                case ICE_PHY_TYPE_LOW_5GBASE_KR:
                case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
                case ICE_PHY_TYPE_LOW_25GBASE_KR:
                case ICE_PHY_TYPE_LOW_25GBASE_KR1:
                case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
                case ICE_PHY_TYPE_LOW_40GBASE_KR4:
                case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
                case ICE_PHY_TYPE_LOW_50GBASE_KR2:
                case ICE_PHY_TYPE_LOW_100GBASE_KR4:
                case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
                        return ICE_MEDIA_BACKPLANE;
                }
        } else {
                switch (hw_link_info->phy_type_high) {
                case ICE_PHY_TYPE_HIGH_100G_AUI2:
                case ICE_PHY_TYPE_HIGH_100G_CAUI2:
                        if (ice_is_media_cage_present(pi))
                                return ICE_MEDIA_AUI;
                        /* fall-through */
                case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
                        return ICE_MEDIA_BACKPLANE;
                case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
                case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
                        return ICE_MEDIA_FIBER;
                }
        }
        return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
                     struct ice_link_status *link, struct ice_sq_cd *cd)
{
        struct ice_aqc_get_link_status_data link_data = { 0 };
        struct ice_aqc_get_link_status *resp;
        struct ice_link_status *li_old, *li;
        enum ice_media_type *hw_media_type;
        struct ice_fc_info *hw_fc_info;
        bool tx_pause, rx_pause;
        struct ice_aq_desc desc;
        enum ice_status status;
        struct ice_hw *hw;
        u16 cmd_flags;

        if (!pi)
                return ICE_ERR_PARAM;
        hw = pi->hw;

        li_old = &pi->phy.link_info_old;
        hw_media_type = &pi->phy.media_type;
        li = &pi->phy.link_info;
        hw_fc_info = &pi->fc;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
        cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
        resp = &desc.params.get_link_status;
        resp->cmd_flags = CPU_TO_LE16(cmd_flags);
        resp->lport_num = pi->lport;

        status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

        if (status != ICE_SUCCESS)
                return status;

        /* save off old link status information */
        *li_old = *li;

        /* update current link status information */
        li->link_speed = LE16_TO_CPU(link_data.link_speed);
        li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
        li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
        *hw_media_type = ice_get_media_type(pi);
        li->link_info = link_data.link_info;
        li->link_cfg_err = link_data.link_cfg_err;
        li->an_info = link_data.an_info;
        li->ext_info = link_data.ext_info;
        li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
        li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
        li->topo_media_conflict = link_data.topo_media_conflict;
        li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
                                      ICE_AQ_CFG_PACING_TYPE_M);

        /* update fc info */
        tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
        rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
        if (tx_pause && rx_pause)
                hw_fc_info->current_mode = ICE_FC_FULL;
        else if (tx_pause)
                hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
        else if (rx_pause)
                hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
        else
                hw_fc_info->current_mode = ICE_FC_NONE;

        li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

        ice_debug(hw, ICE_DBG_LINK, "get link info\n");
        ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
        ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
                  (unsigned long long)li->phy_type_low);
        ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
                  (unsigned long long)li->phy_type_high);
        ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
        ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
        ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
        ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
        ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
        ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
        ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
        ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
                  li->max_frame_size);
        ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

        /* save link status information */
        if (link)
                *link = *li;

        /* flag cleared so calling functions don't call AQ again */
        pi->phy.get_link_info = false;

        return ICE_SUCCESS;
}

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
                                struct ice_aqc_set_mac_cfg *cmd)
{
        u16 fc_thres_val, tx_timer_val;
        u32 val;

        /* We read back the transmit timer and FC threshold value of
         * LFC. Thus, we will use index =
         * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
         *
         * Also, because we are operating on the transmit timer and FC
         * threshold of LFC, we don't turn on any bit in tx_tmr_priority.
         */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

        /* Retrieve the transmit timer */
        val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
        tx_timer_val = val &
                PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
        cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

        /* Retrieve the FC threshold */
        val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
        fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

        cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @auto_drop: Tell HW to drop packets if TC queue is blocked
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
                   struct ice_sq_cd *cd)
{
        struct ice_aqc_set_mac_cfg *cmd;
        struct ice_aq_desc desc;

        cmd = &desc.params.set_mac_cfg;

        if (max_frame_size == 0)
                return ICE_ERR_PARAM;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

        cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

        if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
                cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
        ice_fill_tx_timer_and_fc_thresh(hw, cmd);

        return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
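
/* Example (this is the call made later in ice_init_hw() to enable jumbo
 * frame support at the MAC level):
 *
 *      status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
 *                                  NULL);
 */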

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
        struct ice_switch_info *sw;
        enum ice_status status;

        hw->switch_info = (struct ice_switch_info *)
                ice_malloc(hw, sizeof(*hw->switch_info));

        sw = hw->switch_info;

        if (!sw)
                return ICE_ERR_NO_MEMORY;

        INIT_LIST_HEAD(&sw->vsi_list_map_head);
        sw->prof_res_bm_init = 0;

        status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
        if (status) {
                ice_free(hw, hw->switch_info);
                return status;
        }
        return ICE_SUCCESS;
}

/**
 * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
        struct ice_vsi_list_map_info *v_pos_map;
        struct ice_vsi_list_map_info *v_tmp_map;
        struct ice_sw_recipe *recps;
        u8 i;

        if (!sw)
                return;

        LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
                                 ice_vsi_list_map_info, list_entry) {
                LIST_DEL(&v_pos_map->list_entry);
                ice_free(hw, v_pos_map);
        }
        recps = sw->recp_list;
        for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
                struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

                recps[i].root_rid = i;
                LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
                                         &recps[i].rg_list, ice_recp_grp_entry,
                                         l_entry) {
                        LIST_DEL(&rg_entry->l_entry);
                        ice_free(hw, rg_entry);
                }

                if (recps[i].adv_rule) {
                        struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
                        struct ice_adv_fltr_mgmt_list_entry *lst_itr;

                        ice_destroy_lock(&recps[i].filt_rule_lock);
                        LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
                                                 &recps[i].filt_rules,
                                                 ice_adv_fltr_mgmt_list_entry,
                                                 list_entry) {
                                LIST_DEL(&lst_itr->list_entry);
                                ice_free(hw, lst_itr->lkups);
                                ice_free(hw, lst_itr);
                        }
                } else {
                        struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

                        ice_destroy_lock(&recps[i].filt_rule_lock);
                        LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
                                                 &recps[i].filt_rules,
                                                 ice_fltr_mgmt_list_entry,
                                                 list_entry) {
                                LIST_DEL(&lst_itr->list_entry);
                                ice_free(hw, lst_itr);
                        }
                }
                if (recps[i].root_buf)
                        ice_free(hw, recps[i].root_buf);
        }
        ice_rm_sw_replay_rule_info(hw, sw);
        ice_free(hw, sw->recp_list);
        ice_free(hw, sw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
        ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
        u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
                         GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
                        GL_PWR_MODE_CTL_CAR_MAX_BW_S;

        switch (max_agg_bw) {
        case ICE_MAX_AGG_BW_200G:
        case ICE_MAX_AGG_BW_100G:
        case ICE_MAX_AGG_BW_50G:
                hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
                hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
                break;
        case ICE_MAX_AGG_BW_25G:
                hw->itr_gran = ICE_ITR_GRAN_MAX_25;
                hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
                break;
        }
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
        char nvm_str[ICE_NVM_VER_LEN] = { 0 };
        struct ice_orom_info *orom;
        struct ice_nvm_info *nvm;

        orom = &hw->flash.orom;
        nvm = &hw->flash.nvm;

        SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
                 nvm->major, nvm->minor, nvm->eetrack, orom->major,
                 orom->build, orom->patch);
        ice_warn(hw,
                 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
                 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_set_umac_shared
 * @hw: pointer to the hw struct
 *
 * Set boolean flag to allow unicast MAC sharing
 */
void ice_set_umac_shared(struct ice_hw *hw)
{
        hw->umac_shared = true;
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
        struct ice_aqc_get_phy_caps_data *pcaps;
        enum ice_status status;
        u16 mac_buf_len;
        void *mac_buf;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        /* Set MAC type based on DeviceID */
        status = ice_set_mac_type(hw);
        if (status)
                return status;

        hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
                         PF_FUNC_RID_FUNCTION_NUMBER_M) >>
                PF_FUNC_RID_FUNCTION_NUMBER_S;

        status = ice_reset(hw, ICE_RESET_PFR);
        if (status)
                return status;
        ice_get_itr_intrl_gran(hw);

        status = ice_create_all_ctrlq(hw);
        if (status)
                goto err_unroll_cqinit;

        ice_fwlog_set_support_ena(hw);
        status = ice_fwlog_set(hw, &hw->fwlog_cfg);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
                          status);
        } else {
                if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
                        status = ice_fwlog_register(hw);
                        if (status)
                                ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
                                          status);
                } else {
                        status = ice_fwlog_unregister(hw);
                        if (status)
                                ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
                                          status);
                }
        }

        status = ice_init_nvm(hw);
        if (status)
                goto err_unroll_cqinit;

        if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
                ice_print_rollback_msg(hw);

        status = ice_clear_pf_cfg(hw);
        if (status)
                goto err_unroll_cqinit;

        ice_clear_pxe_mode(hw);

        status = ice_get_caps(hw);
        if (status)
                goto err_unroll_cqinit;

        hw->port_info = (struct ice_port_info *)
                ice_malloc(hw, sizeof(*hw->port_info));
        if (!hw->port_info) {
                status = ICE_ERR_NO_MEMORY;
                goto err_unroll_cqinit;
        }

        /* set the back pointer to HW */
        hw->port_info->hw = hw;

        /* Initialize port_info struct with switch configuration data */
        status = ice_get_initial_sw_cfg(hw);
        if (status)
                goto err_unroll_alloc;

        hw->evb_veb = true;
        /* Query the allocated resources for Tx scheduler */
        status = ice_sched_query_res_alloc(hw);
        if (status) {
                ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
                goto err_unroll_alloc;
        }
        ice_sched_get_psm_clk_freq(hw);

        /* Initialize port_info struct with scheduler data */
        status = ice_sched_init_port(hw->port_info);
        if (status)
                goto err_unroll_sched;
        pcaps = (struct ice_aqc_get_phy_caps_data *)
                ice_malloc(hw, sizeof(*pcaps));
        if (!pcaps) {
                status = ICE_ERR_NO_MEMORY;
                goto err_unroll_sched;
        }

        /* Initialize port_info struct with PHY capabilities */
        status = ice_aq_get_phy_caps(hw->port_info, false,
                                     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
        ice_free(hw, pcaps);
        if (status)
                ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
                         status);

        /* Initialize port_info struct with link information */
        status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
        if (status)
                goto err_unroll_sched;
        /* need a valid SW entry point to build a Tx tree */
        if (!hw->sw_entry_point_layer) {
                ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
                status = ICE_ERR_CFG;
                goto err_unroll_sched;
        }
        INIT_LIST_HEAD(&hw->agg_list);
        /* Initialize max burst size */
        if (!hw->max_burst_size)
                ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
        status = ice_init_fltr_mgmt_struct(hw);
        if (status)
                goto err_unroll_sched;

        /* Get MAC information */

        /* A single port can report up to two (LAN and WoL) addresses */
        mac_buf = ice_calloc(hw, 2,
                             sizeof(struct ice_aqc_manage_mac_read_resp));
        mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

        if (!mac_buf) {
                status = ICE_ERR_NO_MEMORY;
                goto err_unroll_fltr_mgmt_struct;
        }

        status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
        ice_free(hw, mac_buf);

        if (status)
                goto err_unroll_fltr_mgmt_struct;

        /* enable jumbo frame support at MAC level */
        status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
                                    NULL);
        if (status)
                goto err_unroll_fltr_mgmt_struct;

        status = ice_init_hw_tbls(hw);
        if (status)
                goto err_unroll_fltr_mgmt_struct;
        ice_init_lock(&hw->tnl_lock);

        return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
        ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
        ice_sched_cleanup_all(hw);
err_unroll_alloc:
        ice_free(hw, hw->port_info);
        hw->port_info = NULL;
err_unroll_cqinit:
        ice_destroy_all_ctrlq(hw);
        return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
        ice_cleanup_fltr_mgmt_struct(hw);

        ice_sched_cleanup_all(hw);
        ice_sched_clear_agg(hw);
        ice_free_seg(hw);
        ice_free_hw_tbls(hw);
        ice_destroy_lock(&hw->tnl_lock);

        if (hw->port_info) {
                ice_free(hw, hw->port_info);
                hw->port_info = NULL;
        }

        ice_destroy_all_ctrlq(hw);

        /* Clear VSI contexts if not already cleared */
        ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
        u32 cnt, reg = 0, grst_timeout, uld_mask;

        /* Poll for Device Active state in case a recent CORER, GLOBR,
         * or EMPR has occurred. The grst delay value is in 100ms units.
         * Add 1sec for outstanding AQ commands that can take a long time.
         */
        grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
                        GLGEN_RSTCTL_GRSTDEL_S) + 10;

        for (cnt = 0; cnt < grst_timeout; cnt++) {
                ice_msec_delay(100, true);
                reg = rd32(hw, GLGEN_RSTAT);
                if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
                        break;
        }

        if (cnt == grst_timeout) {
                ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
                return ICE_ERR_RESET_FAILED;
        }

#define ICE_RESET_DONE_MASK     (GLNVM_ULD_PCIER_DONE_M |\
                                 GLNVM_ULD_PCIER_DONE_1_M |\
                                 GLNVM_ULD_CORER_DONE_M |\
                                 GLNVM_ULD_GLOBR_DONE_M |\
                                 GLNVM_ULD_POR_DONE_M |\
                                 GLNVM_ULD_POR_DONE_1_M |\
                                 GLNVM_ULD_PCIER_DONE_2_M)

        uld_mask = ICE_RESET_DONE_MASK;

        /* Device is Active; check Global Reset processes are done */
        for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
                reg = rd32(hw, GLNVM_ULD) & uld_mask;
                if (reg == uld_mask) {
                        ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
                        break;
                }
                ice_msec_delay(10, true);
        }

        if (cnt == ICE_PF_RESET_WAIT_COUNT) {
                ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
                          reg);
                return ICE_ERR_RESET_FAILED;
        }

        return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
        u32 cnt, reg;

        /* If at function entry a global reset was already in progress, i.e.
         * state is not 'device active' or any of the reset done bits are not
         * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
         * global reset is done.
         */
        if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
            (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
                /* poll on global reset currently in progress until done */
                if (ice_check_reset(hw))
                        return ICE_ERR_RESET_FAILED;

                return ICE_SUCCESS;
        }

        /* Reset the PF */
        reg = rd32(hw, PFGEN_CTRL);

        wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

        /* Wait for the PFR to complete. The wait time is the global config
         * lock timeout plus the PFR timeout, which accounts for a possible
         * reset occurring during a download package operation.
         */
        for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
             ICE_PF_RESET_WAIT_COUNT; cnt++) {
                reg = rd32(hw, PFGEN_CTRL);
                if (!(reg & PFGEN_CTRL_PFSWR_M))
                        break;

                ice_msec_delay(1, true);
        }

        /* The timeout check must use the same bound as the loop above */
        if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
                ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
                return ICE_ERR_RESET_FAILED;
        }

        return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
        u32 val = 0;

        switch (req) {
        case ICE_RESET_PFR:
                return ice_pf_reset(hw);
        case ICE_RESET_CORER:
                ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
                val = GLGEN_RTRIG_CORER_M;
                break;
        case ICE_RESET_GLOBR:
                ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
                val = GLGEN_RTRIG_GLOBR_M;
                break;
        default:
                return ICE_ERR_PARAM;
        }

        val |= rd32(hw, GLGEN_RTRIG);
        wr32(hw, GLGEN_RTRIG, val);
        ice_flush(hw);

        /* wait for the FW to be ready */
        return ice_check_reset(hw);
}
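
/* Example: ice_init_hw() earlier in this file triggers a PF-level reset
 * with:
 *
 *      status = ice_reset(hw, ICE_RESET_PFR);
 */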

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
        u8 i;

        if (!ice_rxq_ctx)
                return ICE_ERR_BAD_PTR;

        if (rxq_index > QRX_CTRL_MAX_INDEX)
                return ICE_ERR_PARAM;

        /* Copy each dword separately to HW */
        for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
                wr32(hw, QRX_CONTEXT(i, rxq_index),
                     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

                ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
                          *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
        }

        return ICE_SUCCESS;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
        /* Field                        Width   LSB */
        ICE_CTX_STORE(ice_rlan_ctx, head,               13,     0),
        ICE_CTX_STORE(ice_rlan_ctx, cpuid,              8,      13),
        ICE_CTX_STORE(ice_rlan_ctx, base,               57,     32),
        ICE_CTX_STORE(ice_rlan_ctx, qlen,               13,     89),
        ICE_CTX_STORE(ice_rlan_ctx, dbuf,               7,      102),
        ICE_CTX_STORE(ice_rlan_ctx, hbuf,               5,      109),
        ICE_CTX_STORE(ice_rlan_ctx, dtype,              2,      114),
        ICE_CTX_STORE(ice_rlan_ctx, dsize,              1,      116),
        ICE_CTX_STORE(ice_rlan_ctx, crcstrip,           1,      117),
        ICE_CTX_STORE(ice_rlan_ctx, l2tsel,             1,      119),
        ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,           4,      120),
        ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,           2,      124),
        ICE_CTX_STORE(ice_rlan_ctx, showiv,             1,      127),
        ICE_CTX_STORE(ice_rlan_ctx, rxmax,              14,     174),
        ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,       1,      193),
        ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,       1,      194),
        ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,        1,      195),
        ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,        1,      196),
        ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,         3,      198),
        ICE_CTX_STORE(ice_rlan_ctx, prefena,            1,      201),
        { 0 }
};
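
/* Each ICE_CTX_STORE entry above records the bit width and LSB position of
 * one sparse-struct field inside the packed hardware context; ice_set_ctx()
 * consumes such a table to pack the host-order struct into the dense buffer
 * that is then copied to HW, as in ice_write_rxq_ctx() below.
 */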

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
                  u32 rxq_index)
{
        u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

        if (!rlan_ctx)
                return ICE_ERR_BAD_PTR;

        rlan_ctx->prefena = 1;

        ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
        return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
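
/* Minimal caller sketch (illustrative; ring_dma, ring_len and rxq_index are
 * hypothetical caller-provided values, and the 128-byte base-address scaling
 * is an assumption not taken from this file):
 *
 *      struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *      rlan_ctx.base = ring_dma >> 7;  (descriptor base in 128-byte units)
 *      rlan_ctx.qlen = ring_len;
 *      status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */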

/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
        u8 i;

        if (rxq_index > QRX_CTRL_MAX_INDEX)
                return ICE_ERR_PARAM;

        /* Clear each dword register separately */
        for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
                wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

        return ICE_SUCCESS;
}

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
        /* Field                        Width   LSB */
        ICE_CTX_STORE(ice_tlan_ctx, base,                       57,     0),
        ICE_CTX_STORE(ice_tlan_ctx, port_num,                   3,      57),
        ICE_CTX_STORE(ice_tlan_ctx, cgd_num,                    5,      60),
        ICE_CTX_STORE(ice_tlan_ctx, pf_num,                     3,      65),
        ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,                   10,     68),
        ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,                  2,      78),
        ICE_CTX_STORE(ice_tlan_ctx, src_vsi,                    10,     80),
        ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,                   1,      90),
        ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,        1,      91),
        ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,                   1,      92),
        ICE_CTX_STORE(ice_tlan_ctx, cpuid,                      8,      93),
        ICE_CTX_STORE(ice_tlan_ctx, wb_mode,                    1,      101),
        ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,                 1,      102),
        ICE_CTX_STORE(ice_tlan_ctx, tphrd,                      1,      103),
        ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,                 1,      104),
        ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,                    9,      105),
        ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,               14,     114),
        ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,      1,      128),
        ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,             6,      129),
        ICE_CTX_STORE(ice_tlan_ctx, qlen,                       13,     135),
        ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,            4,      148),
        ICE_CTX_STORE(ice_tlan_ctx, tso_ena,                    1,      152),
        ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,                   11,     153),
        ICE_CTX_STORE(ice_tlan_ctx, legacy_int,                 1,      164),
        ICE_CTX_STORE(ice_tlan_ctx, drop_ena,                   1,      165),
        ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,             2,      166),
        ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,        3,      168),
        ICE_CTX_STORE(ice_tlan_ctx, int_q_state,                122,    171),
        { 0 }
};

/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
                              u32 tx_cmpltnq_index)
{
        u8 i;

        if (!ice_tx_cmpltnq_ctx)
                return ICE_ERR_BAD_PTR;

        if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
                return ICE_ERR_PARAM;

        /* Copy each dword separately to HW */
        for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
                wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
                     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

                ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
                          *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
        }

        return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
        /* Field                        Width   LSB */
        ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,                 57,     0),
        ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,                18,     64),
        ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,           1,      96),
        ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,              22,     97),
        ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,               3,      128),
        ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,             10,     131),
        ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,            2,      141),
        ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,          1,      160),
        ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,                8,      161),
        ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,         512,    192),
        { 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
                         struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
                         u32 tx_cmpltnq_index)
{
        u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

        ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
        return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
        u8 i;

        if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
                return ICE_ERR_PARAM;

        /* Clear each dword register separately */
        for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
                wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

        return ICE_SUCCESS;
}
1538
1539 /**
1540 * ice_copy_tx_drbell_q_ctx_to_hw
1541 * @hw: pointer to the hardware structure
1542 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1543 * @tx_drbell_q_index: the index of the doorbell queue
1544 *
1545 * Copies doorbell queue context from dense structure to HW register space
1546 */
1547 static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw * hw,u8 * ice_tx_drbell_q_ctx,u32 tx_drbell_q_index)1548 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1549 u32 tx_drbell_q_index)
1550 {
1551 u8 i;
1552
1553 if (!ice_tx_drbell_q_ctx)
1554 return ICE_ERR_BAD_PTR;
1555
1556 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1557 return ICE_ERR_PARAM;
1558
1559 /* Copy each dword separately to HW */
1560 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1561 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1562 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1563
1564 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1565 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1566 }
1567
1568 return ICE_SUCCESS;
1569 }
1570
1571 /* LAN Tx Doorbell Queue Context info */
1572 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1573 /* Field Width LSB */
1574 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1575 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1576 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1577 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1578 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1579 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1580 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1581 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1582 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1583 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1584 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1585 { 0 }
1586 };
1587
1588 /**
1589 * ice_write_tx_drbell_q_ctx
1590 * @hw: pointer to the hardware structure
1591 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1592 * @tx_drbell_q_index: the index of the doorbell queue
1593 *
1594 * Converts doorbell queue context from sparse to dense structure and then
1595 * writes it to HW register space
1596 */
1597 enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw * hw,struct ice_tx_drbell_q_ctx * tx_drbell_q_ctx,u32 tx_drbell_q_index)1598 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1599 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1600 u32 tx_drbell_q_index)
1601 {
1602 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1603
1604 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1605 ice_tx_drbell_q_ctx_info);
1606 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1607 }
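
/*
 * Illustrative sketch, not part of the driver: how a caller might fill
 * and write a doorbell queue context. All field values are hypothetical;
 * ice_set_ctx() packs each field of the sparse structure into the dense
 * buffer at the Width/LSB positions listed in ice_tx_drbell_q_ctx_info
 * above (e.g. ring_len occupies 13 bits starting at bit 64).
 *
 *	struct ice_tx_drbell_q_ctx ctx = { 0 };
 *	enum ice_status status;
 *
 *	ctx.ring_len = 256;
 *	ctx.db_q_en = 1;
 *	status = ice_write_tx_drbell_q_ctx(hw, &ctx, q_index);
 */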
1608
1609 /**
1610 * ice_clear_tx_drbell_q_ctx
1611 * @hw: pointer to the hardware structure
1612 * @tx_drbell_q_index: the index of the doorbell queue to clear
1613 *
1614 * Clears doorbell queue context in HW register space
1615 */
1616 enum ice_status
1617 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1618 {
1619 u8 i;
1620
1621 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1622 return ICE_ERR_PARAM;
1623
1624 /* Clear each dword register separately */
1625 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1626 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1627
1628 return ICE_SUCCESS;
1629 }
1630
1631 /* FW Admin Queue command wrappers */
1632
1633 /**
1634 * ice_should_retry_sq_send_cmd
1635 * @opcode: AQ opcode
1636 *
1637 * Decide if we should retry the send command routine for the ATQ, depending
1638 * on the opcode.
1639 */
1640 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1641 {
1642 switch (opcode) {
1643 case ice_aqc_opc_dnl_get_status:
1644 case ice_aqc_opc_dnl_run:
1645 case ice_aqc_opc_dnl_call:
1646 case ice_aqc_opc_dnl_read_sto:
1647 case ice_aqc_opc_dnl_write_sto:
1648 case ice_aqc_opc_dnl_set_breakpoints:
1649 case ice_aqc_opc_dnl_read_log:
1650 case ice_aqc_opc_get_link_topo:
1651 case ice_aqc_opc_done_alt_write:
1652 case ice_aqc_opc_lldp_stop:
1653 case ice_aqc_opc_lldp_start:
1654 case ice_aqc_opc_lldp_filter_ctrl:
1655 return true;
1656 }
1657
1658 return false;
1659 }
1660
1661 /**
1662 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1663 * @hw: pointer to the HW struct
1664 * @cq: pointer to the specific Control queue
1665 * @desc: prefilled descriptor describing the command
1666 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1667 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1668 * @cd: pointer to command details structure
1669 *
1670 * Retry sending the FW Admin Queue command multiple times if the
1671 * EBUSY AQ error is returned.
1672 */
1673 static enum ice_status
1674 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1675 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1676 struct ice_sq_cd *cd)
1677 {
1678 struct ice_aq_desc desc_cpy;
1679 enum ice_status status;
1680 bool is_cmd_for_retry;
1681 u8 *buf_cpy = NULL;
1682 u8 idx = 0;
1683 u16 opcode;
1684
1685 opcode = LE16_TO_CPU(desc->opcode);
1686 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1687 ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1688
1689 if (is_cmd_for_retry) {
1690 if (buf) {
1691 buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1692 if (!buf_cpy)
1693 return ICE_ERR_NO_MEMORY;
1694 }
1695
1696 ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1697 ICE_NONDMA_TO_NONDMA);
1698 }
1699
1700 do {
1701 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1702
1703 if (!is_cmd_for_retry || status == ICE_SUCCESS ||
1704 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1705 break;
1706
1707 if (buf_cpy)
1708 ice_memcpy(buf, buf_cpy, buf_size,
1709 ICE_NONDMA_TO_NONDMA);
1710
1711 ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1712 ICE_NONDMA_TO_NONDMA);
1713
1714 ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1715
1716 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1717
1718 if (buf_cpy)
1719 ice_free(hw, buf_cpy);
1720
1721 return status;
1722 }
1723
1724 /**
1725 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1726 * @hw: pointer to the HW struct
1727 * @desc: descriptor describing the command
1728 * @buf: buffer to use for indirect commands (NULL for direct commands)
1729 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1730 * @cd: pointer to command details structure
1731 *
1732 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1733 */
1734 enum ice_status
1735 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1736 u16 buf_size, struct ice_sq_cd *cd)
1737 {
1738 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1739 }
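
/*
 * Illustrative sketch, not part of the driver: the common pattern for a
 * direct (no-buffer) admin queue command, which the wrappers below all
 * follow. The opcode shown is the real get-version opcode; any other
 * direct opcode works the same way.
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */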
1740
1741 /**
1742 * ice_aq_get_fw_ver
1743 * @hw: pointer to the HW struct
1744 * @cd: pointer to command details structure or NULL
1745 *
1746 * Get the firmware version (0x0001) from the admin queue commands
1747 */
1748 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1749 {
1750 struct ice_aqc_get_ver *resp;
1751 struct ice_aq_desc desc;
1752 enum ice_status status;
1753
1754 resp = &desc.params.get_ver;
1755
1756 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1757
1758 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1759
1760 if (!status) {
1761 hw->fw_branch = resp->fw_branch;
1762 hw->fw_maj_ver = resp->fw_major;
1763 hw->fw_min_ver = resp->fw_minor;
1764 hw->fw_patch = resp->fw_patch;
1765 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1766 hw->api_branch = resp->api_branch;
1767 hw->api_maj_ver = resp->api_major;
1768 hw->api_min_ver = resp->api_minor;
1769 hw->api_patch = resp->api_patch;
1770 }
1771
1772 return status;
1773 }
1774
1775 /**
1776 * ice_aq_send_driver_ver
1777 * @hw: pointer to the HW struct
1778 * @dv: driver's major, minor version
1779 * @cd: pointer to command details structure or NULL
1780 *
1781 * Send the driver version (0x0002) to the firmware
1782 */
1783 enum ice_status
1784 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1785 struct ice_sq_cd *cd)
1786 {
1787 struct ice_aqc_driver_ver *cmd;
1788 struct ice_aq_desc desc;
1789 u16 len;
1790
1791 cmd = &desc.params.driver_ver;
1792
1793 if (!dv)
1794 return ICE_ERR_PARAM;
1795
1796 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1797
1798 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1799 cmd->major_ver = dv->major_ver;
1800 cmd->minor_ver = dv->minor_ver;
1801 cmd->build_ver = dv->build_ver;
1802 cmd->subbuild_ver = dv->subbuild_ver;
1803
1804 len = 0;
1805 while (len < sizeof(dv->driver_string) &&
1806 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1807 len++;
1808
1809 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1810 }
1811
1812 /**
1813 * ice_aq_q_shutdown
1814 * @hw: pointer to the HW struct
1815 * @unloading: is the driver unloading itself
1816 *
1817 * Tell the Firmware that we're shutting down the AdminQ and whether
1818 * or not the driver is unloading as well (0x0003).
1819 */
1820 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1821 {
1822 struct ice_aqc_q_shutdown *cmd;
1823 struct ice_aq_desc desc;
1824
1825 cmd = &desc.params.q_shutdown;
1826
1827 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1828
1829 if (unloading)
1830 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1831
1832 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1833 }
1834
1835 /**
1836 * ice_aq_req_res
1837 * @hw: pointer to the HW struct
1838 * @res: resource ID
1839 * @access: access type
1840 * @sdp_number: resource number
1841 * @timeout: the maximum time in ms that the driver may hold the resource
1842 * @cd: pointer to command details structure or NULL
1843 *
1844 * Requests common resource using the admin queue commands (0x0008).
1845 * When attempting to acquire the Global Config Lock, the driver can
1846 * learn of three states:
1847 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1848 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1849 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1850 * successfully downloaded the package; the driver does
1851 * not have to download the package and can continue
1852 * loading
1853 *
1854 * Note that if the caller is in an acquire lock, perform action, release lock
1855 * phase of operation, it is possible that the FW may detect a timeout and issue
1856 * a CORER. In this case, the driver will receive a CORER interrupt and will
1857 * have to determine its cause. The calling thread that is handling this flow
1858 * will likely get an error propagated back to it indicating the Download
1859 * Package, Update Package or the Release Resource AQ commands timed out.
1860 */
1861 static enum ice_status
1862 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1863 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1864 struct ice_sq_cd *cd)
1865 {
1866 struct ice_aqc_req_res *cmd_resp;
1867 struct ice_aq_desc desc;
1868 enum ice_status status;
1869
1870 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1871
1872 cmd_resp = &desc.params.res_owner;
1873
1874 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1875
1876 cmd_resp->res_id = CPU_TO_LE16(res);
1877 cmd_resp->access_type = CPU_TO_LE16(access);
1878 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1879 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1880 *timeout = 0;
1881
1882 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1883
1884 /* The completion specifies the maximum time in ms that the driver
1885 * may hold the resource in the Timeout field.
1886 */
1887
1888 /* Global config lock response utilizes an additional status field.
1889 *
1890 * If the Global config lock resource is held by some other driver, the
1891 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1892 * and the timeout field indicates the maximum time the current owner
1893 * of the resource has to free it.
1894 */
1895 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1896 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1897 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1898 return ICE_SUCCESS;
1899 } else if (LE16_TO_CPU(cmd_resp->status) ==
1900 ICE_AQ_RES_GLBL_IN_PROG) {
1901 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1902 return ICE_ERR_AQ_ERROR;
1903 } else if (LE16_TO_CPU(cmd_resp->status) ==
1904 ICE_AQ_RES_GLBL_DONE) {
1905 return ICE_ERR_AQ_NO_WORK;
1906 }
1907
1908 /* invalid FW response, force a timeout immediately */
1909 *timeout = 0;
1910 return ICE_ERR_AQ_ERROR;
1911 }
1912
1913 /* If the resource is held by some other driver, the command completes
1914 * with a busy return value and the timeout field indicates the maximum
1915 * time the current owner of the resource has to free it.
1916 */
1917 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1918 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1919
1920 return status;
1921 }
1922
1923 /**
1924 * ice_aq_release_res
1925 * @hw: pointer to the HW struct
1926 * @res: resource ID
1927 * @sdp_number: resource number
1928 * @cd: pointer to command details structure or NULL
1929 *
1930 * release common resource using the admin queue commands (0x0009)
1931 */
1932 static enum ice_status
1933 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1934 struct ice_sq_cd *cd)
1935 {
1936 struct ice_aqc_req_res *cmd;
1937 struct ice_aq_desc desc;
1938
1939 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1940
1941 cmd = &desc.params.res_owner;
1942
1943 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1944
1945 cmd->res_id = CPU_TO_LE16(res);
1946 cmd->res_number = CPU_TO_LE32(sdp_number);
1947
1948 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1949 }
1950
1951 /**
1952 * ice_acquire_res
1953 * @hw: pointer to the HW structure
1954 * @res: resource ID
1955 * @access: access type (read or write)
1956 * @timeout: timeout in milliseconds
1957 *
1958 * This function will attempt to acquire the ownership of a resource.
1959 */
1960 enum ice_status
1961 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1962 enum ice_aq_res_access_type access, u32 timeout)
1963 {
1964 #define ICE_RES_POLLING_DELAY_MS 10
1965 u32 delay = ICE_RES_POLLING_DELAY_MS;
1966 u32 time_left = timeout;
1967 enum ice_status status;
1968
1969 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1970
1971 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1972
1973 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1974 * previously acquired the resource and performed any necessary updates;
1975 * in this case the caller does not obtain the resource and has no
1976 * further work to do.
1977 */
1978 if (status == ICE_ERR_AQ_NO_WORK)
1979 goto ice_acquire_res_exit;
1980
1981 if (status)
1982 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1983
1984 /* If necessary, poll until the current lock owner times out */
1985 timeout = time_left;
1986 while (status && timeout && time_left) {
1987 ice_msec_delay(delay, true);
1988 timeout = (timeout > delay) ? timeout - delay : 0;
1989 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1990
1991 if (status == ICE_ERR_AQ_NO_WORK)
1992 /* lock free, but no work to do */
1993 break;
1994
1995 if (!status)
1996 /* lock acquired */
1997 break;
1998 }
1999 if (status && status != ICE_ERR_AQ_NO_WORK)
2000 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2001
2002 ice_acquire_res_exit:
2003 if (status == ICE_ERR_AQ_NO_WORK) {
2004 if (access == ICE_RES_WRITE)
2005 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2006 else
2007 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
2008 }
2009 return status;
2010 }
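
/*
 * Illustrative sketch, not part of the driver: the acquire/use/release
 * pattern callers are expected to follow. The resource ID is a real one
 * (the NVM lock); the 3000 ms timeout is an arbitrary example value.
 *
 *	if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000)) {
 *		(access the NVM here)
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */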
2011
2012 /**
2013 * ice_release_res
2014 * @hw: pointer to the HW structure
2015 * @res: resource ID
2016 *
2017 * This function will release a resource using the proper Admin Command.
2018 */
2019 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2020 {
2021 enum ice_status status;
2022 u32 total_delay = 0;
2023
2024 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2025
2026 status = ice_aq_release_res(hw, res, 0, NULL);
2027
2028 /* There are rare cases where trying to release the resource results in
2029 * an admin queue timeout; retry until it succeeds or the timeout elapses
2030 */
2031 while ((status == ICE_ERR_AQ_TIMEOUT) &&
2032 (total_delay < hw->adminq.sq_cmd_timeout)) {
2033 ice_msec_delay(1, true);
2034 status = ice_aq_release_res(hw, res, 0, NULL);
2035 total_delay++;
2036 }
2037 }
2038
2039 /**
2040 * ice_aq_alloc_free_res - command to allocate/free resources
2041 * @hw: pointer to the HW struct
2042 * @num_entries: number of resource entries in buffer
2043 * @buf: Indirect buffer to hold data parameters and response
2044 * @buf_size: size of buffer for indirect commands
2045 * @opc: pass in the command opcode
2046 * @cd: pointer to command details structure or NULL
2047 *
2048 * Helper function to allocate/free resources using the admin queue commands
2049 */
2050 enum ice_status
2051 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
2052 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2053 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2054 {
2055 struct ice_aqc_alloc_free_res_cmd *cmd;
2056 struct ice_aq_desc desc;
2057
2058 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2059
2060 cmd = &desc.params.sw_res_ctrl;
2061
2062 if (!buf)
2063 return ICE_ERR_PARAM;
2064
2065 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
2066 return ICE_ERR_PARAM;
2067
2068 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2069
2070 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2071
2072 cmd->num_entries = CPU_TO_LE16(num_entries);
2073
2074 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2075 }
2076
2077 /**
2078 * ice_alloc_hw_res - allocate resource
2079 * @hw: pointer to the HW struct
2080 * @type: type of resource
2081 * @num: number of resources to allocate
2082 * @btm: allocate from bottom
2083 * @res: pointer to array that will receive the resources
2084 */
2085 enum ice_status
2086 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2087 {
2088 struct ice_aqc_alloc_free_res_elem *buf;
2089 enum ice_status status;
2090 u16 buf_len;
2091
2092 buf_len = ice_struct_size(buf, elem, num);
2093 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2094 if (!buf)
2095 return ICE_ERR_NO_MEMORY;
2096
2097 /* Prepare buffer to allocate resource. */
2098 buf->num_elems = CPU_TO_LE16(num);
2099 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2100 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2101 if (btm)
2102 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2103
2104 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2105 ice_aqc_opc_alloc_res, NULL);
2106 if (status)
2107 goto ice_alloc_res_exit;
2108
2109 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
2110 ICE_NONDMA_TO_NONDMA);
2111
2112 ice_alloc_res_exit:
2113 ice_free(hw, buf);
2114 return status;
2115 }
2116
2117 /**
2118 * ice_free_hw_res - free allocated HW resource
2119 * @hw: pointer to the HW struct
2120 * @type: type of resource to free
2121 * @num: number of resources
2122 * @res: pointer to array that contains the resources to free
2123 */
2124 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2125 {
2126 struct ice_aqc_alloc_free_res_elem *buf;
2127 enum ice_status status;
2128 u16 buf_len;
2129
2130 buf_len = ice_struct_size(buf, elem, num);
2131 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2132 if (!buf)
2133 return ICE_ERR_NO_MEMORY;
2134
2135 /* Prepare buffer to free resource. */
2136 buf->num_elems = CPU_TO_LE16(num);
2137 buf->res_type = CPU_TO_LE16(type);
2138 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2139 ICE_NONDMA_TO_NONDMA);
2140
2141 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2142 ice_aqc_opc_free_res, NULL);
2143 if (status)
2144 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2145
2146 ice_free(hw, buf);
2147 return status;
2148 }
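
/*
 * Illustrative sketch, not part of the driver: allocating two dedicated
 * resources of a given type and freeing them again. The res_type value
 * is left symbolic here, since the valid types depend on the device.
 *
 *	u16 res_ids[2];
 *
 *	if (!ice_alloc_hw_res(hw, res_type, 2, false, res_ids)) {
 *		(use res_ids[0] and res_ids[1], then)
 *		ice_free_hw_res(hw, res_type, 2, res_ids);
 *	}
 */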
2149
2150 /**
2151 * ice_get_num_per_func - determine number of resources per PF
2152 * @hw: pointer to the HW structure
2153 * @max: value to be evenly split between each PF
2154 *
2155 * Determine the number of valid functions by going through the bitmap returned
2156 * from parsing capabilities and use this to calculate the number of resources
2157 * per PF based on the max value passed in.
2158 */
2159 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2160 {
2161 u8 funcs;
2162
2163 #define ICE_CAPS_VALID_FUNCS_M 0xFF
2164 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2165 ICE_CAPS_VALID_FUNCS_M);
2166
2167 if (!funcs)
2168 return 0;
2169
2170 return max / funcs;
2171 }
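
/*
 * Worked example: with a valid_functions bitmap of 0x0F (four bits set,
 * i.e. four active PFs), this function returns max / 4.
 */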
2172
2173 /**
2174 * ice_print_led_caps - print LED capabilities
2175 * @hw: pointer to the ice_hw instance
2176 * @caps: pointer to common caps instance
2177 * @prefix: string to prefix when printing
2178 * @dbg: set to indicate debug print
2179 */
2180 static void
2181 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2182 char const *prefix, bool dbg)
2183 {
2184 u8 i;
2185
2186 if (dbg)
2187 ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
2188 caps->led_pin_num);
2189 else
2190 ice_info(hw, "%s: led_pin_num = %d\n", prefix,
2191 caps->led_pin_num);
2192
2193 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
2194 if (!caps->led[i])
2195 continue;
2196
2197 if (dbg)
2198 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
2199 prefix, i, caps->led[i]);
2200 else
2201 ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
2202 caps->led[i]);
2203 }
2204 }
2205
2206 /**
2207 * ice_print_sdp_caps - print SDP capabilities
2208 * @hw: pointer to the ice_hw instance
2209 * @caps: pointer to common caps instance
2210 * @prefix: string to prefix when printing
2211 * @dbg: set to indicate debug print
2212 */
2213 static void
2214 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2215 char const *prefix, bool dbg)
2216 {
2217 u8 i;
2218
2219 if (dbg)
2220 ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
2221 caps->sdp_pin_num);
2222 else
2223 ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
2224 caps->sdp_pin_num);
2225
2226 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
2227 if (!caps->sdp[i])
2228 continue;
2229
2230 if (dbg)
2231 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
2232 prefix, i, caps->sdp[i]);
2233 else
2234 ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
2235 i, caps->sdp[i]);
2236 }
2237 }
2238
2239 /**
2240 * ice_parse_common_caps - parse common device/function capabilities
2241 * @hw: pointer to the HW struct
2242 * @caps: pointer to common capabilities structure
2243 * @elem: the capability element to parse
2244 * @prefix: message prefix for tracing capabilities
2245 *
2246 * Given a capability element, extract relevant details into the common
2247 * capability structure.
2248 *
2249 * Returns: true if the capability matches one of the common capability ids,
2250 * false otherwise.
2251 */
2252 static bool
2253 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2254 struct ice_aqc_list_caps_elem *elem, const char *prefix)
2255 {
2256 u32 logical_id = LE32_TO_CPU(elem->logical_id);
2257 u32 phys_id = LE32_TO_CPU(elem->phys_id);
2258 u32 number = LE32_TO_CPU(elem->number);
2259 u16 cap = LE16_TO_CPU(elem->cap);
2260 bool found = true;
2261
2262 switch (cap) {
2263 case ICE_AQC_CAPS_SWITCHING_MODE:
2264 caps->switching_mode = number;
2265 ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
2266 caps->switching_mode);
2267 break;
2268 case ICE_AQC_CAPS_MANAGEABILITY_MODE:
2269 caps->mgmt_mode = number;
2270 caps->mgmt_protocols_mctp = logical_id;
2271 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
2272 caps->mgmt_mode);
2273 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
2274 caps->mgmt_protocols_mctp);
2275 break;
2276 case ICE_AQC_CAPS_OS2BMC:
2277 caps->os2bmc = number;
2278 ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
2279 break;
2280 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2281 caps->valid_functions = number;
2282 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2283 caps->valid_functions);
2284 break;
2285 case ICE_AQC_CAPS_SRIOV:
2286 caps->sr_iov_1_1 = (number == 1);
2287 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2288 caps->sr_iov_1_1);
2289 break;
2290 case ICE_AQC_CAPS_802_1QBG:
2291 caps->evb_802_1_qbg = (number == 1);
2292 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
2293 break;
2294 case ICE_AQC_CAPS_802_1BR:
2295 caps->evb_802_1_qbh = (number == 1);
2296 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
2297 break;
2298 case ICE_AQC_CAPS_DCB:
2299 caps->dcb = (number == 1);
2300 caps->active_tc_bitmap = logical_id;
2301 caps->maxtc = phys_id;
2302 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2303 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2304 caps->active_tc_bitmap);
2305 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2306 break;
2307 case ICE_AQC_CAPS_ISCSI:
2308 caps->iscsi = (number == 1);
2309 ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
2310 break;
2311 case ICE_AQC_CAPS_RSS:
2312 caps->rss_table_size = number;
2313 caps->rss_table_entry_width = logical_id;
2314 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2315 caps->rss_table_size);
2316 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2317 caps->rss_table_entry_width);
2318 break;
2319 case ICE_AQC_CAPS_RXQS:
2320 caps->num_rxq = number;
2321 caps->rxq_first_id = phys_id;
2322 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2323 caps->num_rxq);
2324 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2325 caps->rxq_first_id);
2326 break;
2327 case ICE_AQC_CAPS_TXQS:
2328 caps->num_txq = number;
2329 caps->txq_first_id = phys_id;
2330 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2331 caps->num_txq);
2332 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2333 caps->txq_first_id);
2334 break;
2335 case ICE_AQC_CAPS_MSIX:
2336 caps->num_msix_vectors = number;
2337 caps->msix_vector_first_id = phys_id;
2338 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2339 caps->num_msix_vectors);
2340 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2341 caps->msix_vector_first_id);
2342 break;
2343 case ICE_AQC_CAPS_NVM_VER:
2344 break;
2345 case ICE_AQC_CAPS_NVM_MGMT:
2346 caps->sec_rev_disabled =
2347 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2348 true : false;
2349 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2350 caps->sec_rev_disabled);
2351 caps->update_disabled =
2352 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2353 true : false;
2354 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2355 caps->update_disabled);
2356 caps->nvm_unified_update =
2357 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2358 true : false;
2359 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2360 caps->nvm_unified_update);
2361 break;
2362 case ICE_AQC_CAPS_CEM:
2363 caps->mgmt_cem = (number == 1);
2364 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
2365 caps->mgmt_cem);
2366 break;
2367 case ICE_AQC_CAPS_LED:
2368 if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
2369 caps->led[phys_id] = true;
2370 caps->led_pin_num++;
2371 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
2372 }
2373 break;
2374 case ICE_AQC_CAPS_SDP:
2375 if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2376 caps->sdp[phys_id] = true;
2377 caps->sdp_pin_num++;
2378 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
2379 }
2380 break;
2381 case ICE_AQC_CAPS_WR_CSR_PROT:
2382 caps->wr_csr_prot = number;
2383 caps->wr_csr_prot |= (u64)logical_id << 32;
2384 ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2385 (unsigned long long)caps->wr_csr_prot);
2386 break;
2387 case ICE_AQC_CAPS_WOL_PROXY:
2388 caps->num_wol_proxy_fltr = number;
2389 caps->wol_proxy_vsi_seid = logical_id;
2390 caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2391 caps->acpi_prog_mthd = !!(phys_id &
2392 ICE_ACPI_PROG_MTHD_M);
2393 caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2394 ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
2395 caps->num_wol_proxy_fltr);
2396 ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
2397 caps->wol_proxy_vsi_seid);
2398 ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n",
2399 prefix, caps->apm_wol_support);
2400 break;
2401 case ICE_AQC_CAPS_MAX_MTU:
2402 caps->max_mtu = number;
2403 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2404 prefix, caps->max_mtu);
2405 break;
2406 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2407 caps->pcie_reset_avoidance = (number > 0);
2408 ice_debug(hw, ICE_DBG_INIT,
2409 "%s: pcie_reset_avoidance = %d\n", prefix,
2410 caps->pcie_reset_avoidance);
2411 break;
2412 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2413 caps->reset_restrict_support = (number == 1);
2414 ice_debug(hw, ICE_DBG_INIT,
2415 "%s: reset_restrict_support = %d\n", prefix,
2416 caps->reset_restrict_support);
2417 break;
2418 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2419 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2420 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2421 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2422 {
2423 u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
2424
2425 caps->ext_topo_dev_img_ver_high[index] = number;
2426 caps->ext_topo_dev_img_ver_low[index] = logical_id;
2427 caps->ext_topo_dev_img_part_num[index] =
2428 (phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2429 ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2430 caps->ext_topo_dev_img_load_en[index] =
2431 (phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2432 caps->ext_topo_dev_img_prog_en[index] =
2433 (phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2434 ice_debug(hw, ICE_DBG_INIT,
2435 "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
2436 prefix, index,
2437 caps->ext_topo_dev_img_ver_high[index]);
2438 ice_debug(hw, ICE_DBG_INIT,
2439 "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
2440 prefix, index,
2441 caps->ext_topo_dev_img_ver_low[index]);
2442 ice_debug(hw, ICE_DBG_INIT,
2443 "%s: ext_topo_dev_img_part_num[%d] = %d\n",
2444 prefix, index,
2445 caps->ext_topo_dev_img_part_num[index]);
2446 ice_debug(hw, ICE_DBG_INIT,
2447 "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2448 prefix, index,
2449 caps->ext_topo_dev_img_load_en[index]);
2450 ice_debug(hw, ICE_DBG_INIT,
2451 "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2452 prefix, index,
2453 caps->ext_topo_dev_img_prog_en[index]);
2454 break;
2455 }
2456 default:
2457 /* Not one of the recognized common capabilities */
2458 found = false;
2459 }
2460
2461 return found;
2462 }
2463
2464 /**
2465 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2466 * @hw: pointer to the HW structure
2467 * @caps: pointer to capabilities structure to fix
2468 *
2469 * Re-calculate the capabilities that are dependent on the number of physical
2470 * ports; i.e. some features are not supported or function differently on
2471 * devices with more than 4 ports.
2472 */
2473 static void
2474 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2475 {
2476 /* This assumes device capabilities are always scanned before function
2477 * capabilities during the initialization flow.
2478 */
2479 if (hw->dev_caps.num_funcs > 4) {
2480 /* Max 4 TCs per port */
2481 caps->maxtc = 4;
2482 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2483 caps->maxtc);
2484 }
2485 }
2486
2487 /**
2488 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2489 * @hw: pointer to the HW struct
2490 * @func_p: pointer to function capabilities structure
2491 * @cap: pointer to the capability element to parse
2492 *
2493 * Extract function capabilities for ICE_AQC_CAPS_VF.
2494 */
2495 static void
2496 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2497 struct ice_aqc_list_caps_elem *cap)
2498 {
2499 u32 number = LE32_TO_CPU(cap->number);
2500 u32 logical_id = LE32_TO_CPU(cap->logical_id);
2501
2502 func_p->num_allocd_vfs = number;
2503 func_p->vf_base_id = logical_id;
2504 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2505 func_p->num_allocd_vfs);
2506 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2507 func_p->vf_base_id);
2508 }
2509
2510 /**
2511 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2512 * @hw: pointer to the HW struct
2513 * @func_p: pointer to function capabilities structure
2514 * @cap: pointer to the capability element to parse
2515 *
2516 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2517 */
2518 static void
2519 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2520 struct ice_aqc_list_caps_elem *cap)
2521 {
2522 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2523 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2524 LE32_TO_CPU(cap->number));
2525 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2526 func_p->guar_num_vsi);
2527 }
2528
2529 /**
2530 * ice_parse_func_caps - Parse function capabilities
2531 * @hw: pointer to the HW struct
2532 * @func_p: pointer to function capabilities structure
2533 * @buf: buffer containing the function capability records
2534 * @cap_count: the number of capabilities
2535 *
2536 * Helper function to parse function (0x000A) capabilities list. For
2537 * capabilities shared between device and function, this relies on
2538 * ice_parse_common_caps.
2539 *
2540 * Loop through the list of provided capabilities and extract the relevant
2541 * data into the function capabilities structure.
2542 */
2543 static void
2544 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2545 void *buf, u32 cap_count)
2546 {
2547 struct ice_aqc_list_caps_elem *cap_resp;
2548 u32 i;
2549
2550 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2551
2552 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2553
2554 for (i = 0; i < cap_count; i++) {
2555 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2556 bool found;
2557
2558 found = ice_parse_common_caps(hw, &func_p->common_cap,
2559 &cap_resp[i], "func caps");
2560
2561 switch (cap) {
2562 case ICE_AQC_CAPS_VF:
2563 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2564 break;
2565 case ICE_AQC_CAPS_VSI:
2566 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2567 break;
2568 default:
2569 /* Don't list common capabilities as unknown */
2570 if (!found)
2571 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2572 i, cap);
2573 break;
2574 }
2575 }
2576
2577 ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2578 ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2579
2580 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2581 }
2582
2583 /**
2584 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2585 * @hw: pointer to the HW struct
2586 * @dev_p: pointer to device capabilities structure
2587 * @cap: capability element to parse
2588 *
2589 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2590 */
2591 static void
2592 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2593 struct ice_aqc_list_caps_elem *cap)
2594 {
2595 u32 number = LE32_TO_CPU(cap->number);
2596
2597 dev_p->num_funcs = ice_hweight32(number);
2598 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2599 dev_p->num_funcs);
2600
2601 }
2602
2603 /**
2604 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2605 * @hw: pointer to the HW struct
2606 * @dev_p: pointer to device capabilities structure
2607 * @cap: capability element to parse
2608 *
2609 * Parse ICE_AQC_CAPS_VF for device capabilities.
2610 */
2611 static void
2612 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2613 struct ice_aqc_list_caps_elem *cap)
2614 {
2615 u32 number = LE32_TO_CPU(cap->number);
2616
2617 dev_p->num_vfs_exposed = number;
2618 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2619 dev_p->num_vfs_exposed);
2620 }
2621
2622 /**
2623 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2624 * @hw: pointer to the HW struct
2625 * @dev_p: pointer to device capabilities structure
2626 * @cap: capability element to parse
2627 *
2628 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2629 */
2630 static void
2631 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2632 struct ice_aqc_list_caps_elem *cap)
2633 {
2634 u32 number = LE32_TO_CPU(cap->number);
2635
2636 dev_p->num_vsi_allocd_to_host = number;
2637 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2638 dev_p->num_vsi_allocd_to_host);
2639 }
2640
2641 /**
2642 * ice_parse_dev_caps - Parse device capabilities
2643 * @hw: pointer to the HW struct
2644 * @dev_p: pointer to device capabilities structure
2645 * @buf: buffer containing the device capability records
2646 * @cap_count: the number of capabilities
2647 *
2648 * Helper function to parse the device (0x000B) capabilities list. For
2649 * capabilities shared between device and function, this relies on
2650 * ice_parse_common_caps.
2651 *
2652 * Loop through the list of provided capabilities and extract the relevant
2653 * data into the device capabilities structure.
2654 */
2655 static void
2656 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2657 void *buf, u32 cap_count)
2658 {
2659 struct ice_aqc_list_caps_elem *cap_resp;
2660 u32 i;
2661
2662 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2663
2664 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2665
2666 for (i = 0; i < cap_count; i++) {
2667 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2668 bool found;
2669
2670 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2671 &cap_resp[i], "dev caps");
2672
2673 switch (cap) {
2674 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2675 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2676 break;
2677 case ICE_AQC_CAPS_VF:
2678 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2679 break;
2680 case ICE_AQC_CAPS_VSI:
2681 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2682 break;
2683 default:
2684 /* Don't list common capabilities as unknown */
2685 if (!found)
2686 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2687 i, cap);
2688 break;
2689 }
2690 }
2691
2692 ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
2693 ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
2694
2695 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2696 }
2697
2698 /**
2699 * ice_aq_list_caps - query function/device capabilities
2700 * @hw: pointer to the HW struct
2701 * @buf: a buffer to hold the capabilities
2702 * @buf_size: size of the buffer
2703 * @cap_count: if not NULL, set to the number of capabilities reported
2704 * @opc: capabilities type to discover, device or function
2705 * @cd: pointer to command details structure or NULL
2706 *
2707 * Get the function (0x000A) or device (0x000B) capabilities description from
2708 * firmware and store it in the buffer.
2709 *
2710 * If the cap_count pointer is not NULL, then it is set to the number of
2711 * capabilities firmware will report. Note that if the buffer size is too
2712 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2713 * cap_count will still be updated in this case. It is recommended that the
2714 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2715 * firmware could return) to avoid this.
2716 */
2717 static enum ice_status
2718 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2719 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2720 {
2721 struct ice_aqc_list_caps *cmd;
2722 struct ice_aq_desc desc;
2723 enum ice_status status;
2724
2725 cmd = &desc.params.get_cap;
2726
2727 if (opc != ice_aqc_opc_list_func_caps &&
2728 opc != ice_aqc_opc_list_dev_caps)
2729 return ICE_ERR_PARAM;
2730
2731 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2732 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2733
2734 if (cap_count)
2735 *cap_count = LE32_TO_CPU(cmd->count);
2736
2737 return status;
2738 }
2739
2740 /**
2741 * ice_discover_dev_caps - Read and extract device capabilities
2742 * @hw: pointer to the hardware structure
2743 * @dev_caps: pointer to device capabilities structure
2744 *
2745 * Read the device capabilities and extract them into the dev_caps structure
2746 * for later use.
2747 */
2748 static enum ice_status
2749 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2750 {
2751 enum ice_status status;
2752 u32 cap_count = 0;
2753 void *cbuf;
2754
2755 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2756 if (!cbuf)
2757 return ICE_ERR_NO_MEMORY;
2758
2759 /* Although the driver doesn't know the number of capabilities the
2760 * device will return, we can simply send a 4KB buffer, the maximum
2761 * possible size that firmware can return.
2762 */
2763 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2764
2765 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2766 ice_aqc_opc_list_dev_caps, NULL);
2767 if (!status)
2768 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2769 ice_free(hw, cbuf);
2770
2771 return status;
2772 }
2773
2774 /**
2775 * ice_discover_func_caps - Read and extract function capabilities
2776 * @hw: pointer to the hardware structure
2777 * @func_caps: pointer to function capabilities structure
2778 *
2779 * Read the function capabilities and extract them into the func_caps structure
2780 * for later use.
2781 */
2782 static enum ice_status
2783 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2784 {
2785 enum ice_status status;
2786 u32 cap_count = 0;
2787 void *cbuf;
2788
2789 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2790 if (!cbuf)
2791 return ICE_ERR_NO_MEMORY;
2792
2793 /* Although the driver doesn't know the number of capabilities the
2794 * device will return, we can simply send a 4KB buffer, the maximum
2795 * possible size that firmware can return.
2796 */
2797 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2798
2799 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2800 ice_aqc_opc_list_func_caps, NULL);
2801 if (!status)
2802 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2803 ice_free(hw, cbuf);
2804
2805 return status;
2806 }
2807
2808 /**
2809 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2810 * @hw: pointer to the hardware structure
2811 */
2812 void ice_set_safe_mode_caps(struct ice_hw *hw)
2813 {
2814 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2815 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2816 struct ice_hw_common_caps cached_caps;
2817 u32 num_funcs;
2818
2819 /* cache some func_caps values that should be restored after memset */
2820 cached_caps = func_caps->common_cap;
2821
2822 /* unset func capabilities */
2823 memset(func_caps, 0, sizeof(*func_caps));
2824
2825 #define ICE_RESTORE_FUNC_CAP(name) \
2826 func_caps->common_cap.name = cached_caps.name
2827
2828 /* restore cached values */
2829 ICE_RESTORE_FUNC_CAP(valid_functions);
2830 ICE_RESTORE_FUNC_CAP(txq_first_id);
2831 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2832 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2833 ICE_RESTORE_FUNC_CAP(max_mtu);
2834 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2835
2836 /* one Tx and one Rx queue in safe mode */
2837 func_caps->common_cap.num_rxq = 1;
2838 func_caps->common_cap.num_txq = 1;
2839
2840 /* two MSIX vectors, one for traffic and one for misc causes */
2841 func_caps->common_cap.num_msix_vectors = 2;
2842 func_caps->guar_num_vsi = 1;
2843
2844 /* cache some dev_caps values that should be restored after memset */
2845 cached_caps = dev_caps->common_cap;
2846 num_funcs = dev_caps->num_funcs;
2847
2848 /* unset dev capabilities */
2849 memset(dev_caps, 0, sizeof(*dev_caps));
2850
2851 #define ICE_RESTORE_DEV_CAP(name) \
2852 dev_caps->common_cap.name = cached_caps.name
2853
2854 /* restore cached values */
2855 ICE_RESTORE_DEV_CAP(valid_functions);
2856 ICE_RESTORE_DEV_CAP(txq_first_id);
2857 ICE_RESTORE_DEV_CAP(rxq_first_id);
2858 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2859 ICE_RESTORE_DEV_CAP(max_mtu);
2860 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2861 dev_caps->num_funcs = num_funcs;
2862
2863 /* one Tx and one Rx queue per function in safe mode */
2864 dev_caps->common_cap.num_rxq = num_funcs;
2865 dev_caps->common_cap.num_txq = num_funcs;
2866
2867 /* two MSIX vectors per function */
2868 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2869 }
2870
2871 /**
2872 * ice_get_caps - get info about the HW
2873 * @hw: pointer to the hardware structure
2874 */
2875 enum ice_status ice_get_caps(struct ice_hw *hw)
2876 {
2877 enum ice_status status;
2878
2879 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2880 if (status)
2881 return status;
2882
2883 return ice_discover_func_caps(hw, &hw->func_caps);
2884 }
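
/*
 * Illustrative sketch, not part of the driver: once ice_get_caps()
 * succeeds, the parsed values are available in hw->dev_caps and
 * hw->func_caps, e.g.:
 *
 *	if (!ice_get_caps(hw))
 *		max_mtu = hw->func_caps.common_cap.max_mtu;
 */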
2885
2886 /**
2887 * ice_aq_manage_mac_write - manage MAC address write command
2888 * @hw: pointer to the HW struct
2889 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2890 * @flags: flags to control write behavior
2891 * @cd: pointer to command details structure or NULL
2892 *
2893 * This function is used to write MAC address to the NVM (0x0108).
2894 */
2895 enum ice_status
2896 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2897 struct ice_sq_cd *cd)
2898 {
2899 struct ice_aqc_manage_mac_write *cmd;
2900 struct ice_aq_desc desc;
2901
2902 cmd = &desc.params.mac_write;
2903 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2904
2905 cmd->flags = flags;
2906 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
2907
2908 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2909 }
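
/*
 * Illustrative sketch, not part of the driver: persisting a locally
 * administered address along with the WoL address. new_mac is a
 * hypothetical 6-byte array; the flag is one of the real write-behavior
 * flags for this command.
 *
 *	status = ice_aq_manage_mac_write(hw, new_mac,
 *					 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL,
 *					 NULL);
 */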
2910
2911 /**
2912 * ice_aq_clear_pxe_mode
2913 * @hw: pointer to the HW struct
2914 *
2915 * Tell the firmware that the driver is taking over from PXE (0x0110).
2916 */
2917 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2918 {
2919 struct ice_aq_desc desc;
2920
2921 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2922 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2923
2924 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2925 }
2926
2927 /**
2928 * ice_clear_pxe_mode - clear pxe operations mode
2929 * @hw: pointer to the HW struct
2930 *
2931 * Make sure all PXE mode settings are cleared, including things
2932 * like descriptor fetch/write-back mode.
2933 */
2934 void ice_clear_pxe_mode(struct ice_hw *hw)
2935 {
2936 if (ice_check_sq_alive(hw, &hw->adminq))
2937 ice_aq_clear_pxe_mode(hw);
2938 }
2939
2940 /**
2941 * ice_aq_set_port_params - set physical port parameters.
2942 * @pi: pointer to the port info struct
2943 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2944 * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
2945 * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded
2946 * @double_vlan: if set double VLAN is enabled
2947 * @cd: pointer to command details structure or NULL
2948 *
2949 * Set Physical port parameters (0x0203)
2950 */
2951 enum ice_status
2952 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2953 bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2954 struct ice_sq_cd *cd)
2955
2956 {
2957 struct ice_aqc_set_port_params *cmd;
2958 struct ice_hw *hw = pi->hw;
2959 struct ice_aq_desc desc;
2960 u16 cmd_flags = 0;
2961
2962 cmd = &desc.params.set_port_params;
2963
2964 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2965 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
2966 if (save_bad_pac)
2967 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
2968 if (pad_short_pac)
2969 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
2970 if (double_vlan)
2971 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2972 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
2973
2974 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2975 }
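
/*
 * Illustrative sketch, not part of the driver: enabling double VLAN on
 * a port while leaving bad-frame forwarding and short-packet padding
 * disabled (the bad_frame_vsi of 0 is a placeholder here).
 *
 *	status = ice_aq_set_port_params(pi, 0, false, false, true, NULL);
 */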
2976
2977 /**
2978 * ice_is_100m_speed_supported
2979 * @hw: pointer to the HW struct
2980 *
2981 * returns true if 100M speeds are supported by the device,
2982 * false otherwise.
2983 */
2984 bool ice_is_100m_speed_supported(struct ice_hw *hw)
2985 {
2986 switch (hw->device_id) {
2987 case ICE_DEV_ID_E822C_10G_BASE_T:
2988 case ICE_DEV_ID_E822C_SGMII:
2989 case ICE_DEV_ID_E822L_10G_BASE_T:
2990 case ICE_DEV_ID_E822L_SGMII:
2991 case ICE_DEV_ID_E823L_10G_BASE_T:
2992 case ICE_DEV_ID_E823L_1GBE:
2993 return true;
2994 default:
2995 return false;
2996 }
2997 }
2998
2999 /**
3000 * ice_get_link_speed_based_on_phy_type - returns link speed
3001 * @phy_type_low: lower part of phy_type
3002 * @phy_type_high: higher part of phy_type
3003 *
3004 * This helper function will convert an entry in PHY type structure
3005 * [phy_type_low, phy_type_high] to its corresponding link speed.
3006 * Note: Exactly one bit should be set across [phy_type_low,
3007 * phy_type_high], as this function converts a single PHY type to its
3008 * speed.
3009 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
3010 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
3011 */
3012 static u16
3013 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3014 {
3015 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3016 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3017
3018 switch (phy_type_low) {
3019 case ICE_PHY_TYPE_LOW_100BASE_TX:
3020 case ICE_PHY_TYPE_LOW_100M_SGMII:
3021 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3022 break;
3023 case ICE_PHY_TYPE_LOW_1000BASE_T:
3024 case ICE_PHY_TYPE_LOW_1000BASE_SX:
3025 case ICE_PHY_TYPE_LOW_1000BASE_LX:
3026 case ICE_PHY_TYPE_LOW_1000BASE_KX:
3027 case ICE_PHY_TYPE_LOW_1G_SGMII:
3028 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3029 break;
3030 case ICE_PHY_TYPE_LOW_2500BASE_T:
3031 case ICE_PHY_TYPE_LOW_2500BASE_X:
3032 case ICE_PHY_TYPE_LOW_2500BASE_KX:
3033 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3034 break;
3035 case ICE_PHY_TYPE_LOW_5GBASE_T:
3036 case ICE_PHY_TYPE_LOW_5GBASE_KR:
3037 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3038 break;
3039 case ICE_PHY_TYPE_LOW_10GBASE_T:
3040 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3041 case ICE_PHY_TYPE_LOW_10GBASE_SR:
3042 case ICE_PHY_TYPE_LOW_10GBASE_LR:
3043 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3044 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3045 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3046 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3047 break;
3048 case ICE_PHY_TYPE_LOW_25GBASE_T:
3049 case ICE_PHY_TYPE_LOW_25GBASE_CR:
3050 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3051 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3052 case ICE_PHY_TYPE_LOW_25GBASE_SR:
3053 case ICE_PHY_TYPE_LOW_25GBASE_LR:
3054 case ICE_PHY_TYPE_LOW_25GBASE_KR:
3055 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3056 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3057 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3058 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3059 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3060 break;
3061 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3062 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3063 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3064 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3065 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3066 case ICE_PHY_TYPE_LOW_40G_XLAUI:
3067 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3068 break;
3069 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3070 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3071 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3072 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3073 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3074 case ICE_PHY_TYPE_LOW_50G_LAUI2:
3075 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3076 case ICE_PHY_TYPE_LOW_50G_AUI2:
3077 case ICE_PHY_TYPE_LOW_50GBASE_CP:
3078 case ICE_PHY_TYPE_LOW_50GBASE_SR:
3079 case ICE_PHY_TYPE_LOW_50GBASE_FR:
3080 case ICE_PHY_TYPE_LOW_50GBASE_LR:
3081 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3082 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3083 case ICE_PHY_TYPE_LOW_50G_AUI1:
3084 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3085 break;
3086 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3087 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3088 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3089 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3090 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3091 case ICE_PHY_TYPE_LOW_100G_CAUI4:
3092 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3093 case ICE_PHY_TYPE_LOW_100G_AUI4:
3094 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3095 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3096 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3097 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3098 case ICE_PHY_TYPE_LOW_100GBASE_DR:
3099 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3100 break;
3101 default:
3102 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3103 break;
3104 }
3105
3106 switch (phy_type_high) {
3107 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3108 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3109 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3110 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3111 case ICE_PHY_TYPE_HIGH_100G_AUI2:
3112 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3113 break;
3114 default:
3115 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3116 break;
3117 }
3118
3119 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3120 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3121 return ICE_AQ_LINK_SPEED_UNKNOWN;
3122 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3123 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3124 return ICE_AQ_LINK_SPEED_UNKNOWN;
3125 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3126 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3127 return speed_phy_type_low;
3128 else
3129 return speed_phy_type_high;
3130 }
3131
3132 /**
3133 * ice_update_phy_type
3134 * @phy_type_low: pointer to the lower part of phy_type
3135 * @phy_type_high: pointer to the higher part of phy_type
3136 * @link_speeds_bitmap: targeted link speeds bitmap
3137 *
3138 * Note: For the link_speeds_bitmap format, see the link_speed field of
3139 * struct ice_aqc_get_link_status. The caller may pass a
3140 * link_speeds_bitmap that includes multiple speeds.
3141 *
3142 * Each entry in the [phy_type_low, phy_type_high] structure represents
3143 * a certain link speed. This helper function turns on the bits in
3144 * [phy_type_low, phy_type_high] that correspond to the speeds set in
3145 * the link_speeds_bitmap input parameter.
3146 */
3147 void
3148 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3149 u16 link_speeds_bitmap)
3150 {
3151 u64 pt_high;
3152 u64 pt_low;
3153 int index;
3154 u16 speed;
3155
3156 /* We first check with low part of phy_type */
3157 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3158 pt_low = BIT_ULL(index);
3159 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3160
3161 if (link_speeds_bitmap & speed)
3162 *phy_type_low |= BIT_ULL(index);
3163 }
3164
3165 /* We then check with high part of phy_type */
3166 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3167 pt_high = BIT_ULL(index);
3168 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3169
3170 if (link_speeds_bitmap & speed)
3171 *phy_type_high |= BIT_ULL(index);
3172 }
3173 }
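
/*
 * Illustrative sketch, not part of the driver: turning on every PHY
 * type bit that corresponds to 10G or 25G. The speed constants are the
 * real link_speed bits from the get-link-status command.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 */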
3174
/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in MFP
 * mode as the PF may not have the privilege to set some of the PHY config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);

	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = ICE_SUCCESS;

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}

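/*
 * Typical caller flow for ice_aq_set_phy_cfg(), sketched for reference
 * (illustrative only; error handling trimmed). Callers normally start from
 * reported capabilities rather than building cfg from scratch:
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
 *
 * On success the requested config is cached in pi->phy.curr_user_phy_cfg;
 * an EMODE response is treated as success because the request was valid
 * but could not be applied in the current mode.
 */
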
/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	enum ice_status status;

	if (!pi)
		return ICE_ERR_PARAM;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = (struct ice_aqc_get_phy_caps_data *)
			ice_malloc(hw, sizeof(*pcaps));
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);

		if (status == ICE_SUCCESS)
			ice_memcpy(li->module_type, &pcaps->module_type,
				   sizeof(li->module_type),
				   ICE_NONDMA_TO_NONDMA);

		ice_free(hw, pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}

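/*
 * Decoding sketch for the two converters above (illustrative only). Given
 * an ice_aqc_get_phy_caps_data response "pcaps", a caller can recover the
 * reported FC and FEC modes like so:
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 *
 * The auto-FEC capability bit wins over any individual BASE-R/RS option
 * bits, and BASE-R is checked before RS, so mixed option bits resolve in
 * that order of precedence.
 */
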
/**
 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 */
static enum ice_status
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
	       enum ice_fc_mode req_mode)
{
	struct ice_phy_cache_mode_data cache_data;
	u8 pause_mask = 0x0;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;
	switch (req_mode) {
	case ICE_FC_AUTO:
	{
		struct ice_aqc_get_phy_caps_data *pcaps;
		enum ice_status status;

		pcaps = (struct ice_aqc_get_phy_caps_data *)
			ice_malloc(pi->hw, sizeof(*pcaps));
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;
		/* Query the value of FC that both the NIC and attached media
		 * can do.
		 */
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);
		if (status) {
			ice_free(pi->hw, pcaps);
			return status;
		}

		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;

		ice_free(pi->hw, pcaps);
		break;
	}
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	/* clear the old pause settings */
	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		       ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg->caps |= pause_mask;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return ICE_SUCCESS;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !aq_failures)
		return ICE_ERR_BAD_PTR;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);

	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status) {
		if (status != ICE_ERR_BAD_PTR)
			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;

		goto out;
	}

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (status == ICE_SUCCESS)
				break;

			ice_msec_delay(100, true);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	ice_free(hw, pcaps);
	return status;
}

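/*
 * A caller-side sketch for ice_set_fc() (illustrative only; "sc" is a
 * hypothetical softc holding the port info). The requested mode is staged
 * in pi->fc.req_mode before the call, and aq_failures narrows down which
 * AQ step failed:
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		device_printf(sc->dev, "set_fc failed, stage %u\n",
 *			      aq_failures);
 *
 * aq_failures is ICE_SET_FC_AQ_FAIL_GET, _SET, or _UPDATE depending on
 * whether the Get PHY caps, Set PHY config, or link-info refresh failed.
 */
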
/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

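/*
 * The caps-to-cfg copy above is the first half of the common
 * read/modify/write pattern for PHY reconfiguration. A condensed sketch
 * (illustrative only; error handling trimmed):
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				     pcaps, NULL);
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	... adjust cfg.caps / cfg.link_fec_opt as needed ...
 *	if (!ice_phy_caps_equals_cfg(pcaps, &cfg))
 *		status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 *
 * Comparing with ice_phy_caps_equals_cfg() first avoids an unnecessary
 * link flap when nothing actually changed.
 */
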
/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
enum ice_status
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	hw = pi->hw;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);

	if (status)
		goto out;

	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = ICE_ERR_PARAM;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
	    !ice_fw_supports_report_dflt_cfg(pi->hw)) {
		struct ice_link_default_override_tlv tlv;

		if (ice_get_link_default_override(&tlv, pi))
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	ice_free(hw, pcaps);

	return status;
}

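/*
 * FEC request sketch built on ice_cfg_phy_fec() (illustrative only; error
 * handling trimmed). The helper edits only the FEC-related fields of cfg,
 * so it slots into the same read/modify/write flow:
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
 *	if (!status)
 *		status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 *
 * ICE_FEC_AUTO defers to the reported default or, on older firmware, to
 * the link default override TLV handled at the end of the function.
 */
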
/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = ICE_SUCCESS;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

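/*
 * Link polling sketch (illustrative only). pi->phy.get_link_info is set
 * when a fresh query is needed, e.g. after a link event:
 *
 *	bool link_up;
 *
 *	pi->phy.get_link_info = true;
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		... report link up at pi->phy.link_info.link_speed ...
 *
 * When get_link_info is false the cached link_info is used without
 * issuing a new Get Link Status AQ command.
 */
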
/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = CPU_TO_LE16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
enum ice_status
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. Lower 8 bits for address, upper 8 bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: false for read, true for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
enum ice_status
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!data || (mem_addr & 0xff00))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}

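/*
 * Module EEPROM read sketch using ice_aq_sff_eeprom() (illustrative only).
 * Reading the SFF identifier byte at offset 0 of the module on the current
 * port at the default I2C address, with set_page = 0 so the page selection
 * is left alone in this sketch:
 *
 *	u8 id = 0;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0x00, 0, &id,
 *				   sizeof(id), false, NULL);
 *
 * Reads are limited to 16 bytes per command and writes to a single byte,
 * so larger transfers must be split into a loop over mem_addr.
 */
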
/**
 * ice_aq_prog_topo_dev_nvm
 * @hw: pointer to the hardware structure
 * @topo_params: pointer to structure storing topology parameters for a device
 * @cd: pointer to command details structure or NULL
 *
 * Program Topology Device NVM (0x06F2)
 */
enum ice_status
ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
			 struct ice_aqc_link_topo_params *topo_params,
			 struct ice_sq_cd *cd)
{
	struct ice_aqc_prog_topo_dev_nvm *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.prog_topo_dev_nvm;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);

	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
		   ICE_NONDMA_TO_NONDMA);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_read_topo_dev_nvm
 * @hw: pointer to the hardware structure
 * @topo_params: pointer to structure storing topology parameters for a device
 * @start_address: byte offset in the topology device NVM
 * @data: pointer to data buffer
 * @data_size: number of bytes to be read from the topology device NVM
 * @cd: pointer to command details structure or NULL
 *
 * Read Topology Device NVM (0x06F3)
 */
enum ice_status
ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
			 struct ice_aqc_link_topo_params *topo_params,
			 u32 start_address, u8 *data, u8 data_size,
			 struct ice_sq_cd *cd)
{
	struct ice_aqc_read_topo_dev_nvm *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!data || data_size == 0 ||
	    data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
		return ICE_ERR_PARAM;

	cmd = &desc.params.read_topo_dev_nvm;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);

	/* datalen is little-endian in the descriptor */
	desc.datalen = CPU_TO_LE16(data_size);
	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
		   ICE_NONDMA_TO_NONDMA);
	cmd->start_address = CPU_TO_LE32(start_address);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);

	return ICE_SUCCESS;
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u8 *lut;

	if (!params)
		return ICE_ERR_PARAM;

	vsi_handle = params->vsi_handle;
	lut = params->lut;

	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	lut_size = params->lut_size;
	lut_type = params->lut_type;
	glob_lut_idx = params->global_lut_id;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = CPU_TO_LE16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}

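/*
 * RSS LUT sketch (illustrative only; "qmap" and its size stand in for
 * caller-provided storage). The params struct selects which LUT to
 * address; the same struct works for both get and set:
 *
 *	struct ice_aq_get_set_rss_lut_params params = { 0 };
 *
 *	params.vsi_handle = vsi_handle;
 *	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
 *	params.lut_size = sizeof(qmap);
 *	params.lut = qmap;
 *	status = ice_aq_set_rss_lut(hw, &params);
 *
 * The LUT size flags are only meaningful for PF and global tables; VSI
 * LUTs have a fixed size, and global LUTs additionally need
 * params.global_lut_id.
 */
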
/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key,
			 bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

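/*
 * RSS key sketch (illustrative only). The key struct carries both the
 * standard and extended hash key bytes; a caller setting a new key fills
 * standard_rss_key before issuing the set:
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *
 *	ice_memcpy(keys.standard_rss_key, seed,
 *		   sizeof(keys.standard_rss_key), ICE_NONDMA_TO_NONDMA);
 *	status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 *
 * Here "seed" is a hypothetical caller-provided buffer of random bytes.
 * The wrappers validate vsi_handle and translate it to the FW VSI ID.
 */
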
/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += ice_struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return ICE_ERR_PARAM;

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = ice_struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  LE16_TO_CPU(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/**
 * ice_aq_move_recfg_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qs: number of queues to move/reconfigure
 * @is_move: true if this operation involves node movement
 * @is_tc_change: true if this operation involves a TC change
 * @subseq_call: true if this operation is a subsequent call
 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
 * @timeout: timeout in units of 100 usec (valid values 0-50)
 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
 * @buf: struct containing src/dest TEID and per-queue info
 * @buf_size: size of buffer for indirect command
 * @txqs_moved: out param, number of queues successfully moved
 * @cd: pointer to command details structure or NULL
 *
 * Move / Reconfigure Tx LAN queues (0x0C32)
 */
enum ice_status
ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
			  bool is_tc_change, bool subseq_call, bool flush_pipe,
			  u8 timeout, u32 *blocked_cgds,
			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
			  u8 *txqs_moved, struct ice_sq_cd *cd)
{
	struct ice_aqc_move_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.move_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);

#define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
		return ICE_ERR_PARAM;

	if (is_tc_change && !flush_pipe && !blocked_cgds)
		return ICE_ERR_PARAM;

	if (!is_move && !is_tc_change)
		return ICE_ERR_PARAM;

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (is_move)
		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;

	if (is_tc_change)
		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;

	if (subseq_call)
		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;

	if (flush_pipe)
		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;

	cmd->num_qs = num_qs;
	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
			ICE_AQC_Q_CMD_TIMEOUT_M);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	if (!status && txqs_moved)
		*txqs_moved = cmd->num_qs;

	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
	    is_tc_change && !flush_pipe)
		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);

	return status;
}

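/*
 * Call-shape sketch for ice_aq_move_recfg_lan_txq() (illustrative only).
 * A plain move of already-configured queues, with a 1 ms timeout (10 units
 * of 100 usec) and no TC change:
 *
 *	u8 moved = 0;
 *
 *	status = ice_aq_move_recfg_lan_txq(hw, num_qs, true, false, false,
 *					   false, 10, NULL, buf, buf_size,
 *					   &moved, NULL);
 *
 * blocked_cgds may be NULL here because it is only required for a TC
 * change without pipe flush, where an EAGAIN response reports which CGDs
 * timed out. "buf" must carry the src/dest TEIDs and per-queue entries.
 */
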
/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return ICE_SUCCESS;
}

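/*
 * ce_info packing sketch (illustrative only; the struct, table, and field
 * layout below are hypothetical). ice_set_ctx() walks a table of
 * ice_ctx_ele descriptors and packs each host-order field into the packed,
 * little-endian HW image:
 *
 *	struct my_ctx {
 *		u16 head;
 *		u8  ena;
 *	};
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		ICE_CTX_STORE(my_ctx, head, 13, 0),
 *		ICE_CTX_STORE(my_ctx, ena,   1, 13),
 *		{ 0 }
 *	};
 *	u8 image[4] = { 0 };
 *
 *	ice_set_ctx(hw, (u8 *)&my_ctx_val, image, my_ctx_info);
 *
 * Each descriptor gives a width in bits and an lsb position, so fields may
 * straddle byte boundaries; the byte/word/dword/qword writers mask and
 * shift accordingly.
 */
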
/**
 * ice_aq_get_internal_data
 * @hw: pointer to the hardware structure
 * @cluster_id: specific cluster to dump
 * @table_id: table ID within cluster
 * @start: index of line in the block to read
 * @buf: dump buffer
 * @buf_size: dump buffer size
 * @ret_buf_size: return buffer size (returned by FW)
 * @ret_next_table: next block to read (returned by FW)
 * @ret_next_index: next index to read (returned by FW)
 * @cd: pointer to command details structure
 *
 * Get internal FW/HW data (0xFF08) for debug purposes.
 */
enum ice_status
ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
			 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
			 u16 *ret_next_table, u32 *ret_next_index,
			 struct ice_sq_cd *cd)
{
	struct ice_aqc_debug_dump_internals *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.debug_dump;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);

	cmd->cluster_id = cluster_id;
	cmd->table_id = CPU_TO_LE16(table_id);
	cmd->idx = CPU_TO_LE32(start);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	if (!status) {
		if (ret_buf_size)
			*ret_buf_size = LE16_TO_CPU(desc.datalen);
		if (ret_next_table)
			*ret_next_table = LE16_TO_CPU(cmd->table_id);
		if (ret_next_index)
			*ret_next_index = LE32_TO_CPU(cmd->idx);
	}

	return status;
}

/**
 * ice_read_byte - read context byte into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u8 dest_byte, mask;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);

	/* keep only the bits of this field */
	dest_byte &= mask;

	dest_byte >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
}

/**
 * ice_read_word - read context word into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u16 dest_word, mask;
	u8 *src, *target;
	__le16 src_word;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly, keeping only the bits of this field
	 */
	src_word &= CPU_TO_LE16(mask);

	/* get the data back into host order before shifting */
	dest_word = LE16_TO_CPU(src_word);

	dest_word >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
}

/**
 * ice_read_dword - read context dword into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u32 dest_dword, mask;
	__le32 src_dword;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly, keeping only the bits of this field
	 */
	src_dword &= CPU_TO_LE32(mask);

	/* get the data back into host order before shifting */
	dest_dword = LE32_TO_CPU(src_dword);

	dest_dword >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_read_qword - read context qword into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u64 dest_qword, mask;
	__le64 src_qword;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly, keeping only the bits of this field
	 */
	src_qword &= CPU_TO_LE64(mask);

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_get_ctx - extract context bits from a packed structure
 * @src_ctx: pointer to a generic packed context structure
 * @dest_ctx: pointer to a generic non-packed context structure
 * @ce_info: a description of the structure to be read from
 */
enum ice_status
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		switch (ce_info[f].size_of) {
		case 1:
			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 2:
			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 4:
			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 8:
			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

4860 /**
4861 * ice_ena_vsi_txq
4862 * @pi: port information structure
4863 * @vsi_handle: software VSI handle
4864 * @tc: TC number
4865 * @q_handle: software queue handle
4866 * @num_qgrps: Number of added queue groups
4867 * @buf: list of queue groups to be added
4868 * @buf_size: size of buffer for indirect command
4869 * @cd: pointer to command details structure or NULL
4870 *
4871 * This function adds one LAN queue
4872 */
4873 enum ice_status
ice_ena_vsi_txq(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u16 q_handle,u8 num_qgrps,struct ice_aqc_add_tx_qgrp * buf,u16 buf_size,struct ice_sq_cd * cd)4874 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4875 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4876 struct ice_sq_cd *cd)
4877 {
4878 struct ice_aqc_txsched_elem_data node = { 0 };
4879 struct ice_sched_node *parent;
4880 struct ice_q_ctx *q_ctx;
4881 enum ice_status status;
4882 struct ice_hw *hw;
4883
4884 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4885 return ICE_ERR_CFG;
4886
4887 if (num_qgrps > 1 || buf->num_txqs > 1)
4888 return ICE_ERR_MAX_LIMIT;
4889
4890 hw = pi->hw;
4891
4892 if (!ice_is_vsi_valid(hw, vsi_handle))
4893 return ICE_ERR_PARAM;
4894
4895 ice_acquire_lock(&pi->sched_lock);
4896
4897 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4898 if (!q_ctx) {
4899 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4900 q_handle);
4901 status = ICE_ERR_PARAM;
4902 goto ena_txq_exit;
4903 }
4904
4905 /* find a parent node */
4906 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4907 ICE_SCHED_NODE_OWNER_LAN);
4908 if (!parent) {
4909 status = ICE_ERR_PARAM;
4910 goto ena_txq_exit;
4911 }
4912
4913 buf->parent_teid = parent->info.node_teid;
4914 node.parent_teid = parent->info.node_teid;
4915 /* Mark that the values in the "generic" section as valid. The default
4916 * value in the "generic" section is zero. This means that :
4917 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4918 * - 0 priority among siblings, indicated by Bit 1-3.
4919 * - WFQ, indicated by Bit 4.
4920 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4921 * Bit 5-6.
4922 * - Bit 7 is reserved.
4923 * Without setting the generic section as valid in valid_sections, the
4924 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4925 */
4926 buf->txqs[0].info.valid_sections =
4927 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4928 ICE_AQC_ELEM_VALID_EIR;
4929 buf->txqs[0].info.generic = 0;
4930 buf->txqs[0].info.cir_bw.bw_profile_idx =
4931 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4932 buf->txqs[0].info.cir_bw.bw_alloc =
4933 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4934 buf->txqs[0].info.eir_bw.bw_profile_idx =
4935 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4936 buf->txqs[0].info.eir_bw.bw_alloc =
4937 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4938
4939 /* add the LAN queue */
4940 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4941 if (status != ICE_SUCCESS) {
4942 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4943 LE16_TO_CPU(buf->txqs[0].txq_id),
4944 hw->adminq.sq_last_status);
4945 goto ena_txq_exit;
4946 }
4947
4948 node.node_teid = buf->txqs[0].q_teid;
4949 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4950 q_ctx->q_handle = q_handle;
4951 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4952
4953 /* add a leaf node into scheduler tree queue layer */
4954 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4955 if (!status)
4956 status = ice_sched_replay_q_bw(pi, q_ctx);
4957
4958 ena_txq_exit:
4959 ice_release_lock(&pi->sched_lock);
4960 return status;
4961 }
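
/*
 * Illustrative call, with hypothetical ring-setup details elided: the
 * caller packs exactly one queue into an add-Tx-queue-group buffer;
 * ice_ena_vsi_txq() fills in the scheduler-related fields itself.
 *
 *	struct ice_aqc_add_tx_qgrp *qg;
 *	u16 qg_size = ice_struct_size(qg, txqs, 1);
 *
 *	qg = (struct ice_aqc_add_tx_qgrp *)ice_malloc(hw, qg_size);
 *	qg->num_txqs = 1;
 *	qg->txqs[0].txq_id = CPU_TO_LE16(txq_id);	(hypothetical ID)
 *	(fill qg->txqs[0].txq_ctx from the ring's Tx queue context)
 *	status = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg,
 *				 qg_size, NULL);
 *	ice_free(hw, qg);
 */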

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if the queues are already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	buf_size = ice_struct_size(qg_list, q_id, 1);
	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status != ICE_SUCCESS)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	ice_release_lock(&pi->sched_lock);
	ice_free(hw, qg_list);
	return status;
}
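
/*
 * Illustrative teardown of a single queue (hypothetical one-element
 * arrays): q_handles, q_ids and q_teids must describe the same queue at
 * each index.
 *
 *	status = ice_dis_vsi_txq(pi, vsi_handle, tc, 1, &q_handle, &q_id,
 *				 &q_teid, ICE_NO_RESET, 0, NULL);
 */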

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = ICE_SUCCESS;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_is_main_vsi - checks whether the VSI is the main VSI
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Checks whether the VSI is the main VSI (the first PF VSI created on
 * the given PF).
 */
static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function initializes filters
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
enum ice_status
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
	enum ice_status status;
	u8 i;

	/* Delete old entries from replay filter list head if there are any */
	ice_rm_sw_replay_rule_info(hw, sw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	status = ice_sched_replay_root_node_bw(hw->port_info);
	if (status)
		return status;

	return ice_sched_replay_tc_node_bw(hw->port_info);
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. This function must be called
 * with the main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_port_info *pi = hw->port_info;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (ice_is_main_vsi(hw, vsi_handle)) {
		status = ice_replay_pre_init(hw, sw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}
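
/*
 * Sketch of the overall post-reset replay flow implied above (error
 * handling elided; loop bounds hypothetical):
 *
 *	ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);  (runs pre-init first)
 *	for (v = 0; v < num_vsis; v++)
 *		if (v != ICE_MAIN_VSI_HANDLE && ice_is_vsi_valid(hw, v))
 *			ice_replay_vsi(hw, v);
 *	ice_replay_post(hw);
 */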

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
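
/*
 * Worked rollover example for the 40-bit path above (hypothetical
 * values): with *prev_stat == 0xFFFFFFFFF0 and new_data == 0x10, the
 * counter wrapped, so the else branch computes
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 == 0x20, i.e. 32 events elapsed.
 */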

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
 * @hw: ptr to the hardware info
 * @vsi_handle: VSI handle
 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
 * @cur_stats: ptr to current stats structure
 *
 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
 * thus cannot be read using the normal ice_stat_update32 function.
 *
 * Read the GLV_REPC register associated with the given VSI, and update the
 * rx_no_desc and rx_errors values in the ice_eth_stats structure.
 *
 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
 * cleared each time it's read.
 *
 * Note that the GLV_RDPC register also counts the causes that would trigger
 * GLV_REPC. However, it does not give the finer grained detail about why the
 * packets are being dropped. The GLV_REPC values can be used to distinguish
 * whether Rx packets are dropped due to errors or due to no available
 * descriptors.
 */
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
		     struct ice_eth_stats *cur_stats)
{
	u16 vsi_num, no_desc, error_cnt;
	u32 repc;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* If we haven't loaded stats yet, just clear the current value */
	if (!prev_stat_loaded) {
		wr32(hw, GLV_REPC(vsi_num), 0);
		return;
	}

	repc = rd32(hw, GLV_REPC(vsi_num));
	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;

	/* Clear the count by writing to the stats register */
	wr32(hw, GLV_REPC(vsi_num), 0);

	cur_stats->rx_no_desc += no_desc;
	cur_stats->rx_errors += error_cnt;
}

/**
 * ice_aq_alternate_write
 * @hw: pointer to the hardware structure
 * @reg_addr0: address of first dword to be written
 * @reg_val0: value to be written under 'reg_addr0'
 * @reg_addr1: address of second dword to be written
 * @reg_val1: value to be written under 'reg_addr1'
 *
 * Write one or two dwords to the alternate structure. Fields are indicated
 * by 'reg_addr0' and 'reg_addr1' register numbers.
 */
enum ice_status
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
		       u32 reg_addr1, u32 reg_val1)
{
	struct ice_aqc_read_write_alt_direct *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.read_write_alt_direct;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
	cmd->dword0_value = CPU_TO_LE32(reg_val0);
	cmd->dword1_value = CPU_TO_LE32(reg_val1);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * ice_aq_alternate_read
 * @hw: pointer to the hardware structure
 * @reg_addr0: address of first dword to be read
 * @reg_val0: pointer for data read from 'reg_addr0'
 * @reg_addr1: address of second dword to be read
 * @reg_val1: pointer for data read from 'reg_addr1'
 *
 * Read one or two dwords from the alternate structure. Fields are indicated
 * by 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1' pointer
 * is not passed, only the register at 'reg_addr0' is read.
 */
enum ice_status
ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
		      u32 reg_addr1, u32 *reg_val1)
{
	struct ice_aqc_read_write_alt_direct *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.read_write_alt_direct;

	if (!reg_val0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	if (status == ICE_SUCCESS) {
		*reg_val0 = LE32_TO_CPU(cmd->dword0_value);

		if (reg_val1)
			*reg_val1 = LE32_TO_CPU(cmd->dword1_value);
	}

	return status;
}
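
/*
 * Illustrative single-dword read (hypothetical address): passing NULL
 * for reg_val1 limits the operation to reg_addr0.
 *
 *	u32 val;
 *	enum ice_status status;
 *
 *	status = ice_aq_alternate_read(hw, 0x10, &val, 0, NULL);
 */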

/**
 * ice_aq_alternate_write_done
 * @hw: pointer to the HW structure.
 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
 * @reset_needed: indicates whether SW should trigger a GLOBAL reset
 *
 * Indicates to the FW that alternate structures have been changed.
 */
enum ice_status
ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
{
	struct ice_aqc_done_alt_write *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.done_alt_write;

	if (!reset_needed)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
	cmd->flags = bios_mode;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*reset_needed = (LE16_TO_CPU(cmd->flags) &
				 ICE_AQC_RESP_RESET_NEEDED) != 0;

	return status;
}

/**
 * ice_aq_alternate_clear
 * @hw: pointer to the HW structure.
 *
 * Clear the alternate structures of the port from which the function
 * is called.
 */
enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to store element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_get_fw_mode - returns FW mode
 * @hw: pointer to the HW struct
 */
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
{
#define ICE_FW_MODE_DBG_M BIT(0)
#define ICE_FW_MODE_REC_M BIT(1)
#define ICE_FW_MODE_ROLLBACK_M BIT(2)
	u32 fw_mode;

	/* check the current FW mode */
	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
	if (fw_mode & ICE_FW_MODE_DBG_M)
		return ICE_FW_MODE_DBG;
	else if (fw_mode & ICE_FW_MODE_REC_M)
		return ICE_FW_MODE_REC;
	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
		return ICE_FW_MODE_ROLLBACK;
	else
		return ICE_FW_MODE_NORMAL;
}

/**
 * ice_get_cur_lldp_persist_status
 * @hw: pointer to the HW struct
 * @lldp_status: return value of LLDP persistent status
 *
 * Get the current LLDP persistence status
 */
enum ice_status
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
	struct ice_port_info *pi = hw->port_info;
	enum ice_status ret;
	__le32 raw_data;
	u32 data, mask;

	if (!lldp_status)
		return ICE_ERR_BAD_PTR;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;

	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
			      ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
			      false, true, NULL);
	if (!ret) {
		data = LE32_TO_CPU(raw_data);
		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
		data = data & mask;
		*lldp_status = data >>
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
	}

	ice_release_nvm(hw);

	return ret;
}

/**
 * ice_get_dflt_lldp_persist_status
 * @hw: pointer to the HW struct
 * @lldp_status: return value of LLDP persistent status
 *
 * Get the default LLDP persistence status
 */
enum ice_status
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
	struct ice_port_info *pi = hw->port_info;
	u32 data, mask, loc_data, loc_data_tmp;
	enum ice_status ret;
	__le16 loc_raw_data;
	__le32 raw_data;

	if (!lldp_status)
		return ICE_ERR_BAD_PTR;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;

	/* Read the offset of EMP_SR_PTR */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
			      ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
			      ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
			      &loc_raw_data, false, true, NULL);
	if (ret)
		goto exit;

	loc_data = LE16_TO_CPU(loc_raw_data);
	if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
		loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
		loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
	} else {
		loc_data *= ICE_AQC_NVM_WORD_UNIT;
	}

	/* Read the offset of the LLDP configuration pointer */
	loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
			      ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
			      false, true, NULL);
	if (ret)
		goto exit;

	loc_data_tmp = LE16_TO_CPU(loc_raw_data);
	loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
	loc_data += loc_data_tmp;

	/* We need to skip the LLDP configuration section length (2 bytes) */
	loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;

	/* Read the default LLDP configuration */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
			      true, NULL);
	if (!ret) {
		data = LE32_TO_CPU(raw_data);
		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
		data = data & mask;
		*lldp_status = data >>
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
	}

exit:
	ice_release_nvm(hw);

	return ret;
}

/**
 * ice_aq_read_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset
 *	    size, bit [4] - I2C address type, bits [3:0] - data size to read
 *	    (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
enum ice_status
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	enum ice_status status;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return ICE_ERR_PARAM;

	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;

	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}
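
/*
 * Illustrative 1-byte read at I2C offset 0 from 7-bit bus address 0x50
 * (addresses hypothetical; an SFP EEPROM commonly sits at 0x50):
 *
 *	struct ice_aqc_link_topo_addr topo = { 0 };	(target device)
 *	u8 byte;
 *	enum ice_status status;
 *
 *	status = ice_aq_read_i2c(hw, topo, 0x50, CPU_TO_LE16(0),
 *				 (1 << ICE_AQC_I2C_DATA_SIZE_S) &
 *				 ICE_AQC_I2C_DATA_SIZE_M, &byte, NULL);
 */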

/**
 * ice_aq_write_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
 *	    to write (this function supports at most 4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 */
enum ice_status
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 i, data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;

	/* data_size limited to 4 */
	if (data_size > 4)
		return ICE_ERR_PARAM;

	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	for (i = 0; i < data_size; i++) {
		cmd->i2c_data[i] = *data;
		data++;
	}

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
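
/*
 * Params encoding sketch for the write above, using the field positions
 * named in the kernel-doc (values hypothetical): to write two bytes,
 * place the size in bits [3:0] of the params byte:
 *
 *	u8 params = (2 << ICE_AQC_I2C_DATA_SIZE_S) & ICE_AQC_I2C_DATA_SIZE_M;
 */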

/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW-provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
enum ice_status
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = gpio_ctrl_handle;
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
enum ice_status
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = gpio_ctrl_handle;
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return ICE_SUCCESS;
}
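
/*
 * Illustrative set-then-read-back sequence (handle and pin index are
 * hypothetical):
 *
 *	bool val;
 *	enum ice_status status;
 *
 *	status = ice_aq_set_gpio(hw, gpio_handle, 2, true, NULL);
 *	if (!status)
 *		status = ice_aq_get_gpio(hw, gpio_handle, 2, &val, NULL);
 */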

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}
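
/*
 * The three-way comparison above implements a lexicographic
 * "API version >= MAJ.MIN.PATCH" test. With a hypothetical threshold of
 * 1.5.3: versions 1.6.0 and 2.0.0 pass, 1.5.3 passes, 1.5.2 does not.
 * The same pattern recurs in the other ice_fw_supports_*() helpers below.
 */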

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type low words.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type high words.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
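
/*
 * Assembly sketch for the two loops above: each 64-bit PHY type field is
 * built from consecutive 16-bit NVM words w0..w3 (values hypothetical):
 *
 *	phy_type = (u64)w0 | ((u64)w1 << 16) |
 *		   ((u64)w2 << 32) | ((u64)w3 << 48);
 *
 * which is exactly what ldo->phy_type_low |= ((u64)buf << (i * 16))
 * accumulates word by word.
 */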

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_is_fw_health_report_supported
 * @hw: pointer to the hardware structure
 *
 * Return true if firmware supports health status reports,
 * false otherwise
 */
bool ice_is_fw_health_report_supported(struct ice_hw *hw)
{
	if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
		return true;

	if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
		    hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
			return true;
	}

	return false;
}

/**
 * ice_aq_set_health_status_config - Configure FW health events
 * @hw: pointer to the HW struct
 * @event_source: type of diagnostic events to enable
 * @cd: pointer to command details structure or NULL
 *
 * Configure the health status event types that the firmware will send to this
 * PF. The supported event types are: PF-specific, all PFs, and global
 */
enum ice_status
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
				struct ice_sq_cd *cd)
{
	struct ice_aqc_set_health_status_config *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_health_status_config;

	ice_fill_dflt_direct_cmd_desc(&desc,
				      ice_aqc_opc_set_health_status_config);

	cmd->event_source = event_source;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_port_options
 * @hw: pointer to the hw struct
 * @options: buffer for the resultant port options
 * @option_count: input - size of the buffer in port options structures,
 *		  output - number of returned port options
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *		 lport; when the PF owns more than one port, it must be true
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 */
enum ice_status
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u8 pmd_count;
	u8 max_speed;
	u8 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return ICE_ERR_PARAM;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	if (lport_valid)
		cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status != ICE_SUCCESS)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
	if (*active_option_valid) {
		*active_option_idx = cmd->port_options &
			ICE_AQC_PORT_OPT_ACTIVE_M;
		if (*active_option_idx > (*option_count - 1))
			return ICE_ERR_OUT_OF_RANGE;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	/* verify indirect FW response & mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
		options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
		pmd_count = options[i].pmd;
		max_speed = options[i].max_lane_speed;
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  pmd_count, max_speed);

		/* check only entries containing valid max pmd speed values;
		 * other reserved values may be returned when the logical port
		 * used is unrelated to a specific option
		 */
		if (max_speed <= ICE_AQC_PORT_OPT_MAX_LANE_100G) {
			if (pmd_count > ICE_MAX_PORT_PER_PCI_DEV)
				return ICE_ERR_OUT_OF_RANGE;
			if (pmd_count > 2 &&
			    max_speed > ICE_AQC_PORT_OPT_MAX_LANE_25G)
				return ICE_ERR_CFG;
			if (pmd_count > 7 &&
			    max_speed > ICE_AQC_PORT_OPT_MAX_LANE_10G)
				return ICE_ERR_CFG;
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer containing the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = CPU_TO_LE16(buf_size);

	cmd->type = mib_type;
	cmd->length = CPU_TO_LE16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
		return true;
	}
	return false;
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add a filter, false to remove it
 */
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = CPU_TO_LE16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports reporting the default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		return true;
	}
	return false;
}

/**
 * ice_is_fw_auto_drop_supported
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports the auto drop feature
 */
bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
{
	if (hw->api_maj_ver >= ICE_FW_API_AUTO_DROP_MAJ &&
	    hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)
		return true;
	return false;
}