/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * dump_phy_type - helper function that prints PHY type strings
 * @hw: pointer to the HW structure
 * @phy: 64 bit PHY type to decipher
 * @i: bit index within phy
 * @phy_string: string corresponding to bit i in phy
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
dump_phy_type(struct ice_hw *hw, u64 phy, u8 i, const char *phy_string,
	      const char *prefix)
{
	if (phy & BIT_ULL(i))
		ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", prefix, i,
			  phy_string);
}

/**
 * ice_dump_phy_type_low - helper function to dump phy_type_low
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type_low(struct ice_hw *hw, u64 low, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
		  (unsigned long long)low);

	dump_phy_type(hw, low, 0, "100BASE_TX", prefix);
	dump_phy_type(hw, low, 1, "100M_SGMII", prefix);
	dump_phy_type(hw, low, 2, "1000BASE_T", prefix);
	dump_phy_type(hw, low, 3, "1000BASE_SX", prefix);
	dump_phy_type(hw, low, 4, "1000BASE_LX", prefix);
	dump_phy_type(hw, low, 5, "1000BASE_KX", prefix);
	dump_phy_type(hw, low, 6, "1G_SGMII", prefix);
	dump_phy_type(hw, low, 7, "2500BASE_T", prefix);
	dump_phy_type(hw, low, 8, "2500BASE_X", prefix);
	dump_phy_type(hw, low, 9, "2500BASE_KX", prefix);
	dump_phy_type(hw, low, 10, "5GBASE_T", prefix);
	dump_phy_type(hw, low, 11, "5GBASE_KR", prefix);
	dump_phy_type(hw, low, 12, "10GBASE_T", prefix);
	dump_phy_type(hw, low, 13, "10G_SFI_DA", prefix);
	dump_phy_type(hw, low, 14, "10GBASE_SR", prefix);
	dump_phy_type(hw, low, 15, "10GBASE_LR", prefix);
	dump_phy_type(hw, low, 16, "10GBASE_KR_CR1", prefix);
	dump_phy_type(hw, low, 17, "10G_SFI_AOC_ACC", prefix);
	dump_phy_type(hw, low, 18, "10G_SFI_C2C", prefix);
	dump_phy_type(hw, low, 19, "25GBASE_T", prefix);
	dump_phy_type(hw, low, 20, "25GBASE_CR", prefix);
	dump_phy_type(hw, low, 21, "25GBASE_CR_S", prefix);
	dump_phy_type(hw, low, 22, "25GBASE_CR1", prefix);
	dump_phy_type(hw, low, 23, "25GBASE_SR", prefix);
	dump_phy_type(hw, low, 24, "25GBASE_LR", prefix);
	dump_phy_type(hw, low, 25, "25GBASE_KR", prefix);
	dump_phy_type(hw, low, 26, "25GBASE_KR_S", prefix);
	dump_phy_type(hw, low, 27, "25GBASE_KR1", prefix);
	dump_phy_type(hw, low, 28, "25G_AUI_AOC_ACC", prefix);
	dump_phy_type(hw, low, 29, "25G_AUI_C2C", prefix);
	dump_phy_type(hw, low, 30, "40GBASE_CR4", prefix);
	dump_phy_type(hw, low, 31, "40GBASE_SR4", prefix);
	dump_phy_type(hw, low, 32, "40GBASE_LR4", prefix);
	dump_phy_type(hw, low, 33, "40GBASE_KR4", prefix);
	dump_phy_type(hw, low, 34, "40G_XLAUI_AOC_ACC", prefix);
	dump_phy_type(hw, low, 35, "40G_XLAUI", prefix);
	dump_phy_type(hw, low, 36, "50GBASE_CR2", prefix);
	dump_phy_type(hw, low, 37, "50GBASE_SR2", prefix);
	dump_phy_type(hw, low, 38, "50GBASE_LR2", prefix);
	dump_phy_type(hw, low, 39, "50GBASE_KR2", prefix);
	dump_phy_type(hw, low, 40, "50G_LAUI2_AOC_ACC", prefix);
	dump_phy_type(hw, low, 41, "50G_LAUI2", prefix);
	dump_phy_type(hw, low, 42, "50G_AUI2_AOC_ACC", prefix);
	dump_phy_type(hw, low, 43, "50G_AUI2", prefix);
	dump_phy_type(hw, low, 44, "50GBASE_CP", prefix);
	dump_phy_type(hw, low, 45, "50GBASE_SR", prefix);
	dump_phy_type(hw, low, 46, "50GBASE_FR", prefix);
	dump_phy_type(hw, low, 47, "50GBASE_LR", prefix);
	dump_phy_type(hw, low, 48, "50GBASE_KR_PAM4", prefix);
	dump_phy_type(hw, low, 49, "50G_AUI1_AOC_ACC", prefix);
	dump_phy_type(hw, low, 50, "50G_AUI1", prefix);
	dump_phy_type(hw, low, 51, "100GBASE_CR4", prefix);
	dump_phy_type(hw, low, 52, "100GBASE_SR4", prefix);
	dump_phy_type(hw, low, 53, "100GBASE_LR4", prefix);
	dump_phy_type(hw, low, 54, "100GBASE_KR4", prefix);
	dump_phy_type(hw, low, 55, "100G_CAUI4_AOC_ACC", prefix);
	dump_phy_type(hw, low, 56, "100G_CAUI4", prefix);
	dump_phy_type(hw, low, 57, "100G_AUI4_AOC_ACC", prefix);
	dump_phy_type(hw, low, 58, "100G_AUI4", prefix);
	dump_phy_type(hw, low, 59, "100GBASE_CR_PAM4", prefix);
	dump_phy_type(hw, low, 60, "100GBASE_KR_PAM4", prefix);
	dump_phy_type(hw, low, 61, "100GBASE_CP2", prefix);
	dump_phy_type(hw, low, 62, "100GBASE_SR2", prefix);
	dump_phy_type(hw, low, 63, "100GBASE_DR", prefix);
}

/**
 * ice_dump_phy_type_high - helper function to dump phy_type_high
 * @hw: pointer to the HW structure
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type_high(struct ice_hw *hw, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
		  (unsigned long long)high);

	dump_phy_type(hw, high, 0, "100GBASE_KR2_PAM4", prefix);
	dump_phy_type(hw, high, 1, "100G_CAUI2_AOC_ACC", prefix);
	dump_phy_type(hw, high, 2, "100G_CAUI2", prefix);
	dump_phy_type(hw, high, 3, "100G_AUI2_AOC_ACC", prefix);
	dump_phy_type(hw, high, 4, "100G_AUI2", prefix);
}

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E824S:
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_1GBE:
	case ICE_DEV_ID_E825X:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}

/**
 * ice_is_generic_mac - check if the MAC type is generic
 * @hw: pointer to the hardware structure
 *
 * Returns true if mac_type is ICE_MAC_GENERIC, false if not.
 */
bool ice_is_generic_mac(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_GENERIC;
}

/**
 * ice_is_e810 - check if the device is E810 based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t - check if the device is E810T based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
		    hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
			return true;
		break;
	case ICE_DEV_ID_E810C_QSFP:
		if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
			return true;
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as
 * a "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac).
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}
	return ICE_SUCCESS;
}
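
/*
 * Usage sketch for ice_aq_manage_mac_read() (illustrative only; assumes an
 * initialized hw, and mirrors the buffer sizing done in ice_init_hw() below,
 * since a single port can report up to two addresses):
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = ice_calloc(hw, 2,
 *			       sizeof(struct ice_aqc_manage_mac_read_resp));
 *
 *	if (buf) {
 *		if (!ice_aq_manage_mac_read(hw, buf, len, NULL))
 *			;	// hw->port_info->mac now holds the LAN address
 *		ice_free(hw, buf);
 *	}
 */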

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	const char *prefix;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	if (report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA)
		prefix = "phy_caps_media";
	else if (report_mode == ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA)
		prefix = "phy_caps_no_media";
	else if (report_mode == ICE_AQC_REPORT_ACTIVE_CFG)
		prefix = "phy_caps_active";
	else if (report_mode == ICE_AQC_REPORT_DFLT_CFG)
		prefix = "phy_caps_default";
	else
		prefix = "phy_caps_invalid";

	ice_dump_phy_type_low(hw, LE64_TO_CPU(pcaps->phy_type_low), prefix);
	ice_dump_phy_type_high(hw, LE64_TO_CPU(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
			   sizeof(pi->phy.link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
	}

	return status;
}
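
/*
 * Usage sketch for ice_aq_get_phy_caps() (illustrative only; assumes a valid
 * port_info and a successful allocation, as in ice_init_hw() below):
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *
 *	pcaps = (struct ice_aqc_get_phy_caps_data *)
 *		ice_malloc(hw, sizeof(*pcaps));
 *	if (pcaps) {
 *		if (!ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *					 pcaps, NULL))
 *			;	// pcaps->caps now reflects the active config
 *		ice_free(hw, pcaps);
 *	}
 */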

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage is present. If no cage is present,
 * then connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present - check if the media cage is present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage is present. If no cage is
	 * present, then connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it.
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}
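
/*
 * Usage sketch for ice_aq_get_link_info() (illustrative only; assumes an
 * initialized port_info, as in ice_init_hw() below; ICE_AQ_LINK_UP is the
 * link-up bit in link_info):
 *
 *	struct ice_link_status link;
 *
 *	if (!ice_aq_get_link_info(hw->port_info, false, &link, NULL) &&
 *	    (link.link_info & ICE_AQ_LINK_UP))
 *		;	// link.link_speed holds the negotiated speed
 */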

/**
 * ice_fill_tx_timer_and_fc_thresh - fill Tx timer and FC refresh threshold
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and fc threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on the transmit timer and fc
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg - set MAC configuration
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
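
/*
 * Usage sketch for ice_aq_set_mac_cfg() (illustrative only; this is how
 * ice_init_hw() below enables jumbo frame support at the MAC level):
 *
 *	enum ice_status status;
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 *	if (status)
 *		;	// handle AQ failure
 */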

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
			  ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}
	return ICE_SUCCESS;
}

/**
 * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}

/**
 * ice_get_itr_intrl_gran - determine ITR/INTRL granularities
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	orom = &hw->flash.orom;
	nvm = &hw->flash.nvm;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major, nvm->minor, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_set_umac_shared - set unicast MAC shared flag
 * @hw: pointer to the hw struct
 *
 * Set boolean flag to allow unicast MAC sharing
 */
void ice_set_umac_shared(struct ice_hw *hw)
{
	hw->umac_shared = true;
}

898  * ice_init_hw - main hardware initialization routine
899  * @hw: pointer to the hardware structure
900  */
ice_init_hw(struct ice_hw * hw)901 enum ice_status ice_init_hw(struct ice_hw *hw)
902 {
903 	struct ice_aqc_get_phy_caps_data *pcaps;
904 	enum ice_status status;
905 	u16 mac_buf_len;
906 	void *mac_buf;
907 
908 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
909 
910 	/* Set MAC type based on DeviceID */
911 	status = ice_set_mac_type(hw);
912 	if (status)
913 		return status;
914 
915 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
916 			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
917 		PF_FUNC_RID_FUNCTION_NUMBER_S;
918 
919 	status = ice_reset(hw, ICE_RESET_PFR);
920 	if (status)
921 		return status;
922 
923 	ice_get_itr_intrl_gran(hw);
924 
925 	status = ice_create_all_ctrlq(hw);
926 	if (status)
927 		goto err_unroll_cqinit;
928 
929 	status = ice_init_nvm(hw);
930 	if (status)
931 		goto err_unroll_cqinit;
932 
933 	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
934 		ice_print_rollback_msg(hw);
935 
936 	status = ice_clear_pf_cfg(hw);
937 	if (status)
938 		goto err_unroll_cqinit;
939 
940 	/* Set bit to enable Flow Director filters */
941 	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
942 	INIT_LIST_HEAD(&hw->fdir_list_head);
943 
944 	ice_clear_pxe_mode(hw);
945 
946 	status = ice_get_caps(hw);
947 	if (status)
948 		goto err_unroll_cqinit;
949 
950 	hw->port_info = (struct ice_port_info *)
951 			ice_malloc(hw, sizeof(*hw->port_info));
952 	if (!hw->port_info) {
953 		status = ICE_ERR_NO_MEMORY;
954 		goto err_unroll_cqinit;
955 	}
956 
957 	/* set the back pointer to HW */
958 	hw->port_info->hw = hw;
959 
960 	/* Initialize port_info struct with switch configuration data */
961 	status = ice_get_initial_sw_cfg(hw);
962 	if (status)
963 		goto err_unroll_alloc;
964 
965 	hw->evb_veb = true;
966 	/* Query the allocated resources for Tx scheduler */
967 	status = ice_sched_query_res_alloc(hw);
968 	if (status) {
969 		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
970 		goto err_unroll_alloc;
971 	}
972 	ice_sched_get_psm_clk_freq(hw);
973 
974 	/* Initialize port_info struct with scheduler data */
975 	status = ice_sched_init_port(hw->port_info);
976 	if (status)
977 		goto err_unroll_sched;
978 	pcaps = (struct ice_aqc_get_phy_caps_data *)
979 		ice_malloc(hw, sizeof(*pcaps));
980 	if (!pcaps) {
981 		status = ICE_ERR_NO_MEMORY;
982 		goto err_unroll_sched;
983 	}
984 
985 	/* Initialize port_info struct with PHY capabilities */
986 	status = ice_aq_get_phy_caps(hw->port_info, false,
987 				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
988 	ice_free(hw, pcaps);
989 	if (status)
990 		ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
991 			 status);
992 
993 	/* Initialize port_info struct with link information */
994 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
995 	if (status)
996 		goto err_unroll_sched;
997 	/* need a valid SW entry point to build a Tx tree */
998 	if (!hw->sw_entry_point_layer) {
999 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
1000 		status = ICE_ERR_CFG;
1001 		goto err_unroll_sched;
1002 	}
1003 	INIT_LIST_HEAD(&hw->agg_list);
1004 	/* Initialize max burst size */
1005 	if (!hw->max_burst_size)
1006 		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1007 	status = ice_init_fltr_mgmt_struct(hw);
1008 	if (status)
1009 		goto err_unroll_sched;
1010 
1011 	/* Get MAC information */
1012 	/* A single port can report up to two (LAN and WoL) addresses */
1013 	mac_buf = ice_calloc(hw, 2,
1014 			     sizeof(struct ice_aqc_manage_mac_read_resp));
1015 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
1016 
1017 	if (!mac_buf) {
1018 		status = ICE_ERR_NO_MEMORY;
1019 		goto err_unroll_fltr_mgmt_struct;
1020 	}
1021 
1022 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1023 	ice_free(hw, mac_buf);
1024 
1025 	if (status)
1026 		goto err_unroll_fltr_mgmt_struct;
1027 
1028 	/* enable jumbo frame support at MAC level */
1029 	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
1030 	if (status)
1031 		goto err_unroll_fltr_mgmt_struct;
1032 
1033 	/* Obtain counter base index which would be used by flow director */
1034 	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
1035 	if (status)
1036 		goto err_unroll_fltr_mgmt_struct;
1037 	status = ice_init_hw_tbls(hw);
1038 	if (status)
1039 		goto err_unroll_fltr_mgmt_struct;
1040 	ice_init_lock(&hw->tnl_lock);
1041 
1042 	return ICE_SUCCESS;
1043 
1044 err_unroll_fltr_mgmt_struct:
1045 	ice_cleanup_fltr_mgmt_struct(hw);
1046 err_unroll_sched:
1047 	ice_sched_cleanup_all(hw);
1048 err_unroll_alloc:
1049 	ice_free(hw, hw->port_info);
1050 	hw->port_info = NULL;
1051 err_unroll_cqinit:
1052 	ice_destroy_all_ctrlq(hw);
1053 	return status;
1054 }
1055 
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
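
/*
 * Usage sketch for ice_reset() (illustrative only; this mirrors the PF reset
 * issued from ice_init_hw() above):
 *
 *	if (ice_reset(hw, ICE_RESET_PFR))
 *		;	// reset failed; the caller should abort initialization
 */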

/**
 * ice_copy_rxq_ctx_to_hw - copy Rx queue context to HW registers
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
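
/*
 * Reading the table above (informational sketch): each ICE_CTX_STORE entry
 * packs one ice_rlan_ctx field into the dense HW image at the given bit
 * offset. For example, the entry
 *
 *	ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89)
 *
 * means the 13-bit qlen field occupies bits 89..101 of the Rx queue context
 * image that ice_copy_rxq_ctx_to_hw() writes out dword by dword.
 */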

/**
 * ice_write_rxq_ctx - write Rx queue context to HW
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
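
/*
 * Usage sketch for ice_write_rxq_ctx() (illustrative only; the field values
 * and the ring_dma/ring_len/buf_len variables are hypothetical and depend on
 * ring setup done elsewhere in the driver):
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;	// ring base, 128-byte units (assumed)
 *	rlan_ctx.qlen = ring_len;	// number of descriptors in the ring
 *	rlan_ctx.dbuf = buf_len >> 7;	// Rx buffer size, 128-byte units
 *
 *	if (ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index))
 *		;	// handle bad pointer or out-of-range queue index
 */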

/**
 * ice_clear_rxq_ctx - clear Rx queue context in HW
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	ICE_CTX_STORE(ice_tlan_ctx, gsc_ena,			1,	172),
	{ 0 }
};

/**
 * ice_copy_tx_cmpltnq_ctx_to_hw - copy Tx completion queue context to HW
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
				       /* Field			Width   LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx - write Tx completion queue context to HW
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx - clear Tx completion queue context in HW
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}

/**
 * ice_copy_tx_drbell_q_ctx_to_hw - copy doorbell queue context to HW
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
					/* Field		Width   LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};

/**
 * ice_write_tx_drbell_q_ctx - write doorbell queue context to HW
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
		    ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}

/**
 * ice_clear_tx_drbell_q_ctx - clear doorbell queue context in HW
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}

1552 /* Sideband Queue command wrappers */
1553 
1554 /**
1555  * ice_get_sbq - returns the right control queue to use for sideband
1556  * @hw: pointer to the hardware structure
1557  */
1558 static struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
1559 {
1560 	if (!ice_is_generic_mac(hw))
1561 		return &hw->adminq;
1562 	return &hw->sbq;
1563 }
1564 
1565 /**
1566  * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
1567  * @hw: pointer to the HW struct
1568  * @desc: descriptor describing the command
1569  * @buf: buffer to use for indirect commands (NULL for direct commands)
1570  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1571  * @cd: pointer to command details structure
1572  */
1573 static enum ice_status
1574 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1575 		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1576 {
1577 	return ice_sq_send_cmd(hw, ice_get_sbq(hw), (struct ice_aq_desc *)desc,
1578 			       buf, buf_size, cd);
1579 }
1580 
1581 /**
1582  * ice_sbq_send_cmd_nolock - send Sideband Queue command to Sideband Queue
1583  *                           but do not lock sq_lock
1584  * @hw: pointer to the HW struct
1585  * @desc: descriptor describing the command
1586  * @buf: buffer to use for indirect commands (NULL for direct commands)
1587  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1588  * @cd: pointer to command details structure
1589  */
1590 static enum ice_status
1591 ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1592 			void *buf, u16 buf_size, struct ice_sq_cd *cd)
1593 {
1594 	return ice_sq_send_cmd_nolock(hw, ice_get_sbq(hw),
1595 				      (struct ice_aq_desc *)desc, buf,
1596 				      buf_size, cd);
1597 }
1598 
1599 /**
1600  * ice_sbq_rw_reg_lp - Fill Sideband Queue command, with lock parameter
1601  * @hw: pointer to the HW struct
1602  * @in: message info to be filled in descriptor
1603  * @lock: true to lock the sq_lock (the usual case); false if the sq_lock has
1604  *        already been locked at a higher level
1605  */
1606 enum ice_status ice_sbq_rw_reg_lp(struct ice_hw *hw,
1607 				  struct ice_sbq_msg_input *in, bool lock)
1608 {
1609 	struct ice_sbq_cmd_desc desc = {0};
1610 	struct ice_sbq_msg_req msg = {0};
1611 	enum ice_status status;
1612 	u16 msg_len;
1613 
1614 	msg_len = sizeof(msg);
1615 
1616 	msg.dest_dev = in->dest_dev;
1617 	msg.opcode = in->opcode;
1618 	msg.flags = ICE_SBQ_MSG_FLAGS;
1619 	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1620 	msg.msg_addr_low = CPU_TO_LE16(in->msg_addr_low);
1621 	msg.msg_addr_high = CPU_TO_LE32(in->msg_addr_high);
1622 
1623 	if (in->opcode)
1624 		msg.data = CPU_TO_LE32(in->data);
1625 	else
1626 		/* data read comes back in completion, so shorten the struct by
1627 		 * sizeof(msg.data)
1628 		 */
1629 		msg_len -= sizeof(msg.data);
1630 
1631 	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
1632 	desc.opcode = CPU_TO_LE16(ice_sbq_opc_neigh_dev_req);
1633 	desc.param0.cmd_len = CPU_TO_LE16(msg_len);
1634 	if (lock)
1635 		status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1636 	else
1637 		status = ice_sbq_send_cmd_nolock(hw, &desc, &msg, msg_len,
1638 						 NULL);
1639 	if (!status && !in->opcode)
1640 		in->data = LE32_TO_CPU
1641 			(((struct ice_sbq_msg_cmpl *)&msg)->data);
1642 	return status;
1643 }
1644 
1645 /**
1646  * ice_sbq_rw_reg - Fill Sideband Queue command
1647  * @hw: pointer to the HW struct
1648  * @in: message info to be filled in descriptor
1649  */
1650 enum ice_status ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
1651 {
1652 	return ice_sbq_rw_reg_lp(hw, in, true);
1653 }
1654 
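/* Illustrative sketch (not part of the driver): a minimal sideband register
 * read built on ice_sbq_rw_reg().  Per ice_sbq_rw_reg_lp() above, a zero
 * opcode is treated as a read and the data is returned in the completion;
 * the destination device ID here is a placeholder.
 */
static enum ice_status
example_sbq_read(struct ice_hw *hw, u16 addr_low, u32 addr_high, u32 *val)
{
	struct ice_sbq_msg_input msg = { 0 };
	enum ice_status status;

	msg.dest_dev = 0;		/* placeholder destination device */
	msg.opcode = 0;			/* 0 == read in ice_sbq_rw_reg_lp() */
	msg.msg_addr_low = addr_low;
	msg.msg_addr_high = addr_high;

	status = ice_sbq_rw_reg(hw, &msg);
	if (!status)
		*val = msg.data;	/* filled in from the completion */
	return status;
}
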
1655 /**
1656  * ice_sbq_lock - Lock the sideband queue's sq_lock
1657  * @hw: pointer to the HW struct
1658  */
1659 void ice_sbq_lock(struct ice_hw *hw)
1660 {
1661 	ice_acquire_lock(&ice_get_sbq(hw)->sq_lock);
1662 }
1663 
1664 /**
1665  * ice_sbq_unlock - Unlock the sideband queue's sq_lock
1666  * @hw: pointer to the HW struct
1667  */
1668 void ice_sbq_unlock(struct ice_hw *hw)
1669 {
1670 	ice_release_lock(&ice_get_sbq(hw)->sq_lock);
1671 }
1672 
1673 /* FW Admin Queue command wrappers */
1674 
1675 /**
1676  * ice_should_retry_sq_send_cmd
1677  * @opcode: AQ opcode
1678  *
1679  * Decide if we should retry the send command routine for the ATQ, depending
1680  * on the opcode.
1681  */
1682 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1683 {
1684 	switch (opcode) {
1685 	case ice_aqc_opc_get_link_topo:
1686 	case ice_aqc_opc_lldp_stop:
1687 	case ice_aqc_opc_lldp_start:
1688 	case ice_aqc_opc_lldp_filter_ctrl:
1689 		return true;
1690 	}
1691 
1692 	return false;
1693 }
1694 
1695 /**
1696  * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1697  * @hw: pointer to the HW struct
1698  * @cq: pointer to the specific Control queue
1699  * @desc: prefilled descriptor describing the command
1700  * @buf: buffer to use for indirect commands (or NULL for direct commands)
1701  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1702  * @cd: pointer to command details structure
1703  *
1704  * Retry sending the FW Admin Queue command to the FW Admin Queue, up to
1705  * ICE_SQ_SEND_MAX_EXECUTE times, while firmware returns the EBUSY AQ error.
1706  */
1707 static enum ice_status
1708 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1709 		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
1710 		      struct ice_sq_cd *cd)
1711 {
1712 	struct ice_aq_desc desc_cpy;
1713 	enum ice_status status;
1714 	bool is_cmd_for_retry;
1715 	u8 *buf_cpy = NULL;
1716 	u8 idx = 0;
1717 	u16 opcode;
1718 
1719 	opcode = LE16_TO_CPU(desc->opcode);
1720 	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1721 	ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1722 
1723 	if (is_cmd_for_retry) {
1724 		if (buf) {
1725 			buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1726 			if (!buf_cpy)
1727 				return ICE_ERR_NO_MEMORY;
1728 		}
1729 
1730 		ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1731 			   ICE_NONDMA_TO_NONDMA);
1732 	}
1733 
1734 	do {
1735 		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1736 
1737 		if (!is_cmd_for_retry || status == ICE_SUCCESS ||
1738 		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1739 			break;
1740 
1741 		if (buf_cpy)
1742 			ice_memcpy(buf, buf_cpy, buf_size,
1743 				   ICE_NONDMA_TO_NONDMA);
1744 
1745 		ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1746 			   ICE_NONDMA_TO_NONDMA);
1747 
1748 		ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1749 
1750 	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1751 
1752 	if (buf_cpy)
1753 		ice_free(hw, buf_cpy);
1754 
1755 	return status;
1756 }
1757 
1758 /**
1759  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1760  * @hw: pointer to the HW struct
1761  * @desc: descriptor describing the command
1762  * @buf: buffer to use for indirect commands (NULL for direct commands)
1763  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1764  * @cd: pointer to command details structure
1765  *
1766  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1767  */
1768 enum ice_status
1769 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1770 		u16 buf_size, struct ice_sq_cd *cd)
1771 {
1772 	if (hw->aq_send_cmd_fn) {
1773 		enum ice_status status = ICE_ERR_NOT_READY;
1774 		u16 retval = ICE_AQ_RC_OK;
1775 
1776 		ice_acquire_lock(&hw->adminq.sq_lock);
1777 		if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
1778 					buf, buf_size)) {
1779 			retval = LE16_TO_CPU(desc->retval);
1780 			/* strip off FW internal code */
1781 			if (retval)
1782 				retval &= 0xff;
1783 			if (retval == ICE_AQ_RC_OK)
1784 				status = ICE_SUCCESS;
1785 			else
1786 				status = ICE_ERR_AQ_ERROR;
1787 		}
1788 
1789 		hw->adminq.sq_last_status = (enum ice_aq_err)retval;
1790 		ice_release_lock(&hw->adminq.sq_lock);
1791 
1792 		return status;
1793 	}
1794 	return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1795 }
1796 
1797 /**
1798  * ice_aq_get_fw_ver
1799  * @hw: pointer to the HW struct
1800  * @cd: pointer to command details structure or NULL
1801  *
1802  * Get the firmware version (0x0001) from the admin queue commands
1803  */
1804 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1805 {
1806 	struct ice_aqc_get_ver *resp;
1807 	struct ice_aq_desc desc;
1808 	enum ice_status status;
1809 
1810 	resp = &desc.params.get_ver;
1811 
1812 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1813 
1814 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1815 
1816 	if (!status) {
1817 		hw->fw_branch = resp->fw_branch;
1818 		hw->fw_maj_ver = resp->fw_major;
1819 		hw->fw_min_ver = resp->fw_minor;
1820 		hw->fw_patch = resp->fw_patch;
1821 		hw->fw_build = LE32_TO_CPU(resp->fw_build);
1822 		hw->api_branch = resp->api_branch;
1823 		hw->api_maj_ver = resp->api_major;
1824 		hw->api_min_ver = resp->api_minor;
1825 		hw->api_patch = resp->api_patch;
1826 	}
1827 
1828 	return status;
1829 }
1830 
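/* Illustrative sketch (not part of the driver): typical use of
 * ice_aq_get_fw_ver() during init.  On success the version fields are
 * cached in the ice_hw structure, so callers only check the status and
 * then read them back.
 */
static enum ice_status example_log_fw_ver(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		return status;

	ice_debug(hw, ICE_DBG_INIT, "fw %u.%u.%u api %u.%u\n",
		  hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
		  hw->api_maj_ver, hw->api_min_ver);
	return ICE_SUCCESS;
}
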
1831 /**
1832  * ice_aq_send_driver_ver
1833  * @hw: pointer to the HW struct
1834  * @dv: driver's major, minor version
1835  * @cd: pointer to command details structure or NULL
1836  *
1837  * Send the driver version (0x0002) to the firmware
1838  */
1839 enum ice_status
1840 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1841 		       struct ice_sq_cd *cd)
1842 {
1843 	struct ice_aqc_driver_ver *cmd;
1844 	struct ice_aq_desc desc;
1845 	u16 len;
1846 
1847 	cmd = &desc.params.driver_ver;
1848 
1849 	if (!dv)
1850 		return ICE_ERR_PARAM;
1851 
1852 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1853 
1854 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1855 	cmd->major_ver = dv->major_ver;
1856 	cmd->minor_ver = dv->minor_ver;
1857 	cmd->build_ver = dv->build_ver;
1858 	cmd->subbuild_ver = dv->subbuild_ver;
1859 
1860 	len = 0;
1861 	while (len < sizeof(dv->driver_string) &&
1862 	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1863 		len++;
1864 
1865 	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1866 }
1867 
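/* Illustrative sketch (not part of the driver): reporting a driver version
 * at load time.  The version numbers and string are placeholders, and the
 * string is assumed to fit in dv.driver_string; only its printable-ASCII,
 * NUL-terminated prefix is sent (see the length scan above).
 */
static enum ice_status example_send_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv = { 0 };

	dv.major_ver = 1;
	dv.minor_ver = 0;
	dv.build_ver = 0;
	dv.subbuild_ver = 0;
	ice_memcpy(dv.driver_string, "example-1.0", sizeof("example-1.0"),
		   ICE_NONDMA_TO_NONDMA);

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}
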
1868 /**
1869  * ice_aq_q_shutdown
1870  * @hw: pointer to the HW struct
1871  * @unloading: is the driver unloading itself
1872  *
1873  * Tell the Firmware that we're shutting down the AdminQ and whether
1874  * or not the driver is unloading as well (0x0003).
1875  */
1876 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1877 {
1878 	struct ice_aqc_q_shutdown *cmd;
1879 	struct ice_aq_desc desc;
1880 
1881 	cmd = &desc.params.q_shutdown;
1882 
1883 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1884 
1885 	if (unloading)
1886 		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1887 
1888 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1889 }
1890 
1891 /**
1892  * ice_aq_req_res
1893  * @hw: pointer to the HW struct
1894  * @res: resource ID
1895  * @access: access type
1896  * @sdp_number: resource number
1897  * @timeout: the maximum time in ms that the driver may hold the resource
1898  * @cd: pointer to command details structure or NULL
1899  *
1900  * Requests common resource using the admin queue commands (0x0008).
1901  * When attempting to acquire the Global Config Lock, the driver can
1902  * learn of three states:
1903  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1904  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1905  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1906  *                          successfully downloaded the package; the driver does
1907  *                          not have to download the package and can continue
1908  *                          loading
1909  *
1910  * Note that if the caller is in an acquire-lock, perform-action, release-lock
1911  * phase of operation, it is possible that the FW may detect a timeout and
1912  * issue a CORER. In this case, the driver will receive a CORER interrupt and
1913  * will have to determine its cause. The calling thread handling this flow
1914  * will likely get an error propagated back to it indicating that the Download
1915  * Package, Update Package or Release Resource AQ command timed out.
1916  */
1917 static enum ice_status
1918 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1919 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1920 	       struct ice_sq_cd *cd)
1921 {
1922 	struct ice_aqc_req_res *cmd_resp;
1923 	struct ice_aq_desc desc;
1924 	enum ice_status status;
1925 
1926 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1927 
1928 	cmd_resp = &desc.params.res_owner;
1929 
1930 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1931 
1932 	cmd_resp->res_id = CPU_TO_LE16(res);
1933 	cmd_resp->access_type = CPU_TO_LE16(access);
1934 	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1935 	cmd_resp->timeout = CPU_TO_LE32(*timeout);
1936 	*timeout = 0;
1937 
1938 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1939 
1940 	/* The completion specifies the maximum time in ms that the driver
1941 	 * may hold the resource in the Timeout field.
1942 	 */
1943 
1944 	/* Global config lock response utilizes an additional status field.
1945 	 *
1946 	 * If the Global config lock resource is held by some other driver, the
1947 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1948 	 * and the timeout field indicates the maximum time the current owner
1949 	 * of the resource has to free it.
1950 	 */
1951 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1952 		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1953 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1954 			return ICE_SUCCESS;
1955 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1956 			   ICE_AQ_RES_GLBL_IN_PROG) {
1957 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1958 			return ICE_ERR_AQ_ERROR;
1959 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1960 			   ICE_AQ_RES_GLBL_DONE) {
1961 			return ICE_ERR_AQ_NO_WORK;
1962 		}
1963 
1964 		/* invalid FW response, force a timeout immediately */
1965 		*timeout = 0;
1966 		return ICE_ERR_AQ_ERROR;
1967 	}
1968 
1969 	/* If the resource is held by some other driver, the command completes
1970 	 * with a busy return value and the timeout field indicates the maximum
1971 	 * time the current owner of the resource has to free it.
1972 	 */
1973 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1974 		*timeout = LE32_TO_CPU(cmd_resp->timeout);
1975 
1976 	return status;
1977 }
1978 
1979 /**
1980  * ice_aq_release_res
1981  * @hw: pointer to the HW struct
1982  * @res: resource ID
1983  * @sdp_number: resource number
1984  * @cd: pointer to command details structure or NULL
1985  *
1986  * release common resource using the admin queue commands (0x0009)
1987  */
1988 static enum ice_status
1989 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1990 		   struct ice_sq_cd *cd)
1991 {
1992 	struct ice_aqc_req_res *cmd;
1993 	struct ice_aq_desc desc;
1994 
1995 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1996 
1997 	cmd = &desc.params.res_owner;
1998 
1999 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
2000 
2001 	cmd->res_id = CPU_TO_LE16(res);
2002 	cmd->res_number = CPU_TO_LE32(sdp_number);
2003 
2004 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2005 }
2006 
2007 /**
2008  * ice_acquire_res
2009  * @hw: pointer to the HW structure
2010  * @res: resource ID
2011  * @access: access type (read or write)
2012  * @timeout: timeout in milliseconds
2013  *
2014  * This function will attempt to acquire the ownership of a resource.
2015  */
2016 enum ice_status
2017 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2018 		enum ice_aq_res_access_type access, u32 timeout)
2019 {
2020 #define ICE_RES_POLLING_DELAY_MS	10
2021 	u32 delay = ICE_RES_POLLING_DELAY_MS;
2022 	u32 time_left = timeout;
2023 	enum ice_status status;
2024 
2025 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2026 
2027 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2028 
2029 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
2030 	 * previously acquired the resource and performed any necessary updates;
2031 	 * in this case the caller does not obtain the resource and has no
2032 	 * further work to do.
2033 	 */
2034 	if (status == ICE_ERR_AQ_NO_WORK)
2035 		goto ice_acquire_res_exit;
2036 
2037 	if (status)
2038 		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
2039 
2040 	/* If necessary, poll until the current lock owner times out */
2041 	timeout = time_left;
2042 	while (status && timeout && time_left) {
2043 		ice_msec_delay(delay, true);
2044 		timeout = (timeout > delay) ? timeout - delay : 0;
2045 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2046 
2047 		if (status == ICE_ERR_AQ_NO_WORK)
2048 			/* lock free, but no work to do */
2049 			break;
2050 
2051 		if (!status)
2052 			/* lock acquired */
2053 			break;
2054 	}
2055 	if (status && status != ICE_ERR_AQ_NO_WORK)
2056 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2057 
2058 ice_acquire_res_exit:
2059 	if (status == ICE_ERR_AQ_NO_WORK) {
2060 		if (access == ICE_RES_WRITE)
2061 			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2062 		else
2063 			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
2064 	}
2065 	return status;
2066 }
2067 
2068 /**
2069  * ice_release_res
2070  * @hw: pointer to the HW structure
2071  * @res: resource ID
2072  *
2073  * This function will release a resource using the proper Admin Command.
2074  */
2075 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2076 {
2077 	enum ice_status status;
2078 	u32 total_delay = 0;
2079 
2080 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2081 
2082 	status = ice_aq_release_res(hw, res, 0, NULL);
2083 
2084 	/* there are some rare cases when trying to release the resource
2085 	 * results in an admin queue timeout, so handle them correctly
2086 	 */
2087 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
2088 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
2089 		ice_msec_delay(1, true);
2090 		status = ice_aq_release_res(hw, res, 0, NULL);
2091 		total_delay++;
2092 	}
2093 }
2094 
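/* Illustrative sketch (not part of the driver): the acquire/perform/release
 * pattern the two routines above are built for, using the Global Config
 * Lock.  Per ice_aq_req_res(), ICE_ERR_AQ_NO_WORK means another driver
 * already did the work; the timeout below is a placeholder.
 */
static enum ice_status example_with_global_cfg_lock(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
				 ICE_RES_WRITE, 3000 /* ms, placeholder */);
	if (status == ICE_ERR_AQ_NO_WORK)
		return ICE_SUCCESS;	/* nothing left to do */
	if (status)
		return status;

	/* ... download/update the package while holding the lock ... */

	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
	return ICE_SUCCESS;
}
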
2095 /**
2096  * ice_aq_alloc_free_res - command to allocate/free resources
2097  * @hw: pointer to the HW struct
2098  * @num_entries: number of resource entries in buffer
2099  * @buf: Indirect buffer to hold data parameters and response
2100  * @buf_size: size of buffer for indirect commands
2101  * @opc: pass in the command opcode
2102  * @cd: pointer to command details structure or NULL
2103  *
2104  * Helper function to allocate/free resources using the admin queue commands
2105  */
2106 enum ice_status
2107 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
2108 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2109 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2110 {
2111 	struct ice_aqc_alloc_free_res_cmd *cmd;
2112 	struct ice_aq_desc desc;
2113 
2114 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2115 
2116 	cmd = &desc.params.sw_res_ctrl;
2117 
2118 	if (!buf)
2119 		return ICE_ERR_PARAM;
2120 
2121 	if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
2122 		return ICE_ERR_PARAM;
2123 
2124 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2125 
2126 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2127 
2128 	cmd->num_entries = CPU_TO_LE16(num_entries);
2129 
2130 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2131 }
2132 
2133 /**
2134  * ice_alloc_hw_res - allocate resource
2135  * @hw: pointer to the HW struct
2136  * @type: type of resource
2137  * @num: number of resources to allocate
2138  * @btm: allocate from bottom
2139  * @res: pointer to array that will receive the resources
2140  */
2141 enum ice_status
2142 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2143 {
2144 	struct ice_aqc_alloc_free_res_elem *buf;
2145 	enum ice_status status;
2146 	u16 buf_len;
2147 
2148 	buf_len = ice_struct_size(buf, elem, num);
2149 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2150 	if (!buf)
2151 		return ICE_ERR_NO_MEMORY;
2152 
2153 	/* Prepare buffer to allocate resource. */
2154 	buf->num_elems = CPU_TO_LE16(num);
2155 	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2156 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2157 	if (btm)
2158 		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2159 
2160 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2161 				       ice_aqc_opc_alloc_res, NULL);
2162 	if (status)
2163 		goto ice_alloc_res_exit;
2164 
2165 	ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
2166 		   ICE_NONDMA_TO_NONDMA);
2167 
2168 ice_alloc_res_exit:
2169 	ice_free(hw, buf);
2170 	return status;
2171 }
2172 
2173 /**
2174  * ice_free_hw_res - free allocated HW resource
2175  * @hw: pointer to the HW struct
2176  * @type: type of resource to free
2177  * @num: number of resources
2178  * @res: pointer to array that contains the resources to free
2179  */
2180 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2181 {
2182 	struct ice_aqc_alloc_free_res_elem *buf;
2183 	enum ice_status status;
2184 	u16 buf_len;
2185 
2186 	buf_len = ice_struct_size(buf, elem, num);
2187 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2188 	if (!buf)
2189 		return ICE_ERR_NO_MEMORY;
2190 
2191 	/* Prepare buffer to free resource. */
2192 	buf->num_elems = CPU_TO_LE16(num);
2193 	buf->res_type = CPU_TO_LE16(type);
2194 	ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2195 		   ICE_NONDMA_TO_NONDMA);
2196 
2197 	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2198 				       ice_aqc_opc_free_res, NULL);
2199 	if (status)
2200 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2201 
2202 	ice_free(hw, buf);
2203 	return status;
2204 }
2205 
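/* Illustrative sketch (not part of the driver): allocating and freeing one
 * HW resource with the helpers above.  res_type is whichever
 * ICE_AQC_RES_TYPE_* value the caller needs; no specific type is assumed.
 */
static enum ice_status example_alloc_then_free(struct ice_hw *hw, u16 res_type)
{
	enum ice_status status;
	u16 res_id;

	/* allocate a single resource, scanning from the top of the range */
	status = ice_alloc_hw_res(hw, res_type, 1, false, &res_id);
	if (status)
		return status;

	/* ... use res_id ... */

	return ice_free_hw_res(hw, res_type, 1, &res_id);
}
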
2206 /**
2207  * ice_get_num_per_func - determine number of resources per PF
2208  * @hw: pointer to the HW structure
2209  * @max: value to be evenly split between each PF
2210  *
2211  * Determine the number of valid functions by going through the bitmap returned
2212  * from parsing capabilities and use this to calculate the number of resources
2213  * per PF based on the max value passed in.
2214  */
2215 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2216 {
2217 	u8 funcs;
2218 
2219 #define ICE_CAPS_VALID_FUNCS_M	0xFF
2220 	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2221 			     ICE_CAPS_VALID_FUNCS_M);
2222 
2223 	if (!funcs)
2224 		return 0;
2225 
2226 	return max / funcs;
2227 }
2228 
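/* Illustrative note (not part of the driver): a worked example of the split
 * above.  With valid_functions = 0x55 (PFs 0, 2, 4 and 6 active),
 * ice_hweight8() yields 4, so a device-wide maximum of 2048 resources
 * becomes 2048 / 4 = 512 per PF.  The numbers are examples only.
 */
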
2229 /**
2230  * ice_parse_common_caps - parse common device/function capabilities
2231  * @hw: pointer to the HW struct
2232  * @caps: pointer to common capabilities structure
2233  * @elem: the capability element to parse
2234  * @prefix: message prefix for tracing capabilities
2235  *
2236  * Given a capability element, extract relevant details into the common
2237  * capability structure.
2238  *
2239  * Returns: true if the capability matches one of the common capability ids,
2240  * false otherwise.
2241  */
2242 static bool
2243 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2244 		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
2245 {
2246 	u32 logical_id = LE32_TO_CPU(elem->logical_id);
2247 	u32 phys_id = LE32_TO_CPU(elem->phys_id);
2248 	u32 number = LE32_TO_CPU(elem->number);
2249 	u16 cap = LE16_TO_CPU(elem->cap);
2250 	bool found = true;
2251 
2252 	switch (cap) {
2253 	case ICE_AQC_CAPS_VALID_FUNCTIONS:
2254 		caps->valid_functions = number;
2255 		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2256 			  caps->valid_functions);
2257 		break;
2258 	case ICE_AQC_CAPS_DCB:
2259 		caps->dcb = (number == 1);
2260 		caps->active_tc_bitmap = logical_id;
2261 		caps->maxtc = phys_id;
2262 		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2263 		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2264 			  caps->active_tc_bitmap);
2265 		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2266 		break;
2267 	case ICE_AQC_CAPS_RSS:
2268 		caps->rss_table_size = number;
2269 		caps->rss_table_entry_width = logical_id;
2270 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2271 			  caps->rss_table_size);
2272 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2273 			  caps->rss_table_entry_width);
2274 		break;
2275 	case ICE_AQC_CAPS_RXQS:
2276 		caps->num_rxq = number;
2277 		caps->rxq_first_id = phys_id;
2278 		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2279 			  caps->num_rxq);
2280 		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2281 			  caps->rxq_first_id);
2282 		break;
2283 	case ICE_AQC_CAPS_TXQS:
2284 		caps->num_txq = number;
2285 		caps->txq_first_id = phys_id;
2286 		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2287 			  caps->num_txq);
2288 		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2289 			  caps->txq_first_id);
2290 		break;
2291 	case ICE_AQC_CAPS_MSIX:
2292 		caps->num_msix_vectors = number;
2293 		caps->msix_vector_first_id = phys_id;
2294 		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2295 			  caps->num_msix_vectors);
2296 		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2297 			  caps->msix_vector_first_id);
2298 		break;
2299 	case ICE_AQC_CAPS_NVM_MGMT:
2300 		caps->sec_rev_disabled =
2301 			(number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2302 			true : false;
2303 		ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2304 			  caps->sec_rev_disabled);
2305 		caps->update_disabled =
2306 			(number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2307 			true : false;
2308 		ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2309 			  caps->update_disabled);
2310 		caps->nvm_unified_update =
2311 			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2312 			true : false;
2313 		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2314 			  caps->nvm_unified_update);
2315 		break;
2316 	case ICE_AQC_CAPS_MAX_MTU:
2317 		caps->max_mtu = number;
2318 		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2319 			  prefix, caps->max_mtu);
2320 		break;
2321 	case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2322 		caps->pcie_reset_avoidance = (number > 0);
2323 		ice_debug(hw, ICE_DBG_INIT,
2324 			  "%s: pcie_reset_avoidance = %d\n", prefix,
2325 			  caps->pcie_reset_avoidance);
2326 		break;
2327 	case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2328 		caps->reset_restrict_support = (number == 1);
2329 		ice_debug(hw, ICE_DBG_INIT,
2330 			  "%s: reset_restrict_support = %d\n", prefix,
2331 			  caps->reset_restrict_support);
2332 		break;
2333 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2334 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2335 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2336 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2337 	{
2338 		u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
2339 
2340 		caps->ext_topo_dev_img_ver_high[index] = number;
2341 		caps->ext_topo_dev_img_ver_low[index] = logical_id;
2342 		caps->ext_topo_dev_img_part_num[index] =
2343 			(phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2344 			ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2345 		caps->ext_topo_dev_img_load_en[index] =
2346 			(phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2347 		caps->ext_topo_dev_img_prog_en[index] =
2348 			(phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2349 		ice_debug(hw, ICE_DBG_INIT,
2350 			  "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
2351 			  prefix, index,
2352 			  caps->ext_topo_dev_img_ver_high[index]);
2353 		ice_debug(hw, ICE_DBG_INIT,
2354 			  "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
2355 			  prefix, index,
2356 			  caps->ext_topo_dev_img_ver_low[index]);
2357 		ice_debug(hw, ICE_DBG_INIT,
2358 			  "%s: ext_topo_dev_img_part_num[%d] = %d\n",
2359 			  prefix, index,
2360 			  caps->ext_topo_dev_img_part_num[index]);
2361 		ice_debug(hw, ICE_DBG_INIT,
2362 			  "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2363 			  prefix, index,
2364 			  caps->ext_topo_dev_img_load_en[index]);
2365 		ice_debug(hw, ICE_DBG_INIT,
2366 			  "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2367 			  prefix, index,
2368 			  caps->ext_topo_dev_img_prog_en[index]);
2369 		break;
2370 	}
2371 	default:
2372 		/* Not one of the recognized common capabilities */
2373 		found = false;
2374 	}
2375 
2376 	return found;
2377 }
2378 
2379 /**
2380  * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2381  * @hw: pointer to the HW structure
2382  * @caps: pointer to capabilities structure to fix
2383  *
2384  * Re-calculate the capabilities that are dependent on the number of physical
2385  * ports; i.e. some features are not supported or function differently on
2386  * devices with more than 4 ports.
2387  */
2388 static void
2389 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2390 {
2391 	/* This assumes device capabilities are always scanned before function
2392 	 * capabilities during the initialization flow.
2393 	 */
2394 	if (hw->dev_caps.num_funcs > 4) {
2395 		/* Max 4 TCs per port */
2396 		caps->maxtc = 4;
2397 		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2398 			  caps->maxtc);
2399 	}
2400 }
2401 
2402 /**
2403  * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2404  * @hw: pointer to the HW struct
2405  * @func_p: pointer to function capabilities structure
2406  * @cap: pointer to the capability element to parse
2407  *
2408  * Extract function capabilities for ICE_AQC_CAPS_VSI.
2409  */
2410 static void
2411 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2412 			struct ice_aqc_list_caps_elem *cap)
2413 {
2414 	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2415 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2416 		  LE32_TO_CPU(cap->number));
2417 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2418 		  func_p->guar_num_vsi);
2419 }
2420 
2421 /**
2422  * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2423  * @hw: pointer to the HW struct
2424  * @func_p: pointer to function capabilities structure
2425  * @cap: pointer to the capability element to parse
2426  *
2427  * Extract function capabilities for ICE_AQC_CAPS_1588.
2428  */
2429 static void
2430 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2431 			 struct ice_aqc_list_caps_elem *cap)
2432 {
2433 	struct ice_ts_func_info *info = &func_p->ts_func_info;
2434 	u32 number = LE32_TO_CPU(cap->number);
2435 
2436 	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2437 	func_p->common_cap.ieee_1588 = info->ena;
2438 
2439 	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2440 	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2441 	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2442 	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2443 
2444 	info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
2445 	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2446 
2447 	if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2448 		info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2449 	} else {
2450 		/* Unknown clock frequency, so assume a (probably incorrect)
2451 		 * default to avoid out-of-bounds lookups of frequency-related
2452 		 * information.
2453 		 */
2454 		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2455 			  info->clk_freq);
2456 		info->time_ref = ICE_TIME_REF_FREQ_25_000;
2457 	}
2458 
2459 	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2460 		  func_p->common_cap.ieee_1588);
2461 	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2462 		  info->src_tmr_owned);
2463 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2464 		  info->tmr_ena);
2465 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2466 		  info->tmr_index_owned);
2467 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2468 		  info->tmr_index_assoc);
2469 	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2470 		  info->clk_freq);
2471 	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2472 		  info->clk_src);
2473 }
2474 
2475 /**
2476  * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2477  * @hw: pointer to the HW struct
2478  * @func_p: pointer to function capabilities structure
2479  *
2480  * Extract function capabilities for ICE_AQC_CAPS_FD.
2481  */
2482 static void
2483 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2484 {
2485 	u32 reg_val, val;
2486 
2487 	if (hw->dcf_enabled)
2488 		return;
2489 	reg_val = rd32(hw, GLQF_FD_SIZE);
2490 	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2491 		GLQF_FD_SIZE_FD_GSIZE_S;
2492 	func_p->fd_fltr_guar =
2493 		ice_get_num_per_func(hw, val);
2494 	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2495 		GLQF_FD_SIZE_FD_BSIZE_S;
2496 	func_p->fd_fltr_best_effort = val;
2497 
2498 	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2499 		  func_p->fd_fltr_guar);
2500 	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2501 		  func_p->fd_fltr_best_effort);
2502 }
2503 
2504 /**
2505  * ice_parse_func_caps - Parse function capabilities
2506  * @hw: pointer to the HW struct
2507  * @func_p: pointer to function capabilities structure
2508  * @buf: buffer containing the function capability records
2509  * @cap_count: the number of capabilities
2510  *
2511  * Helper function to parse function (0x000A) capabilities list. For
2512  * capabilities shared between device and function, this relies on
2513  * ice_parse_common_caps.
2514  *
2515  * Loop through the list of provided capabilities and extract the relevant
2516  * data into the function capabilities structured.
2517  */
2518 static void
2519 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2520 		    void *buf, u32 cap_count)
2521 {
2522 	struct ice_aqc_list_caps_elem *cap_resp;
2523 	u32 i;
2524 
2525 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2526 
2527 	ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2528 
2529 	for (i = 0; i < cap_count; i++) {
2530 		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2531 		bool found;
2532 
2533 		found = ice_parse_common_caps(hw, &func_p->common_cap,
2534 					      &cap_resp[i], "func caps");
2535 
2536 		switch (cap) {
2537 		case ICE_AQC_CAPS_VSI:
2538 			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2539 			break;
2540 		case ICE_AQC_CAPS_1588:
2541 			ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2542 			break;
2543 		case ICE_AQC_CAPS_FD:
2544 			ice_parse_fdir_func_caps(hw, func_p);
2545 			break;
2546 		default:
2547 			/* Don't list common capabilities as unknown */
2548 			if (!found)
2549 				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2550 					  i, cap);
2551 			break;
2552 		}
2553 	}
2554 
2555 	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2556 }
2557 
2558 /**
2559  * ice_func_id_to_logical_id - map from function id to logical pf id
2560  * @active_function_bitmap: active function bitmap
2561  * @pf_id: function number of device
2562  */
2563 static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id)
2564 {
2565 	u8 logical_id = 0;
2566 	u8 i;
2567 
2568 	for (i = 0; i < pf_id; i++)
2569 		if (active_function_bitmap & BIT(i))
2570 			logical_id++;
2571 
2572 	return logical_id;
2573 }
2574 
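/* Illustrative note (not part of the driver): with an active function
 * bitmap of 0xD (functions 0, 2 and 3), pf_id 3 counts the two set bits
 * below it (bits 0 and 2), so its logical ID is 2, while pf_id 0 always
 * maps to logical ID 0.
 */
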
2575 /**
2576  * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2577  * @hw: pointer to the HW struct
2578  * @dev_p: pointer to device capabilities structure
2579  * @cap: capability element to parse
2580  *
2581  * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2582  */
2583 static void
2584 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2585 			      struct ice_aqc_list_caps_elem *cap)
2586 {
2587 	u32 number = LE32_TO_CPU(cap->number);
2588 
2589 	dev_p->num_funcs = ice_hweight32(number);
2590 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2591 		  dev_p->num_funcs);
2592 
2593 	hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id);
2594 }
2595 
2596 /**
2597  * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2598  * @hw: pointer to the HW struct
2599  * @dev_p: pointer to device capabilities structure
2600  * @cap: capability element to parse
2601  *
2602  * Parse ICE_AQC_CAPS_VSI for device capabilities.
2603  */
2604 static void
2605 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2606 		       struct ice_aqc_list_caps_elem *cap)
2607 {
2608 	u32 number = LE32_TO_CPU(cap->number);
2609 
2610 	dev_p->num_vsi_allocd_to_host = number;
2611 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2612 		  dev_p->num_vsi_allocd_to_host);
2613 }
2614 
2615 /**
2616  * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2617  * @hw: pointer to the HW struct
2618  * @dev_p: pointer to device capabilities structure
2619  * @cap: capability element to parse
2620  *
2621  * Parse ICE_AQC_CAPS_1588 for device capabilities.
2622  */
2623 static void
2624 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2625 			struct ice_aqc_list_caps_elem *cap)
2626 {
2627 	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2628 	u32 logical_id = LE32_TO_CPU(cap->logical_id);
2629 	u32 phys_id = LE32_TO_CPU(cap->phys_id);
2630 	u32 number = LE32_TO_CPU(cap->number);
2631 
2632 	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2633 	dev_p->common_cap.ieee_1588 = info->ena;
2634 
2635 	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2636 	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2637 	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2638 
2639 	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2640 	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2641 	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2642 
2643 	info->ena_ports = logical_id;
2644 	info->tmr_own_map = phys_id;
2645 
2646 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2647 		  dev_p->common_cap.ieee_1588);
2648 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2649 		  info->tmr0_owner);
2650 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2651 		  info->tmr0_owned);
2652 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2653 		  info->tmr0_ena);
2654 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2655 		  info->tmr1_owner);
2656 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2657 		  info->tmr1_owned);
2658 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2659 		  info->tmr1_ena);
2660 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2661 		  info->ena_ports);
2662 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2663 		  info->tmr_own_map);
2664 }
2665 
2666 /**
2667  * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2668  * @hw: pointer to the HW struct
2669  * @dev_p: pointer to device capabilities structure
2670  * @cap: capability element to parse
2671  *
2672  * Parse ICE_AQC_CAPS_FD for device capabilities.
2673  */
2674 static void
2675 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2676 			struct ice_aqc_list_caps_elem *cap)
2677 {
2678 	u32 number = LE32_TO_CPU(cap->number);
2679 
2680 	dev_p->num_flow_director_fltr = number;
2681 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2682 		  dev_p->num_flow_director_fltr);
2683 }
2684 
2685 /**
2686  * ice_parse_dev_caps - Parse device capabilities
2687  * @hw: pointer to the HW struct
2688  * @dev_p: pointer to device capabilities structure
2689  * @buf: buffer containing the device capability records
2690  * @cap_count: the number of capabilities
2691  *
2692  * Helper function to parse the device (0x000B) capabilities list. For
2693  * capabilities shared between device and function, this relies on
2694  * ice_parse_common_caps.
2695  *
2696  * Loop through the list of provided capabilities and extract the relevant
2697  * data into the device capabilities structure.
2698  */
2699 static void
2700 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2701 		   void *buf, u32 cap_count)
2702 {
2703 	struct ice_aqc_list_caps_elem *cap_resp;
2704 	u32 i;
2705 
2706 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2707 
2708 	ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2709 
2710 	for (i = 0; i < cap_count; i++) {
2711 		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2712 		bool found;
2713 
2714 		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2715 					      &cap_resp[i], "dev caps");
2716 
2717 		switch (cap) {
2718 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2719 			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2720 			break;
2721 		case ICE_AQC_CAPS_VSI:
2722 			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2723 			break;
2724 		case ICE_AQC_CAPS_1588:
2725 			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2726 			break;
2727 		case ICE_AQC_CAPS_FD:
2728 			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2729 			break;
2730 		default:
2731 			/* Don't list common capabilities as unknown */
2732 			if (!found)
2733 				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2734 					  i, cap);
2735 			break;
2736 		}
2737 	}
2738 
2739 	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2740 }
2741 
2742 /**
2743  * ice_aq_list_caps - query function/device capabilities
2744  * @hw: pointer to the HW struct
2745  * @buf: a buffer to hold the capabilities
2746  * @buf_size: size of the buffer
2747  * @cap_count: if not NULL, set to the number of capabilities reported
2748  * @opc: capabilities type to discover, device or function
2749  * @cd: pointer to command details structure or NULL
2750  *
2751  * Get the function (0x000A) or device (0x000B) capabilities description from
2752  * firmware and store it in the buffer.
2753  *
2754  * If the cap_count pointer is not NULL, then it is set to the number of
2755  * capabilities firmware will report. Note that if the buffer size is too
2756  * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2757  * cap_count will still be updated in this case. It is recommended that the
2758  * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2759  * firmware could return) to avoid this.
2760  */
2761 static enum ice_status
2762 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2763 		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2764 {
2765 	struct ice_aqc_list_caps *cmd;
2766 	struct ice_aq_desc desc;
2767 	enum ice_status status;
2768 
2769 	cmd = &desc.params.get_cap;
2770 
2771 	if (opc != ice_aqc_opc_list_func_caps &&
2772 	    opc != ice_aqc_opc_list_dev_caps)
2773 		return ICE_ERR_PARAM;
2774 
2775 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2776 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2777 
2778 	if (cap_count)
2779 		*cap_count = LE32_TO_CPU(cmd->count);
2780 
2781 	return status;
2782 }
2783 
2784 /**
2785  * ice_discover_dev_caps - Read and extract device capabilities
2786  * @hw: pointer to the hardware structure
2787  * @dev_caps: pointer to device capabilities structure
2788  *
2789  * Read the device capabilities and extract them into the dev_caps structure
2790  * for later use.
2791  */
2792 static enum ice_status
2793 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2794 {
2795 	enum ice_status status;
2796 	u32 cap_count = 0;
2797 	void *cbuf;
2798 
2799 	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2800 	if (!cbuf)
2801 		return ICE_ERR_NO_MEMORY;
2802 
2803 	/* Although the driver doesn't know the number of capabilities the
2804 	 * device will return, we can simply send a 4KB buffer, the maximum
2805 	 * possible size that firmware can return.
2806 	 */
2807 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2808 
2809 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2810 				  ice_aqc_opc_list_dev_caps, NULL);
2811 	if (!status)
2812 		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2813 	ice_free(hw, cbuf);
2814 
2815 	return status;
2816 }
2817 
2818 /**
2819  * ice_discover_func_caps - Read and extract function capabilities
2820  * @hw: pointer to the hardware structure
2821  * @func_caps: pointer to function capabilities structure
2822  *
2823  * Read the function capabilities and extract them into the func_caps structure
2824  * for later use.
2825  */
2826 static enum ice_status
2827 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2828 {
2829 	enum ice_status status;
2830 	u32 cap_count = 0;
2831 	void *cbuf;
2832 
2833 	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2834 	if (!cbuf)
2835 		return ICE_ERR_NO_MEMORY;
2836 
2837 	/* Although the driver doesn't know the number of capabilities the
2838 	 * device will return, we can simply send a 4KB buffer, the maximum
2839 	 * possible size that firmware can return.
2840 	 */
2841 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2842 
2843 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2844 				  ice_aqc_opc_list_func_caps, NULL);
2845 	if (!status)
2846 		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2847 	ice_free(hw, cbuf);
2848 
2849 	return status;
2850 }
2851 
2852 /**
2853  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2854  * @hw: pointer to the hardware structure
2855  */
2856 void ice_set_safe_mode_caps(struct ice_hw *hw)
2857 {
2858 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2859 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2860 	struct ice_hw_common_caps cached_caps;
2861 	u32 num_funcs;
2862 
2863 	/* cache some func_caps values that should be restored after memset */
2864 	cached_caps = func_caps->common_cap;
2865 
2866 	/* unset func capabilities */
2867 	memset(func_caps, 0, sizeof(*func_caps));
2868 
2869 #define ICE_RESTORE_FUNC_CAP(name) \
2870 	func_caps->common_cap.name = cached_caps.name
2871 
2872 	/* restore cached values */
2873 	ICE_RESTORE_FUNC_CAP(valid_functions);
2874 	ICE_RESTORE_FUNC_CAP(txq_first_id);
2875 	ICE_RESTORE_FUNC_CAP(rxq_first_id);
2876 	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2877 	ICE_RESTORE_FUNC_CAP(max_mtu);
2878 	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2879 
2880 	/* one Tx and one Rx queue in safe mode */
2881 	func_caps->common_cap.num_rxq = 1;
2882 	func_caps->common_cap.num_txq = 1;
2883 
2884 	/* two MSIX vectors, one for traffic and one for misc causes */
2885 	func_caps->common_cap.num_msix_vectors = 2;
2886 	func_caps->guar_num_vsi = 1;
2887 
2888 	/* cache some dev_caps values that should be restored after memset */
2889 	cached_caps = dev_caps->common_cap;
2890 	num_funcs = dev_caps->num_funcs;
2891 
2892 	/* unset dev capabilities */
2893 	memset(dev_caps, 0, sizeof(*dev_caps));
2894 
2895 #define ICE_RESTORE_DEV_CAP(name) \
2896 	dev_caps->common_cap.name = cached_caps.name
2897 
2898 	/* restore cached values */
2899 	ICE_RESTORE_DEV_CAP(valid_functions);
2900 	ICE_RESTORE_DEV_CAP(txq_first_id);
2901 	ICE_RESTORE_DEV_CAP(rxq_first_id);
2902 	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2903 	ICE_RESTORE_DEV_CAP(max_mtu);
2904 	ICE_RESTORE_DEV_CAP(nvm_unified_update);
2905 	dev_caps->num_funcs = num_funcs;
2906 
2907 	/* one Tx and one Rx queue per function in safe mode */
2908 	dev_caps->common_cap.num_rxq = num_funcs;
2909 	dev_caps->common_cap.num_txq = num_funcs;
2910 
2911 	/* two MSIX vectors per function */
2912 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2913 }
2914 
2915 /**
2916  * ice_get_caps - get info about the HW
2917  * @hw: pointer to the hardware structure
2918  */
2919 enum ice_status ice_get_caps(struct ice_hw *hw)
2920 {
2921 	enum ice_status status;
2922 
2923 	status = ice_discover_dev_caps(hw, &hw->dev_caps);
2924 	if (status)
2925 		return status;
2926 
2927 	return ice_discover_func_caps(hw, &hw->func_caps);
2928 }
2929 
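/* Illustrative note (not part of the driver): ice_get_caps() is typically
 * called early in init, once the control queues are alive, since later
 * paths (the safe-mode override above, the per-PF splits) consume
 * hw->dev_caps and hw->func_caps.  A minimal call site:
 *
 *	status = ice_get_caps(hw);
 *	if (status)
 *		return status;
 */
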
2930 /**
2931  * ice_aq_manage_mac_write - manage MAC address write command
2932  * @hw: pointer to the HW struct
2933  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2934  * @flags: flags to control write behavior
2935  * @cd: pointer to command details structure or NULL
2936  *
2937  * This function is used to write MAC address to the NVM (0x0108).
2938  */
2939 enum ice_status
2940 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2941 			struct ice_sq_cd *cd)
2942 {
2943 	struct ice_aqc_manage_mac_write *cmd;
2944 	struct ice_aq_desc desc;
2945 
2946 	cmd = &desc.params.mac_write;
2947 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2948 
2949 	cmd->flags = flags;
2950 	ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
2951 
2952 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2953 }
2954 
2955 /**
2956  * ice_aq_clear_pxe_mode
2957  * @hw: pointer to the HW struct
2958  *
2959  * Tell the firmware that the driver is taking over from PXE (0x0110).
2960  */
2961 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2962 {
2963 	struct ice_aq_desc desc;
2964 
2965 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2966 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2967 
2968 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2969 }
2970 
2971 /**
2972  * ice_clear_pxe_mode - clear pxe operations mode
2973  * @hw: pointer to the HW struct
2974  *
2975  * Make sure all PXE mode settings are cleared, including things
2976  * like descriptor fetch/write-back mode.
2977  */
2978 void ice_clear_pxe_mode(struct ice_hw *hw)
2979 {
2980 	if (ice_check_sq_alive(hw, &hw->adminq))
2981 		ice_aq_clear_pxe_mode(hw);
2982 }
2983 
2984 /**
2985  * ice_aq_set_port_params - set physical port parameters.
2986  * @pi: pointer to the port info struct
2987  * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2988  * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
2989  * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
2990  * @double_vlan: if set, double VLAN is enabled
2991  * @cd: pointer to command details structure or NULL
2992  *
2993  * Set Physical port parameters (0x0203)
2994  */
2995 enum ice_status
2996 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2997 		       bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2998 		       struct ice_sq_cd *cd)
2999 
3000 {
3001 	struct ice_aqc_set_port_params *cmd;
3002 	struct ice_hw *hw = pi->hw;
3003 	struct ice_aq_desc desc;
3004 	u16 cmd_flags = 0;
3005 
3006 	cmd = &desc.params.set_port_params;
3007 
3008 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
3009 	cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
3010 	if (save_bad_pac)
3011 		cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
3012 	if (pad_short_pac)
3013 		cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
3014 	if (double_vlan)
3015 		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
3016 	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
3017 
3018 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3019 }
3020 
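/* Illustrative sketch (not part of the driver): enabling double VLAN (QinQ)
 * on a port with the command above, leaving bad-frame forwarding and short
 * packet padding disabled.  The zero bad_frame_vsi is a placeholder.
 */
static enum ice_status example_enable_double_vlan(struct ice_port_info *pi)
{
	return ice_aq_set_port_params(pi, 0, false, false, true, NULL);
}
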
3021 /**
3022  * ice_is_100m_speed_supported
3023  * @hw: pointer to the HW struct
3024  *
3025  * Returns true if 100M speeds are supported by the device,
3026  * false otherwise.
3027  */
3028 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3029 {
3030 	switch (hw->device_id) {
3031 	case ICE_DEV_ID_E822C_10G_BASE_T:
3032 	case ICE_DEV_ID_E822C_SGMII:
3033 	case ICE_DEV_ID_E822L_10G_BASE_T:
3034 	case ICE_DEV_ID_E822L_SGMII:
3035 	case ICE_DEV_ID_E823L_10G_BASE_T:
3036 	case ICE_DEV_ID_E823L_1GBE:
3037 		return true;
3038 	default:
3039 		return false;
3040 	}
3041 }
3042 
3043 /**
3044  * ice_get_link_speed_based_on_phy_type - returns link speed
3045  * @phy_type_low: lower part of phy_type
3046  * @phy_type_high: higher part of phy_type
3047  *
3048  * This helper function will convert an entry in PHY type structure
3049  * [phy_type_low, phy_type_high] to its corresponding link speed.
3050  * Note: In the structure of [phy_type_low, phy_type_high], there should
3051  * be one bit set, as this function will convert one PHY type to its
3052  * speed.
3053  * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
3054  * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
3055  */
3056 static u16
3057 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3058 {
3059 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3060 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3061 
3062 	switch (phy_type_low) {
3063 	case ICE_PHY_TYPE_LOW_100BASE_TX:
3064 	case ICE_PHY_TYPE_LOW_100M_SGMII:
3065 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3066 		break;
3067 	case ICE_PHY_TYPE_LOW_1000BASE_T:
3068 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
3069 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
3070 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
3071 	case ICE_PHY_TYPE_LOW_1G_SGMII:
3072 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3073 		break;
3074 	case ICE_PHY_TYPE_LOW_2500BASE_T:
3075 	case ICE_PHY_TYPE_LOW_2500BASE_X:
3076 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
3077 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3078 		break;
3079 	case ICE_PHY_TYPE_LOW_5GBASE_T:
3080 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
3081 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3082 		break;
3083 	case ICE_PHY_TYPE_LOW_10GBASE_T:
3084 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3085 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
3086 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
3087 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3088 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3089 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3090 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3091 		break;
3092 	case ICE_PHY_TYPE_LOW_25GBASE_T:
3093 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
3094 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3095 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3096 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
3097 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
3098 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
3099 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3100 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3101 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3102 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3103 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3104 		break;
3105 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3106 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3107 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3108 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3109 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3110 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
3111 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3112 		break;
3113 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3114 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3115 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3116 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3117 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3118 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
3119 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3120 	case ICE_PHY_TYPE_LOW_50G_AUI2:
3121 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
3122 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
3123 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
3124 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
3125 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3126 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3127 	case ICE_PHY_TYPE_LOW_50G_AUI1:
3128 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3129 		break;
3130 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3131 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3132 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3133 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3134 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3135 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
3136 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3137 	case ICE_PHY_TYPE_LOW_100G_AUI4:
3138 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3139 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3140 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3141 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3142 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
3143 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3144 		break;
3145 	default:
3146 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3147 		break;
3148 	}
3149 
3150 	switch (phy_type_high) {
3151 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3152 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3153 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3154 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3155 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
3156 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3157 		break;
3158 	default:
3159 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3160 		break;
3161 	}
3162 
3163 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3164 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3165 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3166 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3167 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3168 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3169 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3170 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3171 		return speed_phy_type_low;
3172 	else
3173 		return speed_phy_type_high;
3174 }
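
/* Usage sketch (illustrative): callers are expected to pass exactly one
 * PHY type bit across the two words, typically probing one bit at a time
 * as ice_update_phy_type() below does.
 *
 *	u16 speed;
 *
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR,
 *						     0);
 *
 * speed is now ICE_AQ_LINK_SPEED_25GB; a value with two or more bits set
 * matches no case and yields ICE_AQ_LINK_SPEED_UNKNOWN.
 */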
3175 
3176 /**
3177  * ice_update_phy_type
3178  * @phy_type_low: pointer to the lower part of phy_type
3179  * @phy_type_high: pointer to the higher part of phy_type
3180  * @link_speeds_bitmap: targeted link speeds bitmap
3181  *
3182  * Note: For the link_speeds_bitmap layout, see
3183  * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
3184  * link_speeds_bitmap that includes multiple speeds.
3185  *
3186  * Each entry in the [phy_type_low, phy_type_high] structure represents a
3187  * certain link speed. This helper function turns on bits in
3188  * [phy_type_low, phy_type_high] based on the value of the
3189  * link_speeds_bitmap input parameter.
3190  */
3191 void
3192 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3193 		    u16 link_speeds_bitmap)
3194 {
3195 	u64 pt_high;
3196 	u64 pt_low;
3197 	int index;
3198 	u16 speed;
3199 
3200 	/* We first check with low part of phy_type */
3201 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3202 		pt_low = BIT_ULL(index);
3203 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3204 
3205 		if (link_speeds_bitmap & speed)
3206 			*phy_type_low |= BIT_ULL(index);
3207 	}
3208 
3209 	/* We then check with high part of phy_type */
3210 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3211 		pt_high = BIT_ULL(index);
3212 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3213 
3214 		if (link_speeds_bitmap & speed)
3215 			*phy_type_high |= BIT_ULL(index);
3216 	}
3217 }
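
/* Illustrative sketch: translating a requested speed bitmap into PHY type
 * masks before building a Set PHY config command. The variables are
 * hypothetical; a real caller would also AND the result with the PHY
 * types the NIC reports as supported.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *	u16 speeds = ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB;
 *
 *	ice_update_phy_type(&phy_low, &phy_high, speeds);
 */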
3218 
3219 /**
3220  * ice_aq_set_phy_cfg
3221  * @hw: pointer to the HW struct
3222  * @pi: port info structure of the interested logical port
3223  * @cfg: structure with PHY configuration data to be set
3224  * @cd: pointer to command details structure or NULL
3225  *
3226  * Set the various PHY configuration parameters supported on the Port.
3227  * One or more of the Set PHY config parameters may be ignored in an MFP
3228  * mode as the PF may not have the privilege to set some of the PHY Config
3229  * parameters. This status will be indicated by the command response (0x0601).
3230  */
3231 enum ice_status
3232 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3233 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3234 {
3235 	struct ice_aq_desc desc;
3236 	enum ice_status status;
3237 
3238 	if (!cfg)
3239 		return ICE_ERR_PARAM;
3240 
3241 	/* Ensure that only valid bits of cfg->caps can be turned on. */
3242 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3243 		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3244 			  cfg->caps);
3245 
3246 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3247 	}
3248 
3249 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3250 	desc.params.set_phy.lport_num = pi->lport;
3251 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3252 
3253 	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3254 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
3255 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
3256 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
3257 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
3258 	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
3259 	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
3260 		  cfg->low_power_ctrl_an);
3261 	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
3262 	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
3263 	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
3264 		  cfg->link_fec_opt);
3265 
3266 	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3267 
3268 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3269 		status = ICE_SUCCESS;
3270 
3271 	if (!status)
3272 		pi->phy.curr_user_phy_cfg = *cfg;
3273 
3274 	return status;
3275 }
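
/* Typical call flow (sketch, error handling trimmed): read the active
 * configuration first, modify only the fields of interest, then write the
 * whole structure back so unrelated settings are preserved. `pi` and
 * `pcaps` are assumed to be set up as in the helpers below.
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL);
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *	ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */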
3276 
3277 /**
3278  * ice_update_link_info - update status of the HW network link
3279  * @pi: port info structure of the interested logical port
3280  */
3281 enum ice_status ice_update_link_info(struct ice_port_info *pi)
3282 {
3283 	struct ice_link_status *li;
3284 	enum ice_status status;
3285 
3286 	if (!pi)
3287 		return ICE_ERR_PARAM;
3288 
3289 	li = &pi->phy.link_info;
3290 
3291 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
3292 	if (status)
3293 		return status;
3294 
3295 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3296 		struct ice_aqc_get_phy_caps_data *pcaps;
3297 		struct ice_hw *hw;
3298 
3299 		hw = pi->hw;
3300 		pcaps = (struct ice_aqc_get_phy_caps_data *)
3301 			ice_malloc(hw, sizeof(*pcaps));
3302 		if (!pcaps)
3303 			return ICE_ERR_NO_MEMORY;
3304 
3305 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3306 					     pcaps, NULL);
3307 
3308 		if (status == ICE_SUCCESS)
3309 			ice_memcpy(li->module_type, &pcaps->module_type,
3310 				   sizeof(li->module_type),
3311 				   ICE_NONDMA_TO_NONDMA);
3312 
3313 		ice_free(hw, pcaps);
3314 	}
3315 
3316 	return status;
3317 }
3318 
3319 /**
3320  * ice_cache_phy_user_req
3321  * @pi: port information structure
3322  * @cache_data: PHY logging data
3323  * @cache_mode: PHY logging mode
3324  *
3325  * Log the user request on (FC, FEC, SPEED) for later use.
3326  */
3327 static void
3328 ice_cache_phy_user_req(struct ice_port_info *pi,
3329 		       struct ice_phy_cache_mode_data cache_data,
3330 		       enum ice_phy_cache_mode cache_mode)
3331 {
3332 	if (!pi)
3333 		return;
3334 
3335 	switch (cache_mode) {
3336 	case ICE_FC_MODE:
3337 		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3338 		break;
3339 	case ICE_SPEED_MODE:
3340 		pi->phy.curr_user_speed_req =
3341 			cache_data.data.curr_user_speed_req;
3342 		break;
3343 	case ICE_FEC_MODE:
3344 		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3345 		break;
3346 	default:
3347 		break;
3348 	}
3349 }
3350 
3351 /**
3352  * ice_caps_to_fc_mode
3353  * @caps: PHY capabilities
3354  *
3355  * Convert PHY FC capabilities to ice FC mode
3356  */
3357 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3358 {
3359 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3360 	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3361 		return ICE_FC_FULL;
3362 
3363 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3364 		return ICE_FC_TX_PAUSE;
3365 
3366 	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3367 		return ICE_FC_RX_PAUSE;
3368 
3369 	return ICE_FC_NONE;
3370 }
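
/* Example (illustrative): decoding the pause bits from a Get PHY caps
 * response, where `pcaps` is a hypothetical response buffer.
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *
 * TX+RX pause maps to ICE_FC_FULL, a single direction to
 * ICE_FC_TX_PAUSE/ICE_FC_RX_PAUSE, and neither bit to ICE_FC_NONE.
 */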
3371 
3372 /**
3373  * ice_caps_to_fec_mode
3374  * @caps: PHY capabilities
3375  * @fec_options: Link FEC options
3376  *
3377  * Convert PHY FEC capabilities to ice FEC mode
3378  */
3379 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3380 {
3381 	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3382 		return ICE_FEC_AUTO;
3383 
3384 	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3385 			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3386 			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3387 			   ICE_AQC_PHY_FEC_25G_KR_REQ))
3388 		return ICE_FEC_BASER;
3389 
3390 	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3391 			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3392 			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3393 		return ICE_FEC_RS;
3394 
3395 	return ICE_FEC_NONE;
3396 }
3397 
3398 /**
3399  * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3400  * @pi: port information structure
3401  * @cfg: PHY configuration data to set FC mode
3402  * @req_mode: FC mode to configure
3403  */
3404 static enum ice_status
3405 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3406 	       enum ice_fc_mode req_mode)
3407 {
3408 	struct ice_phy_cache_mode_data cache_data;
3409 	u8 pause_mask = 0x0;
3410 
3411 	if (!pi || !cfg)
3412 		return ICE_ERR_BAD_PTR;
3413 
3414 	switch (req_mode) {
3415 	case ICE_FC_AUTO:
3416 	{
3417 		struct ice_aqc_get_phy_caps_data *pcaps;
3418 		enum ice_status status;
3419 
3420 		pcaps = (struct ice_aqc_get_phy_caps_data *)
3421 			ice_malloc(pi->hw, sizeof(*pcaps));
3422 		if (!pcaps)
3423 			return ICE_ERR_NO_MEMORY;
3424 
3425 		/* Query the value of FC that both the NIC and attached media
3426 		 * can do.
3427 		 */
3428 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3429 					     pcaps, NULL);
3430 		if (status) {
3431 			ice_free(pi->hw, pcaps);
3432 			return status;
3433 		}
3434 
3435 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3436 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3437 
3438 		ice_free(pi->hw, pcaps);
3439 		break;
3440 	}
3441 	case ICE_FC_FULL:
3442 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3443 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3444 		break;
3445 	case ICE_FC_RX_PAUSE:
3446 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3447 		break;
3448 	case ICE_FC_TX_PAUSE:
3449 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3450 		break;
3451 	default:
3452 		break;
3453 	}
3454 
3455 	/* clear the old pause settings */
3456 	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3457 		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3458 
3459 	/* set the new capabilities */
3460 	cfg->caps |= pause_mask;
3461 
3462 	/* Cache user FC request */
3463 	cache_data.data.curr_user_fc_req = req_mode;
3464 	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3465 
3466 	return ICE_SUCCESS;
3467 }
3468 
3469 /**
3470  * ice_set_fc
3471  * @pi: port information structure
3472  * @aq_failures: pointer to status code, specific to ice_set_fc routine
3473  * @ena_auto_link_update: enable automatic link update
3474  *
3475  * Set the requested flow control mode.
3476  */
3477 enum ice_status
3478 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3479 {
3480 	struct ice_aqc_set_phy_cfg_data  cfg = { 0 };
3481 	struct ice_aqc_get_phy_caps_data *pcaps;
3482 	enum ice_status status;
3483 	struct ice_hw *hw;
3484 
3485 	if (!pi || !aq_failures)
3486 		return ICE_ERR_BAD_PTR;
3487 
3488 	*aq_failures = 0;
3489 	hw = pi->hw;
3490 
3491 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3492 		ice_malloc(hw, sizeof(*pcaps));
3493 	if (!pcaps)
3494 		return ICE_ERR_NO_MEMORY;
3495 
3496 	/* Get the current PHY config */
3497 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3498 				     pcaps, NULL);
3499 
3500 	if (status) {
3501 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3502 		goto out;
3503 	}
3504 
3505 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3506 
3507 	/* Configure the set PHY data */
3508 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3509 	if (status) {
3510 		if (status != ICE_ERR_BAD_PTR)
3511 			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3512 
3513 		goto out;
3514 	}
3515 
3516 	/* If the capabilities have changed, then set the new config */
3517 	if (cfg.caps != pcaps->caps) {
3518 		int retry_count, retry_max = 10;
3519 
3520 		/* Auto restart link so settings take effect */
3521 		if (ena_auto_link_update)
3522 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3523 
3524 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3525 		if (status) {
3526 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3527 			goto out;
3528 		}
3529 
3530 		/* Update the link info
3531 		 * It sometimes takes a really long time for link to
3532 		 * come back from the atomic reset. Thus, we wait a
3533 		 * little bit.
3534 		 */
3535 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3536 			status = ice_update_link_info(pi);
3537 
3538 			if (status == ICE_SUCCESS)
3539 				break;
3540 
3541 			ice_msec_delay(100, true);
3542 		}
3543 
3544 		if (status)
3545 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3546 	}
3547 
3548 out:
3549 	ice_free(hw, pcaps);
3550 	return status;
3551 }
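
/* Caller sketch (illustrative): requesting symmetric flow control and
 * reporting which AQ step failed. `pi` is a hypothetical, initialized
 * port_info.
 *
 *	u8 aq_failures = 0;
 *	enum ice_status status;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set fc failed, step %u\n",
 *			  aq_failures);
 */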
3552 
3553 /**
3554  * ice_phy_caps_equals_cfg
3555  * @phy_caps: PHY capabilities
3556  * @phy_cfg: PHY configuration
3557  *
3558  * Helper function to determine if PHY capabilities match the PHY
3559  * configuration
3560  */
3561 bool
3562 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3563 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3564 {
3565 	u8 caps_mask, cfg_mask;
3566 
3567 	if (!phy_caps || !phy_cfg)
3568 		return false;
3569 
3570 	/* These bits are not common between capabilities and configuration.
3571 	 * Do not use them to determine equality.
3572 	 */
3573 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3574 					      ICE_AQC_PHY_EN_MOD_QUAL);
3575 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3576 
3577 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3578 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3579 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3580 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3581 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3582 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3583 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3584 		return false;
3585 
3586 	return true;
3587 }
3588 
3589 /**
3590  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3591  * @pi: port information structure
3592  * @caps: PHY ability structure to copy data from
3593  * @cfg: PHY configuration structure to copy data to
3594  *
3595  * Helper function to copy AQC PHY get ability data to PHY set configuration
3596  * data structure
3597  */
3598 void
3599 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3600 			 struct ice_aqc_get_phy_caps_data *caps,
3601 			 struct ice_aqc_set_phy_cfg_data *cfg)
3602 {
3603 	if (!pi || !caps || !cfg)
3604 		return;
3605 
3606 	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3607 	cfg->phy_type_low = caps->phy_type_low;
3608 	cfg->phy_type_high = caps->phy_type_high;
3609 	cfg->caps = caps->caps;
3610 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3611 	cfg->eee_cap = caps->eee_cap;
3612 	cfg->eeer_value = caps->eeer_value;
3613 	cfg->link_fec_opt = caps->link_fec_options;
3614 	cfg->module_compliance_enforcement =
3615 		caps->module_compliance_enforcement;
3616 }
3617 
3618 /**
3619  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3620  * @pi: port information structure
3621  * @cfg: PHY configuration data to set FEC mode
3622  * @fec: FEC mode to configure
3623  */
3624 enum ice_status
3625 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3626 		enum ice_fec_mode fec)
3627 {
3628 	struct ice_aqc_get_phy_caps_data *pcaps;
3629 	enum ice_status status = ICE_SUCCESS;
3630 	struct ice_hw *hw;
3631 
3632 	if (!pi || !cfg)
3633 		return ICE_ERR_BAD_PTR;
3634 
3635 	hw = pi->hw;
3636 
3637 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3638 		ice_malloc(hw, sizeof(*pcaps));
3639 	if (!pcaps)
3640 		return ICE_ERR_NO_MEMORY;
3641 
3642 	status = ice_aq_get_phy_caps(pi, false,
3643 				     (ice_fw_supports_report_dflt_cfg(hw) ?
3644 				      ICE_AQC_REPORT_DFLT_CFG :
3645 				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3646 
3647 	if (status)
3648 		goto out;
3649 
3650 	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3651 	cfg->link_fec_opt = pcaps->link_fec_options;
3652 
3653 	switch (fec) {
3654 	case ICE_FEC_BASER:
3655 		/* Clear RS bits, and AND BASE-R ability
3656 		 * bits and OR request bits.
3657 		 */
3658 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3659 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3660 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3661 			ICE_AQC_PHY_FEC_25G_KR_REQ;
3662 		break;
3663 	case ICE_FEC_RS:
3664 		/* Clear BASE-R bits, and AND RS ability
3665 		 * bits and OR request bits.
3666 		 */
3667 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3668 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3669 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3670 		break;
3671 	case ICE_FEC_NONE:
3672 		/* Clear all FEC option bits. */
3673 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3674 		break;
3675 	case ICE_FEC_AUTO:
3676 		/* AND auto FEC bit, and all caps bits. */
3677 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3678 		cfg->link_fec_opt |= pcaps->link_fec_options;
3679 		break;
3680 	default:
3681 		status = ICE_ERR_PARAM;
3682 		break;
3683 	}
3684 
3685 	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3686 	    !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3687 		struct ice_link_default_override_tlv tlv;
3688 
3689 		if (ice_get_link_default_override(&tlv, pi))
3690 			goto out;
3691 
3692 		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3693 		    (tlv.options & ICE_LINK_OVERRIDE_EN))
3694 			cfg->link_fec_opt = tlv.fec_options;
3695 	}
3696 
3697 out:
3698 	ice_free(hw, pcaps);
3699 
3700 	return status;
3701 }
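
/* Sketch of requesting RS FEC (illustrative, error handling trimmed):
 * this helper only edits `cfg`; the change takes effect once the caller
 * issues the Set PHY config command.
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	if (!ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS))
 *		ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */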
3702 
3703 /**
3704  * ice_get_link_status - get status of the HW network link
3705  * @pi: port information structure
3706  * @link_up: pointer to bool (true/false = linkup/linkdown)
3707  *
3708  * Variable link_up is true if link is up, false if link is down.
3709  * The variable link_up is invalid if status is non-zero. As a
3710  * result of this call, link status reporting becomes enabled.
3711  */
3712 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3713 {
3714 	struct ice_phy_info *phy_info;
3715 	enum ice_status status = ICE_SUCCESS;
3716 
3717 	if (!pi || !link_up)
3718 		return ICE_ERR_PARAM;
3719 
3720 	phy_info = &pi->phy;
3721 
3722 	if (phy_info->get_link_info) {
3723 		status = ice_update_link_info(pi);
3724 
3725 		if (status)
3726 			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3727 				  status);
3728 	}
3729 
3730 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3731 
3732 	return status;
3733 }
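
/* Polling sketch (illustrative): link_up is only meaningful when the call
 * succeeds; start_io() is a hypothetical helper standing in for whatever
 * the caller does once link is up.
 *
 *	bool link_up;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		start_io();
 */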
3734 
3735 /**
3736  * ice_aq_set_link_restart_an
3737  * @pi: pointer to the port information structure
3738  * @ena_link: if true: enable link, if false: disable link
3739  * @cd: pointer to command details structure or NULL
3740  *
3741  * Sets up the link and restarts the Auto-Negotiation over the link.
3742  */
3743 enum ice_status
3744 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3745 			   struct ice_sq_cd *cd)
3746 {
3747 	struct ice_aqc_restart_an *cmd;
3748 	struct ice_aq_desc desc;
3749 
3750 	cmd = &desc.params.restart_an;
3751 
3752 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3753 
3754 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3755 	cmd->lport_num = pi->lport;
3756 	if (ena_link)
3757 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3758 	else
3759 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3760 
3761 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3762 }
3763 
3764 /**
3765  * ice_aq_set_event_mask
3766  * @hw: pointer to the HW struct
3767  * @port_num: port number of the physical function
3768  * @mask: event mask to be set
3769  * @cd: pointer to command details structure or NULL
3770  *
3771  * Set event mask (0x0613)
3772  */
3773 enum ice_status
3774 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3775 		      struct ice_sq_cd *cd)
3776 {
3777 	struct ice_aqc_set_event_mask *cmd;
3778 	struct ice_aq_desc desc;
3779 
3780 	cmd = &desc.params.set_event_mask;
3781 
3782 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3783 
3784 	cmd->lport_num = port_num;
3785 
3786 	cmd->event_mask = CPU_TO_LE16(mask);
3787 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3788 }
3789 
3790 /**
3791  * ice_aq_set_mac_loopback
3792  * @hw: pointer to the HW struct
3793  * @ena_lpbk: Enable or Disable loopback
3794  * @cd: pointer to command details structure or NULL
3795  *
3796  * Enable/disable loopback on a given port
3797  */
3798 enum ice_status
3799 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3800 {
3801 	struct ice_aqc_set_mac_lb *cmd;
3802 	struct ice_aq_desc desc;
3803 
3804 	cmd = &desc.params.set_mac_lb;
3805 
3806 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3807 	if (ena_lpbk)
3808 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3809 
3810 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3811 }
3812 
3813 /**
3814  * ice_aq_set_port_id_led
3815  * @pi: pointer to the port information
3816  * @is_orig_mode: is this LED set to original mode (by the net-list)
3817  * @cd: pointer to command details structure or NULL
3818  *
3819  * Set LED value for the given port (0x06e9)
3820  */
3821 enum ice_status
3822 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3823 		       struct ice_sq_cd *cd)
3824 {
3825 	struct ice_aqc_set_port_id_led *cmd;
3826 	struct ice_hw *hw = pi->hw;
3827 	struct ice_aq_desc desc;
3828 
3829 	cmd = &desc.params.set_port_id_led;
3830 
3831 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3832 
3833 	if (is_orig_mode)
3834 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3835 	else
3836 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3837 
3838 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3839 }
3840 
3841 /**
3842  * ice_aq_sff_eeprom
3843  * @hw: pointer to the HW struct
3844  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3845  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3846  * @mem_addr: I2C offset. Lower 8 bits are the address; upper 8 bits must be zero.
3847  * @page: QSFP page
3848  * @set_page: set or ignore the page
3849  * @data: pointer to data buffer to be read/written to the I2C device.
3850  * @length: 1-16 for read, 1 for write.
3851  * @write: 0 for read, 1 for write.
3852  * @cd: pointer to command details structure or NULL
3853  *
3854  * Read/Write SFF EEPROM (0x06EE)
3855  */
3856 enum ice_status
3857 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3858 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3859 		  bool write, struct ice_sq_cd *cd)
3860 {
3861 	struct ice_aqc_sff_eeprom *cmd;
3862 	struct ice_aq_desc desc;
3863 	enum ice_status status;
3864 
3865 	if (!data || (mem_addr & 0xff00))
3866 		return ICE_ERR_PARAM;
3867 
3868 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3869 	cmd = &desc.params.read_write_sff_param;
3870 	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3871 	cmd->lport_num = (u8)(lport & 0xff);
3872 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3873 	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3874 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3875 					((set_page <<
3876 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3877 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3878 	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3879 	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3880 	if (write)
3881 		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3882 
3883 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3884 	return status;
3885 }
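
/* Read sketch (illustrative): fetch the SFF-8024 identifier byte (page 0,
 * offset 0) from a module at the default topology I2C address.
 *
 *	u8 id;
 *	enum ice_status status;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x0000, 0, 0, &id, 1,
 *				   false, NULL);
 *
 * On success, id 0x03 indicates SFP/SFP+/SFP28 and 0x11 indicates QSFP28.
 */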
3886 
3887 /**
3888  * ice_aq_prog_topo_dev_nvm
3889  * @hw: pointer to the hardware structure
3890  * @topo_params: pointer to structure storing topology parameters for a device
3891  * @cd: pointer to command details structure or NULL
3892  *
3893  * Program Topology Device NVM (0x06F2)
3894  *
3895  */
3896 enum ice_status
3897 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
3898 			 struct ice_aqc_link_topo_params *topo_params,
3899 			 struct ice_sq_cd *cd)
3900 {
3901 	struct ice_aqc_prog_topo_dev_nvm *cmd;
3902 	struct ice_aq_desc desc;
3903 
3904 	cmd = &desc.params.prog_topo_dev_nvm;
3905 
3906 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
3907 
3908 	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3909 		   ICE_NONDMA_TO_NONDMA);
3910 
3911 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3912 }
3913 
3914 /**
3915  * ice_aq_read_topo_dev_nvm
3916  * @hw: pointer to the hardware structure
3917  * @topo_params: pointer to structure storing topology parameters for a device
3918  * @start_address: byte offset in the topology device NVM
3919  * @data: pointer to data buffer
3920  * @data_size: number of bytes to be read from the topology device NVM
3921  * @cd: pointer to command details structure or NULL
3922  *
3923  * Read Topology Device NVM (0x06F3)
3924  */
3925 enum ice_status
3926 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
3927 			 struct ice_aqc_link_topo_params *topo_params,
3928 			 u32 start_address, u8 *data, u8 data_size,
3929 			 struct ice_sq_cd *cd)
3930 {
3931 	struct ice_aqc_read_topo_dev_nvm *cmd;
3932 	struct ice_aq_desc desc;
3933 	enum ice_status status;
3934 
3935 	if (!data || data_size == 0 ||
3936 	    data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
3937 		return ICE_ERR_PARAM;
3938 
3939 	cmd = &desc.params.read_topo_dev_nvm;
3940 
3941 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
3942 
3943 	desc.datalen = data_size;
3944 	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3945 		   ICE_NONDMA_TO_NONDMA);
3946 	cmd->start_address = CPU_TO_LE32(start_address);
3947 
3948 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3949 	if (status)
3950 		return status;
3951 
3952 	ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
3953 
3954 	return ICE_SUCCESS;
3955 }
3956 
3957 /**
3958  * __ice_aq_get_set_rss_lut
3959  * @hw: pointer to the hardware structure
3960  * @params: RSS LUT parameters
3961  * @set: set true to set the table, false to get the table
3962  *
3963  * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
3964  */
3965 static enum ice_status
3966 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3967 {
3968 	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3969 	struct ice_aqc_get_set_rss_lut *cmd_resp;
3970 	struct ice_aq_desc desc;
3971 	enum ice_status status;
3972 	u8 *lut;
3973 
3974 	if (!params)
3975 		return ICE_ERR_PARAM;
3976 
3977 	vsi_handle = params->vsi_handle;
3978 	lut = params->lut;
3979 
3980 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3981 		return ICE_ERR_PARAM;
3982 
3983 	lut_size = params->lut_size;
3984 	lut_type = params->lut_type;
3985 	glob_lut_idx = params->global_lut_id;
3986 	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3987 
3988 	cmd_resp = &desc.params.get_set_rss_lut;
3989 
3990 	if (set) {
3991 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3992 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3993 	} else {
3994 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3995 	}
3996 
3997 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3998 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3999 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
4000 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
4001 
4002 	switch (lut_type) {
4003 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
4004 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
4005 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
4006 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
4007 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
4008 		break;
4009 	default:
4010 		status = ICE_ERR_PARAM;
4011 		goto ice_aq_get_set_rss_lut_exit;
4012 	}
4013 
4014 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
4015 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
4016 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
4017 
4018 		if (!set)
4019 			goto ice_aq_get_set_rss_lut_send;
4020 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
4021 		if (!set)
4022 			goto ice_aq_get_set_rss_lut_send;
4023 	} else {
4024 		goto ice_aq_get_set_rss_lut_send;
4025 	}
4026 
4027 	/* LUT size is only valid for Global and PF table types */
4028 	switch (lut_size) {
4029 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
4030 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
4031 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4032 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4033 		break;
4034 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
4035 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
4036 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4037 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4038 		break;
4039 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
4040 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
4041 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
4042 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4043 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4044 			break;
4045 		}
4046 		/* fall-through */
4047 	default:
4048 		status = ICE_ERR_PARAM;
4049 		goto ice_aq_get_set_rss_lut_exit;
4050 	}
4051 
4052 ice_aq_get_set_rss_lut_send:
4053 	cmd_resp->flags = CPU_TO_LE16(flags);
4054 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4055 
4056 ice_aq_get_set_rss_lut_exit:
4057 	return status;
4058 }
4059 
4060 /**
4061  * ice_aq_get_rss_lut
4062  * @hw: pointer to the hardware structure
4063  * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4064  *
4065  * get the RSS lookup table, PF or VSI type
4066  */
4067 enum ice_status
4068 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4069 {
4070 	return __ice_aq_get_set_rss_lut(hw, get_params, false);
4071 }
4072 
4073 /**
4074  * ice_aq_set_rss_lut
4075  * @hw: pointer to the hardware structure
4076  * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4077  *
4078  * set the RSS lookup table, PF or VSI type
4079  */
4080 enum ice_status
4081 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4082 {
4083 	return __ice_aq_get_set_rss_lut(hw, set_params, true);
4084 }
4085 
4086 /**
4087  * __ice_aq_get_set_rss_key
4088  * @hw: pointer to the HW struct
4089  * @vsi_id: VSI FW index
4090  * @key: pointer to key info struct
4091  * @set: set true to set the key, false to get the key
4092  *
4093  * get (0x0B04) or set (0x0B02) the RSS key per VSI
4094  */
4095 static enum
4096 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4097 				    struct ice_aqc_get_set_rss_keys *key,
4098 				    bool set)
4099 {
4100 	struct ice_aqc_get_set_rss_key *cmd_resp;
4101 	u16 key_size = sizeof(*key);
4102 	struct ice_aq_desc desc;
4103 
4104 	cmd_resp = &desc.params.get_set_rss_key;
4105 
4106 	if (set) {
4107 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4108 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4109 	} else {
4110 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4111 	}
4112 
4113 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4114 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
4115 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
4116 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
4117 
4118 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4119 }
4120 
4121 /**
4122  * ice_aq_get_rss_key
4123  * @hw: pointer to the HW struct
4124  * @vsi_handle: software VSI handle
4125  * @key: pointer to key info struct
4126  *
4127  * get the RSS key per VSI
4128  */
4129 enum ice_status
4130 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4131 		   struct ice_aqc_get_set_rss_keys *key)
4132 {
4133 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4134 		return ICE_ERR_PARAM;
4135 
4136 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4137 					key, false);
4138 }
4139 
4140 /**
4141  * ice_aq_set_rss_key
4142  * @hw: pointer to the HW struct
4143  * @vsi_handle: software VSI handle
4144  * @keys: pointer to key info struct
4145  *
4146  * set the RSS key per VSI
4147  */
4148 enum ice_status
4149 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4150 		   struct ice_aqc_get_set_rss_keys *keys)
4151 {
4152 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4153 		return ICE_ERR_PARAM;
4154 
4155 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4156 					keys, true);
4157 }
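
/* Configuration sketch (illustrative, error handling trimmed): program an
 * RSS key and a 512-entry PF LUT for one VSI. `key_buf`, `lut_buf` and
 * `vsi_handle` are hypothetical; the buffers must stay valid for the
 * duration of each AQ call.
 *
 *	struct ice_aq_get_set_rss_lut_params lut_params = { 0 };
 *
 *	lut_params.vsi_handle = vsi_handle;
 *	lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
 *	lut_params.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512;
 *	lut_params.lut = lut_buf;
 *
 *	ice_aq_set_rss_key(hw, vsi_handle, &key_buf);
 *	ice_aq_set_rss_lut(hw, &lut_params);
 */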
4158 
4159 /**
4160  * ice_aq_add_lan_txq
4161  * @hw: pointer to the hardware structure
4162  * @num_qgrps: Number of added queue groups
4163  * @qg_list: list of queue groups to be added
4164  * @buf_size: size of buffer for indirect command
4165  * @cd: pointer to command details structure or NULL
4166  *
4167  * Add Tx LAN queue (0x0C30)
4168  *
4169  * NOTE:
4170  * Prior to calling add Tx LAN queue:
4171  * Initialize the following as part of the Tx queue context:
4172  * Completion queue ID if the queue uses Completion queue, Quanta profile,
4173  * Cache profile and Packet shaper profile.
4174  *
4175  * After add Tx LAN queue AQ command is completed:
4176  * Interrupts should be associated with specific queues,
4177  * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
4178  * flow.
4179  */
4180 enum ice_status
4181 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4182 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4183 		   struct ice_sq_cd *cd)
4184 {
4185 	struct ice_aqc_add_tx_qgrp *list;
4186 	struct ice_aqc_add_txqs *cmd;
4187 	struct ice_aq_desc desc;
4188 	u16 i, sum_size = 0;
4189 
4190 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4191 
4192 	cmd = &desc.params.add_txqs;
4193 
4194 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4195 
4196 	if (!qg_list)
4197 		return ICE_ERR_PARAM;
4198 
4199 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4200 		return ICE_ERR_PARAM;
4201 
4202 	for (i = 0, list = qg_list; i < num_qgrps; i++) {
4203 		sum_size += ice_struct_size(list, txqs, list->num_txqs);
4204 		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4205 						      list->num_txqs);
4206 	}
4207 
4208 	if (buf_size != sum_size)
4209 		return ICE_ERR_PARAM;
4210 
4211 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4212 
4213 	cmd->num_qgrps = num_qgrps;
4214 
4215 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4216 }
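
/* Buffer sizing sketch (illustrative): buf_size must equal the sum of the
 * variable-length group entries, which is exactly what the validation
 * loop above recomputes. For a single group holding one queue, with
 * `qg_buf` a hypothetical pre-filled buffer:
 *
 *	u16 buf_size = ice_struct_size(qg_buf, txqs, 1);
 *
 *	qg_buf->num_txqs = 1;
 *	ice_aq_add_lan_txq(hw, 1, qg_buf, buf_size, NULL);
 */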
4217 
4218 /**
4219  * ice_aq_dis_lan_txq
4220  * @hw: pointer to the hardware structure
4221  * @num_qgrps: number of groups in the list
4222  * @qg_list: the list of groups to disable
4223  * @buf_size: the total size of the qg_list buffer in bytes
4224  * @rst_src: if called due to reset, specifies the reset source
4225  * @vmvf_num: the relative VM or VF number that is undergoing the reset
4226  * @cd: pointer to command details structure or NULL
4227  *
4228  * Disable LAN Tx queue (0x0C31)
4229  */
4230 static enum ice_status
4231 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4232 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4233 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
4234 		   struct ice_sq_cd *cd)
4235 {
4236 	struct ice_aqc_dis_txq_item *item;
4237 	struct ice_aqc_dis_txqs *cmd;
4238 	struct ice_aq_desc desc;
4239 	enum ice_status status;
4240 	u16 i, sz = 0;
4241 
4242 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4243 	cmd = &desc.params.dis_txqs;
4244 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4245 
4246 	/* qg_list can be NULL only in VM/VF reset flow */
4247 	if (!qg_list && !rst_src)
4248 		return ICE_ERR_PARAM;
4249 
4250 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4251 		return ICE_ERR_PARAM;
4252 
4253 	cmd->num_entries = num_qgrps;
4254 
4255 	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4256 					    ICE_AQC_Q_DIS_TIMEOUT_M);
4257 
4258 	switch (rst_src) {
4259 	case ICE_VM_RESET:
4260 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4261 		cmd->vmvf_and_timeout |=
4262 			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
4263 		break;
4264 	case ICE_NO_RESET:
4265 	default:
4266 		break;
4267 	}
4268 
4269 	/* flush pipe on time out */
4270 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4271 	/* If no queue group info, we are in a reset flow. Issue the AQ */
4272 	if (!qg_list)
4273 		goto do_aq;
4274 
4275 	/* set RD bit to indicate that command buffer is provided by the driver
4276 	 * and it needs to be read by the firmware
4277 	 */
4278 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4279 
4280 	for (i = 0, item = qg_list; i < num_qgrps; i++) {
4281 		u16 item_size = ice_struct_size(item, q_id, item->num_qs);
4282 
4283 		/* If the num of queues is even, add 2 bytes of padding */
4284 		if ((item->num_qs % 2) == 0)
4285 			item_size += 2;
4286 
4287 		sz += item_size;
4288 
4289 		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4290 	}
4291 
4292 	if (buf_size != sz)
4293 		return ICE_ERR_PARAM;
4294 
4295 do_aq:
4296 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4297 	if (status) {
4298 		if (!qg_list)
4299 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4300 				  vmvf_num, hw->adminq.sq_last_status);
4301 		else
4302 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4303 				  LE16_TO_CPU(qg_list[0].q_id[0]),
4304 				  hw->adminq.sq_last_status);
4305 	}
4306 	return status;
4307 }
4308 
4309 /**
4310  * ice_aq_move_recfg_lan_txq
4311  * @hw: pointer to the hardware structure
4312  * @num_qs: number of queues to move/reconfigure
4313  * @is_move: true if this operation involves node movement
4314  * @is_tc_change: true if this operation involves a TC change
4315  * @subseq_call: true if this operation is a subsequent call
4316  * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
4317  * @timeout: timeout in units of 100 usec (valid values 0-50)
4318  * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
4319  * @buf: struct containing src/dest TEID and per-queue info
4320  * @buf_size: size of buffer for indirect command
4321  * @txqs_moved: out param, number of queues successfully moved
4322  * @cd: pointer to command details structure or NULL
4323  *
4324  * Move / Reconfigure Tx LAN queues (0x0C32)
4325  */
4326 enum ice_status
4327 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
4328 			  bool is_tc_change, bool subseq_call, bool flush_pipe,
4329 			  u8 timeout, u32 *blocked_cgds,
4330 			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
4331 			  u8 *txqs_moved, struct ice_sq_cd *cd)
4332 {
4333 	struct ice_aqc_move_txqs *cmd;
4334 	struct ice_aq_desc desc;
4335 	enum ice_status status;
4336 
4337 	cmd = &desc.params.move_txqs;
4338 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
4339 
4340 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
4341 	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
4342 		return ICE_ERR_PARAM;
4343 
4344 	if (is_tc_change && !flush_pipe && !blocked_cgds)
4345 		return ICE_ERR_PARAM;
4346 
4347 	if (!is_move && !is_tc_change)
4348 		return ICE_ERR_PARAM;
4349 
4350 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4351 
4352 	if (is_move)
4353 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
4354 
4355 	if (is_tc_change)
4356 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
4357 
4358 	if (subseq_call)
4359 		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
4360 
4361 	if (flush_pipe)
4362 		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
4363 
4364 	cmd->num_qs = num_qs;
4365 	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
4366 			ICE_AQC_Q_CMD_TIMEOUT_M);
4367 
4368 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4369 
4370 	if (!status && txqs_moved)
4371 		*txqs_moved = cmd->num_qs;
4372 
4373 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
4374 	    is_tc_change && !flush_pipe)
4375 		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
4376 
4377 	return status;
4378 }
4379 
4380 /* End of FW Admin Queue command wrappers */
4381 
4382 /**
4383  * ice_write_byte - write a byte to a packed context structure
4384  * @src_ctx:  the context structure to read from
4385  * @dest_ctx: the context to be written to
4386  * @ce_info:  a description of the struct to be filled
4387  */
4388 static void
4389 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4390 {
4391 	u8 src_byte, dest_byte, mask;
4392 	u8 *from, *dest;
4393 	u16 shift_width;
4394 
4395 	/* copy from the next struct field */
4396 	from = src_ctx + ce_info->offset;
4397 
4398 	/* prepare the bits and mask */
4399 	shift_width = ce_info->lsb % 8;
4400 	mask = (u8)(BIT(ce_info->width) - 1);
4401 
4402 	src_byte = *from;
4403 	src_byte &= mask;
4404 
4405 	/* shift to correct alignment */
4406 	mask <<= shift_width;
4407 	src_byte <<= shift_width;
4408 
4409 	/* get the current bits from the target bit string */
4410 	dest = dest_ctx + (ce_info->lsb / 8);
4411 
4412 	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4413 
4414 	dest_byte &= ~mask;	/* get the bits not changing */
4415 	dest_byte |= src_byte;	/* add in the new bits */
4416 
4417 	/* put it all back */
4418 	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4419 }
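
/* Worked example (illustration only): a 3-bit field with lsb = 10 lands
 * in byte 1 of the packed buffer with shift_width = 2. For a source value
 * of 0x5: mask = 0x07 << 2 = 0x1c and src_byte = 0x5 << 2 = 0x14, so only
 * bits 4:2 of destination byte 1 are rewritten; the neighboring bits are
 * preserved by the read-modify-write above.
 */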
4420 
4421 /**
4422  * ice_write_word - write a word to a packed context structure
4423  * @src_ctx:  the context structure to read from
4424  * @dest_ctx: the context to be written to
4425  * @ce_info:  a description of the struct to be filled
4426  */
4427 static void
4428 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4429 {
4430 	u16 src_word, mask;
4431 	__le16 dest_word;
4432 	u8 *from, *dest;
4433 	u16 shift_width;
4434 
4435 	/* copy from the next struct field */
4436 	from = src_ctx + ce_info->offset;
4437 
4438 	/* prepare the bits and mask */
4439 	shift_width = ce_info->lsb % 8;
4440 	mask = BIT(ce_info->width) - 1;
4441 
4442 	/* don't swizzle the bits until after the mask because the mask bits
4443 	 * will be in a different bit position on big endian machines
4444 	 */
4445 	src_word = *(u16 *)from;
4446 	src_word &= mask;
4447 
4448 	/* shift to correct alignment */
4449 	mask <<= shift_width;
4450 	src_word <<= shift_width;
4451 
4452 	/* get the current bits from the target bit string */
4453 	dest = dest_ctx + (ce_info->lsb / 8);
4454 
4455 	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
4456 
4457 	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
4458 	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
4459 
4460 	/* put it all back */
4461 	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4462 }
4463 
4464 /**
4465  * ice_write_dword - write a dword to a packed context structure
4466  * @src_ctx:  the context structure to read from
4467  * @dest_ctx: the context to be written to
4468  * @ce_info:  a description of the struct to be filled
4469  */
4470 static void
4471 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4472 {
4473 	u32 src_dword, mask;
4474 	__le32 dest_dword;
4475 	u8 *from, *dest;
4476 	u16 shift_width;
4477 
4478 	/* copy from the next struct field */
4479 	from = src_ctx + ce_info->offset;
4480 
4481 	/* prepare the bits and mask */
4482 	shift_width = ce_info->lsb % 8;
4483 
4484 	/* if the field width is exactly 32 on an x86 machine, then the shift
4485 	 * operation will not work because the SHL instruction's count is masked
4486 	 * to 5 bits, so the shift will do nothing
4487 	 */
4488 	if (ce_info->width < 32)
4489 		mask = BIT(ce_info->width) - 1;
4490 	else
4491 		mask = (u32)~0;
4492 
4493 	/* don't swizzle the bits until after the mask because the mask bits
4494 	 * will be in a different bit position on big endian machines
4495 	 */
4496 	src_dword = *(u32 *)from;
4497 	src_dword &= mask;
4498 
4499 	/* shift to correct alignment */
4500 	mask <<= shift_width;
4501 	src_dword <<= shift_width;
4502 
4503 	/* get the current bits from the target bit string */
4504 	dest = dest_ctx + (ce_info->lsb / 8);
4505 
4506 	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
4507 
4508 	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
4509 	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
4510 
4511 	/* put it all back */
4512 	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4513 }
4514 
4515 /**
4516  * ice_write_qword - write a qword to a packed context structure
4517  * @src_ctx:  the context structure to read from
4518  * @dest_ctx: the context to be written to
4519  * @ce_info:  a description of the struct to be filled
4520  */
4521 static void
4522 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4523 {
4524 	u64 src_qword, mask;
4525 	__le64 dest_qword;
4526 	u8 *from, *dest;
4527 	u16 shift_width;
4528 
4529 	/* copy from the next struct field */
4530 	from = src_ctx + ce_info->offset;
4531 
4532 	/* prepare the bits and mask */
4533 	shift_width = ce_info->lsb % 8;
4534 
4535 	/* if the field width is exactly 64 on an x86 machine, then the shift
4536 	 * operation will not work because the SHL instruction's count is masked
4537 	 * to 6 bits, so the shift will do nothing
4538 	 */
4539 	if (ce_info->width < 64)
4540 		mask = BIT_ULL(ce_info->width) - 1;
4541 	else
4542 		mask = (u64)~0;
4543 
4544 	/* don't swizzle the bits until after the mask because the mask bits
4545 	 * will be in a different bit position on big endian machines
4546 	 */
4547 	src_qword = *(u64 *)from;
4548 	src_qword &= mask;
4549 
4550 	/* shift to correct alignment */
4551 	mask <<= shift_width;
4552 	src_qword <<= shift_width;
4553 
4554 	/* get the current bits from the target bit string */
4555 	dest = dest_ctx + (ce_info->lsb / 8);
4556 
4557 	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4558 
4559 	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
4560 	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
4561 
4562 	/* put it all back */
4563 	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4564 }
4565 
4566 /**
4567  * ice_set_ctx - set context bits in packed structure
4568  * @hw: pointer to the hardware structure
4569  * @src_ctx:  pointer to a generic non-packed context structure
4570  * @dest_ctx: pointer to memory for the packed structure
4571  * @ce_info:  a description of the structure to be transformed
4572  */
4573 enum ice_status
4574 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4575 	    const struct ice_ctx_ele *ce_info)
4576 {
4577 	int f;
4578 
4579 	for (f = 0; ce_info[f].width; f++) {
4580 		/* We have to deal with each element of the FW response
4581 		 * using the correct size so that we are correct regardless
4582 		 * of the endianness of the machine.
4583 		 */
4584 		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4585 			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4586 				  f, ce_info[f].width, ce_info[f].size_of);
4587 			continue;
4588 		}
4589 		switch (ce_info[f].size_of) {
4590 		case sizeof(u8):
4591 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4592 			break;
4593 		case sizeof(u16):
4594 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4595 			break;
4596 		case sizeof(u32):
4597 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4598 			break;
4599 		case sizeof(u64):
4600 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4601 			break;
4602 		default:
4603 			return ICE_ERR_INVAL_SIZE;
4604 		}
4605 	}
4606 
4607 	return ICE_SUCCESS;
4608 }
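
/* Packing sketch (illustrative): a ce_info array gives each field's
 * offset in the host struct plus its packed width and lsb position, and
 * the table is terminated by a zero-width entry. `my_ctx` and its fields
 * are hypothetical; real tables in this driver are built with the
 * ICE_CTX_STORE macro.
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *					    Field  Width  LSB
 *		ICE_CTX_STORE(my_ctx,	    base,  57,	  0),
 *		ICE_CTX_STORE(my_ctx,	    qlen,  13,	  57),
 *		{ 0 }
 *	};
 *
 *	ice_set_ctx(hw, (u8 *)&my_ctx_data, packed_buf, my_ctx_info);
 */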
4609 
4610 /**
4611  * ice_aq_get_internal_data
4612  * @hw: pointer to the hardware structure
4613  * @cluster_id: specific cluster to dump
4614  * @table_id: table ID within cluster
4615  * @start: index of line in the block to read
4616  * @buf: dump buffer
4617  * @buf_size: dump buffer size
4618  * @ret_buf_size: return buffer size (returned by FW)
4619  * @ret_next_table: next block to read (returned by FW)
4620  * @ret_next_index: next index to read (returned by FW)
4621  * @cd: pointer to command details structure
4622  *
4623  * Get internal FW/HW data (0xFF08) for debug purposes.
4624  */
4625 enum ice_status
4626 ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
4627 			 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
4628 			 u16 *ret_next_table, u32 *ret_next_index,
4629 			 struct ice_sq_cd *cd)
4630 {
4631 	struct ice_aqc_debug_dump_internals *cmd;
4632 	struct ice_aq_desc desc;
4633 	enum ice_status status;
4634 
4635 	cmd = &desc.params.debug_dump;
4636 
4637 	if (buf_size == 0 || !buf)
4638 		return ICE_ERR_PARAM;
4639 
4640 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
4641 
4642 	cmd->cluster_id = cluster_id;
4643 	cmd->table_id = CPU_TO_LE16(table_id);
4644 	cmd->idx = CPU_TO_LE32(start);
4645 
4646 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4647 
4648 	if (!status) {
4649 		if (ret_buf_size)
4650 			*ret_buf_size = LE16_TO_CPU(desc.datalen);
4651 		if (ret_next_table)
4652 			*ret_next_table = LE16_TO_CPU(cmd->table_id);
4653 		if (ret_next_index)
4654 			*ret_next_index = LE32_TO_CPU(cmd->idx);
4655 	}
4656 
4657 	return status;
4658 }
4659 
4660 /**
4661  * ice_read_byte - read context byte into struct
4662  * @src_ctx:  the context structure to read from
4663  * @dest_ctx: the context to be written to
4664  * @ce_info:  a description of the struct to be filled
4665  */
4666 static void
4667 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4668 {
4669 	u8 dest_byte, mask;
4670 	u8 *src, *target;
4671 	u16 shift_width;
4672 
4673 	/* prepare the bits and mask */
4674 	shift_width = ce_info->lsb % 8;
4675 	mask = (u8)(BIT(ce_info->width) - 1);
4676 
4677 	/* shift to correct alignment */
4678 	mask <<= shift_width;
4679 
4680 	/* get the current bits from the src bit string */
4681 	src = src_ctx + (ce_info->lsb / 8);
4682 
4683 	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4684 
4685 	dest_byte &= mask;
4686 
4687 	dest_byte >>= shift_width;
4688 
4689 	/* get the address from the struct field */
4690 	target = dest_ctx + ce_info->offset;
4691 
4692 	/* put it back in the struct */
4693 	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4694 }
4695 
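/*
 * Worked example: a field with lsb = 10, width = 3 and size_of = 1
 * gives shift_width = 10 % 8 = 2 and mask = (BIT(3) - 1) << 2 = 0x1c;
 * the byte at src_ctx[10 / 8] = src_ctx[1] is read, and the value
 * stored in the destination struct is (src_ctx[1] & 0x1c) >> 2.
 */
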
4696 /**
4697  * ice_read_word - read context word into struct
4698  * @src_ctx:  the context structure to read from
4699  * @dest_ctx: the context to be written to
4700  * @ce_info:  a description of the struct to be filled
4701  */
4702 static void
4703 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4704 {
4705 	u16 dest_word, mask;
4706 	u8 *src, *target;
4707 	__le16 src_word;
4708 	u16 shift_width;
4709 
4710 	/* prepare the bits and mask */
4711 	shift_width = ce_info->lsb % 8;
4712 	mask = BIT(ce_info->width) - 1;
4713 
4714 	/* shift to correct alignment */
4715 	mask <<= shift_width;
4716 
4717 	/* get the current bits from the src bit string */
4718 	src = src_ctx + (ce_info->lsb / 8);
4719 
4720 	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4721 
4722 	/* the data in the memory is stored as little endian so mask it
4723 	 * correctly
4724 	 */
4725 	src_word &= CPU_TO_LE16(mask);
4726 
4727 	/* get the data back into host order before shifting */
4728 	dest_word = LE16_TO_CPU(src_word);
4729 
4730 	dest_word >>= shift_width;
4731 
4732 	/* get the address from the struct field */
4733 	target = dest_ctx + ce_info->offset;
4734 
4735 	/* put it back in the struct */
4736 	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4737 }
4738 
4739 /**
4740  * ice_read_dword - read context dword into struct
4741  * @src_ctx:  the context structure to read from
4742  * @dest_ctx: the context to be written to
4743  * @ce_info:  a description of the struct to be filled
4744  */
4745 static void
4746 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4747 {
4748 	u32 dest_dword, mask;
4749 	__le32 src_dword;
4750 	u8 *src, *target;
4751 	u16 shift_width;
4752 
4753 	/* prepare the bits and mask */
4754 	shift_width = ce_info->lsb % 8;
4755 
4756 	/* if the field width is exactly 32 on an x86 machine, then the shift
4757 	 * operation will not work because the SHL instruction's count is
4758 	 * masked to 5 bits, so the shift would do nothing
4759 	 */
4760 	if (ce_info->width < 32)
4761 		mask = BIT(ce_info->width) - 1;
4762 	else
4763 		mask = (u32)~0;
4764 
4765 	/* shift to correct alignment */
4766 	mask <<= shift_width;
4767 
4768 	/* get the current bits from the src bit string */
4769 	src = src_ctx + (ce_info->lsb / 8);
4770 
4771 	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4772 
4773 	/* the data in the memory is stored as little endian so mask it
4774 	 * correctly
4775 	 */
4776 	src_dword &= CPU_TO_LE32(mask);
4777 
4778 	/* get the data back into host order before shifting */
4779 	dest_dword = LE32_TO_CPU(src_dword);
4780 
4781 	dest_dword >>= shift_width;
4782 
4783 	/* get the address from the struct field */
4784 	target = dest_ctx + ce_info->offset;
4785 
4786 	/* put it back in the struct */
4787 	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4788 }
4789 
4790 /**
4791  * ice_read_qword - read context qword into struct
4792  * @src_ctx:  the context structure to read from
4793  * @dest_ctx: the context to be written to
4794  * @ce_info:  a description of the struct to be filled
4795  */
4796 static void
4797 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4798 {
4799 	u64 dest_qword, mask;
4800 	__le64 src_qword;
4801 	u8 *src, *target;
4802 	u16 shift_width;
4803 
4804 	/* prepare the bits and mask */
4805 	shift_width = ce_info->lsb % 8;
4806 
4807 	/* if the field width is exactly 64 on an x86 machine, then the shift
4808 	 * operation will not work because the SHL instruction's count is
4809 	 * masked to 6 bits, so the shift would do nothing
4810 	 */
4811 	if (ce_info->width < 64)
4812 		mask = BIT_ULL(ce_info->width) - 1;
4813 	else
4814 		mask = (u64)~0;
4815 
4816 	/* shift to correct alignment */
4817 	mask <<= shift_width;
4818 
4819 	/* get the current bits from the src bit string */
4820 	src = src_ctx + (ce_info->lsb / 8);
4821 
4822 	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4823 
4824 	/* the data in the memory is stored as little endian so mask it
4825 	 * correctly
4826 	 */
4827 	src_qword &= CPU_TO_LE64(mask);
4828 
4829 	/* get the data back into host order before shifting */
4830 	dest_qword = LE64_TO_CPU(src_qword);
4831 
4832 	dest_qword >>= shift_width;
4833 
4834 	/* get the address from the struct field */
4835 	target = dest_ctx + ce_info->offset;
4836 
4837 	/* put it back in the struct */
4838 	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4839 }
4840 
4841 /**
4842  * ice_get_ctx - extract context bits from a packed structure
4843  * @src_ctx:  pointer to a generic packed context structure
4844  * @dest_ctx: pointer to a generic non-packed context structure
4845  * @ce_info:  a description of the structure to be read from
4846  */
4847 enum ice_status
4848 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4849 {
4850 	int f;
4851 
4852 	for (f = 0; ce_info[f].width; f++) {
4853 		switch (ce_info[f].size_of) {
4854 		case 1:
4855 			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4856 			break;
4857 		case 2:
4858 			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4859 			break;
4860 		case 4:
4861 			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4862 			break;
4863 		case 8:
4864 			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4865 			break;
4866 		default:
4867 			/* nothing to do, just keep going */
4868 			break;
4869 		}
4870 	}
4871 
4872 	return ICE_SUCCESS;
4873 }
4874 
4875 /**
4876  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4877  * @hw: pointer to the HW struct
4878  * @vsi_handle: software VSI handle
4879  * @tc: TC number
4880  * @q_handle: software queue handle
4881  */
4882 struct ice_q_ctx *
4883 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4884 {
4885 	struct ice_vsi_ctx *vsi;
4886 	struct ice_q_ctx *q_ctx;
4887 
4888 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
4889 	if (!vsi)
4890 		return NULL;
4891 	if (q_handle >= vsi->num_lan_q_entries[tc])
4892 		return NULL;
4893 	if (!vsi->lan_q_ctx[tc])
4894 		return NULL;
4895 	q_ctx = vsi->lan_q_ctx[tc];
4896 	return &q_ctx[q_handle];
4897 }
4898 
4899 /**
4900  * ice_ena_vsi_txq
4901  * @pi: port information structure
4902  * @vsi_handle: software VSI handle
4903  * @tc: TC number
4904  * @q_handle: software queue handle
4905  * @num_qgrps: Number of added queue groups
4906  * @buf: list of queue groups to be added
4907  * @buf_size: size of buffer for indirect command
4908  * @cd: pointer to command details structure or NULL
4909  *
4910  * This function adds one LAN queue
4911  */
4912 enum ice_status
4913 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4914 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4915 		struct ice_sq_cd *cd)
4916 {
4917 	struct ice_aqc_txsched_elem_data node = { 0 };
4918 	struct ice_sched_node *parent;
4919 	struct ice_q_ctx *q_ctx;
4920 	enum ice_status status;
4921 	struct ice_hw *hw;
4922 
4923 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4924 		return ICE_ERR_CFG;
4925 
4926 	if (num_qgrps > 1 || buf->num_txqs > 1)
4927 		return ICE_ERR_MAX_LIMIT;
4928 
4929 	hw = pi->hw;
4930 
4931 	if (!ice_is_vsi_valid(hw, vsi_handle))
4932 		return ICE_ERR_PARAM;
4933 
4934 	ice_acquire_lock(&pi->sched_lock);
4935 
4936 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4937 	if (!q_ctx) {
4938 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4939 			  q_handle);
4940 		status = ICE_ERR_PARAM;
4941 		goto ena_txq_exit;
4942 	}
4943 
4944 	/* find a parent node */
4945 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4946 					    ICE_SCHED_NODE_OWNER_LAN);
4947 	if (!parent) {
4948 		status = ICE_ERR_PARAM;
4949 		goto ena_txq_exit;
4950 	}
4951 
4952 	buf->parent_teid = parent->info.node_teid;
4953 	node.parent_teid = parent->info.node_teid;
4954 	/* Mark the values in the "generic" section as valid. The default
4955 	 * value in the "generic" section is zero. This means that:
4956 	 * - The scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4957 	 * - The priority among siblings is 0, indicated by Bits 1-3.
4958 	 * - WFQ arbitration is used, indicated by Bit 4.
4959 	 * - An adjustment value of 0 is used in the PSM credit update flow,
4960 	 * indicated by Bits 5-6.
4961 	 * - Bit 7 is reserved.
4962 	 * Without setting the generic section as valid in valid_sections, the
4963 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4964 	 */
4965 	buf->txqs[0].info.valid_sections =
4966 		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4967 		ICE_AQC_ELEM_VALID_EIR;
4968 	buf->txqs[0].info.generic = 0;
4969 	buf->txqs[0].info.cir_bw.bw_profile_idx =
4970 		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4971 	buf->txqs[0].info.cir_bw.bw_alloc =
4972 		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4973 	buf->txqs[0].info.eir_bw.bw_profile_idx =
4974 		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4975 	buf->txqs[0].info.eir_bw.bw_alloc =
4976 		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4977 
4978 	/* add the LAN queue */
4979 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4980 	if (status != ICE_SUCCESS) {
4981 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4982 			  LE16_TO_CPU(buf->txqs[0].txq_id),
4983 			  hw->adminq.sq_last_status);
4984 		goto ena_txq_exit;
4985 	}
4986 
4987 	node.node_teid = buf->txqs[0].q_teid;
4988 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4989 	q_ctx->q_handle = q_handle;
4990 	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4991 
4992 	/* add a leaf node into scheduler tree queue layer */
4993 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4994 	if (!status)
4995 		status = ice_sched_replay_q_bw(pi, q_ctx);
4996 
4997 ena_txq_exit:
4998 	ice_release_lock(&pi->sched_lock);
4999 	return status;
5000 }
5001 
5002 /**
5003  * ice_dis_vsi_txq
5004  * @pi: port information structure
5005  * @vsi_handle: software VSI handle
5006  * @tc: TC number
5007  * @num_queues: number of queues
5008  * @q_handles: pointer to software queue handle array
5009  * @q_ids: pointer to the q_id array
5010  * @q_teids: pointer to queue node teids
5011  * @rst_src: if called due to reset, specifies the reset source
5012  * @vmvf_num: the relative VM or VF number that is undergoing the reset
5013  * @cd: pointer to command details structure or NULL
5014  *
5015  * This function removes queues and their corresponding nodes in SW DB
5016  */
5017 enum ice_status
5018 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
5019 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
5020 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
5021 		struct ice_sq_cd *cd)
5022 {
5023 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
5024 	struct ice_aqc_dis_txq_item *qg_list;
5025 	struct ice_q_ctx *q_ctx;
5026 	struct ice_hw *hw;
5027 	u16 i, buf_size;
5028 
5029 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5030 		return ICE_ERR_CFG;
5031 
5032 	hw = pi->hw;
5033 
5034 	if (!num_queues) {
5035 		/* if the queues are already disabled but the disable queue
5036 		 * command still has to be sent to complete a VF reset, call
5037 		 * ice_aq_dis_lan_txq without any queue information
5038 		 */
5039 		if (rst_src)
5040 			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5041 						  vmvf_num, NULL);
5042 		return ICE_ERR_CFG;
5043 	}
5044 
5045 	buf_size = ice_struct_size(qg_list, q_id, 1);
5046 	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
5047 	if (!qg_list)
5048 		return ICE_ERR_NO_MEMORY;
5049 
5050 	ice_acquire_lock(&pi->sched_lock);
5051 
5052 	for (i = 0; i < num_queues; i++) {
5053 		struct ice_sched_node *node;
5054 
5055 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5056 		if (!node)
5057 			continue;
5058 		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5059 		if (!q_ctx) {
5060 			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
5061 				  q_handles[i]);
5062 			continue;
5063 		}
5064 		if (q_ctx->q_handle != q_handles[i]) {
5065 			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
5066 				  q_ctx->q_handle, q_handles[i]);
5067 			continue;
5068 		}
5069 		qg_list->parent_teid = node->info.parent_teid;
5070 		qg_list->num_qs = 1;
5071 		qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
5072 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
5073 					    vmvf_num, cd);
5074 
5075 		if (status != ICE_SUCCESS)
5076 			break;
5077 		ice_free_sched_node(pi, node);
5078 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
5079 	}
5080 	ice_release_lock(&pi->sched_lock);
5081 	ice_free(hw, qg_list);
5082 	return status;
5083 }
5084 
5085 /**
5086  * ice_cfg_vsi_qs - configure the new/existing VSI queues
5087  * @pi: port information structure
5088  * @vsi_handle: software VSI handle
5089  * @tc_bitmap: TC bitmap
5090  * @maxqs: max queues array per TC
5091  * @owner: LAN or RDMA
5092  *
5093  * This function adds/updates the VSI queues per TC.
5094  */
5095 static enum ice_status
5096 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5097 	       u16 *maxqs, u8 owner)
5098 {
5099 	enum ice_status status = ICE_SUCCESS;
5100 	u8 i;
5101 
5102 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5103 		return ICE_ERR_CFG;
5104 
5105 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5106 		return ICE_ERR_PARAM;
5107 
5108 	ice_acquire_lock(&pi->sched_lock);
5109 
5110 	ice_for_each_traffic_class(i) {
5111 		/* configuration is possible only if TC node is present */
5112 		if (!ice_sched_get_tc_node(pi, i))
5113 			continue;
5114 
5115 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
5116 					   ice_is_tc_ena(tc_bitmap, i));
5117 		if (status)
5118 			break;
5119 	}
5120 
5121 	ice_release_lock(&pi->sched_lock);
5122 	return status;
5123 }
5124 
5125 /**
5126  * ice_cfg_vsi_lan - configure VSI LAN queues
5127  * @pi: port information structure
5128  * @vsi_handle: software VSI handle
5129  * @tc_bitmap: TC bitmap
5130  * @max_lanqs: max LAN queues array per TC
5131  *
5132  * This function adds/updates the VSI LAN queues per TC.
5133  */
5134 enum ice_status
5135 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5136 		u16 *max_lanqs)
5137 {
5138 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
5139 			      ICE_SCHED_NODE_OWNER_LAN);
5140 }
5141 
5142 /**
5143  * ice_is_main_vsi - checks whether the VSI is main VSI
5144  * @hw: pointer to the HW struct
5145  * @vsi_handle: VSI handle
5146  *
5147  * Checks whether the VSI is the main VSI (the first PF VSI created on
5148  * given PF).
5149  * the given PF).
5150 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
5151 {
5152 	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
5153 }
5154 
5155 /**
5156  * ice_replay_pre_init - replay pre initialization
5157  * @hw: pointer to the HW struct
5158  * @sw: pointer to switch info struct for which function initializes filters
5159  *
5160  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5161  */
5162 enum ice_status
5163 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
5164 {
5165 	enum ice_status status;
5166 	u8 i;
5167 
5168 	/* Delete old entries from replay filter list head if there is any */
5169 	ice_rm_sw_replay_rule_info(hw, sw);
5170 	/* At the start of replay, move entries into the replay_rules list;
5171 	 * this allows rule entries to be added back to the filt_rules list,
5172 	 * which is the operational list.
5173 	 */
5174 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5175 		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
5176 				  &sw->recp_list[i].filt_replay_rules);
5177 	ice_sched_replay_agg_vsi_preinit(hw);
5178 
5179 	status = ice_sched_replay_root_node_bw(hw->port_info);
5180 	if (status)
5181 		return status;
5182 
5183 	return ice_sched_replay_tc_node_bw(hw->port_info);
5184 }
5185 
5186 /**
5187  * ice_replay_vsi - replay VSI configuration
5188  * @hw: pointer to the HW struct
5189  * @vsi_handle: driver VSI handle
5190  *
5191  * Restore all VSI configuration after reset. It is required to call this
5192  * function with main VSI first.
5193  * function with the main VSI first.
5194 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5195 {
5196 	struct ice_switch_info *sw = hw->switch_info;
5197 	struct ice_port_info *pi = hw->port_info;
5198 	enum ice_status status;
5199 
5200 	if (!ice_is_vsi_valid(hw, vsi_handle))
5201 		return ICE_ERR_PARAM;
5202 
5203 	/* Replay pre-initialization if there is any */
5204 	if (ice_is_main_vsi(hw, vsi_handle)) {
5205 		status = ice_replay_pre_init(hw, sw);
5206 		if (status)
5207 			return status;
5208 	}
5209 	/* Replay all RSS configurations for this VSI */
5210 	status = ice_replay_rss_cfg(hw, vsi_handle);
5211 	if (status)
5212 		return status;
5213 	/* Replay all filters for this VSI */
5214 	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
5215 	if (!status)
5216 		status = ice_replay_vsi_agg(hw, vsi_handle);
5217 	return status;
5218 }
5219 
5220 /**
5221  * ice_replay_post - post replay configuration cleanup
5222  * @hw: pointer to the HW struct
5223  *
5224  * Post replay cleanup.
5225  */
5226 void ice_replay_post(struct ice_hw *hw)
5227 {
5228 	/* Delete old entries from replay filter list head */
5229 	ice_rm_all_sw_replay_rule_info(hw);
5230 	ice_sched_replay_agg(hw);
5231 }
5232 
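/*
 * Usage sketch (hypothetical caller, assuming vsi_handle 0 is
 * ICE_MAIN_VSI_HANDLE and that the driver tracks num_vsi handles):
 * replay the main VSI first so ice_replay_pre_init() runs before any
 * other VSI is restored, then clean up with ice_replay_post().
 */
static enum ice_status
ice_replay_all_vsi_sketch(struct ice_hw *hw, u16 num_vsi)
{
	enum ice_status status;
	u16 i;

	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, i))
			continue;
		status = ice_replay_vsi(hw, i);
		if (status)
			return status;
	}

	ice_replay_post(hw);
	return ICE_SUCCESS;
}
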
5233 /**
5234  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5235  * @hw: ptr to the hardware info
5236  * @reg: offset of 64 bit HW register to read from
5237  * @prev_stat_loaded: bool to specify if previous stats are loaded
5238  * @prev_stat: ptr to previous loaded stat value
5239  * @cur_stat: ptr to current stat value
5240  */
5241 void
5242 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5243 		  u64 *prev_stat, u64 *cur_stat)
5244 {
5245 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5246 
5247 	/* device stats are not reset at PFR, so they likely will not be zeroed
5248 	 * when the driver starts. Thus, save the value from the first read
5249 	 * without adding to the statistic value so that we report stats which
5250 	 * count up from zero.
5251 	 */
5252 	if (!prev_stat_loaded) {
5253 		*prev_stat = new_data;
5254 		return;
5255 	}
5256 
5257 	/* Calculate the difference between the new and old values, and then
5258 	 * add it to the software stat value.
5259 	 */
5260 	if (new_data >= *prev_stat)
5261 		*cur_stat += new_data - *prev_stat;
5262 	else
5263 		/* to manage the potential roll-over */
5264 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5265 
5266 	/* Update the previously stored value to prepare for next read */
5267 	*prev_stat = new_data;
5268 }
5269 
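/*
 * Worked example of the roll-over branch: with *prev_stat = 0xFFFFFFFFF0
 * and a new 40-bit reading of 0x10, new_data < *prev_stat, so the
 * counter advances by (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20,
 * exactly the number of events that occurred between the two reads.
 */
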
5270 /**
5271  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5272  * @hw: ptr to the hardware info
5273  * @reg: offset of HW register to read from
5274  * @prev_stat_loaded: bool to specify if previous stats are loaded
5275  * @prev_stat: ptr to previous loaded stat value
5276  * @cur_stat: ptr to current stat value
5277  */
5278 void
5279 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5280 		  u64 *prev_stat, u64 *cur_stat)
5281 {
5282 	u32 new_data;
5283 
5284 	new_data = rd32(hw, reg);
5285 
5286 	/* device stats are not reset at PFR, so they likely will not be zeroed
5287 	 * when the driver starts. Thus, save the value from the first read
5288 	 * without adding to the statistic value so that we report stats which
5289 	 * count up from zero.
5290 	 */
5291 	if (!prev_stat_loaded) {
5292 		*prev_stat = new_data;
5293 		return;
5294 	}
5295 
5296 	/* Calculate the difference between the new and old values, and then
5297 	 * add it to the software stat value.
5298 	 */
5299 	if (new_data >= *prev_stat)
5300 		*cur_stat += new_data - *prev_stat;
5301 	else
5302 		/* to manage the potential roll-over */
5303 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5304 
5305 	/* Update the previously stored value to prepare for next read */
5306 	*prev_stat = new_data;
5307 }
5308 
5309 /**
5310  * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
5311  * @hw: ptr to the hardware info
5312  * @vsi_handle: VSI handle
5313  * @prev_stat_loaded: bool to specify if the previous stat values are loaded
5314  * @cur_stats: ptr to current stats structure
5315  *
5316  * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
5317  * thus cannot be read using the normal ice_stat_update32 function.
5318  *
5319  * Read the GLV_REPC register associated with the given VSI, and update the
5320  * rx_no_desc and rx_errors values in the ice_eth_stats structure.
5321  *
5322  * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
5323  * cleared each time it's read.
5324  *
5325  * Note that the GLV_RDPC register also counts the causes that would trigger
5326  * GLV_REPC. However, it does not give the finer grained detail about why the
5327  * packets are being dropped. The GLV_REPC values can be used to distinguish
5328  * whether Rx packets are dropped due to errors or due to no available
5329  * descriptors.
5330  */
5331 void
5332 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
5333 		     struct ice_eth_stats *cur_stats)
5334 {
5335 	u16 vsi_num, no_desc, error_cnt;
5336 	u32 repc;
5337 
5338 	if (!ice_is_vsi_valid(hw, vsi_handle))
5339 		return;
5340 
5341 	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
5342 
5343 	/* If we haven't loaded stats yet, just clear the register */
5344 	if (!prev_stat_loaded) {
5345 		wr32(hw, GLV_REPC(vsi_num), 0);
5346 		return;
5347 	}
5348 
5349 	repc = rd32(hw, GLV_REPC(vsi_num));
5350 	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
5351 	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
5352 
5353 	/* Clear the count by writing to the stats register */
5354 	wr32(hw, GLV_REPC(vsi_num), 0);
5355 
5356 	cur_stats->rx_no_desc += no_desc;
5357 	cur_stats->rx_errors += error_cnt;
5358 }
5359 
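/*
 * Worked example (the low/high word split is an assumption based on the
 * _S/_M definitions): if GLV_REPC_NO_DESC_CNT occupies bits 15:0 and
 * GLV_REPC_ERROR_CNT bits 31:16, then repc = 0x00020005 advances
 * rx_no_desc by 5 and rx_errors by 2 before the register is written
 * back to zero.
 */
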
5360 /**
5361  * ice_sched_query_elem - query element information from HW
5362  * @hw: pointer to the HW struct
5363  * @node_teid: node TEID to be queried
5364  * @buf: buffer to element information
5365  *
5366  * This function queries HW element information
5367  */
5368 enum ice_status
5369 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
5370 		     struct ice_aqc_txsched_elem_data *buf)
5371 {
5372 	u16 buf_size, num_elem_ret = 0;
5373 	enum ice_status status;
5374 
5375 	buf_size = sizeof(*buf);
5376 	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
5377 	buf->node_teid = CPU_TO_LE32(node_teid);
5378 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
5379 					  NULL);
5380 	if (status != ICE_SUCCESS || num_elem_ret != 1)
5381 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
5382 	return status;
5383 }
5384 
5385 /**
5386  * ice_get_fw_mode - returns FW mode
5387  * @hw: pointer to the HW struct
5388  */
5389 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
5390 {
5391 #define ICE_FW_MODE_DBG_M BIT(0)
5392 #define ICE_FW_MODE_REC_M BIT(1)
5393 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
5394 	u32 fw_mode;
5395 
5396 	/* check the current FW mode */
5397 	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
5398 
5399 	if (fw_mode & ICE_FW_MODE_DBG_M)
5400 		return ICE_FW_MODE_DBG;
5401 	else if (fw_mode & ICE_FW_MODE_REC_M)
5402 		return ICE_FW_MODE_REC;
5403 	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
5404 		return ICE_FW_MODE_ROLLBACK;
5405 	else
5406 		return ICE_FW_MODE_NORMAL;
5407 }
5408 
5409 /**
5410  * ice_aq_read_i2c
5411  * @hw: pointer to the hw struct
5412  * @topo_addr: topology address for a device to communicate with
5413  * @bus_addr: 7-bit I2C bus address
5414  * @addr: I2C memory address (I2C offset) with up to 16 bits
5415  * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
5416  *			    bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
5417  * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
5418  * @cd: pointer to command details structure or NULL
5419  *
5420  * Read I2C (0x06E2)
5421  */
5422 enum ice_status
5423 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5424 		u16 bus_addr, __le16 addr, u8 params, u8 *data,
5425 		struct ice_sq_cd *cd)
5426 {
5427 	struct ice_aq_desc desc = { 0 };
5428 	struct ice_aqc_i2c *cmd;
5429 	enum ice_status status;
5430 	u8 data_size;
5431 
5432 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
5433 	cmd = &desc.params.read_write_i2c;
5434 
5435 	if (!data)
5436 		return ICE_ERR_PARAM;
5437 
5438 	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5439 
5440 	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5441 	cmd->topo_addr = topo_addr;
5442 	cmd->i2c_params = params;
5443 	cmd->i2c_addr = addr;
5444 
5445 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5446 	if (!status) {
5447 		struct ice_aqc_read_i2c_resp *resp;
5448 		u8 i;
5449 
5450 		resp = &desc.params.read_i2c_resp;
5451 		for (i = 0; i < data_size; i++) {
5452 			*data = resp->i2c_data[i];
5453 			data++;
5454 		}
5455 	}
5456 
5457 	return status;
5458 }
5459 
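/*
 * Usage sketch (hypothetical caller): read two bytes from an I2C device
 * at 7-bit bus address 0x50, offset 0. Only ICE_AQC_I2C_DATA_SIZE_S/_M
 * are taken from the driver; the zeroed topology address and the choice
 * of device are assumptions made for illustration. The caller must
 * provide room for at least two bytes in data.
 */
static enum ice_status
ice_read_i2c_word_sketch(struct ice_hw *hw, u8 *data)
{
	struct ice_aqc_link_topo_addr topo_addr = { 0 };
	u8 params = (2 << ICE_AQC_I2C_DATA_SIZE_S) & ICE_AQC_I2C_DATA_SIZE_M;

	return ice_aq_read_i2c(hw, topo_addr, 0x50, CPU_TO_LE16(0), params,
			       data, NULL);
}
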
5460 /**
5461  * ice_aq_write_i2c
5462  * @hw: pointer to the hw struct
5463  * @topo_addr: topology address for a device to communicate with
5464  * @bus_addr: 7-bit I2C bus address
5465  * @addr: I2C memory address (I2C offset) with up to 16 bits
5466  * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-4 bytes)
5467  * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
5468  * @cd: pointer to command details structure or NULL
5469  *
5470  * Write I2C (0x06E3)
5471  */
5472 enum ice_status
5473 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5474 		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
5475 		 struct ice_sq_cd *cd)
5476 {
5477 	struct ice_aq_desc desc = { 0 };
5478 	struct ice_aqc_i2c *cmd;
5479 	u8 i, data_size;
5480 
5481 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
5482 	cmd = &desc.params.read_write_i2c;
5483 
5484 	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5485 
5486 	/* data_size limited to 4 */
5487 	if (data_size > 4)
5488 		return ICE_ERR_PARAM;
5489 
5490 	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5491 	cmd->topo_addr = topo_addr;
5492 	cmd->i2c_params = params;
5493 	cmd->i2c_addr = addr;
5494 
5495 	for (i = 0; i < data_size; i++) {
5496 		cmd->i2c_data[i] = *data;
5497 		data++;
5498 	}
5499 
5500 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5501 }
5502 
5503 /**
5504  * ice_aq_set_driver_param - Set driver parameter to share via firmware
5505  * @hw: pointer to the HW struct
5506  * @idx: parameter index to set
5507  * @value: the value to set the parameter to
5508  * @cd: pointer to command details structure or NULL
5509  *
5510  * Set the value of one of the software defined parameters. All PFs connected
5511  * to this device can read the value using ice_aq_get_driver_param.
5512  *
5513  * Note that firmware provides no synchronization or locking, and will not
5514  * save the parameter value during a device reset. It is expected that
5515  * a single PF will write the parameter value, while all other PFs will only
5516  * read it.
5517  */
5518 enum ice_status
5519 ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
5520 			u32 value, struct ice_sq_cd *cd)
5521 {
5522 	struct ice_aqc_driver_shared_params *cmd;
5523 	struct ice_aq_desc desc;
5524 
5525 	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
5526 		return ICE_ERR_OUT_OF_RANGE;
5527 
5528 	cmd = &desc.params.drv_shared_params;
5529 
5530 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
5531 
5532 	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
5533 	cmd->param_indx = idx;
5534 	cmd->param_val = CPU_TO_LE32(value);
5535 
5536 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5537 }
5538 
5539 /**
5540  * ice_aq_get_driver_param - Get driver parameter shared via firmware
5541  * @hw: pointer to the HW struct
5542  * @idx: parameter index to get
5543  * @value: storage to return the shared parameter
5544  * @cd: pointer to command details structure or NULL
5545  *
5546  * Get the value of one of the software defined parameters.
5547  *
5548  * Note that firmware provides no synchronization or locking. It is expected
5549  * that only a single PF will write a given parameter.
5550  */
5551 enum ice_status
5552 ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
5553 			u32 *value, struct ice_sq_cd *cd)
5554 {
5555 	struct ice_aqc_driver_shared_params *cmd;
5556 	struct ice_aq_desc desc;
5557 	enum ice_status status;
5558 
5559 	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
5560 		return ICE_ERR_OUT_OF_RANGE;
5561 
5562 	cmd = &desc.params.drv_shared_params;
5563 
5564 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
5565 
5566 	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
5567 	cmd->param_indx = idx;
5568 
5569 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5570 	if (status)
5571 		return status;
5572 
5573 	*value = LE32_TO_CPU(cmd->param_val);
5574 
5575 	return ICE_SUCCESS;
5576 }
5577 
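/*
 * Usage sketch (hypothetical helper): per the notes above, exactly one
 * PF writes a given parameter while its peers only read it; firmware
 * merely stores the value, so the single-writer convention is up to the
 * callers.
 */
static enum ice_status
ice_share_param_sketch(struct ice_hw *hw, enum ice_aqc_driver_params idx,
		       bool is_owner, u32 *value)
{
	if (is_owner)
		return ice_aq_set_driver_param(hw, idx, *value, NULL);

	return ice_aq_get_driver_param(hw, idx, value, NULL);
}
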
5578 /**
5579  * ice_aq_set_gpio
5580  * @hw: pointer to the hw struct
5581  * @gpio_ctrl_handle: GPIO controller node handle
5582  * @pin_idx: IO Number of the GPIO that needs to be set
5583  * @value: SW provided IO value to set in the LSB
5584  * @cd: pointer to command details structure or NULL
5585  *
5586  * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
5587  */
5588 enum ice_status
5589 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
5590 		struct ice_sq_cd *cd)
5591 {
5592 	struct ice_aqc_gpio *cmd;
5593 	struct ice_aq_desc desc;
5594 
5595 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
5596 	cmd = &desc.params.read_write_gpio;
5597 	cmd->gpio_ctrl_handle = gpio_ctrl_handle;
5598 	cmd->gpio_num = pin_idx;
5599 	cmd->gpio_val = value ? 1 : 0;
5600 
5601 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5602 }
5603 
5604 /**
5605  * ice_aq_get_gpio
5606  * @hw: pointer to the hw struct
5607  * @gpio_ctrl_handle: GPIO controller node handle
5608  * @pin_idx: IO Number of the GPIO that needs to be read
5609  * @value: IO value read
5610  * @cd: pointer to command details structure or NULL
5611  *
5612  * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
5613  * the topology
5614  */
5615 enum ice_status
5616 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
5617 		bool *value, struct ice_sq_cd *cd)
5618 {
5619 	struct ice_aqc_gpio *cmd;
5620 	struct ice_aq_desc desc;
5621 	enum ice_status status;
5622 
5623 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
5624 	cmd = &desc.params.read_write_gpio;
5625 	cmd->gpio_ctrl_handle = gpio_ctrl_handle;
5626 	cmd->gpio_num = pin_idx;
5627 
5628 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5629 	if (status)
5630 		return status;
5631 
5632 	*value = !!cmd->gpio_val;
5633 	return ICE_SUCCESS;
5634 }
5635 
5636 /**
5637  * ice_fw_supports_link_override
5638  * @hw: pointer to the hardware structure
5639  *
5640  * Checks if the firmware supports link override
5641  */
5642 bool ice_fw_supports_link_override(struct ice_hw *hw)
5643 {
5644 	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
5645 		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
5646 			return true;
5647 		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
5648 		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
5649 			return true;
5650 	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
5651 		return true;
5652 	}
5653 
5654 	return false;
5655 }
5656 
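/*
 * ice_fw_supports_link_override() above and the
 * ice_fw_supports_lldp_fltr_ctrl()/ice_fw_supports_report_dflt_cfg()
 * checks below all apply the same comparison against a baseline API
 * version; a generic sketch of that comparison (hypothetical helper,
 * not part of the driver):
 */
static bool
ice_fw_api_ver_at_least(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver != maj)
		return hw->api_maj_ver > maj;
	if (hw->api_min_ver != min)
		return hw->api_min_ver > min;
	return hw->api_patch >= patch;
}
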
5657 /**
5658  * ice_get_link_default_override
5659  * @ldo: pointer to the link default override struct
5660  * @pi: pointer to the port info struct
5661  *
5662  * Gets the link default override for a port
5663  */
5664 enum ice_status
5665 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
5666 			      struct ice_port_info *pi)
5667 {
5668 	u16 i, tlv, tlv_len, tlv_start, buf, offset;
5669 	struct ice_hw *hw = pi->hw;
5670 	enum ice_status status;
5671 
5672 	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
5673 					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
5674 	if (status) {
5675 		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
5676 		return status;
5677 	}
5678 
5679 	/* Each port has its own config; calculate for our port */
5680 	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
5681 		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
5682 
5683 	/* link options first */
5684 	status = ice_read_sr_word(hw, tlv_start, &buf);
5685 	if (status) {
5686 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5687 		return status;
5688 	}
5689 	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
5690 	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
5691 		ICE_LINK_OVERRIDE_PHY_CFG_S;
5692 
5693 	/* link PHY config */
5694 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
5695 	status = ice_read_sr_word(hw, offset, &buf);
5696 	if (status) {
5697 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
5698 		return status;
5699 	}
5700 	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
5701 
5702 	/* PHY types low */
5703 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
5704 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5705 		status = ice_read_sr_word(hw, (offset + i), &buf);
5706 		if (status) {
5707 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type low.\n");
5708 			return status;
5709 		}
5710 		/* shift 16 bits at a time to fill 64 bits */
5711 		ldo->phy_type_low |= ((u64)buf << (i * 16));
5712 	}
5713 
5714 	/* PHY types high */
5715 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
5716 		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
5717 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5718 		status = ice_read_sr_word(hw, (offset + i), &buf);
5719 		if (status) {
5720 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type high.\n");
5721 			return status;
5722 		}
5723 		/* shift 16 bits at a time to fill 64 bits */
5724 		ldo->phy_type_high |= ((u64)buf << (i * 16));
5725 	}
5726 
5727 	return status;
5728 }
5729 
5730 /**
5731  * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
5732  * @caps: get PHY capability data
5733  */
5734 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
5735 {
5736 	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
5737 	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
5738 				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
5739 				       ICE_AQC_PHY_AN_EN_CLAUSE37))
5740 		return true;
5741 
5742 	return false;
5743 }
5744 
5745 /**
5746  * ice_aq_set_lldp_mib - Set the LLDP MIB
5747  * @hw: pointer to the HW struct
5748  * @mib_type: Local, Remote or both Local and Remote MIBs
5749  * @buf: pointer to the caller-supplied buffer to store the MIB block
5750  * @buf_size: size of the buffer (in bytes)
5751  * @cd: pointer to command details structure or NULL
5752  *
5753  * Set the LLDP MIB. (0x0A08)
5754  */
5755 enum ice_status
5756 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
5757 		    struct ice_sq_cd *cd)
5758 {
5759 	struct ice_aqc_lldp_set_local_mib *cmd;
5760 	struct ice_aq_desc desc;
5761 
5762 	cmd = &desc.params.lldp_set_mib;
5763 
5764 	if (buf_size == 0 || !buf)
5765 		return ICE_ERR_PARAM;
5766 
5767 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
5768 
5769 	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
5770 	desc.datalen = CPU_TO_LE16(buf_size);
5771 
5772 	cmd->type = mib_type;
5773 	cmd->length = CPU_TO_LE16(buf_size);
5774 
5775 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5776 }
5777 
5778 /**
5779  * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
5780  * @hw: pointer to HW struct
5781  */
5782 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
5783 {
5784 	if (hw->mac_type != ICE_MAC_E810)
5785 		return false;
5786 
5787 	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
5788 		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
5789 			return true;
5790 		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
5791 		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
5792 			return true;
5793 	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
5794 		return true;
5795 	}
5796 	return false;
5797 }
5798 
5799 /**
5800  * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
5801  * @hw: pointer to HW struct
5802  * @vsi_num: absolute HW index for VSI
5803  * @add: boolean for if adding or removing a filter
5804  */
5805 enum ice_status
5806 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
5807 {
5808 	struct ice_aqc_lldp_filter_ctrl *cmd;
5809 	struct ice_aq_desc desc;
5810 
5811 	cmd = &desc.params.lldp_filter_ctrl;
5812 
5813 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
5814 
5815 	if (add)
5816 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
5817 	else
5818 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
5819 
5820 	cmd->vsi_num = CPU_TO_LE16(vsi_num);
5821 
5822 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5823 }
5824 
5825 /**
5826  * ice_fw_supports_report_dflt_cfg
5827  * @hw: pointer to the hardware structure
5828  *
5829  * Checks if the firmware supports report default configuration
5830  */
5831 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
5832 {
5833 	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
5834 		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
5835 			return true;
5836 		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
5837 		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
5838 			return true;
5839 	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
5840 		return true;
5841 	}
5842 	return false;
5843 }
5844