/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer. Interpret the user-specified
 * buffer as a "manage_mac_read" response.
 * MAC addresses from the response are also stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}
	return ICE_SUCCESS;
}
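
/* Illustrative usage sketch (not part of the driver): callers allocate a
 * buffer large enough for both addresses a port can report, as ice_init_hw
 * does later in this file. Buffer sizing and error handling mirror that
 * flow.
 *
 *	void *mac_buf = ice_calloc(hw, 2,
 *				   sizeof(struct ice_aqc_manage_mac_read_resp));
 *	if (mac_buf) {
 *		status = ice_aq_manage_mac_read(hw, mac_buf,
 *						2 * sizeof(struct ice_aqc_manage_mac_read_resp),
 *						NULL);
 *		ice_free(hw, mac_buf);
 *	}
 */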

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
			   sizeof(pi->phy.link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
	}

	return status;
}
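
/* Illustrative usage sketch (not part of the driver): query the topology
 * capabilities into a heap-allocated pcaps structure, as ice_init_hw does
 * later in this file. A failure here is typically treated as non-fatal.
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *
 *	pcaps = (struct ice_aqc_get_phy_caps_data *)
 *		ice_malloc(hw, sizeof(*pcaps));
 *	if (pcaps) {
 *		status = ice_aq_get_phy_caps(hw->port_info, false,
 *					     ICE_AQC_REPORT_TOPO_CAP, pcaps,
 *					     NULL);
 *		ice_free(hw, pcaps);
 *	}
 */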

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be, since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it.
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}
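
/* Illustrative usage sketch (not part of the driver): refresh the cached
 * link status without enabling link status events, as ice_init_hw does
 * below. Passing NULL for the link argument updates only pi->phy.link_info.
 * ICE_AQ_LINK_UP is assumed to be the link-up flag from ice_adminq_cmd.h.
 *
 *	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
 *	if (!status)
 *		link_up = !!(hw->port_info->phy.link_info.link_info &
 *			     ICE_AQ_LINK_UP);
 */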

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on the transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority.
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
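
/* Illustrative usage sketch (not part of the driver): enable jumbo frames
 * at the MAC level by programming the maximum supported frame size, exactly
 * as ice_init_hw does below.
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */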

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
		ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}
	return ICE_SUCCESS;
}

/**
 * ice_cleanup_fltr_mgmt_single - clears a single filter management structure
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	orom = &hw->flash.orom;
	nvm = &hw->flash.nvm;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major, nvm->minor, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
		ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;
	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		ice_debug(hw, ICE_DBG_PHY, "Get PHY capabilities failed, continuing anyway\n");

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);
	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
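
/* Illustrative usage sketch (not part of the driver): ice_init_hw and
 * ice_deinit_hw pair up in a typical probe/remove flow. ice_deinit_hw is
 * for nominal teardown only; a failed ice_init_hw unrolls itself.
 *
 *	if (!ice_init_hw(hw)) {
 *		... use the device ...
 *		ice_deinit_hw(hw);
 *	}
 */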

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
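
/* Illustrative usage sketch (not part of the driver): request a PF reset,
 * as ice_init_hw does early in initialization. CORER/GLOBR requests are
 * triggered through GLGEN_RTRIG and then polled via ice_check_reset.
 *
 *	status = ice_reset(hw, ICE_RESET_PFR);
 */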

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
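
/* Illustrative usage sketch (not part of the driver): the field names come
 * from ice_rlan_ctx_info above; the values are hypothetical and would come
 * from the caller's queue configuration. ice_write_rxq_ctx packs the sparse
 * struct into the dense dword image and writes it to the QRX_CONTEXT
 * registers.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = rx_ring_base;	(hypothetical ring base address)
 *	rlan_ctx.qlen = rx_ring_len;	(hypothetical descriptor count)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */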

/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}

/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};

/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
		    ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}

/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	if (hw->aq_send_cmd_fn) {
		enum ice_status status = ICE_ERR_NOT_READY;
		u16 retval = ICE_AQ_RC_OK;

		ice_acquire_lock(&hw->adminq.sq_lock);
		if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
					buf, buf_size)) {
			retval = LE16_TO_CPU(desc->retval);
			/* strip off FW internal code */
			if (retval)
				retval &= 0xff;
			if (retval == ICE_AQ_RC_OK)
				status = ICE_SUCCESS;
			else
				status = ICE_ERR_AQ_ERROR;
		}

		hw->adminq.sq_last_status = (enum ice_aq_err)retval;
		ice_release_lock(&hw->adminq.sq_lock);

		return status;
	}
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}
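
/* Illustrative usage sketch (not part of the driver): a direct (bufferless)
 * command is a filled descriptor sent with a NULL buffer and zero size, as
 * ice_clear_pf_cfg does earlier in this file.
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */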

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = LE32_TO_CPU(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire-lock, perform-action, release-lock
 * phase of operation, the FW may detect a timeout and issue a CORER. In this
 * case, the driver will receive a CORER interrupt and will have to determine
 * its cause. The calling thread that is handling this flow will likely get
 * an error propagated back to it indicating the Download Package, Update
 * Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = CPU_TO_LE16(res);
	cmd_resp->access_type = CPU_TO_LE16(access);
	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
	cmd_resp->timeout = CPU_TO_LE32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_SUCCESS;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = LE32_TO_CPU(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = CPU_TO_LE16(res);
	cmd->res_number = CPU_TO_LE32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
1627 timeout = time_left;
1628 while (status && timeout && time_left) {
1629 ice_msec_delay(delay, true);
1630 timeout = (timeout > delay) ? timeout - delay : 0;
1631 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1632
1633 if (status == ICE_ERR_AQ_NO_WORK)
1634 /* lock free, but no work to do */
1635 break;
1636
1637 if (!status)
1638 /* lock acquired */
1639 break;
1640 }
1641 if (status && status != ICE_ERR_AQ_NO_WORK)
1642 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1643
1644 ice_acquire_res_exit:
1645 if (status == ICE_ERR_AQ_NO_WORK) {
1646 if (access == ICE_RES_WRITE)
1647 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1648 else
1649 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1650 }
1651 return status;
1652 }
1653
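/* Illustrative usage sketch (not part of the driver): callers typically
 * bracket a protected operation with ice_acquire_res()/ice_release_res().
 * ICE_NVM_RES_ID, ICE_RES_READ and ICE_NVM_TIMEOUT are assumed to match
 * the definitions used elsewhere in this driver.
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *				 ICE_NVM_TIMEOUT);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		return ICE_SUCCESS;
 *	if (status)
 *		return status;
 *	... perform the protected NVM access ...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 *
 * ICE_ERR_AQ_NO_WORK means another PF already performed the update, so
 * the caller returns success without touching the resource.
 */
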
1654 /**
1655 * ice_release_res
1656 * @hw: pointer to the HW structure
1657 * @res: resource ID
1658 *
1659 * This function will release a resource using the proper Admin Command.
1660 */
1661 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1662 {
1663 enum ice_status status;
1664 u32 total_delay = 0;
1665
1666 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1667
1668 status = ice_aq_release_res(hw, res, 0, NULL);
1669
1670 /* there are some rare cases when trying to release the resource
1671 * results in an admin queue timeout, so handle them correctly
1672 */
1673 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1674 (total_delay < hw->adminq.sq_cmd_timeout)) {
1675 ice_msec_delay(1, true);
1676 status = ice_aq_release_res(hw, res, 0, NULL);
1677 total_delay++;
1678 }
1679 }
1680
1681 /**
1682 * ice_aq_alloc_free_res - command to allocate/free resources
1683 * @hw: pointer to the HW struct
1684 * @num_entries: number of resource entries in buffer
1685 * @buf: Indirect buffer to hold data parameters and response
1686 * @buf_size: size of buffer for indirect commands
1687 * @opc: pass in the command opcode
1688 * @cd: pointer to command details structure or NULL
1689 *
1690 * Helper function to allocate/free resources using the admin queue commands
1691 */
1692 enum ice_status
1693 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1694 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1695 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1696 {
1697 struct ice_aqc_alloc_free_res_cmd *cmd;
1698 struct ice_aq_desc desc;
1699
1700 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1701
1702 cmd = &desc.params.sw_res_ctrl;
1703
1704 if (!buf)
1705 return ICE_ERR_PARAM;
1706
1707 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
1708 return ICE_ERR_PARAM;
1709
1710 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1711
1712 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1713
1714 cmd->num_entries = CPU_TO_LE16(num_entries);
1715
1716 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1717 }
1718
1719 /**
1720 * ice_alloc_hw_res - allocate resource
1721 * @hw: pointer to the HW struct
1722 * @type: type of resource
1723 * @num: number of resources to allocate
1724 * @btm: allocate from bottom
1725 * @res: pointer to array that will receive the resources
1726 */
1727 enum ice_status
1728 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1729 {
1730 struct ice_aqc_alloc_free_res_elem *buf;
1731 enum ice_status status;
1732 u16 buf_len;
1733
1734 buf_len = ice_struct_size(buf, elem, num);
1735 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1736 if (!buf)
1737 return ICE_ERR_NO_MEMORY;
1738
1739 /* Prepare buffer to allocate resource. */
1740 buf->num_elems = CPU_TO_LE16(num);
1741 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1742 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1743 if (btm)
1744 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1745
1746 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1747 ice_aqc_opc_alloc_res, NULL);
1748 if (status)
1749 goto ice_alloc_res_exit;
1750
1751 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
1752 ICE_NONDMA_TO_NONDMA);
1753
1754 ice_alloc_res_exit:
1755 ice_free(hw, buf);
1756 return status;
1757 }
1758
1759 /**
1760 * ice_free_hw_res - free allocated HW resource
1761 * @hw: pointer to the HW struct
1762 * @type: type of resource to free
1763 * @num: number of resources
1764 * @res: pointer to array that contains the resources to free
1765 */
1766 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1767 {
1768 struct ice_aqc_alloc_free_res_elem *buf;
1769 enum ice_status status;
1770 u16 buf_len;
1771
1772 buf_len = ice_struct_size(buf, elem, num);
1773 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1774 if (!buf)
1775 return ICE_ERR_NO_MEMORY;
1776
1777 /* Prepare buffer to free resource. */
1778 buf->num_elems = CPU_TO_LE16(num);
1779 buf->res_type = CPU_TO_LE16(type);
1780 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
1781 ICE_NONDMA_TO_NONDMA);
1782
1783 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1784 ice_aqc_opc_free_res, NULL);
1785 if (status)
1786 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1787
1788 ice_free(hw, buf);
1789 return status;
1790 }
1791
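/* Illustrative usage sketch (not part of the driver): allocating one HW
 * resource and freeing it again with the two helpers above. The resource
 * type ICE_AQC_RES_TYPE_VSI_LIST_REP is an assumed example value.
 *
 *	u16 res_id;
 *
 *	status = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_VSI_LIST_REP, 1,
 *				  false, &res_id);
 *	if (!status)
 *		ice_free_hw_res(hw, ICE_AQC_RES_TYPE_VSI_LIST_REP, 1,
 *				&res_id);
 */
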
1792 /**
1793 * ice_get_num_per_func - determine number of resources per PF
1794 * @hw: pointer to the HW structure
1795 * @max: value to be evenly split between each PF
1796 *
1797 * Determine the number of valid functions by going through the bitmap returned
1798 * from parsing capabilities and use this to calculate the number of resources
1799 * per PF based on the max value passed in.
1800 */
1801 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1802 {
1803 u8 funcs;
1804
1805 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1806 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1807 ICE_CAPS_VALID_FUNCS_M);
1808
1809 if (!funcs)
1810 return 0;
1811
1812 return max / funcs;
1813 }
1814
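/* Worked example for ice_get_num_per_func(): with valid_functions = 0x3
 * (two PFs present) and max = 768, the function returns 768 / 2 = 384
 * resources per PF. The numbers are purely illustrative.
 */
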
1815 /**
1816 * ice_parse_common_caps - parse common device/function capabilities
1817 * @hw: pointer to the HW struct
1818 * @caps: pointer to common capabilities structure
1819 * @elem: the capability element to parse
1820 * @prefix: message prefix for tracing capabilities
1821 *
1822 * Given a capability element, extract relevant details into the common
1823 * capability structure.
1824 *
1825 * Returns: true if the capability matches one of the common capability ids,
1826 * false otherwise.
1827 */
1828 static bool
1829 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1830 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1831 {
1832 u32 logical_id = LE32_TO_CPU(elem->logical_id);
1833 u32 phys_id = LE32_TO_CPU(elem->phys_id);
1834 u32 number = LE32_TO_CPU(elem->number);
1835 u16 cap = LE16_TO_CPU(elem->cap);
1836 bool found = true;
1837
1838 switch (cap) {
1839 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1840 caps->valid_functions = number;
1841 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
1842 caps->valid_functions);
1843 break;
1844 case ICE_AQC_CAPS_DCB:
1845 caps->dcb = (number == 1);
1846 caps->active_tc_bitmap = logical_id;
1847 caps->maxtc = phys_id;
1848 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
1849 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
1850 caps->active_tc_bitmap);
1851 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
1852 break;
1853 case ICE_AQC_CAPS_RSS:
1854 caps->rss_table_size = number;
1855 caps->rss_table_entry_width = logical_id;
1856 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
1857 caps->rss_table_size);
1858 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
1859 caps->rss_table_entry_width);
1860 break;
1861 case ICE_AQC_CAPS_RXQS:
1862 caps->num_rxq = number;
1863 caps->rxq_first_id = phys_id;
1864 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
1865 caps->num_rxq);
1866 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
1867 caps->rxq_first_id);
1868 break;
1869 case ICE_AQC_CAPS_TXQS:
1870 caps->num_txq = number;
1871 caps->txq_first_id = phys_id;
1872 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
1873 caps->num_txq);
1874 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
1875 caps->txq_first_id);
1876 break;
1877 case ICE_AQC_CAPS_MSIX:
1878 caps->num_msix_vectors = number;
1879 caps->msix_vector_first_id = phys_id;
1880 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
1881 caps->num_msix_vectors);
1882 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
1883 caps->msix_vector_first_id);
1884 break;
1885 case ICE_AQC_CAPS_MAX_MTU:
1886 caps->max_mtu = number;
1887 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1888 prefix, caps->max_mtu);
1889 break;
1890 default:
1891 /* Not one of the recognized common capabilities */
1892 found = false;
1893 }
1894
1895 return found;
1896 }
1897
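/* Worked example for ice_parse_common_caps(): an ICE_AQC_CAPS_RSS element
 * with number = 512 and logical_id = 7 is parsed above into
 * rss_table_size = 512 and rss_table_entry_width = 7. The values are
 * purely illustrative.
 */
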
1898 /**
1899 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
1900 * @hw: pointer to the HW structure
1901 * @caps: pointer to capabilities structure to fix
1902 *
1903 * Re-calculate the capabilities that are dependent on the number of physical
1904 * ports; i.e. some features are not supported or function differently on
1905 * devices with more than 4 ports.
1906 */
1907 static void
1908 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
1909 {
1910 /* This assumes device capabilities are always scanned before function
1911 * capabilities during the initialization flow.
1912 */
1913 if (hw->dev_caps.num_funcs > 4) {
1914 /* Max 4 TCs per port */
1915 caps->maxtc = 4;
1916 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
1917 caps->maxtc);
1918 }
1919 }
1920
1921 /**
1922 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
1923 * @hw: pointer to the HW struct
1924 * @func_p: pointer to function capabilities structure
1925 * @cap: pointer to the capability element to parse
1926 *
1927 * Extract function capabilities for ICE_AQC_CAPS_VSI.
1928 */
1929 static void
1930 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1931 struct ice_aqc_list_caps_elem *cap)
1932 {
1933 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
1934 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
1935 LE32_TO_CPU(cap->number));
1936 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
1937 func_p->guar_num_vsi);
1938 }
1939
1940 /**
1941 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
1942 * @hw: pointer to the HW struct
1943 * @func_p: pointer to function capabilities structure
1944 *
1945 * Extract function capabilities for ICE_AQC_CAPS_FD.
1946 */
1947 static void
1948 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
1949 {
1950 u32 reg_val, val;
1951
1952 if (hw->dcf_enabled)
1953 return;
1954 reg_val = rd32(hw, GLQF_FD_SIZE);
1955 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
1956 GLQF_FD_SIZE_FD_GSIZE_S;
1957 func_p->fd_fltr_guar =
1958 ice_get_num_per_func(hw, val);
1959 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
1960 GLQF_FD_SIZE_FD_BSIZE_S;
1961 func_p->fd_fltr_best_effort = val;
1962
1963 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
1964 func_p->fd_fltr_guar);
1965 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
1966 func_p->fd_fltr_best_effort);
1967 }
1968
1969 /**
1970 * ice_parse_func_caps - Parse function capabilities
1971 * @hw: pointer to the HW struct
1972 * @func_p: pointer to function capabilities structure
1973 * @buf: buffer containing the function capability records
1974 * @cap_count: the number of capabilities
1975 *
1976 * Helper function to parse function (0x000A) capabilities list. For
1977 * capabilities shared between device and function, this relies on
1978 * ice_parse_common_caps.
1979 *
1980 * Loop through the list of provided capabilities and extract the relevant
1981  * data into the function capabilities structure.
1982 */
1983 static void
1984 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1985 void *buf, u32 cap_count)
1986 {
1987 struct ice_aqc_list_caps_elem *cap_resp;
1988 u32 i;
1989
1990 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1991
1992 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
1993
1994 for (i = 0; i < cap_count; i++) {
1995 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
1996 bool found;
1997
1998 found = ice_parse_common_caps(hw, &func_p->common_cap,
1999 &cap_resp[i], "func caps");
2000
2001 switch (cap) {
2002 case ICE_AQC_CAPS_VSI:
2003 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2004 break;
2005 case ICE_AQC_CAPS_FD:
2006 ice_parse_fdir_func_caps(hw, func_p);
2007 break;
2008 default:
2009 /* Don't list common capabilities as unknown */
2010 if (!found)
2011 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2012 i, cap);
2013 break;
2014 }
2015 }
2016
2017 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2018 }
2019
2020 /**
2021 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2022 * @hw: pointer to the HW struct
2023 * @dev_p: pointer to device capabilities structure
2024 * @cap: capability element to parse
2025 *
2026 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2027 */
2028 static void
2029 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2030 struct ice_aqc_list_caps_elem *cap)
2031 {
2032 u32 number = LE32_TO_CPU(cap->number);
2033
2034 dev_p->num_funcs = ice_hweight32(number);
2035 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2036 dev_p->num_funcs);
2037 }
2038
2039 /**
2040 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2041 * @hw: pointer to the HW struct
2042 * @dev_p: pointer to device capabilities structure
2043 * @cap: capability element to parse
2044 *
2045 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2046 */
2047 static void
2048 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2049 struct ice_aqc_list_caps_elem *cap)
2050 {
2051 u32 number = LE32_TO_CPU(cap->number);
2052
2053 dev_p->num_vsi_allocd_to_host = number;
2054 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2055 dev_p->num_vsi_allocd_to_host);
2056 }
2057
2058 /**
2059 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2060 * @hw: pointer to the HW struct
2061 * @dev_p: pointer to device capabilities structure
2062 * @cap: capability element to parse
2063 *
2064 * Parse ICE_AQC_CAPS_FD for device capabilities.
2065 */
2066 static void
2067 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2068 struct ice_aqc_list_caps_elem *cap)
2069 {
2070 u32 number = LE32_TO_CPU(cap->number);
2071
2072 dev_p->num_flow_director_fltr = number;
2073 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2074 dev_p->num_flow_director_fltr);
2075 }
2076
2077 /**
2078 * ice_parse_dev_caps - Parse device capabilities
2079 * @hw: pointer to the HW struct
2080 * @dev_p: pointer to device capabilities structure
2081 * @buf: buffer containing the device capability records
2082 * @cap_count: the number of capabilities
2083 *
2084  * Helper function to parse the device (0x000B) capabilities list. For
2085 * capabilities shared between device and function, this relies on
2086 * ice_parse_common_caps.
2087 *
2088 * Loop through the list of provided capabilities and extract the relevant
2089  * data into the device capabilities structure.
2090 */
2091 static void
2092 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2093 void *buf, u32 cap_count)
2094 {
2095 struct ice_aqc_list_caps_elem *cap_resp;
2096 u32 i;
2097
2098 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2099
2100 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2101
2102 for (i = 0; i < cap_count; i++) {
2103 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2104 bool found;
2105
2106 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2107 &cap_resp[i], "dev caps");
2108
2109 switch (cap) {
2110 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2111 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2112 break;
2113 case ICE_AQC_CAPS_VSI:
2114 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2115 break;
2116 case ICE_AQC_CAPS_FD:
2117 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2118 break;
2119 default:
2120 /* Don't list common capabilities as unknown */
2121 if (!found)
2122 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2123 i, cap);
2124 break;
2125 }
2126 }
2127
2128 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2129 }
2130
2131 /**
2132 * ice_aq_list_caps - query function/device capabilities
2133 * @hw: pointer to the HW struct
2134 * @buf: a buffer to hold the capabilities
2135 * @buf_size: size of the buffer
2136 * @cap_count: if not NULL, set to the number of capabilities reported
2137 * @opc: capabilities type to discover, device or function
2138 * @cd: pointer to command details structure or NULL
2139 *
2140 * Get the function (0x000A) or device (0x000B) capabilities description from
2141 * firmware and store it in the buffer.
2142 *
2143 * If the cap_count pointer is not NULL, then it is set to the number of
2144 * capabilities firmware will report. Note that if the buffer size is too
2145 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2146 * cap_count will still be updated in this case. It is recommended that the
2147 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2148 * firmware could return) to avoid this.
2149 */
2150 static enum ice_status
2151 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2152 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2153 {
2154 struct ice_aqc_list_caps *cmd;
2155 struct ice_aq_desc desc;
2156 enum ice_status status;
2157
2158 cmd = &desc.params.get_cap;
2159
2160 if (opc != ice_aqc_opc_list_func_caps &&
2161 opc != ice_aqc_opc_list_dev_caps)
2162 return ICE_ERR_PARAM;
2163
2164 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2165 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2166
2167 if (cap_count)
2168 *cap_count = LE32_TO_CPU(cmd->count);
2169
2170 return status;
2171 }
2172
2173 /**
2174 * ice_discover_dev_caps - Read and extract device capabilities
2175 * @hw: pointer to the hardware structure
2176 * @dev_caps: pointer to device capabilities structure
2177 *
2178 * Read the device capabilities and extract them into the dev_caps structure
2179 * for later use.
2180 */
2181 static enum ice_status
2182 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2183 {
2184 enum ice_status status;
2185 u32 cap_count = 0;
2186 void *cbuf;
2187
2188 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2189 if (!cbuf)
2190 return ICE_ERR_NO_MEMORY;
2191
2192 /* Although the driver doesn't know the number of capabilities the
2193 * device will return, we can simply send a 4KB buffer, the maximum
2194 * possible size that firmware can return.
2195 */
2196 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2197
2198 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2199 ice_aqc_opc_list_dev_caps, NULL);
2200 if (!status)
2201 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2202 ice_free(hw, cbuf);
2203
2204 return status;
2205 }
2206
2207 /**
2208 * ice_discover_func_caps - Read and extract function capabilities
2209 * @hw: pointer to the hardware structure
2210 * @func_caps: pointer to function capabilities structure
2211 *
2212 * Read the function capabilities and extract them into the func_caps structure
2213 * for later use.
2214 */
2215 static enum ice_status
2216 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2217 {
2218 enum ice_status status;
2219 u32 cap_count = 0;
2220 void *cbuf;
2221
2222 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2223 if (!cbuf)
2224 return ICE_ERR_NO_MEMORY;
2225
2226 /* Although the driver doesn't know the number of capabilities the
2227 * device will return, we can simply send a 4KB buffer, the maximum
2228 * possible size that firmware can return.
2229 */
2230 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2231
2232 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2233 ice_aqc_opc_list_func_caps, NULL);
2234 if (!status)
2235 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2236 ice_free(hw, cbuf);
2237
2238 return status;
2239 }
2240
2241 /**
2242 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2243 * @hw: pointer to the hardware structure
2244 */
2245 void ice_set_safe_mode_caps(struct ice_hw *hw)
2246 {
2247 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2248 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2249 struct ice_hw_common_caps cached_caps;
2250 u32 num_funcs;
2251
2252 /* cache some func_caps values that should be restored after memset */
2253 cached_caps = func_caps->common_cap;
2254
2255 /* unset func capabilities */
2256 memset(func_caps, 0, sizeof(*func_caps));
2257
2258 #define ICE_RESTORE_FUNC_CAP(name) \
2259 func_caps->common_cap.name = cached_caps.name
2260
2261 /* restore cached values */
2262 ICE_RESTORE_FUNC_CAP(valid_functions);
2263 ICE_RESTORE_FUNC_CAP(txq_first_id);
2264 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2265 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2266 ICE_RESTORE_FUNC_CAP(max_mtu);
2267 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2268
2269 /* one Tx and one Rx queue in safe mode */
2270 func_caps->common_cap.num_rxq = 1;
2271 func_caps->common_cap.num_txq = 1;
2272
2273 /* two MSIX vectors, one for traffic and one for misc causes */
2274 func_caps->common_cap.num_msix_vectors = 2;
2275 func_caps->guar_num_vsi = 1;
2276
2277 /* cache some dev_caps values that should be restored after memset */
2278 cached_caps = dev_caps->common_cap;
2279 num_funcs = dev_caps->num_funcs;
2280
2281 /* unset dev capabilities */
2282 memset(dev_caps, 0, sizeof(*dev_caps));
2283
2284 #define ICE_RESTORE_DEV_CAP(name) \
2285 dev_caps->common_cap.name = cached_caps.name
2286
2287 /* restore cached values */
2288 ICE_RESTORE_DEV_CAP(valid_functions);
2289 ICE_RESTORE_DEV_CAP(txq_first_id);
2290 ICE_RESTORE_DEV_CAP(rxq_first_id);
2291 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2292 ICE_RESTORE_DEV_CAP(max_mtu);
2293 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2294 dev_caps->num_funcs = num_funcs;
2295
2296 /* one Tx and one Rx queue per function in safe mode */
2297 dev_caps->common_cap.num_rxq = num_funcs;
2298 dev_caps->common_cap.num_txq = num_funcs;
2299
2300 /* two MSIX vectors per function */
2301 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2302 }
2303
2304 /**
2305 * ice_get_caps - get info about the HW
2306 * @hw: pointer to the hardware structure
2307 */
2308 enum ice_status ice_get_caps(struct ice_hw *hw)
2309 {
2310 enum ice_status status;
2311
2312 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2313 if (status)
2314 return status;
2315
2316 return ice_discover_func_caps(hw, &hw->func_caps);
2317 }
2318
2319 /**
2320 * ice_aq_manage_mac_write - manage MAC address write command
2321 * @hw: pointer to the HW struct
2322 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2323 * @flags: flags to control write behavior
2324 * @cd: pointer to command details structure or NULL
2325 *
2326 * This function is used to write MAC address to the NVM (0x0108).
2327 */
2328 enum ice_status
2329 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2330 struct ice_sq_cd *cd)
2331 {
2332 struct ice_aqc_manage_mac_write *cmd;
2333 struct ice_aq_desc desc;
2334
2335 cmd = &desc.params.mac_write;
2336 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2337
2338 cmd->flags = flags;
2339 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);
2340
2341 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2342 }
2343
2344 /**
2345 * ice_aq_clear_pxe_mode
2346 * @hw: pointer to the HW struct
2347 *
2348 * Tell the firmware that the driver is taking over from PXE (0x0110).
2349 */
2350 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2351 {
2352 struct ice_aq_desc desc;
2353
2354 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2355 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2356
2357 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2358 }
2359
2360 /**
2361 * ice_clear_pxe_mode - clear pxe operations mode
2362 * @hw: pointer to the HW struct
2363 *
2364 * Make sure all PXE mode settings are cleared, including things
2365 * like descriptor fetch/write-back mode.
2366 */
2367 void ice_clear_pxe_mode(struct ice_hw *hw)
2368 {
2369 if (ice_check_sq_alive(hw, &hw->adminq))
2370 ice_aq_clear_pxe_mode(hw);
2371 }
2372
2373 /**
2374 * ice_get_link_speed_based_on_phy_type - returns link speed
2375 * @phy_type_low: lower part of phy_type
2376 * @phy_type_high: higher part of phy_type
2377 *
2378 * This helper function will convert an entry in PHY type structure
2379 * [phy_type_low, phy_type_high] to its corresponding link speed.
2380  * Note: In the structure of [phy_type_low, phy_type_high], there should
2381  * be exactly one bit set, as this function will convert one PHY type to its
2382  * speed.
2383  * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned.
2384  * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned.
2385 */
2386 static u16
2387 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2388 {
2389 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2390 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2391
2392 switch (phy_type_low) {
2393 case ICE_PHY_TYPE_LOW_100BASE_TX:
2394 case ICE_PHY_TYPE_LOW_100M_SGMII:
2395 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2396 break;
2397 case ICE_PHY_TYPE_LOW_1000BASE_T:
2398 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2399 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2400 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2401 case ICE_PHY_TYPE_LOW_1G_SGMII:
2402 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2403 break;
2404 case ICE_PHY_TYPE_LOW_2500BASE_T:
2405 case ICE_PHY_TYPE_LOW_2500BASE_X:
2406 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2407 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2408 break;
2409 case ICE_PHY_TYPE_LOW_5GBASE_T:
2410 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2411 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2412 break;
2413 case ICE_PHY_TYPE_LOW_10GBASE_T:
2414 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2415 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2416 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2417 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2418 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2419 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2420 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2421 break;
2422 case ICE_PHY_TYPE_LOW_25GBASE_T:
2423 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2424 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2425 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2426 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2427 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2428 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2429 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2430 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2431 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2432 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2433 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2434 break;
2435 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2436 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2437 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2438 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2439 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2440 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2441 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2442 break;
2443 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2444 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2445 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2446 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2447 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2448 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2449 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2450 case ICE_PHY_TYPE_LOW_50G_AUI2:
2451 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2452 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2453 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2454 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2455 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2456 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2457 case ICE_PHY_TYPE_LOW_50G_AUI1:
2458 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2459 break;
2460 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2461 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2462 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2463 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2464 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2465 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2466 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2467 case ICE_PHY_TYPE_LOW_100G_AUI4:
2468 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2469 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2470 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2471 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2472 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2473 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2474 break;
2475 default:
2476 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2477 break;
2478 }
2479
2480 switch (phy_type_high) {
2481 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2482 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2483 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2484 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2485 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2486 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2487 break;
2488 default:
2489 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2490 break;
2491 }
2492
2493 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2494 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2495 return ICE_AQ_LINK_SPEED_UNKNOWN;
2496 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2497 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2498 return ICE_AQ_LINK_SPEED_UNKNOWN;
2499 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2500 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2501 return speed_phy_type_low;
2502 else
2503 return speed_phy_type_high;
2504 }
2505
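/* Worked example: a phy_type_low with only the ICE_PHY_TYPE_LOW_10GBASE_T
 * bit set (and phy_type_high == 0) maps to ICE_AQ_LINK_SPEED_10GB; known
 * speeds in both words at once yield ICE_AQ_LINK_SPEED_UNKNOWN.
 */
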
2506 /**
2507 * ice_update_phy_type
2508 * @phy_type_low: pointer to the lower part of phy_type
2509 * @phy_type_high: pointer to the higher part of phy_type
2510 * @link_speeds_bitmap: targeted link speeds bitmap
2511 *
2512 * Note: For the link_speeds_bitmap structure, you can check it at
2513  * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2514  * link_speeds_bitmap that includes multiple speeds.
2515 *
2516  * Each entry in this [phy_type_low, phy_type_high] structure represents
2517  * a certain link speed. This helper function will turn on bits
2518 * in [phy_type_low, phy_type_high] structure based on the value of
2519 * link_speeds_bitmap input parameter.
2520 */
2521 void
2522 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2523 u16 link_speeds_bitmap)
2524 {
2525 u64 pt_high;
2526 u64 pt_low;
2527 int index;
2528 u16 speed;
2529
2530 /* We first check with low part of phy_type */
2531 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2532 pt_low = BIT_ULL(index);
2533 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2534
2535 if (link_speeds_bitmap & speed)
2536 *phy_type_low |= BIT_ULL(index);
2537 }
2538
2539 /* We then check with high part of phy_type */
2540 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2541 pt_high = BIT_ULL(index);
2542 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2543
2544 if (link_speeds_bitmap & speed)
2545 *phy_type_high |= BIT_ULL(index);
2546 }
2547 }
2548
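/* Illustrative usage sketch (not part of the driver): translating a user
 * speed request into PHY type masks before programming the PHY. The local
 * cfg is assumed to be a struct ice_aqc_set_phy_cfg_data that is later
 * passed to ice_aq_set_phy_cfg().
 *
 *	u64 phy_type_low = 0, phy_type_high = 0;
 *
 *	ice_update_phy_type(&phy_type_low, &phy_type_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	cfg.phy_type_low = CPU_TO_LE64(phy_type_low);
 *	cfg.phy_type_high = CPU_TO_LE64(phy_type_high);
 */
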
2549 /**
2550 * ice_aq_set_phy_cfg
2551 * @hw: pointer to the HW struct
2552 * @pi: port info structure of the interested logical port
2553 * @cfg: structure with PHY configuration data to be set
2554 * @cd: pointer to command details structure or NULL
2555 *
2556 * Set the various PHY configuration parameters supported on the Port.
2557 * One or more of the Set PHY config parameters may be ignored in an MFP
2558 * mode as the PF may not have the privilege to set some of the PHY Config
2559 * parameters. This status will be indicated by the command response (0x0601).
2560 */
2561 enum ice_status
2562 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2563 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2564 {
2565 struct ice_aq_desc desc;
2566 enum ice_status status;
2567
2568 if (!cfg)
2569 return ICE_ERR_PARAM;
2570
2571 /* Ensure that only valid bits of cfg->caps can be turned on. */
2572 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2573 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2574 cfg->caps);
2575
2576 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2577 }
2578
2579 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2580 desc.params.set_phy.lport_num = pi->lport;
2581 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2582
2583 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2584 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2585 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2586 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2587 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2588 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2589 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2590 cfg->low_power_ctrl_an);
2591 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2592 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2593 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2594 cfg->link_fec_opt);
2595
2596 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2597
2598 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2599 status = ICE_SUCCESS;
2600
2601 if (!status)
2602 pi->phy.curr_user_phy_cfg = *cfg;
2603
2604 return status;
2605 }
2606
2607 /**
2608 * ice_update_link_info - update status of the HW network link
2609 * @pi: port info structure of the interested logical port
2610 */
2611 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2612 {
2613 struct ice_link_status *li;
2614 enum ice_status status;
2615
2616 if (!pi)
2617 return ICE_ERR_PARAM;
2618
2619 li = &pi->phy.link_info;
2620
2621 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2622 if (status)
2623 return status;
2624
2625 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2626 struct ice_aqc_get_phy_caps_data *pcaps;
2627 struct ice_hw *hw;
2628
2629 hw = pi->hw;
2630 pcaps = (struct ice_aqc_get_phy_caps_data *)
2631 ice_malloc(hw, sizeof(*pcaps));
2632 if (!pcaps)
2633 return ICE_ERR_NO_MEMORY;
2634
2635 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2636 pcaps, NULL);
2637
2638 ice_free(hw, pcaps);
2639 }
2640
2641 return status;
2642 }
2643
2644 /**
2645 * ice_cache_phy_user_req
2646 * @pi: port information structure
2647 * @cache_data: PHY logging data
2648 * @cache_mode: PHY logging mode
2649 *
2650  * Log the user request on (FC, FEC, SPEED) for later use.
2651 */
2652 static void
2653 ice_cache_phy_user_req(struct ice_port_info *pi,
2654 struct ice_phy_cache_mode_data cache_data,
2655 enum ice_phy_cache_mode cache_mode)
2656 {
2657 if (!pi)
2658 return;
2659
2660 switch (cache_mode) {
2661 case ICE_FC_MODE:
2662 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2663 break;
2664 case ICE_SPEED_MODE:
2665 pi->phy.curr_user_speed_req =
2666 cache_data.data.curr_user_speed_req;
2667 break;
2668 case ICE_FEC_MODE:
2669 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2670 break;
2671 default:
2672 break;
2673 }
2674 }
2675
2676 /**
2677 * ice_caps_to_fc_mode
2678 * @caps: PHY capabilities
2679 *
2680 * Convert PHY FC capabilities to ice FC mode
2681 */
2682 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2683 {
2684 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2685 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2686 return ICE_FC_FULL;
2687
2688 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2689 return ICE_FC_TX_PAUSE;
2690
2691 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2692 return ICE_FC_RX_PAUSE;
2693
2694 return ICE_FC_NONE;
2695 }
2696
2697 /**
2698 * ice_caps_to_fec_mode
2699 * @caps: PHY capabilities
2700 * @fec_options: Link FEC options
2701 *
2702 * Convert PHY FEC capabilities to ice FEC mode
2703 */
2704 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2705 {
2706 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2707 return ICE_FEC_AUTO;
2708
2709 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2710 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2711 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2712 ICE_AQC_PHY_FEC_25G_KR_REQ))
2713 return ICE_FEC_BASER;
2714
2715 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2716 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2717 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2718 return ICE_FEC_RS;
2719
2720 return ICE_FEC_NONE;
2721 }
2722
2723 /**
2724 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2725 * @pi: port information structure
2726 * @cfg: PHY configuration data to set FC mode
2727 * @req_mode: FC mode to configure
2728 */
2729 static enum ice_status
2730 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2731 enum ice_fc_mode req_mode)
2732 {
2733 struct ice_phy_cache_mode_data cache_data;
2734 u8 pause_mask = 0x0;
2735
2736 if (!pi || !cfg)
2737 return ICE_ERR_BAD_PTR;
2738
2739 switch (req_mode) {
2740 case ICE_FC_AUTO:
2741 {
2742 struct ice_aqc_get_phy_caps_data *pcaps;
2743 enum ice_status status;
2744
2745 pcaps = (struct ice_aqc_get_phy_caps_data *)
2746 ice_malloc(pi->hw, sizeof(*pcaps));
2747 if (!pcaps)
2748 return ICE_ERR_NO_MEMORY;
2749
2750 /* Query the value of FC that both the NIC and attached media
2751 * can do.
2752 */
2753 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2754 pcaps, NULL);
2755 if (status) {
2756 ice_free(pi->hw, pcaps);
2757 return status;
2758 }
2759
2760 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2761 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2762
2763 ice_free(pi->hw, pcaps);
2764 break;
2765 }
2766 case ICE_FC_FULL:
2767 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2768 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2769 break;
2770 case ICE_FC_RX_PAUSE:
2771 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2772 break;
2773 case ICE_FC_TX_PAUSE:
2774 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2775 break;
2776 default:
2777 break;
2778 }
2779
2780 /* clear the old pause settings */
2781 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2782 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2783
2784 /* set the new capabilities */
2785 cfg->caps |= pause_mask;
2786
2787 /* Cache user FC request */
2788 cache_data.data.curr_user_fc_req = req_mode;
2789 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2790
2791 return ICE_SUCCESS;
2792 }
2793
2794 /**
2795 * ice_set_fc
2796 * @pi: port information structure
2797 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2798 * @ena_auto_link_update: enable automatic link update
2799 *
2800 * Set the requested flow control mode.
2801 */
2802 enum ice_status
2803 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2804 {
2805 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2806 struct ice_aqc_get_phy_caps_data *pcaps;
2807 enum ice_status status;
2808 struct ice_hw *hw;
2809
2810 if (!pi || !aq_failures)
2811 return ICE_ERR_BAD_PTR;
2812
2813 *aq_failures = 0;
2814 hw = pi->hw;
2815
2816 pcaps = (struct ice_aqc_get_phy_caps_data *)
2817 ice_malloc(hw, sizeof(*pcaps));
2818 if (!pcaps)
2819 return ICE_ERR_NO_MEMORY;
2820
2821 /* Get the current PHY config */
2822 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2823 NULL);
2824 if (status) {
2825 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2826 goto out;
2827 }
2828
2829 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2830
2831 /* Configure the set PHY data */
2832 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2833 if (status) {
2834 if (status != ICE_ERR_BAD_PTR)
2835 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2836
2837 goto out;
2838 }
2839
2840 /* If the capabilities have changed, then set the new config */
2841 if (cfg.caps != pcaps->caps) {
2842 int retry_count, retry_max = 10;
2843
2844 /* Auto restart link so settings take effect */
2845 if (ena_auto_link_update)
2846 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2847
2848 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2849 if (status) {
2850 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2851 goto out;
2852 }
2853
2854 /* Update the link info
2855 * It sometimes takes a really long time for link to
2856 * come back from the atomic reset. Thus, we wait a
2857 * little bit.
2858 */
2859 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2860 status = ice_update_link_info(pi);
2861
2862 if (status == ICE_SUCCESS)
2863 break;
2864
2865 ice_msec_delay(100, true);
2866 }
2867
2868 if (status)
2869 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2870 }
2871
2872 out:
2873 ice_free(hw, pcaps);
2874 return status;
2875 }
2876
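/* Illustrative usage sketch (not part of the driver): requesting full
 * flow control with automatic link restart, then checking which stage
 * failed via aq_failures.
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set fc failed, stage %u\n",
 *			  aq_failures);
 */
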
2877 /**
2878 * ice_phy_caps_equals_cfg
2879 * @phy_caps: PHY capabilities
2880 * @phy_cfg: PHY configuration
2881 *
2882 * Helper function to determine if PHY capabilities matches PHY
2883 * configuration
2884 */
2885 bool
2886 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2887 struct ice_aqc_set_phy_cfg_data *phy_cfg)
2888 {
2889 u8 caps_mask, cfg_mask;
2890
2891 if (!phy_caps || !phy_cfg)
2892 return false;
2893
2894 /* These bits are not common between capabilities and configuration.
2895 * Do not use them to determine equality.
2896 */
2897 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2898 ICE_AQC_PHY_EN_MOD_QUAL);
2899 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2900
2901 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2902 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2903 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2904 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2905 phy_caps->eee_cap != phy_cfg->eee_cap ||
2906 phy_caps->eeer_value != phy_cfg->eeer_value ||
2907 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2908 return false;
2909
2910 return true;
2911 }
2912
2913 /**
2914 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2915 * @pi: port information structure
2916  * @caps: PHY ability structure to copy data from
2917 * @cfg: PHY configuration structure to copy data to
2918 *
2919 * Helper function to copy AQC PHY get ability data to PHY set configuration
2920 * data structure
2921 */
2922 void
2923 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2924 struct ice_aqc_get_phy_caps_data *caps,
2925 struct ice_aqc_set_phy_cfg_data *cfg)
2926 {
2927 if (!pi || !caps || !cfg)
2928 return;
2929
2930 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
2931 cfg->phy_type_low = caps->phy_type_low;
2932 cfg->phy_type_high = caps->phy_type_high;
2933 cfg->caps = caps->caps;
2934 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2935 cfg->eee_cap = caps->eee_cap;
2936 cfg->eeer_value = caps->eeer_value;
2937 cfg->link_fec_opt = caps->link_fec_options;
2938 cfg->module_compliance_enforcement =
2939 caps->module_compliance_enforcement;
2940
2941 if (ice_fw_supports_link_override(pi->hw)) {
2942 struct ice_link_default_override_tlv tlv;
2943
2944 if (ice_get_link_default_override(&tlv, pi))
2945 return;
2946
2947 if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2948 cfg->module_compliance_enforcement |=
2949 ICE_LINK_OVERRIDE_STRICT_MODE;
2950 }
2951 }
2952
2953 /**
2954 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2955 * @pi: port information structure
2956 * @cfg: PHY configuration data to set FEC mode
2957 * @fec: FEC mode to configure
2958 */
2959 enum ice_status
2960 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2961 enum ice_fec_mode fec)
2962 {
2963 struct ice_aqc_get_phy_caps_data *pcaps;
2964 enum ice_status status = ICE_SUCCESS;
2965 struct ice_hw *hw;
2966
2967 if (!pi || !cfg)
2968 return ICE_ERR_BAD_PTR;
2969
2970 hw = pi->hw;
2971
2972 pcaps = (struct ice_aqc_get_phy_caps_data *)
2973 ice_malloc(hw, sizeof(*pcaps));
2974 if (!pcaps)
2975 return ICE_ERR_NO_MEMORY;
2976
2977 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
2978 NULL);
2979 if (status)
2980 goto out;
2981
2982 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
2983 cfg->link_fec_opt = pcaps->link_fec_options;
2984
2985 switch (fec) {
2986 case ICE_FEC_BASER:
2987 /* Clear RS bits, and AND BASE-R ability
2988 * bits and OR request bits.
2989 */
2990 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2991 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2992 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2993 ICE_AQC_PHY_FEC_25G_KR_REQ;
2994 break;
2995 case ICE_FEC_RS:
2996 /* Clear BASE-R bits, and AND RS ability
2997 * bits and OR request bits.
2998 */
2999 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3000 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3001 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3002 break;
3003 case ICE_FEC_NONE:
3004 /* Clear all FEC option bits. */
3005 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3006 break;
3007 case ICE_FEC_AUTO:
3008 /* AND auto FEC bit, and all caps bits. */
3009 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3010 cfg->link_fec_opt |= pcaps->link_fec_options;
3011 break;
3012 default:
3013 status = ICE_ERR_PARAM;
3014 break;
3015 }
3016
3017 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
3018 struct ice_link_default_override_tlv tlv;
3019
3020 if (ice_get_link_default_override(&tlv, pi))
3021 goto out;
3022
3023 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3024 (tlv.options & ICE_LINK_OVERRIDE_EN))
3025 cfg->link_fec_opt = tlv.fec_options;
3026 }
3027
3028 out:
3029 ice_free(hw, pcaps);
3030
3031 return status;
3032 }
3033
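/* Illustrative usage sketch (not part of the driver): the typical FEC
 * reconfiguration flow reads the current abilities, copies them into a
 * config, patches the FEC options, then applies the result. pcaps is
 * assumed to have been filled by a prior ice_aq_get_phy_caps() call.
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
 *	if (!status)
 *		status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */
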
3034 /**
3035 * ice_get_link_status - get status of the HW network link
3036 * @pi: port information structure
3037 * @link_up: pointer to bool (true/false = linkup/linkdown)
3038 *
3039 * Variable link_up is true if link is up, false if link is down.
3040  * The variable link_up is invalid if status is non-zero. As a
3041  * result of this call, link status reporting becomes enabled.
3042 */
3043 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3044 {
3045 struct ice_phy_info *phy_info;
3046 enum ice_status status = ICE_SUCCESS;
3047
3048 if (!pi || !link_up)
3049 return ICE_ERR_PARAM;
3050
3051 phy_info = &pi->phy;
3052
3053 if (phy_info->get_link_info) {
3054 status = ice_update_link_info(pi);
3055
3056 if (status)
3057 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3058 status);
3059 }
3060
3061 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3062
3063 return status;
3064 }
3065
3066 /**
3067 * ice_aq_set_link_restart_an
3068 * @pi: pointer to the port information structure
3069 * @ena_link: if true: enable link, if false: disable link
3070 * @cd: pointer to command details structure or NULL
3071 *
3072 * Sets up the link and restarts the Auto-Negotiation over the link.
3073 */
3074 enum ice_status
3075 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3076 struct ice_sq_cd *cd)
3077 {
3078 struct ice_aqc_restart_an *cmd;
3079 struct ice_aq_desc desc;
3080
3081 cmd = &desc.params.restart_an;
3082
3083 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3084
3085 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3086 cmd->lport_num = pi->lport;
3087 if (ena_link)
3088 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3089 else
3090 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3091
3092 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3093 }
3094
3095 /**
3096 * ice_aq_set_event_mask
3097 * @hw: pointer to the HW struct
3098 * @port_num: port number of the physical function
3099 * @mask: event mask to be set
3100 * @cd: pointer to command details structure or NULL
3101 *
3102 * Set event mask (0x0613)
3103 */
3104 enum ice_status
3105 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3106 struct ice_sq_cd *cd)
3107 {
3108 struct ice_aqc_set_event_mask *cmd;
3109 struct ice_aq_desc desc;
3110
3111 cmd = &desc.params.set_event_mask;
3112
3113 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3114
3115 cmd->lport_num = port_num;
3116
3117 cmd->event_mask = CPU_TO_LE16(mask);
3118 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3119 }
3120
3121 /**
3122 * ice_aq_set_mac_loopback
3123 * @hw: pointer to the HW struct
3124 * @ena_lpbk: Enable or Disable loopback
3125 * @cd: pointer to command details structure or NULL
3126 *
3127 * Enable/disable loopback on a given port
3128 */
3129 enum ice_status
3130 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3131 {
3132 struct ice_aqc_set_mac_lb *cmd;
3133 struct ice_aq_desc desc;
3134
3135 cmd = &desc.params.set_mac_lb;
3136
3137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3138 if (ena_lpbk)
3139 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3140
3141 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3142 }
3143
3144 /**
3145 * ice_aq_set_port_id_led
3146 * @pi: pointer to the port information
3147 * @is_orig_mode: is this LED set to original mode (by the net-list)
3148 * @cd: pointer to command details structure or NULL
3149 *
3150 * Set LED value for the given port (0x06e9)
3151 */
3152 enum ice_status
3153 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3154 struct ice_sq_cd *cd)
3155 {
3156 struct ice_aqc_set_port_id_led *cmd;
3157 struct ice_hw *hw = pi->hw;
3158 struct ice_aq_desc desc;
3159
3160 cmd = &desc.params.set_port_id_led;
3161
3162 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3163
3164 if (is_orig_mode)
3165 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3166 else
3167 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3168
3169 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3170 }
3171
3172 /**
3173 * ice_aq_sff_eeprom
3174 * @hw: pointer to the HW struct
3175 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3176 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3177  * @mem_addr: I2C offset. Lower 8 bits for address, upper 8 bits zero padding.
3178 * @page: QSFP page
3179 * @set_page: set or ignore the page
3180 * @data: pointer to data buffer to be read/written to the I2C device.
3181 * @length: 1-16 for read, 1 for write.
3182  * @write: 0 for read, 1 for write.
3183 * @cd: pointer to command details structure or NULL
3184 *
3185 * Read/Write SFF EEPROM (0x06EE)
3186 */
3187 enum ice_status
3188 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3189 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3190 bool write, struct ice_sq_cd *cd)
3191 {
3192 struct ice_aqc_sff_eeprom *cmd;
3193 struct ice_aq_desc desc;
3194 enum ice_status status;
3195
3196 if (!data || (mem_addr & 0xff00))
3197 return ICE_ERR_PARAM;
3198
3199 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3200 cmd = &desc.params.read_write_sff_param;
3201 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3202 cmd->lport_num = (u8)(lport & 0xff);
3203 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3204 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3205 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3206 ((set_page <<
3207 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3208 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3209 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3210 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3211 if (write)
3212 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3213
3214 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3215 return status;
3216 }
3217
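/* Illustrative usage sketch (not part of the driver): reading the first
 * byte (module identifier) of a module's SFF EEPROM at the conventional
 * I2C address 0xA0. Passing set_page = 0 is assumed to leave the page
 * selection untouched.
 *
 *	u8 id_byte;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x0000, 0, 0, &id_byte, 1,
 *				   false, NULL);
 */
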
3218 /**
3219 * __ice_aq_get_set_rss_lut
3220 * @hw: pointer to the hardware structure
3221 * @params: RSS LUT parameters
3222 * @set: set true to set the table, false to get the table
3223 *
3224 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3225 */
3226 static enum ice_status
3227 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3228 {
3229 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3230 struct ice_aqc_get_set_rss_lut *cmd_resp;
3231 struct ice_aq_desc desc;
3232 enum ice_status status;
3233 u8 *lut;
3234
3235 if (!params)
3236 return ICE_ERR_PARAM;
3237
3238 vsi_handle = params->vsi_handle;
3239 lut = params->lut;
3240
3241 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3242 return ICE_ERR_PARAM;
3243
3244 lut_size = params->lut_size;
3245 lut_type = params->lut_type;
3246 glob_lut_idx = params->global_lut_id;
3247 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3248
3249 cmd_resp = &desc.params.get_set_rss_lut;
3250
3251 if (set) {
3252 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3253 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3254 } else {
3255 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3256 }
3257
3258 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3259 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3260 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3261 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3262
3263 switch (lut_type) {
3264 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3265 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3266 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3267 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3268 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3269 break;
3270 default:
3271 status = ICE_ERR_PARAM;
3272 goto ice_aq_get_set_rss_lut_exit;
3273 }
3274
3275 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3276 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3277 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3278
3279 if (!set)
3280 goto ice_aq_get_set_rss_lut_send;
3281 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3282 if (!set)
3283 goto ice_aq_get_set_rss_lut_send;
3284 } else {
3285 goto ice_aq_get_set_rss_lut_send;
3286 }
3287
3288 /* LUT size is only valid for Global and PF table types */
3289 switch (lut_size) {
3290 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3291 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3292 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3293 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3294 break;
3295 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3296 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3297 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3298 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3299 break;
3300 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3301 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3302 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3303 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3304 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3305 break;
3306 }
3307 /* fall-through */
3308 default:
3309 status = ICE_ERR_PARAM;
3310 goto ice_aq_get_set_rss_lut_exit;
3311 }
3312
3313 ice_aq_get_set_rss_lut_send:
3314 cmd_resp->flags = CPU_TO_LE16(flags);
3315 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3316
3317 ice_aq_get_set_rss_lut_exit:
3318 return status;
3319 }
3320
3321 /**
3322 * ice_aq_get_rss_lut
3323 * @hw: pointer to the hardware structure
3324 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3325 *
3326 * get the RSS lookup table, PF or VSI type
3327 */
3328 enum ice_status
3329 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3330 {
3331 return __ice_aq_get_set_rss_lut(hw, get_params, false);
3332 }
3333
3334 /**
3335 * ice_aq_set_rss_lut
3336 * @hw: pointer to the hardware structure
3337 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3338 *
3339 * set the RSS lookup table, PF or VSI type
3340 */
3341 enum ice_status
3342 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3343 {
3344 return __ice_aq_get_set_rss_lut(hw, set_params, true);
3345 }
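
/*
 * Usage sketch (illustrative only): retrieving the 512-entry PF LUT for a
 * VSI; "hw" and "vsi_handle" are assumed to come from the caller's context.
 * Setting a LUT is symmetric via ice_aq_set_rss_lut():
 *
 *	struct ice_aq_get_set_rss_lut_params params = { 0 };
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *
 *	params.vsi_handle = vsi_handle;
 *	params.lut = lut;
 *	params.lut_size = sizeof(lut);
 *	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
 *	if (!ice_aq_get_rss_lut(hw, &params))
 *		lut now holds the per-entry queue indices
 */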
3346
3347 /**
3348 * __ice_aq_get_set_rss_key
3349 * @hw: pointer to the HW struct
3350 * @vsi_id: VSI FW index
3351 * @key: pointer to key info struct
3352 * @set: set true to set the key, false to get the key
3353 *
3354 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3355 */
3356 static enum ice_status
3357 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3358 struct ice_aqc_get_set_rss_keys *key,
3359 bool set)
3360 {
3361 struct ice_aqc_get_set_rss_key *cmd_resp;
3362 u16 key_size = sizeof(*key);
3363 struct ice_aq_desc desc;
3364
3365 cmd_resp = &desc.params.get_set_rss_key;
3366
3367 if (set) {
3368 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3369 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3370 } else {
3371 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3372 }
3373
3374 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3375 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3376 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3377 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3378
3379 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3380 }
3381
3382 /**
3383 * ice_aq_get_rss_key
3384 * @hw: pointer to the HW struct
3385 * @vsi_handle: software VSI handle
3386 * @key: pointer to key info struct
3387 *
3388 * get the RSS key per VSI
3389 */
3390 enum ice_status
3391 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3392 struct ice_aqc_get_set_rss_keys *key)
3393 {
3394 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3395 return ICE_ERR_PARAM;
3396
3397 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3398 key, false);
3399 }
3400
3401 /**
3402 * ice_aq_set_rss_key
3403 * @hw: pointer to the HW struct
3404 * @vsi_handle: software VSI handle
3405 * @keys: pointer to key info struct
3406 *
3407 * set the RSS key per VSI
3408 */
3409 enum ice_status
3410 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3411 struct ice_aqc_get_set_rss_keys *keys)
3412 {
3413 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3414 return ICE_ERR_PARAM;
3415
3416 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3417 keys, true);
3418 }
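
/*
 * Usage sketch (illustrative only): programming a caller-supplied hash key.
 * The "standard_rss_key" field name is taken from the
 * ice_aqc_get_set_rss_keys layout; "seed" is a hypothetical caller buffer
 * of at least that size:
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *
 *	ice_memcpy(keys.standard_rss_key, seed,
 *		   sizeof(keys.standard_rss_key), ICE_NONDMA_TO_NONDMA);
 *	status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */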
3419
3420 /**
3421 * ice_aq_add_lan_txq
3422 * @hw: pointer to the hardware structure
3423 * @num_qgrps: Number of added queue groups
3424 * @qg_list: list of queue groups to be added
3425 * @buf_size: size of buffer for indirect command
3426 * @cd: pointer to command details structure or NULL
3427 *
3428 * Add Tx LAN queue (0x0C30)
3429 *
3430  * NOTE:
3431  * Prior to calling add Tx LAN queue, initialize the following as part of
3432  * the Tx queue context:
3433  * the Completion queue ID (if the queue uses a Completion queue), the
3434  * Quanta profile, the Cache profile and the Packet shaper profile.
3435  *
3436  * After the add Tx LAN queue AQ command is completed:
3437  * Interrupts should be associated with specific queues.
3438  * Association of a Tx queue to a Doorbell queue is not part of the add Tx
3439  * LAN queue flow.
3440 */
3441 enum ice_status
3442 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3443 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3444 struct ice_sq_cd *cd)
3445 {
3446 struct ice_aqc_add_tx_qgrp *list;
3447 struct ice_aqc_add_txqs *cmd;
3448 struct ice_aq_desc desc;
3449 u16 i, sum_size = 0;
3450
3451 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3452
3453 cmd = &desc.params.add_txqs;
3454
3455 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3456
3457 if (!qg_list)
3458 return ICE_ERR_PARAM;
3459
3460 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3461 return ICE_ERR_PARAM;
3462
3463 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3464 sum_size += ice_struct_size(list, txqs, list->num_txqs);
3465 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3466 list->num_txqs);
3467 }
3468
3469 if (buf_size != sum_size)
3470 return ICE_ERR_PARAM;
3471
3472 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3473
3474 cmd->num_qgrps = num_qgrps;
3475
3476 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3477 }
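
/*
 * Usage sketch (illustrative only): the buf_size check above means callers
 * must size the buffer with ice_struct_size() per queue group. For a single
 * group carrying one queue:
 *
 *	struct ice_aqc_add_tx_qgrp *qg_buf;
 *	u16 buf_size = ice_struct_size(qg_buf, txqs, 1);
 *
 *	qg_buf = (struct ice_aqc_add_tx_qgrp *)ice_malloc(hw, buf_size);
 *	qg_buf->num_txqs = 1;
 *	fill qg_buf->parent_teid and qg_buf->txqs[0], then:
 *	status = ice_aq_add_lan_txq(hw, 1, qg_buf, buf_size, NULL);
 */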
3478
3479 /**
3480 * ice_aq_dis_lan_txq
3481 * @hw: pointer to the hardware structure
3482 * @num_qgrps: number of groups in the list
3483 * @qg_list: the list of groups to disable
3484 * @buf_size: the total size of the qg_list buffer in bytes
3485 * @rst_src: if called due to reset, specifies the reset source
3486 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3487 * @cd: pointer to command details structure or NULL
3488 *
3489 * Disable LAN Tx queue (0x0C31)
3490 */
3491 static enum ice_status
3492 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3493 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3494 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3495 struct ice_sq_cd *cd)
3496 {
3497 struct ice_aqc_dis_txq_item *item;
3498 struct ice_aqc_dis_txqs *cmd;
3499 struct ice_aq_desc desc;
3500 enum ice_status status;
3501 u16 i, sz = 0;
3502
3503 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3504 cmd = &desc.params.dis_txqs;
3505 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3506
3507 /* qg_list can be NULL only in VM/VF reset flow */
3508 if (!qg_list && !rst_src)
3509 return ICE_ERR_PARAM;
3510
3511 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3512 return ICE_ERR_PARAM;
3513
3514 cmd->num_entries = num_qgrps;
3515
3516 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3517 ICE_AQC_Q_DIS_TIMEOUT_M);
3518
3519 switch (rst_src) {
3520 case ICE_VM_RESET:
3521 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3522 cmd->vmvf_and_timeout |=
3523 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3524 break;
3525 case ICE_NO_RESET:
3526 default:
3527 break;
3528 }
3529
3530 /* flush pipe on time out */
3531 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3532 /* If no queue group info, we are in a reset flow. Issue the AQ */
3533 if (!qg_list)
3534 goto do_aq;
3535
3536 /* set RD bit to indicate that command buffer is provided by the driver
3537 * and it needs to be read by the firmware
3538 */
3539 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3540
3541 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3542 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
3543
3544 /* If the num of queues is even, add 2 bytes of padding */
3545 if ((item->num_qs % 2) == 0)
3546 item_size += 2;
3547
3548 sz += item_size;
3549
3550 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3551 }
3552
3553 if (buf_size != sz)
3554 return ICE_ERR_PARAM;
3555
3556 do_aq:
3557 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3558 if (status) {
3559 if (!qg_list)
3560 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3561 vmvf_num, hw->adminq.sq_last_status);
3562 else
3563 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3564 LE16_TO_CPU(qg_list[0].q_id[0]),
3565 hw->adminq.sq_last_status);
3566 }
3567 return status;
3568 }
3569
3570 /**
3571 * ice_aq_move_recfg_lan_txq
3572 * @hw: pointer to the hardware structure
3573 * @num_qs: number of queues to move/reconfigure
3574 * @is_move: true if this operation involves node movement
3575 * @is_tc_change: true if this operation involves a TC change
3576 * @subseq_call: true if this operation is a subsequent call
3577 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3578 * @timeout: timeout in units of 100 usec (valid values 0-50)
3579 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3580 * @buf: struct containing src/dest TEID and per-queue info
3581 * @buf_size: size of buffer for indirect command
3582 * @txqs_moved: out param, number of queues successfully moved
3583 * @cd: pointer to command details structure or NULL
3584 *
3585 * Move / Reconfigure Tx LAN queues (0x0C32)
3586 */
3587 enum ice_status
3588 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3589 bool is_tc_change, bool subseq_call, bool flush_pipe,
3590 u8 timeout, u32 *blocked_cgds,
3591 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3592 u8 *txqs_moved, struct ice_sq_cd *cd)
3593 {
3594 struct ice_aqc_move_txqs *cmd;
3595 struct ice_aq_desc desc;
3596 enum ice_status status;
3597
3598 cmd = &desc.params.move_txqs;
3599 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3600
3601 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3602 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3603 return ICE_ERR_PARAM;
3604
3605 if (is_tc_change && !flush_pipe && !blocked_cgds)
3606 return ICE_ERR_PARAM;
3607
3608 if (!is_move && !is_tc_change)
3609 return ICE_ERR_PARAM;
3610
3611 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3612
3613 if (is_move)
3614 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3615
3616 if (is_tc_change)
3617 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3618
3619 if (subseq_call)
3620 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3621
3622 if (flush_pipe)
3623 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3624
3625 cmd->num_qs = num_qs;
3626 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3627 ICE_AQC_Q_CMD_TIMEOUT_M);
3628
3629 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3630
3631 if (!status && txqs_moved)
3632 *txqs_moved = cmd->num_qs;
3633
3634 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3635 is_tc_change && !flush_pipe)
3636 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3637
3638 return status;
3639 }
3640
3641 /* End of FW Admin Queue command wrappers */
3642
3643 /**
3644 * ice_write_byte - write a byte to a packed context structure
3645 * @src_ctx: the context structure to read from
3646 * @dest_ctx: the context to be written to
3647 * @ce_info: a description of the struct to be filled
3648 */
3649 static void
3650 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3651 {
3652 u8 src_byte, dest_byte, mask;
3653 u8 *from, *dest;
3654 u16 shift_width;
3655
3656 /* copy from the next struct field */
3657 from = src_ctx + ce_info->offset;
3658
3659 /* prepare the bits and mask */
3660 shift_width = ce_info->lsb % 8;
3661 mask = (u8)(BIT(ce_info->width) - 1);
3662
3663 src_byte = *from;
3664 src_byte &= mask;
3665
3666 /* shift to correct alignment */
3667 mask <<= shift_width;
3668 src_byte <<= shift_width;
3669
3670 /* get the current bits from the target bit string */
3671 dest = dest_ctx + (ce_info->lsb / 8);
3672
3673 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3674
3675 dest_byte &= ~mask; /* get the bits not changing */
3676 dest_byte |= src_byte; /* add in the new bits */
3677
3678 /* put it all back */
3679 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3680 }
3681
3682 /**
3683 * ice_write_word - write a word to a packed context structure
3684 * @src_ctx: the context structure to read from
3685 * @dest_ctx: the context to be written to
3686 * @ce_info: a description of the struct to be filled
3687 */
3688 static void
3689 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3690 {
3691 u16 src_word, mask;
3692 __le16 dest_word;
3693 u8 *from, *dest;
3694 u16 shift_width;
3695
3696 /* copy from the next struct field */
3697 from = src_ctx + ce_info->offset;
3698
3699 /* prepare the bits and mask */
3700 shift_width = ce_info->lsb % 8;
3701 mask = BIT(ce_info->width) - 1;
3702
3703 /* don't swizzle the bits until after the mask because the mask bits
3704 * will be in a different bit position on big endian machines
3705 */
3706 src_word = *(u16 *)from;
3707 src_word &= mask;
3708
3709 /* shift to correct alignment */
3710 mask <<= shift_width;
3711 src_word <<= shift_width;
3712
3713 /* get the current bits from the target bit string */
3714 dest = dest_ctx + (ce_info->lsb / 8);
3715
3716 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3717
3718 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3719 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3720
3721 /* put it all back */
3722 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3723 }
3724
3725 /**
3726 * ice_write_dword - write a dword to a packed context structure
3727 * @src_ctx: the context structure to read from
3728 * @dest_ctx: the context to be written to
3729 * @ce_info: a description of the struct to be filled
3730 */
3731 static void
3732 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3733 {
3734 u32 src_dword, mask;
3735 __le32 dest_dword;
3736 u8 *from, *dest;
3737 u16 shift_width;
3738
3739 /* copy from the next struct field */
3740 from = src_ctx + ce_info->offset;
3741
3742 /* prepare the bits and mask */
3743 shift_width = ce_info->lsb % 8;
3744
3745 /* if the field width is exactly 32 on an x86 machine, then the shift
3746 	 * operation will not work because the SHL instruction's count is masked
3747 * to 5 bits so the shift will do nothing
3748 */
3749 if (ce_info->width < 32)
3750 mask = BIT(ce_info->width) - 1;
3751 else
3752 mask = (u32)~0;
3753
3754 /* don't swizzle the bits until after the mask because the mask bits
3755 * will be in a different bit position on big endian machines
3756 */
3757 src_dword = *(u32 *)from;
3758 src_dword &= mask;
3759
3760 /* shift to correct alignment */
3761 mask <<= shift_width;
3762 src_dword <<= shift_width;
3763
3764 /* get the current bits from the target bit string */
3765 dest = dest_ctx + (ce_info->lsb / 8);
3766
3767 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3768
3769 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
3770 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
3771
3772 /* put it all back */
3773 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3774 }
3775
3776 /**
3777 * ice_write_qword - write a qword to a packed context structure
3778 * @src_ctx: the context structure to read from
3779 * @dest_ctx: the context to be written to
3780 * @ce_info: a description of the struct to be filled
3781 */
3782 static void
3783 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3784 {
3785 u64 src_qword, mask;
3786 __le64 dest_qword;
3787 u8 *from, *dest;
3788 u16 shift_width;
3789
3790 /* copy from the next struct field */
3791 from = src_ctx + ce_info->offset;
3792
3793 /* prepare the bits and mask */
3794 shift_width = ce_info->lsb % 8;
3795
3796 /* if the field width is exactly 64 on an x86 machine, then the shift
3797 	 * operation will not work because the SHL instruction's count is masked
3798 * to 6 bits so the shift will do nothing
3799 */
3800 if (ce_info->width < 64)
3801 mask = BIT_ULL(ce_info->width) - 1;
3802 else
3803 mask = (u64)~0;
3804
3805 /* don't swizzle the bits until after the mask because the mask bits
3806 * will be in a different bit position on big endian machines
3807 */
3808 src_qword = *(u64 *)from;
3809 src_qword &= mask;
3810
3811 /* shift to correct alignment */
3812 mask <<= shift_width;
3813 src_qword <<= shift_width;
3814
3815 /* get the current bits from the target bit string */
3816 dest = dest_ctx + (ce_info->lsb / 8);
3817
3818 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
3819
3820 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
3821 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
3822
3823 /* put it all back */
3824 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3825 }
3826
3827 /**
3828 * ice_set_ctx - set context bits in packed structure
3829 * @hw: pointer to the hardware structure
3830 * @src_ctx: pointer to a generic non-packed context structure
3831 * @dest_ctx: pointer to memory for the packed structure
3832 * @ce_info: a description of the structure to be transformed
3833 */
3834 enum ice_status
3835 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3836 const struct ice_ctx_ele *ce_info)
3837 {
3838 int f;
3839
3840 for (f = 0; ce_info[f].width; f++) {
3841 /* We have to deal with each element of the FW response
3842 * using the correct size so that we are correct regardless
3843 * of the endianness of the machine.
3844 */
3845 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3846 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3847 f, ce_info[f].width, ce_info[f].size_of);
3848 continue;
3849 }
3850 switch (ce_info[f].size_of) {
3851 case sizeof(u8):
3852 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3853 break;
3854 case sizeof(u16):
3855 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3856 break;
3857 case sizeof(u32):
3858 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3859 break;
3860 case sizeof(u64):
3861 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3862 break;
3863 default:
3864 return ICE_ERR_INVAL_SIZE;
3865 }
3866 }
3867
3868 return ICE_SUCCESS;
3869 }
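
/*
 * Usage sketch (illustrative only): packing a two-field context. The layout
 * below is invented for demonstration and assumes the
 * ICE_CTX_STORE(_struct, _field, _width, _lsb) helper used by the real
 * context tables; real layouts such as the Tx LAN queue context live in
 * the respective headers:
 *
 *	struct demo_ctx {
 *		u8 mode;	(bits 0..2 of the packed image)
 *		u16 qlen;	(bits 3..12 of the packed image)
 *	};
 *	static const struct ice_ctx_ele demo_ctx_info[] = {
 *		ICE_CTX_STORE(demo_ctx, mode, 3, 0),
 *		ICE_CTX_STORE(demo_ctx, qlen, 10, 3),
 *		{ 0 }
 *	};
 *	struct demo_ctx ctx = { .mode = 2, .qlen = 256 };
 *	u8 packed[2] = { 0 };
 *
 *	ice_set_ctx(hw, (u8 *)&ctx, packed, demo_ctx_info);
 */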
3870
3871 /**
3872 * ice_read_byte - read context byte into struct
3873 * @src_ctx: the context structure to read from
3874 * @dest_ctx: the context to be written to
3875 * @ce_info: a description of the struct to be filled
3876 */
3877 static void
3878 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3879 {
3880 u8 dest_byte, mask;
3881 u8 *src, *target;
3882 u16 shift_width;
3883
3884 /* prepare the bits and mask */
3885 shift_width = ce_info->lsb % 8;
3886 mask = (u8)(BIT(ce_info->width) - 1);
3887
3888 /* shift to correct alignment */
3889 mask <<= shift_width;
3890
3891 /* get the current bits from the src bit string */
3892 src = src_ctx + (ce_info->lsb / 8);
3893
3894 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3895
3896 	dest_byte &= mask;	/* keep only the field bits */
3897
3898 dest_byte >>= shift_width;
3899
3900 /* get the address from the struct field */
3901 target = dest_ctx + ce_info->offset;
3902
3903 /* put it back in the struct */
3904 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3905 }
3906
3907 /**
3908 * ice_read_word - read context word into struct
3909 * @src_ctx: the context structure to read from
3910 * @dest_ctx: the context to be written to
3911 * @ce_info: a description of the struct to be filled
3912 */
3913 static void
3914 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3915 {
3916 u16 dest_word, mask;
3917 u8 *src, *target;
3918 __le16 src_word;
3919 u16 shift_width;
3920
3921 /* prepare the bits and mask */
3922 shift_width = ce_info->lsb % 8;
3923 mask = BIT(ce_info->width) - 1;
3924
3925 /* shift to correct alignment */
3926 mask <<= shift_width;
3927
3928 /* get the current bits from the src bit string */
3929 src = src_ctx + (ce_info->lsb / 8);
3930
3931 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
3932
3933 /* the data in the memory is stored as little endian so mask it
3934 * correctly
3935 */
3936 	src_word &= CPU_TO_LE16(mask);
3937
3938 /* get the data back into host order before shifting */
3939 dest_word = LE16_TO_CPU(src_word);
3940
3941 dest_word >>= shift_width;
3942
3943 /* get the address from the struct field */
3944 target = dest_ctx + ce_info->offset;
3945
3946 /* put it back in the struct */
3947 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3948 }
3949
3950 /**
3951 * ice_read_dword - read context dword into struct
3952 * @src_ctx: the context structure to read from
3953 * @dest_ctx: the context to be written to
3954 * @ce_info: a description of the struct to be filled
3955 */
3956 static void
3957 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3958 {
3959 u32 dest_dword, mask;
3960 __le32 src_dword;
3961 u8 *src, *target;
3962 u16 shift_width;
3963
3964 /* prepare the bits and mask */
3965 shift_width = ce_info->lsb % 8;
3966
3967 /* if the field width is exactly 32 on an x86 machine, then the shift
3968 	 * operation will not work because the SHL instruction's count is masked
3969 * to 5 bits so the shift will do nothing
3970 */
3971 if (ce_info->width < 32)
3972 mask = BIT(ce_info->width) - 1;
3973 else
3974 mask = (u32)~0;
3975
3976 /* shift to correct alignment */
3977 mask <<= shift_width;
3978
3979 /* get the current bits from the src bit string */
3980 src = src_ctx + (ce_info->lsb / 8);
3981
3982 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
3983
3984 /* the data in the memory is stored as little endian so mask it
3985 * correctly
3986 */
3987 	src_dword &= CPU_TO_LE32(mask);
3988
3989 /* get the data back into host order before shifting */
3990 dest_dword = LE32_TO_CPU(src_dword);
3991
3992 dest_dword >>= shift_width;
3993
3994 /* get the address from the struct field */
3995 target = dest_ctx + ce_info->offset;
3996
3997 /* put it back in the struct */
3998 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3999 }
4000
4001 /**
4002 * ice_read_qword - read context qword into struct
4003 * @src_ctx: the context structure to read from
4004 * @dest_ctx: the context to be written to
4005 * @ce_info: a description of the struct to be filled
4006 */
4007 static void
4008 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4009 {
4010 u64 dest_qword, mask;
4011 __le64 src_qword;
4012 u8 *src, *target;
4013 u16 shift_width;
4014
4015 /* prepare the bits and mask */
4016 shift_width = ce_info->lsb % 8;
4017
4018 /* if the field width is exactly 64 on an x86 machine, then the shift
4019 	 * operation will not work because the SHL instruction's count is masked
4020 * to 6 bits so the shift will do nothing
4021 */
4022 if (ce_info->width < 64)
4023 mask = BIT_ULL(ce_info->width) - 1;
4024 else
4025 mask = (u64)~0;
4026
4027 /* shift to correct alignment */
4028 mask <<= shift_width;
4029
4030 /* get the current bits from the src bit string */
4031 src = src_ctx + (ce_info->lsb / 8);
4032
4033 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4034
4035 /* the data in the memory is stored as little endian so mask it
4036 * correctly
4037 */
4038 	src_qword &= CPU_TO_LE64(mask);
4039
4040 /* get the data back into host order before shifting */
4041 dest_qword = LE64_TO_CPU(src_qword);
4042
4043 dest_qword >>= shift_width;
4044
4045 /* get the address from the struct field */
4046 target = dest_ctx + ce_info->offset;
4047
4048 /* put it back in the struct */
4049 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4050 }
4051
4052 /**
4053 * ice_get_ctx - extract context bits from a packed structure
4054 * @src_ctx: pointer to a generic packed context structure
4055 * @dest_ctx: pointer to a generic non-packed context structure
4056 * @ce_info: a description of the structure to be read from
4057 */
4058 enum ice_status
4059 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4060 {
4061 int f;
4062
4063 for (f = 0; ce_info[f].width; f++) {
4064 switch (ce_info[f].size_of) {
4065 case 1:
4066 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4067 break;
4068 case 2:
4069 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4070 break;
4071 case 4:
4072 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4073 break;
4074 case 8:
4075 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4076 break;
4077 default:
4078 /* nothing to do, just keep going */
4079 break;
4080 }
4081 }
4082
4083 return ICE_SUCCESS;
4084 }
4085
4086 /**
4087 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4088 * @hw: pointer to the HW struct
4089 * @vsi_handle: software VSI handle
4090 * @tc: TC number
4091 * @q_handle: software queue handle
4092 */
4093 struct ice_q_ctx *
4094 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4095 {
4096 struct ice_vsi_ctx *vsi;
4097 struct ice_q_ctx *q_ctx;
4098
4099 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4100 if (!vsi)
4101 return NULL;
4102 if (q_handle >= vsi->num_lan_q_entries[tc])
4103 return NULL;
4104 if (!vsi->lan_q_ctx[tc])
4105 return NULL;
4106 q_ctx = vsi->lan_q_ctx[tc];
4107 return &q_ctx[q_handle];
4108 }
4109
4110 /**
4111 * ice_ena_vsi_txq
4112 * @pi: port information structure
4113 * @vsi_handle: software VSI handle
4114 * @tc: TC number
4115 * @q_handle: software queue handle
4116 * @num_qgrps: Number of added queue groups
4117 * @buf: list of queue groups to be added
4118 * @buf_size: size of buffer for indirect command
4119 * @cd: pointer to command details structure or NULL
4120 *
4121 * This function adds one LAN queue
4122 */
4123 enum ice_status
4124 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4125 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4126 struct ice_sq_cd *cd)
4127 {
4128 struct ice_aqc_txsched_elem_data node = { 0 };
4129 struct ice_sched_node *parent;
4130 struct ice_q_ctx *q_ctx;
4131 enum ice_status status;
4132 struct ice_hw *hw;
4133
4134 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4135 return ICE_ERR_CFG;
4136
4137 if (num_qgrps > 1 || buf->num_txqs > 1)
4138 return ICE_ERR_MAX_LIMIT;
4139
4140 hw = pi->hw;
4141
4142 if (!ice_is_vsi_valid(hw, vsi_handle))
4143 return ICE_ERR_PARAM;
4144
4145 ice_acquire_lock(&pi->sched_lock);
4146
4147 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4148 if (!q_ctx) {
4149 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4150 q_handle);
4151 status = ICE_ERR_PARAM;
4152 goto ena_txq_exit;
4153 }
4154
4155 /* find a parent node */
4156 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4157 ICE_SCHED_NODE_OWNER_LAN);
4158 if (!parent) {
4159 status = ICE_ERR_PARAM;
4160 goto ena_txq_exit;
4161 }
4162
4163 buf->parent_teid = parent->info.node_teid;
4164 node.parent_teid = parent->info.node_teid;
4165 	/* Mark the values in the "generic" section as valid. The default
4166 	 * value in the "generic" section is zero. This means that:
4167 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4168 * - 0 priority among siblings, indicated by Bit 1-3.
4169 * - WFQ, indicated by Bit 4.
4170 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4171 * Bit 5-6.
4172 * - Bit 7 is reserved.
4173 * Without setting the generic section as valid in valid_sections, the
4174 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4175 */
4176 buf->txqs[0].info.valid_sections =
4177 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4178 ICE_AQC_ELEM_VALID_EIR;
4179 buf->txqs[0].info.generic = 0;
4180 buf->txqs[0].info.cir_bw.bw_profile_idx =
4181 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4182 buf->txqs[0].info.cir_bw.bw_alloc =
4183 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4184 buf->txqs[0].info.eir_bw.bw_profile_idx =
4185 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4186 buf->txqs[0].info.eir_bw.bw_alloc =
4187 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4188
4189 /* add the LAN queue */
4190 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4191 if (status != ICE_SUCCESS) {
4192 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4193 LE16_TO_CPU(buf->txqs[0].txq_id),
4194 hw->adminq.sq_last_status);
4195 goto ena_txq_exit;
4196 }
4197
4198 node.node_teid = buf->txqs[0].q_teid;
4199 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4200 q_ctx->q_handle = q_handle;
4201 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4202
4203 /* add a leaf node into scheduler tree queue layer */
4204 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4205 if (!status)
4206 status = ice_sched_replay_q_bw(pi, q_ctx);
4207
4208 ena_txq_exit:
4209 ice_release_lock(&pi->sched_lock);
4210 return status;
4211 }
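
/*
 * Usage sketch (illustrative only): enabling one LAN queue on TC 0. The Tx
 * queue context is assumed to have been packed into txqs[0].txq_ctx already
 * (e.g. via ice_set_ctx() with the Tx LAN queue context table); q_id and
 * q_handle are hypothetical caller state:
 *
 *	struct ice_aqc_add_tx_qgrp *qg_buf;
 *	u16 buf_size = ice_struct_size(qg_buf, txqs, 1);
 *
 *	qg_buf = (struct ice_aqc_add_tx_qgrp *)ice_malloc(hw, buf_size);
 *	qg_buf->num_txqs = 1;
 *	qg_buf->txqs[0].txq_id = CPU_TO_LE16(q_id);
 *	status = ice_ena_vsi_txq(pi, vsi_handle, 0, q_handle, 1, qg_buf,
 *				 buf_size, NULL);
 */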
4212
4213 /**
4214 * ice_dis_vsi_txq
4215 * @pi: port information structure
4216 * @vsi_handle: software VSI handle
4217 * @tc: TC number
4218 * @num_queues: number of queues
4219 * @q_handles: pointer to software queue handle array
4220 * @q_ids: pointer to the q_id array
4221 * @q_teids: pointer to queue node teids
4222 * @rst_src: if called due to reset, specifies the reset source
4223 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4224 * @cd: pointer to command details structure or NULL
4225 *
4226 * This function removes queues and their corresponding nodes in SW DB
4227 */
4228 enum ice_status
4229 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4230 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4231 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4232 struct ice_sq_cd *cd)
4233 {
4234 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4235 struct ice_aqc_dis_txq_item *qg_list;
4236 struct ice_q_ctx *q_ctx;
4237 struct ice_hw *hw;
4238 u16 i, buf_size;
4239
4240 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4241 return ICE_ERR_CFG;
4242
4243 hw = pi->hw;
4244
4245 if (!num_queues) {
4246 		/* if the queue is already disabled but the disable queue
4247 		 * command still has to be sent to complete the VF reset, then
4248 		 * call ice_aq_dis_lan_txq without any queue information
4249 */
4250 if (rst_src)
4251 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4252 vmvf_num, NULL);
4253 return ICE_ERR_CFG;
4254 }
4255
4256 buf_size = ice_struct_size(qg_list, q_id, 1);
4257 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
4258 if (!qg_list)
4259 return ICE_ERR_NO_MEMORY;
4260
4261 ice_acquire_lock(&pi->sched_lock);
4262
4263 for (i = 0; i < num_queues; i++) {
4264 struct ice_sched_node *node;
4265
4266 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4267 if (!node)
4268 continue;
4269 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4270 if (!q_ctx) {
4271 			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4272 q_handles[i]);
4273 continue;
4274 }
4275 if (q_ctx->q_handle != q_handles[i]) {
4276 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4277 q_ctx->q_handle, q_handles[i]);
4278 continue;
4279 }
4280 qg_list->parent_teid = node->info.parent_teid;
4281 qg_list->num_qs = 1;
4282 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
4283 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4284 vmvf_num, cd);
4285
4286 if (status != ICE_SUCCESS)
4287 break;
4288 ice_free_sched_node(pi, node);
4289 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4290 }
4291 ice_release_lock(&pi->sched_lock);
4292 ice_free(hw, qg_list);
4293 return status;
4294 }
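
/*
 * Usage sketch (illustrative only): tearing down the single queue enabled
 * above. The one-entry arrays mirror the per-queue state the caller kept
 * from the enable path (q_handle, q_id and the TEID from the add response):
 *
 *	u16 q_handles[1] = { q_handle };
 *	u16 q_ids[1] = { q_id };
 *	u32 q_teids[1] = { q_teid };
 *
 *	status = ice_dis_vsi_txq(pi, vsi_handle, 0, 1, q_handles, q_ids,
 *				 q_teids, ICE_NO_RESET, 0, NULL);
 */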
4295
4296 /**
4297 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4298 * @pi: port information structure
4299 * @vsi_handle: software VSI handle
4300 * @tc_bitmap: TC bitmap
4301 * @maxqs: max queues array per TC
4302 * @owner: LAN or RDMA
4303 *
4304 * This function adds/updates the VSI queues per TC.
4305 */
4306 static enum ice_status
4307 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4308 u16 *maxqs, u8 owner)
4309 {
4310 enum ice_status status = ICE_SUCCESS;
4311 u8 i;
4312
4313 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4314 return ICE_ERR_CFG;
4315
4316 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4317 return ICE_ERR_PARAM;
4318
4319 ice_acquire_lock(&pi->sched_lock);
4320
4321 ice_for_each_traffic_class(i) {
4322 /* configuration is possible only if TC node is present */
4323 if (!ice_sched_get_tc_node(pi, i))
4324 continue;
4325
4326 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4327 ice_is_tc_ena(tc_bitmap, i));
4328 if (status)
4329 break;
4330 }
4331
4332 ice_release_lock(&pi->sched_lock);
4333 return status;
4334 }
4335
4336 /**
4337 * ice_cfg_vsi_lan - configure VSI LAN queues
4338 * @pi: port information structure
4339 * @vsi_handle: software VSI handle
4340 * @tc_bitmap: TC bitmap
4341 * @max_lanqs: max LAN queues array per TC
4342 *
4343 * This function adds/updates the VSI LAN queues per TC.
4344 */
4345 enum ice_status
4346 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4347 u16 *max_lanqs)
4348 {
4349 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4350 ICE_SCHED_NODE_OWNER_LAN);
4351 }
4352
4353 /**
4354 * ice_is_main_vsi - checks whether the VSI is main VSI
4355 * @hw: pointer to the HW struct
4356 * @vsi_handle: VSI handle
4357 *
4358 * Checks whether the VSI is the main VSI (the first PF VSI created on
4359 * given PF).
4360 */
4361 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
4362 {
4363 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
4364 }
4365
4366 /**
4367 * ice_replay_pre_init - replay pre initialization
4368 * @hw: pointer to the HW struct
4369 * @sw: pointer to switch info struct for which function initializes filters
4370 *
4371 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4372 */
4373 static enum ice_status
4374 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
4375 {
4376 enum ice_status status;
4377 u8 i;
4378
4379 /* Delete old entries from replay filter list head if there is any */
4380 ice_rm_sw_replay_rule_info(hw, sw);
4381 /* In start of replay, move entries into replay_rules list, it
4382 * will allow adding rules entries back to filt_rules list,
4383 * which is operational list.
4384 */
4385 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4386 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
4387 &sw->recp_list[i].filt_replay_rules);
4388 ice_sched_replay_agg_vsi_preinit(hw);
4389
4390 status = ice_sched_replay_root_node_bw(hw->port_info);
4391 if (status)
4392 return status;
4393
4394 return ice_sched_replay_tc_node_bw(hw->port_info);
4395 }
4396
4397 /**
4398 * ice_replay_vsi - replay VSI configuration
4399 * @hw: pointer to the HW struct
4400 * @vsi_handle: driver VSI handle
4401 *
4402 * Restore all VSI configuration after reset. It is required to call this
4403 * function with main VSI first.
4404 */
4405 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4406 {
4407 struct ice_switch_info *sw = hw->switch_info;
4408 struct ice_port_info *pi = hw->port_info;
4409 enum ice_status status;
4410
4411 if (!ice_is_vsi_valid(hw, vsi_handle))
4412 return ICE_ERR_PARAM;
4413
4414 /* Replay pre-initialization if there is any */
4415 if (ice_is_main_vsi(hw, vsi_handle)) {
4416 status = ice_replay_pre_init(hw, sw);
4417 if (status)
4418 return status;
4419 }
4420 /* Replay per VSI all RSS configurations */
4421 status = ice_replay_rss_cfg(hw, vsi_handle);
4422 if (status)
4423 return status;
4424 /* Replay per VSI all filters */
4425 status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
4426 if (!status)
4427 status = ice_replay_vsi_agg(hw, vsi_handle);
4428 return status;
4429 }
4430
4431 /**
4432 * ice_replay_post - post replay configuration cleanup
4433 * @hw: pointer to the HW struct
4434 *
4435 * Post replay cleanup.
4436 */
4437 void ice_replay_post(struct ice_hw *hw)
4438 {
4439 /* Delete old entries from replay filter list head */
4440 ice_rm_all_sw_replay_rule_info(hw);
4441 ice_sched_replay_agg(hw);
4442 }
4443
4444 /**
4445 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4446 * @hw: ptr to the hardware info
4447 * @reg: offset of 64 bit HW register to read from
4448 * @prev_stat_loaded: bool to specify if previous stats are loaded
4449 * @prev_stat: ptr to previous loaded stat value
4450 * @cur_stat: ptr to current stat value
4451 */
4452 void
4453 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4454 u64 *prev_stat, u64 *cur_stat)
4455 {
4456 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4457
4458 	/* device stats are not reset at PFR; they likely will not be zeroed
4459 * when the driver starts. Thus, save the value from the first read
4460 * without adding to the statistic value so that we report stats which
4461 * count up from zero.
4462 */
4463 if (!prev_stat_loaded) {
4464 *prev_stat = new_data;
4465 return;
4466 }
4467
4468 /* Calculate the difference between the new and old values, and then
4469 * add it to the software stat value.
4470 */
4471 if (new_data >= *prev_stat)
4472 *cur_stat += new_data - *prev_stat;
4473 else
4474 /* to manage the potential roll-over */
4475 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4476
4477 /* Update the previously stored value to prepare for next read */
4478 *prev_stat = new_data;
4479 }
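
/*
 * Worked example of the roll-over branch above: with a 40-bit counter,
 * prev_stat = 0xFFFFFFFFF0 and a new reading of 0x10 yield
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, i.e. the 0x10 counts up to
 * the wrap plus the 0x10 counts after it.
 */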
4480
4481 /**
4482 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4483 * @hw: ptr to the hardware info
4484 * @reg: offset of HW register to read from
4485 * @prev_stat_loaded: bool to specify if previous stats are loaded
4486 * @prev_stat: ptr to previous loaded stat value
4487 * @cur_stat: ptr to current stat value
4488 */
4489 void
4490 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4491 u64 *prev_stat, u64 *cur_stat)
4492 {
4493 u32 new_data;
4494
4495 new_data = rd32(hw, reg);
4496
4497 	/* device stats are not reset at PFR; they likely will not be zeroed
4498 * when the driver starts. Thus, save the value from the first read
4499 * without adding to the statistic value so that we report stats which
4500 * count up from zero.
4501 */
4502 if (!prev_stat_loaded) {
4503 *prev_stat = new_data;
4504 return;
4505 }
4506
4507 /* Calculate the difference between the new and old values, and then
4508 * add it to the software stat value.
4509 */
4510 if (new_data >= *prev_stat)
4511 *cur_stat += new_data - *prev_stat;
4512 else
4513 /* to manage the potential roll-over */
4514 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4515
4516 /* Update the previously stored value to prepare for next read */
4517 *prev_stat = new_data;
4518 }
4519
4520 /**
4521 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
4522 * @hw: ptr to the hardware info
4523 * @vsi_handle: VSI handle
4524 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
4525 * @cur_stats: ptr to current stats structure
4526 *
4527  * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
4528 * thus cannot be read using the normal ice_stat_update32 function.
4529 *
4530 * Read the GLV_REPC register associated with the given VSI, and update the
4531 * rx_no_desc and rx_error values in the ice_eth_stats structure.
4532 *
4533 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
4534 * cleared each time it's read.
4535 *
4536 * Note that the GLV_RDPC register also counts the causes that would trigger
4537 * GLV_REPC. However, it does not give the finer grained detail about why the
4538 * packets are being dropped. The GLV_REPC values can be used to distinguish
4539 * whether Rx packets are dropped due to errors or due to no available
4540 * descriptors.
4541 */
4542 void
4543 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
4544 struct ice_eth_stats *cur_stats)
4545 {
4546 u16 vsi_num, no_desc, error_cnt;
4547 u32 repc;
4548
4549 if (!ice_is_vsi_valid(hw, vsi_handle))
4550 return;
4551
4552 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
4553
4554 /* If we haven't loaded stats yet, just clear the current value */
4555 if (!prev_stat_loaded) {
4556 wr32(hw, GLV_REPC(vsi_num), 0);
4557 return;
4558 }
4559
4560 repc = rd32(hw, GLV_REPC(vsi_num));
4561 no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
4562 error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
4563
4564 /* Clear the count by writing to the stats register */
4565 wr32(hw, GLV_REPC(vsi_num), 0);
4566
4567 cur_stats->rx_no_desc += no_desc;
4568 cur_stats->rx_errors += error_cnt;
4569 }
4570
4571 /**
4572 * ice_sched_query_elem - query element information from HW
4573 * @hw: pointer to the HW struct
4574 * @node_teid: node TEID to be queried
4575 * @buf: buffer to element information
4576 *
4577 * This function queries HW element information
4578 */
4579 enum ice_status
4580 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4581 struct ice_aqc_txsched_elem_data *buf)
4582 {
4583 u16 buf_size, num_elem_ret = 0;
4584 enum ice_status status;
4585
4586 buf_size = sizeof(*buf);
4587 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
4588 buf->node_teid = CPU_TO_LE32(node_teid);
4589 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4590 NULL);
4591 if (status != ICE_SUCCESS || num_elem_ret != 1)
4592 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4593 return status;
4594 }
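
/*
 * Usage sketch (illustrative only): querying the scheduler element for a
 * queue TEID saved at enable time:
 *
 *	struct ice_aqc_txsched_elem_data elem;
 *
 *	status = ice_sched_query_elem(hw, q_teid, &elem);
 */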
4595
4596 /**
4597 * ice_get_fw_mode - returns FW mode
4598 * @hw: pointer to the HW struct
4599 */
4600 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
4601 {
4602 #define ICE_FW_MODE_DBG_M BIT(0)
4603 #define ICE_FW_MODE_REC_M BIT(1)
4604 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
4605 u32 fw_mode;
4606
4607 /* check the current FW mode */
4608 fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
4609
4610 if (fw_mode & ICE_FW_MODE_DBG_M)
4611 return ICE_FW_MODE_DBG;
4612 else if (fw_mode & ICE_FW_MODE_REC_M)
4613 return ICE_FW_MODE_REC;
4614 else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
4615 return ICE_FW_MODE_ROLLBACK;
4616 else
4617 return ICE_FW_MODE_NORMAL;
4618 }
4619
4620 /**
4621 * ice_fw_supports_link_override
4622 * @hw: pointer to the hardware structure
4623 *
4624 * Checks if the firmware supports link override
4625 */
4626 bool ice_fw_supports_link_override(struct ice_hw *hw)
4627 {
4628 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4629 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4630 return true;
4631 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4632 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4633 return true;
4634 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4635 return true;
4636 }
4637
4638 return false;
4639 }
4640
4641 /**
4642 * ice_get_link_default_override
4643 * @ldo: pointer to the link default override struct
4644 * @pi: pointer to the port info struct
4645 *
4646 * Gets the link default override for a port
4647 */
4648 enum ice_status
4649 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4650 struct ice_port_info *pi)
4651 {
4652 u16 i, tlv, tlv_len, tlv_start, buf, offset;
4653 struct ice_hw *hw = pi->hw;
4654 enum ice_status status;
4655
4656 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4657 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4658 if (status) {
4659 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
4660 return status;
4661 }
4662
4663 /* Each port has its own config; calculate for our port */
4664 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4665 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4666
4667 /* link options first */
4668 status = ice_read_sr_word(hw, tlv_start, &buf);
4669 if (status) {
4670 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4671 return status;
4672 }
4673 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4674 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4675 ICE_LINK_OVERRIDE_PHY_CFG_S;
4676
4677 /* link PHY config */
4678 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4679 status = ice_read_sr_word(hw, offset, &buf);
4680 if (status) {
4681 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
4682 return status;
4683 }
4684 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4685
4686 /* PHY types low */
4687 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4688 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4689 status = ice_read_sr_word(hw, (offset + i), &buf);
4690 if (status) {
4691 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy types (low).\n");
4692 return status;
4693 }
4694 /* shift 16 bits at a time to fill 64 bits */
4695 ldo->phy_type_low |= ((u64)buf << (i * 16));
4696 }
4697
4698 /* PHY types high */
4699 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4700 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4701 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4702 status = ice_read_sr_word(hw, (offset + i), &buf);
4703 if (status) {
4704 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy types (high).\n");
4705 return status;
4706 }
4707 /* shift 16 bits at a time to fill 64 bits */
4708 ldo->phy_type_high |= ((u64)buf << (i * 16));
4709 }
4710
4711 return status;
4712 }
4713
4714 /**
4715 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
4716 * @caps: get PHY capability data
4717 */
4718 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4719 {
4720 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4721 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4722 ICE_AQC_PHY_AN_EN_CLAUSE73 |
4723 ICE_AQC_PHY_AN_EN_CLAUSE37))
4724 return true;
4725
4726 return false;
4727 }
4728
4729 /**
4730 * ice_aq_set_lldp_mib - Set the LLDP MIB
4731 * @hw: pointer to the HW struct
4732 * @mib_type: Local, Remote or both Local and Remote MIBs
4733 * @buf: pointer to the caller-supplied buffer to store the MIB block
4734 * @buf_size: size of the buffer (in bytes)
4735 * @cd: pointer to command details structure or NULL
4736 *
4737 * Set the LLDP MIB. (0x0A08)
4738 */
4739 enum ice_status
4740 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4741 struct ice_sq_cd *cd)
4742 {
4743 struct ice_aqc_lldp_set_local_mib *cmd;
4744 struct ice_aq_desc desc;
4745
4746 cmd = &desc.params.lldp_set_mib;
4747
4748 if (buf_size == 0 || !buf)
4749 return ICE_ERR_PARAM;
4750
4751 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
4752
4753 desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
4754 desc.datalen = CPU_TO_LE16(buf_size);
4755
4756 cmd->type = mib_type;
4757 cmd->length = CPU_TO_LE16(buf_size);
4758
4759 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4760 }
4761
4762 /**
4763  * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
4764 * @hw: pointer to HW struct
4765 */
4766 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
4767 {
4768 if (hw->mac_type != ICE_MAC_E810)
4769 return false;
4770
4771 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
4772 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
4773 return true;
4774 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
4775 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
4776 return true;
4777 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
4778 return true;
4779 }
4780 return false;
4781 }
4782
4783 /**
4784 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
4785 * @hw: pointer to HW struct
4786 * @vsi_num: absolute HW index for VSI
4787 * @add: boolean for if adding or removing a filter
4788 */
4789 enum ice_status
4790 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
4791 {
4792 struct ice_aqc_lldp_filter_ctrl *cmd;
4793 struct ice_aq_desc desc;
4794
4795 cmd = &desc.params.lldp_filter_ctrl;
4796
4797 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
4798
4799 if (add)
4800 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
4801 else
4802 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
4803
4804 cmd->vsi_num = CPU_TO_LE16(vsi_num);
4805
4806 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4807 }
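
/*
 * Usage sketch (illustrative only): adding the LLDP Rx filter for a VSI
 * once the firmware support check passes:
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw))
 *		status = ice_lldp_fltr_add_remove(hw,
 *						  ice_get_hw_vsi_num(hw, vsi_handle),
 *						  true);
 */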
4808