1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
3 */
4
5 #include "ixgbe_x550.h"
6 #include "ixgbe_x540.h"
7 #include "ixgbe_type.h"
8 #include "ixgbe_api.h"
9 #include "ixgbe_common.h"
10 #include "ixgbe_phy.h"
11
12 STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
13 STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
14 STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
15 STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
16
17 /**
18 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
19 * @hw: pointer to hardware structure
20 *
21 * Initialize the function pointers and assign the MAC type for X550.
22 * Does not touch the hardware.
23 **/
24 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
25 {
26 struct ixgbe_mac_info *mac = &hw->mac;
27 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
28 s32 ret_val;
29
30 DEBUGFUNC("ixgbe_init_ops_X550");
31
32 ret_val = ixgbe_init_ops_X540(hw);
33 mac->ops.dmac_config = ixgbe_dmac_config_X550;
34 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
35 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
36 mac->ops.setup_eee = NULL;
37 mac->ops.set_source_address_pruning =
38 ixgbe_set_source_address_pruning_X550;
39 mac->ops.set_ethertype_anti_spoofing =
40 ixgbe_set_ethertype_anti_spoofing_X550;
41
42 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
43 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
44 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
45 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
46 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
47 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
48 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
49 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
50 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
51
52 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
53 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
54 mac->ops.mdd_event = ixgbe_mdd_event_X550;
55 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
56 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
57 mac->ops.disable_rx = ixgbe_disable_rx_x550;
58 /* Manageability interface */
59 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
60 switch (hw->device_id) {
61 case IXGBE_DEV_ID_X550EM_X_1G_T:
62 hw->mac.ops.led_on = NULL;
63 hw->mac.ops.led_off = NULL;
64 break;
65 case IXGBE_DEV_ID_X550EM_X_10G_T:
66 case IXGBE_DEV_ID_X550EM_A_10G_T:
67 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
68 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
69 break;
70 default:
71 break;
72 }
73 return ret_val;
74 }
75
76 /**
77 * ixgbe_read_cs4227 - Read CS4227 register
78 * @hw: pointer to hardware structure
79 * @reg: register number to read
80 * @value: pointer to receive value read
81 *
82 * Returns status code
83 **/
84 STATIC s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
85 {
86 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
87 }
88
89 /**
90 * ixgbe_write_cs4227 - Write CS4227 register
91 * @hw: pointer to hardware structure
92 * @reg: register number to write
93 * @value: value to write to register
94 *
95 * Returns status code
96 **/
97 STATIC s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
98 {
99 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
100 }
101
102 /**
103 * ixgbe_read_pe - Read register from port expander
104 * @hw: pointer to hardware structure
105 * @reg: register number to read
106 * @value: pointer to receive read value
107 *
108 * Returns status code
109 **/
110 STATIC s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
111 {
112 s32 status;
113
114 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
115 if (status != IXGBE_SUCCESS)
116 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
117 "port expander access failed with %d\n", status);
118 return status;
119 }
120
121 /**
122 * ixgbe_write_pe - Write register to port expander
123 * @hw: pointer to hardware structure
124 * @reg: register number to write
125 * @value: value to write
126 *
127 * Returns status code
128 **/
129 STATIC s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
130 {
131 s32 status;
132
133 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
134 if (status != IXGBE_SUCCESS)
135 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
136 "port expander access failed with %d\n", status);
137 return status;
138 }
139
140 /**
141 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
142 * @hw: pointer to hardware structure
143 *
144 * This function assumes that the caller has acquired the proper semaphore.
145 * Returns error code
146 **/
147 STATIC s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
148 {
149 s32 status;
150 u32 retry;
151 u16 value;
152 u8 reg;
153
154 /* Trigger hard reset. */
155 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
156 if (status != IXGBE_SUCCESS)
157 return status;
158 reg |= IXGBE_PE_BIT1;
159 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
160 if (status != IXGBE_SUCCESS)
161 return status;
162
163 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
164 if (status != IXGBE_SUCCESS)
165 return status;
166 reg &= ~IXGBE_PE_BIT1;
167 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
168 if (status != IXGBE_SUCCESS)
169 return status;
170
171 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
172 if (status != IXGBE_SUCCESS)
173 return status;
174 reg &= ~IXGBE_PE_BIT1;
175 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
176 if (status != IXGBE_SUCCESS)
177 return status;
178
179 usec_delay(IXGBE_CS4227_RESET_HOLD);
180
181 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
182 if (status != IXGBE_SUCCESS)
183 return status;
184 reg |= IXGBE_PE_BIT1;
185 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
186 if (status != IXGBE_SUCCESS)
187 return status;
188
189 /* Wait for the reset to complete. */
190 msec_delay(IXGBE_CS4227_RESET_DELAY);
191 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
192 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
193 &value);
194 if (status == IXGBE_SUCCESS &&
195 value == IXGBE_CS4227_EEPROM_LOAD_OK)
196 break;
197 msec_delay(IXGBE_CS4227_CHECK_DELAY);
198 }
199 if (retry == IXGBE_CS4227_RETRIES) {
200 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
201 "CS4227 reset did not complete.");
202 return IXGBE_ERR_PHY;
203 }
204
205 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
206 if (status != IXGBE_SUCCESS ||
207 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
208 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
209 "CS4227 EEPROM did not load successfully.");
210 return IXGBE_ERR_PHY;
211 }
212
213 return IXGBE_SUCCESS;
214 }
215
216 /**
217 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
218 * @hw: pointer to hardware structure
219 **/
220 STATIC void ixgbe_check_cs4227(struct ixgbe_hw *hw)
221 {
222 s32 status = IXGBE_SUCCESS;
223 u32 swfw_mask = hw->phy.phy_semaphore_mask;
224 u16 value = 0;
225 u8 retry;
226
227 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
228 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
229 if (status != IXGBE_SUCCESS) {
230 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
231 "semaphore failed with %d", status);
232 msec_delay(IXGBE_CS4227_CHECK_DELAY);
233 continue;
234 }
235
236 /* Get status of reset flow. */
237 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
238
239 if (status == IXGBE_SUCCESS &&
240 value == IXGBE_CS4227_RESET_COMPLETE)
241 goto out;
242
243 if (status != IXGBE_SUCCESS ||
244 value != IXGBE_CS4227_RESET_PENDING)
245 break;
246
247 /* Reset is pending. Wait and check again. */
248 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
249 msec_delay(IXGBE_CS4227_CHECK_DELAY);
250 }
251
252 /* If still pending, assume other instance failed. */
253 if (retry == IXGBE_CS4227_RETRIES) {
254 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
255 if (status != IXGBE_SUCCESS) {
256 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
257 "semaphore failed with %d", status);
258 return;
259 }
260 }
261
262 /* Reset the CS4227. */
263 status = ixgbe_reset_cs4227(hw);
264 if (status != IXGBE_SUCCESS) {
265 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
266 "CS4227 reset failed: %d", status);
267 goto out;
268 }
269
270 /* Reset takes so long, temporarily release semaphore in case the
271 * other driver instance is waiting for the reset indication.
272 */
273 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
274 IXGBE_CS4227_RESET_PENDING);
275 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
276 msec_delay(10);
277 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
278 if (status != IXGBE_SUCCESS) {
279 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
280 "semaphore failed with %d", status);
281 return;
282 }
283
284 /* Record completion for next time. */
285 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
286 IXGBE_CS4227_RESET_COMPLETE);
287
288 out:
289 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
290 msec_delay(hw->eeprom.semaphore_delay);
291 }
292
293 /**
294 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
295 * @hw: pointer to hardware structure
296 **/
297 STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
298 {
299 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
300
301 if (hw->bus.lan_id) {
302 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
303 esdp |= IXGBE_ESDP_SDP1_DIR;
304 }
305 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
306 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
307 IXGBE_WRITE_FLUSH(hw);
308 }
309
310 /**
311 * ixgbe_identify_phy_x550em - Get PHY type based on device id
312 * @hw: pointer to hardware structure
313 *
314 * Returns error code
315 */
316 STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
317 {
318 hw->mac.ops.set_lan_id(hw);
319
320 ixgbe_read_mng_if_sel_x550em(hw);
321
322 switch (hw->device_id) {
323 case IXGBE_DEV_ID_X550EM_A_SFP:
324 return ixgbe_identify_sfp_module_X550em(hw);
325 case IXGBE_DEV_ID_X550EM_X_SFP:
326 /* set up for CS4227 usage */
327 ixgbe_setup_mux_ctl(hw);
328 ixgbe_check_cs4227(hw);
329 /* Fallthrough */
330
331 case IXGBE_DEV_ID_X550EM_A_SFP_N:
332 return ixgbe_identify_sfp_module_X550em(hw);
333 break;
334 case IXGBE_DEV_ID_X550EM_X_KX4:
335 hw->phy.type = ixgbe_phy_x550em_kx4;
336 break;
337 case IXGBE_DEV_ID_X550EM_X_XFI:
338 hw->phy.type = ixgbe_phy_x550em_xfi;
339 break;
340 case IXGBE_DEV_ID_X550EM_X_KR:
341 case IXGBE_DEV_ID_X550EM_A_KR:
342 case IXGBE_DEV_ID_X550EM_A_KR_L:
343 hw->phy.type = ixgbe_phy_x550em_kr;
344 break;
345 case IXGBE_DEV_ID_X550EM_A_10G_T:
346 case IXGBE_DEV_ID_X550EM_X_10G_T:
347 return ixgbe_identify_phy_generic(hw);
348 case IXGBE_DEV_ID_X550EM_X_1G_T:
349 hw->phy.type = ixgbe_phy_ext_1g_t;
350 break;
351 case IXGBE_DEV_ID_X550EM_A_1G_T:
352 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
353 hw->phy.type = ixgbe_phy_fw;
354 if (hw->bus.lan_id)
355 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
356 else
357 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
358 break;
359 default:
360 break;
361 }
362 return IXGBE_SUCCESS;
363 }
364
365 /**
366 * ixgbe_fw_phy_activity - Perform an activity on a PHY
367 * @hw: pointer to hardware structure
368 * @activity: activity to perform
369 * @data: Pointer to 4 32-bit words of data
370 */
371 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
372 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
373 {
374 union {
375 struct ixgbe_hic_phy_activity_req cmd;
376 struct ixgbe_hic_phy_activity_resp rsp;
377 } hic;
378 u16 retries = FW_PHY_ACT_RETRIES;
379 s32 rc;
380 u16 i;
381
382 do {
383 memset(&hic, 0, sizeof(hic));
384 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
385 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
386 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
387 hic.cmd.port_number = hw->bus.lan_id;
388 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
389 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
390 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
391
392 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
393 sizeof(hic.cmd),
394 IXGBE_HI_COMMAND_TIMEOUT,
395 true);
396 if (rc != IXGBE_SUCCESS)
397 return rc;
398 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
399 FW_CEM_RESP_STATUS_SUCCESS) {
400 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
401 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
402 return IXGBE_SUCCESS;
403 }
404 usec_delay(20);
405 --retries;
406 } while (retries > 0);
407
408 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
409 }
410
411 static const struct {
412 u16 fw_speed;
413 ixgbe_link_speed phy_speed;
414 } ixgbe_fw_map[] = {
415 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
416 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
417 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
418 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
419 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
420 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
421 };
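/* Translation table between firmware speed bits (FW_PHY_ACT_LINK_SPEED_*)
 * and the driver's ixgbe_link_speed flags; it is walked by
 * ixgbe_get_phy_id_fw() and ixgbe_setup_fw_link() below.
 */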
422
423 /**
424 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
425 * @hw: pointer to hardware structure
426 *
427 * Returns error code
428 */
429 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
430 {
431 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
432 u16 phy_speeds;
433 u16 phy_id_lo;
434 s32 rc;
435 u16 i;
436
437 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
438 if (rc)
439 return rc;
440
441 hw->phy.speeds_supported = 0;
442 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
443 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
444 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
445 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
446 }
447 if (!hw->phy.autoneg_advertised)
448 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
449
450 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
451 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
452 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
453 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
454 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
455 return IXGBE_ERR_PHY_ADDR_INVALID;
456 return IXGBE_SUCCESS;
457 }
458
459 /**
460 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
461 * @hw: pointer to hardware structure
462 *
463 * Returns error code
464 */
465 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
466 {
467 if (hw->bus.lan_id)
468 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
469 else
470 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
471
472 hw->phy.type = ixgbe_phy_fw;
473 hw->phy.ops.read_reg = NULL;
474 hw->phy.ops.write_reg = NULL;
475 return ixgbe_get_phy_id_fw(hw);
476 }
477
478 /**
479 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
480 * @hw: pointer to hardware structure
481 *
482 * Returns error code
483 */
484 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
485 {
486 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
487
488 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
489 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
490 }
491
492 STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
493 u32 device_type, u16 *phy_data)
494 {
495 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
496 return IXGBE_NOT_IMPLEMENTED;
497 }
498
499 STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
500 u32 device_type, u16 phy_data)
501 {
502 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
503 return IXGBE_NOT_IMPLEMENTED;
504 }
505
506 /**
507 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
508 * @hw: pointer to the hardware structure
509 * @addr: I2C bus address to read from
510 * @reg: I2C device register to read from
511 * @val: pointer to location to receive read value
512 *
513 * Returns an error code on error.
514 **/
515 STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
516 u16 reg, u16 *val)
517 {
518 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
519 }
520
521 /**
522 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
523 * @hw: pointer to the hardware structure
524 * @addr: I2C bus address to read from
525 * @reg: I2C device register to read from
526 * @val: pointer to location to receive read value
527 *
528 * Returns an error code on error.
529 **/
530 STATIC s32
531 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
532 u16 reg, u16 *val)
533 {
534 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
535 }
536
537 /**
538 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
539 * @hw: pointer to the hardware structure
540 * @addr: I2C bus address to write to
541 * @reg: I2C device register to write to
542 * @val: value to write
543 *
544 * Returns an error code on error.
545 **/
546 STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
547 u8 addr, u16 reg, u16 val)
548 {
549 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
550 }
551
552 /**
553 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
554 * @hw: pointer to the hardware structure
555 * @addr: I2C bus address to write to
556 * @reg: I2C device register to write to
557 * @val: value to write
558 *
559 * Returns an error code on error.
560 **/
561 STATIC s32
562 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
563 u8 addr, u16 reg, u16 val)
564 {
565 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
566 }
567
568 /**
569 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
570 * @hw: pointer to hardware structure
571 *
572 * Initialize the function pointers and assign the MAC type for X550EM.
573 * Does not touch the hardware.
574 **/
575 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
576 {
577 struct ixgbe_mac_info *mac = &hw->mac;
578 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
579 struct ixgbe_phy_info *phy = &hw->phy;
580 s32 ret_val;
581
582 DEBUGFUNC("ixgbe_init_ops_X550EM");
583
584 /* Similar to X550 so start there. */
585 ret_val = ixgbe_init_ops_X550(hw);
586
587 /* Since this function eventually calls
588 * ixgbe_init_ops_X540 by design, we are setting
589 * the pointers to NULL explicitly here to overwrite
590 * the values being set in the x540 function.
591 */
592 /* Thermal sensor not supported in x550EM */
593 mac->ops.get_thermal_sensor_data = NULL;
594 mac->ops.init_thermal_sensor_thresh = NULL;
595 mac->thermal_sensor_enabled = false;
596
597 /* FCOE not supported in x550EM */
598 mac->ops.get_san_mac_addr = NULL;
599 mac->ops.set_san_mac_addr = NULL;
600 mac->ops.get_wwn_prefix = NULL;
601 mac->ops.get_fcoe_boot_status = NULL;
602
603 /* IPsec not supported in x550EM */
604 mac->ops.disable_sec_rx_path = NULL;
605 mac->ops.enable_sec_rx_path = NULL;
606
607 /* AUTOC register is not present in x550EM. */
608 mac->ops.prot_autoc_read = NULL;
609 mac->ops.prot_autoc_write = NULL;
610
611 /* X550EM bus type is internal */
612 hw->bus.type = ixgbe_bus_type_internal;
613 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
614
615
616 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
617 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
618 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
619 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
620 mac->ops.get_supported_physical_layer =
621 ixgbe_get_supported_physical_layer_X550em;
622
623 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
624 mac->ops.setup_fc = ixgbe_setup_fc_generic;
625 else
626 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
627
628 /* PHY */
629 phy->ops.init = ixgbe_init_phy_ops_X550em;
630 switch (hw->device_id) {
631 case IXGBE_DEV_ID_X550EM_A_1G_T:
632 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
633 mac->ops.setup_fc = NULL;
634 phy->ops.identify = ixgbe_identify_phy_fw;
635 phy->ops.set_phy_power = NULL;
636 phy->ops.get_firmware_version = NULL;
637 break;
638 case IXGBE_DEV_ID_X550EM_X_1G_T:
639 mac->ops.setup_fc = NULL;
640 phy->ops.identify = ixgbe_identify_phy_x550em;
641 phy->ops.set_phy_power = NULL;
642 break;
643 default:
644 phy->ops.identify = ixgbe_identify_phy_x550em;
645 }
646
647 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
648 phy->ops.set_phy_power = NULL;
649
650
651 /* EEPROM */
652 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
653 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
654 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
655 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
656 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
657 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
658 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
659 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
660
661 return ret_val;
662 }
663
664 /**
665 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
666 * @hw: pointer to hardware structure
667 */
668 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
669 {
670 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
671 s32 rc;
672 u16 i;
673
674 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
675 return 0;
676
677 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
678 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
679 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
680 return IXGBE_ERR_INVALID_LINK_SETTINGS;
681 }
682
683 switch (hw->fc.requested_mode) {
684 case ixgbe_fc_full:
685 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
686 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
687 break;
688 case ixgbe_fc_rx_pause:
689 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
690 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
691 break;
692 case ixgbe_fc_tx_pause:
693 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
694 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
695 break;
696 default:
697 break;
698 }
699
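/* setup[0] is the first data word of the FW_PHY_ACT_SETUP_LINK activity:
 * the pause bits above, the advertised speed bits below and the
 * HP/AN/EEE flags are all OR'd into it before the request is sent.
 */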
700 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
701 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
702 setup[0] |= (u32)(ixgbe_fw_map[i].fw_speed);
703 }
704 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
705
706 if (hw->phy.eee_speeds_advertised)
707 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
708
709 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
710 if (rc)
711 return rc;
712 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
713 return IXGBE_ERR_OVERTEMP;
714 return IXGBE_SUCCESS;
715 }
716
717 /**
718 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
719 * @hw: pointer to hardware structure
720 *
721 * Called at init time to set up flow control.
722 */
723 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
724 {
725 if (hw->fc.requested_mode == ixgbe_fc_default)
726 hw->fc.requested_mode = ixgbe_fc_full;
727
728 return ixgbe_setup_fw_link(hw);
729 }
730
731 /**
732 * ixgbe_setup_eee_fw - Enable/disable EEE support
733 * @hw: pointer to the HW structure
734 * @enable_eee: boolean flag to enable EEE
735 *
736 * Enable/disable EEE based on enable_eee flag.
737 * This function controls EEE for firmware-based PHY implementations.
738 */
739 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
740 {
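/* Nothing to do if EEE is already in the requested state: a non-zero
 * advertised-speed mask means EEE is currently enabled.
 */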
741 if (!!hw->phy.eee_speeds_advertised == enable_eee)
742 return IXGBE_SUCCESS;
743 if (enable_eee)
744 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
745 else
746 hw->phy.eee_speeds_advertised = 0;
747 return hw->phy.ops.setup_link(hw);
748 }
749
750 /**
751 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
752 * @hw: pointer to hardware structure
753 *
754 * Initialize the function pointers and assign the MAC type for X550EM_a.
755 * Does not touch the hardware.
756 **/
757 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
758 {
759 struct ixgbe_mac_info *mac = &hw->mac;
760 s32 ret_val;
761
762 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
763
764 /* Start with generic X550EM init */
765 ret_val = ixgbe_init_ops_X550EM(hw);
766
767 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
768 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
769 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
770 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
771 } else {
772 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
773 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
774 }
775 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
776 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
777
778 switch (mac->ops.get_media_type(hw)) {
779 case ixgbe_media_type_fiber:
780 mac->ops.setup_fc = NULL;
781 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
782 break;
783 case ixgbe_media_type_backplane:
784 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
785 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
786 break;
787 default:
788 break;
789 }
790
791 switch (hw->device_id) {
792 case IXGBE_DEV_ID_X550EM_A_1G_T:
793 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
794 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
795 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
796 mac->ops.setup_eee = ixgbe_setup_eee_fw;
797 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
798 IXGBE_LINK_SPEED_1GB_FULL;
799 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
800 break;
801 default:
802 break;
803 }
804
805 return ret_val;
806 }
807
808 /**
809 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
810 * @hw: pointer to hardware structure
811 *
812 * Initialize the function pointers and assign the MAC type for X550EM_x.
813 * Does not touch the hardware.
814 **/
815 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
816 {
817 struct ixgbe_mac_info *mac = &hw->mac;
818 struct ixgbe_link_info *link = &hw->link;
819 s32 ret_val;
820
821 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
822
823 /* Start with generic X550EM init */
824 ret_val = ixgbe_init_ops_X550EM(hw);
825
826 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
827 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
828 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
829 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
830 link->ops.read_link = ixgbe_read_i2c_combined_generic;
831 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
832 link->ops.write_link = ixgbe_write_i2c_combined_generic;
833 link->ops.write_link_unlocked =
834 ixgbe_write_i2c_combined_generic_unlocked;
835 link->addr = IXGBE_CS4227;
836
837 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
838 mac->ops.setup_fc = NULL;
839 mac->ops.setup_eee = NULL;
840 mac->ops.init_led_link_act = NULL;
841 }
842
843 return ret_val;
844 }
845
846 /**
847 * ixgbe_dmac_config_X550
848 * @hw: pointer to hardware structure
849 *
850 * Configure DMA coalescing. If enabling dmac, dmac is activated.
851 * When disabling dmac, the dmac enable bit is cleared.
852 **/
853 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
854 {
855 u32 reg, high_pri_tc;
856
857 DEBUGFUNC("ixgbe_dmac_config_X550");
858
859 /* Disable DMA coalescing before configuring */
860 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
861 reg &= ~IXGBE_DMACR_DMAC_EN;
862 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
863
864 /* Disable DMA Coalescing if the watchdog timer is 0 */
865 if (!hw->mac.dmac_config.watchdog_timer)
866 goto out;
867
868 ixgbe_dmac_config_tcs_X550(hw);
869
870 /* Configure DMA Coalescing Control Register */
871 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
872
873 /* Set the watchdog timer in units of 40.96 usec */
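/* (value * 100) / 4096 equals value / 40.96, i.e. the configured
 * watchdog time expressed in 40.96 usec ticks.
 */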
874 reg &= ~IXGBE_DMACR_DMACWT_MASK;
875 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
876
877 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
878 /* If fcoe is enabled, set high priority traffic class */
879 if (hw->mac.dmac_config.fcoe_en) {
880 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
881 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
882 IXGBE_DMACR_HIGH_PRI_TC_MASK);
883 }
884 reg |= IXGBE_DMACR_EN_MNG_IND;
885
886 /* Enable DMA coalescing after configuration */
887 reg |= IXGBE_DMACR_DMAC_EN;
888 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
889
890 out:
891 return IXGBE_SUCCESS;
892 }
893
894 /**
895 * ixgbe_dmac_config_tcs_X550
896 * @hw: pointer to hardware structure
897 *
898 * Configure DMA coalescing threshold per TC. The dmac enable bit must
899 * be cleared before configuring.
900 **/
901 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
902 {
903 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
904
905 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
906
907 /* Select the Rx packet buffer headroom for the configured link speed */
908 switch (hw->mac.dmac_config.link_speed) {
909 case IXGBE_LINK_SPEED_10_FULL:
910 case IXGBE_LINK_SPEED_100_FULL:
911 pb_headroom = IXGBE_DMACRXT_100M;
912 break;
913 case IXGBE_LINK_SPEED_1GB_FULL:
914 pb_headroom = IXGBE_DMACRXT_1G;
915 break;
916 default:
917 pb_headroom = IXGBE_DMACRXT_10G;
918 break;
919 }
920
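/* Shifting MAXFRS by IXGBE_MHADD_MFS_SHIFT extracts the max frame size
 * in bytes; dividing by 1024 converts it to KB, the same unit as the
 * Rx packet buffer thresholds programmed below.
 */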
921 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
922 IXGBE_MHADD_MFS_SHIFT) / 1024);
923
924 /* Set the per Rx packet buffer receive threshold */
925 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
926 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
927 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
928
929 if (tc < hw->mac.dmac_config.num_tcs) {
930 /* Get Rx PB size */
931 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
932 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
933 IXGBE_RXPBSIZE_SHIFT;
934
935 /* Calculate receive buffer threshold in kilobytes */
936 if (rx_pb_size > pb_headroom)
937 rx_pb_size = rx_pb_size - pb_headroom;
938 else
939 rx_pb_size = 0;
940
941 /* The DMCTH threshold must be at least the max frame size (MFS) */
942 reg |= (rx_pb_size > maxframe_size_kb) ?
943 rx_pb_size : maxframe_size_kb;
944 }
945 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
946 }
947 return IXGBE_SUCCESS;
948 }
949
950 /**
951 * ixgbe_dmac_update_tcs_X550
952 * @hw: pointer to hardware structure
953 *
954 * Disables dmac, updates per TC settings, and then enables dmac.
955 **/
956 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
957 {
958 u32 reg;
959
960 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
961
962 /* Disable DMA coalescing before configuring */
963 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
964 reg &= ~IXGBE_DMACR_DMAC_EN;
965 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
966
967 ixgbe_dmac_config_tcs_X550(hw);
968
969 /* Enable DMA coalescing after configuration */
970 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
971 reg |= IXGBE_DMACR_DMAC_EN;
972 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
973
974 return IXGBE_SUCCESS;
975 }
976
977 /**
978 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
979 * @hw: pointer to hardware structure
980 *
981 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
982 * ixgbe_hw struct in order to set up EEPROM access.
983 **/
984 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
985 {
986 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
987 u32 eec;
988 u16 eeprom_size;
989
990 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
991
992 if (eeprom->type == ixgbe_eeprom_uninitialized) {
993 eeprom->semaphore_delay = 10;
994 eeprom->type = ixgbe_flash;
995
996 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
997 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
998 IXGBE_EEC_SIZE_SHIFT);
999 eeprom->word_size = 1 << (eeprom_size +
1000 IXGBE_EEPROM_WORD_SIZE_SHIFT);
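/* word_size = 2^(encoded EEC size + IXGBE_EEPROM_WORD_SIZE_SHIFT), so
 * the EEC SIZE field selects a power-of-two flash word count.
 */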
1001
1002 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1003 eeprom->type, eeprom->word_size);
1004 }
1005
1006 return IXGBE_SUCCESS;
1007 }
1008
1009 /**
1010 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1011 * @hw: pointer to hardware structure
1012 * @enable: enable or disable source address pruning
1013 * @pool: Rx pool to set source address pruning for
1014 **/
1015 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1016 unsigned int pool)
1017 {
1018 u64 pfflp;
1019
1020 /* max rx pool is 63 */
1021 if (pool > 63)
1022 return;
1023
1024 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1025 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
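/* PFFLPL/PFFLPH together form a 64-bit bitmap with one pruning-enable
 * bit per Rx pool; the pool's bit is updated and the two 32-bit halves
 * are written back below.
 */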
1026
1027 if (enable)
1028 pfflp |= (1ULL << pool);
1029 else
1030 pfflp &= ~(1ULL << pool);
1031
1032 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1033 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1034 }
1035
1036 /**
1037 * ixgbe_set_ethertype_anti_spoofing_X550 - Configure Ethertype anti-spoofing
1038 * @hw: pointer to hardware structure
1039 * @enable: enable or disable switch for Ethertype anti-spoofing
1040 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1041 *
1042 **/
1043 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1044 bool enable, int vf)
1045 {
1046 int vf_target_reg = vf >> 3;
1047 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1048 u32 pfvfspoof;
1049
1050 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1051
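/* Each PFVFSPOOF register holds the Ethertype anti-spoofing enables for
 * 8 VF pools: vf >> 3 selects the register and (vf % 8) plus the
 * ETHERTYPEAS shift selects the bit within it.
 */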
1052 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1053 if (enable)
1054 pfvfspoof |= (1 << vf_target_shift);
1055 else
1056 pfvfspoof &= ~(1 << vf_target_shift);
1057
1058 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1059 }
1060
1061 /**
1062 * ixgbe_iosf_wait - Wait for IOSF command completion
1063 * @hw: pointer to hardware structure
1064 * @ctrl: pointer to location to receive final IOSF control value
1065 *
1066 * Returns failing status on timeout
1067 *
1068 * Note: ctrl can be NULL if the IOSF control register value is not needed
1069 **/
1070 STATIC s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1071 {
1072 u32 i, command = 0;
1073
1074 /* Check every 10 usec to see if the address cycle completed.
1075 * The SB IOSF BUSY bit will clear when the operation is
1076 * complete
1077 */
1078 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1079 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1080 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1081 break;
1082 usec_delay(10);
1083 }
1084 if (ctrl)
1085 *ctrl = command;
1086 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1087 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1088 return IXGBE_ERR_PHY;
1089 }
1090
1091 return IXGBE_SUCCESS;
1092 }
1093
1094 /**
1095 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1096 * of the IOSF device
1097 * @hw: pointer to hardware structure
1098 * @reg_addr: 32 bit PHY register to write
1099 * @device_type: 3 bit device type
1100 * @data: Data to write to the register
1101 **/
1102 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1103 u32 device_type, u32 data)
1104 {
1105 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1106 u32 command, error;
1107 s32 ret;
1108
1109 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1110 if (ret != IXGBE_SUCCESS)
1111 return ret;
1112
1113 ret = ixgbe_iosf_wait(hw, NULL);
1114 if (ret != IXGBE_SUCCESS)
1115 goto out;
1116
1117 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1118 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1119
1120 /* Write IOSF control register */
1121 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1122
1123 /* Write IOSF data register */
1124 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1125
1126 ret = ixgbe_iosf_wait(hw, &command);
1127
1128 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1129 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1130 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1131 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1132 "Failed to write, error %x\n", error);
1133 ret = IXGBE_ERR_PHY;
1134 }
1135
1136 out:
1137 ixgbe_release_swfw_semaphore(hw, gssr);
1138 return ret;
1139 }
1140
1141 /**
1142 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1143 * @hw: pointer to hardware structure
1144 * @reg_addr: 32 bit PHY register to read
1145 * @device_type: 3 bit device type
1146 * @data: Pointer to location to receive the read data
1147 **/
1148 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1149 u32 device_type, u32 *data)
1150 {
1151 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1152 u32 command, error;
1153 s32 ret;
1154
1155 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1156 if (ret != IXGBE_SUCCESS)
1157 return ret;
1158
1159 ret = ixgbe_iosf_wait(hw, NULL);
1160 if (ret != IXGBE_SUCCESS)
1161 goto out;
1162
1163 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1164 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1165
1166 /* Write IOSF control register */
1167 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1168
1169 ret = ixgbe_iosf_wait(hw, &command);
1170
1171 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1172 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1173 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1174 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1175 "Failed to read, error %x\n", error);
1176 ret = IXGBE_ERR_PHY;
1177 }
1178
1179 if (ret == IXGBE_SUCCESS)
1180 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1181
1182 out:
1183 ixgbe_release_swfw_semaphore(hw, gssr);
1184 return ret;
1185 }
1186
1187 /**
1188 * ixgbe_get_phy_token - Get the token for shared phy access
1189 * @hw: Pointer to hardware structure
1190 */
1191
1192 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1193 {
1194 struct ixgbe_hic_phy_token_req token_cmd;
1195 s32 status;
1196
1197 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1198 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1199 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1200 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1201 token_cmd.port_number = hw->bus.lan_id;
1202 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1203 token_cmd.pad = 0;
1204 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1205 sizeof(token_cmd),
1206 IXGBE_HI_COMMAND_TIMEOUT,
1207 true);
1208 if (status) {
1209 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1210 status);
1211 return status;
1212 }
1213 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1214 return IXGBE_SUCCESS;
1215 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1216 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1217 token_cmd.hdr.cmd_or_resp.ret_status);
1218 return IXGBE_ERR_FW_RESP_INVALID;
1219 }
1220
1221 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1222 return IXGBE_ERR_TOKEN_RETRY;
1223 }
1224
1225 /**
1226 * ixgbe_put_phy_token - Put the token for shared phy access
1227 * @hw: Pointer to hardware structure
1228 */
1229
1230 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1231 {
1232 struct ixgbe_hic_phy_token_req token_cmd;
1233 s32 status;
1234
1235 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1236 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1237 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1238 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1239 token_cmd.port_number = hw->bus.lan_id;
1240 token_cmd.command_type = FW_PHY_TOKEN_REL;
1241 token_cmd.pad = 0;
1242 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1243 sizeof(token_cmd),
1244 IXGBE_HI_COMMAND_TIMEOUT,
1245 true);
1246 if (status)
1247 return status;
1248 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1249 return IXGBE_SUCCESS;
1250
1251 DEBUGOUT("Put PHY Token host interface command failed");
1252 return IXGBE_ERR_FW_RESP_INVALID;
1253 }
1254
1255 /**
1256 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1257 * of the IOSF device
1258 * @hw: pointer to hardware structure
1259 * @reg_addr: 32 bit PHY register to write
1260 * @device_type: 3 bit device type
1261 * @data: Data to write to the register
1262 **/
1263 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1264 u32 device_type, u32 data)
1265 {
1266 struct ixgbe_hic_internal_phy_req write_cmd;
1267 s32 status;
1268 UNREFERENCED_1PARAMETER(device_type);
1269
1270 memset(&write_cmd, 0, sizeof(write_cmd));
1271 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1272 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1273 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1274 write_cmd.port_number = hw->bus.lan_id;
1275 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1276 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1277 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1278
1279 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1280 sizeof(write_cmd),
1281 IXGBE_HI_COMMAND_TIMEOUT, false);
1282
1283 return status;
1284 }
1285
1286 /**
1287 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1288 * @hw: pointer to hardware structure
1289 * @reg_addr: 32 bit PHY register to read
1290 * @device_type: 3 bit device type
1291 * @data: Pointer to location to receive the read data
1292 **/
1293 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1294 u32 device_type, u32 *data)
1295 {
1296 union {
1297 struct ixgbe_hic_internal_phy_req cmd;
1298 struct ixgbe_hic_internal_phy_resp rsp;
1299 } hic;
1300 s32 status;
1301 UNREFERENCED_1PARAMETER(device_type);
1302
1303 memset(&hic, 0, sizeof(hic));
1304 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1305 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1306 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1307 hic.cmd.port_number = hw->bus.lan_id;
1308 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1309 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1310
1311 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1312 sizeof(hic.cmd),
1313 IXGBE_HI_COMMAND_TIMEOUT, true);
1314
1315 /* Extract the register value from the response. */
1316 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1317
1318 return status;
1319 }
1320
1321 /**
1322 * ixgbe_disable_mdd_X550
1323 * @hw: pointer to hardware structure
1324 *
1325 * Disable malicious driver detection
1326 **/
1327 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1328 {
1329 u32 reg;
1330
1331 DEBUGFUNC("ixgbe_disable_mdd_X550");
1332
1333 /* Disable MDD for TX DMA and interrupt */
1334 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1335 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1336 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1337
1338 /* Disable MDD for RX and interrupt */
1339 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1340 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1341 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1342 }
1343
1344 /**
1345 * ixgbe_enable_mdd_X550
1346 * @hw: pointer to hardware structure
1347 *
1348 * Enable malicious driver detection
1349 **/
1350 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1351 {
1352 u32 reg;
1353
1354 DEBUGFUNC("ixgbe_enable_mdd_X550");
1355
1356 /* Enable MDD for TX DMA and interrupt */
1357 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1358 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1359 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1360
1361 /* Enable MDD for RX and interrupt */
1362 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1363 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1364 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1365 }
1366
1367 /**
1368 * ixgbe_restore_mdd_vf_X550
1369 * @hw: pointer to hardware structure
1370 * @vf: vf index
1371 *
1372 * Restore VF that was disabled during malicious driver detection event
1373 **/
1374 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1375 {
1376 u32 idx, reg, num_qs, start_q, bitmask;
1377
1378 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1379
1380 /* Map VF to queues */
1381 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1382 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1383 case IXGBE_MRQC_VMDQRT8TCEN:
1384 num_qs = 8; /* 16 VFs / pools */
1385 bitmask = 0x000000FF;
1386 break;
1387 case IXGBE_MRQC_VMDQRSS32EN:
1388 case IXGBE_MRQC_VMDQRT4TCEN:
1389 num_qs = 4; /* 32 VFs / pools */
1390 bitmask = 0x0000000F;
1391 break;
1392 default: /* 64 VFs / pools */
1393 num_qs = 2;
1394 bitmask = 0x00000003;
1395 break;
1396 }
1397 start_q = vf * num_qs;
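/* Example: in the default 64-pool mode each VF owns 2 queues, so VF 10
 * maps to queues 20-21 and bits 20-21 of WQBR_TX(0)/WQBR_RX(0) are
 * written below to release them (the registers are RW1C).
 */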
1398
1399 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1400 idx = start_q / 32;
1401 reg = 0;
1402 reg |= (bitmask << (start_q % 32));
1403 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1404 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1405 }
1406
1407 /**
1408 * ixgbe_mdd_event_X550
1409 * @hw: pointer to hardware structure
1410 * @vf_bitmap: vf bitmap of malicious vfs
1411 *
1412 * Handle malicious driver detection event.
1413 **/
1414 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1415 {
1416 u32 wqbr;
1417 u32 i, j, reg, q, shift, vf, idx;
1418
1419 DEBUGFUNC("ixgbe_mdd_event_X550");
1420
1421 /* Figure out the pool size for mapping queues to VFs */
1422 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1423 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1424 case IXGBE_MRQC_VMDQRT8TCEN:
1425 shift = 3; /* 16 VFs / pools */
1426 break;
1427 case IXGBE_MRQC_VMDQRSS32EN:
1428 case IXGBE_MRQC_VMDQRT4TCEN:
1429 shift = 2; /* 32 VFs / pools */
1430 break;
1431 default:
1432 shift = 1; /* 64 VFs / pools */
1433 break;
1434 }
1435
1436 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1437 for (i = 0; i < 4; i++) {
1438 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1439 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1440
1441 if (!wqbr)
1442 continue;
1443
1444 /* Get malicious queue */
1445 for (j = 0; j < 32 && wqbr; j++) {
1446
1447 if (!(wqbr & (1 << j)))
1448 continue;
1449
1450 /* Get queue from bitmask */
1451 q = j + (i * 32);
1452
1453 /* Map queue to vf */
1454 vf = (q >> shift);
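/* Example: with shift == 1 (64 pools, 2 queues per pool) a blocked
 * queue 5 maps to VF 2 and sets bit 2 of vf_bitmap[0] below.
 */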
1455
1456 /* Set vf bit in vf_bitmap */
1457 idx = vf / 32;
1458 vf_bitmap[idx] |= (1 << (vf % 32));
1459 wqbr &= ~(1 << j);
1460 }
1461 }
1462 }
1463
1464 /**
1465 * ixgbe_get_media_type_X550em - Get media type
1466 * @hw: pointer to hardware structure
1467 *
1468 * Returns the media type (fiber, copper, backplane)
1469 */
1470 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1471 {
1472 enum ixgbe_media_type media_type;
1473
1474 DEBUGFUNC("ixgbe_get_media_type_X550em");
1475
1476 /* Detect if there is a copper PHY attached. */
1477 switch (hw->device_id) {
1478 case IXGBE_DEV_ID_X550EM_X_KR:
1479 case IXGBE_DEV_ID_X550EM_X_KX4:
1480 case IXGBE_DEV_ID_X550EM_X_XFI:
1481 case IXGBE_DEV_ID_X550EM_A_KR:
1482 case IXGBE_DEV_ID_X550EM_A_KR_L:
1483 media_type = ixgbe_media_type_backplane;
1484 break;
1485 case IXGBE_DEV_ID_X550EM_X_SFP:
1486 case IXGBE_DEV_ID_X550EM_A_SFP:
1487 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1488 case IXGBE_DEV_ID_X550EM_A_QSFP:
1489 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1490 media_type = ixgbe_media_type_fiber;
1491 break;
1492 case IXGBE_DEV_ID_X550EM_X_1G_T:
1493 case IXGBE_DEV_ID_X550EM_X_10G_T:
1494 case IXGBE_DEV_ID_X550EM_A_10G_T:
1495 media_type = ixgbe_media_type_copper;
1496 break;
1497 case IXGBE_DEV_ID_X550EM_A_SGMII:
1498 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1499 media_type = ixgbe_media_type_backplane;
1500 hw->phy.type = ixgbe_phy_sgmii;
1501 break;
1502 case IXGBE_DEV_ID_X550EM_A_1G_T:
1503 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1504 media_type = ixgbe_media_type_copper;
1505 break;
1506 default:
1507 media_type = ixgbe_media_type_unknown;
1508 break;
1509 }
1510 return media_type;
1511 }
1512
1513 /**
1514 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1515 * @hw: pointer to hardware structure
1516 * @linear: true if SFP module is linear
1517 */
1518 STATIC s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1519 {
1520 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1521
1522 switch (hw->phy.sfp_type) {
1523 case ixgbe_sfp_type_not_present:
1524 return IXGBE_ERR_SFP_NOT_PRESENT;
1525 case ixgbe_sfp_type_da_cu_core0:
1526 case ixgbe_sfp_type_da_cu_core1:
1527 *linear = true;
1528 break;
1529 case ixgbe_sfp_type_srlr_core0:
1530 case ixgbe_sfp_type_srlr_core1:
1531 case ixgbe_sfp_type_da_act_lmt_core0:
1532 case ixgbe_sfp_type_da_act_lmt_core1:
1533 case ixgbe_sfp_type_1g_sx_core0:
1534 case ixgbe_sfp_type_1g_sx_core1:
1535 case ixgbe_sfp_type_1g_lx_core0:
1536 case ixgbe_sfp_type_1g_lx_core1:
1537 case ixgbe_sfp_type_1g_lha_core0:
1538 case ixgbe_sfp_type_1g_lha_core1:
1539 *linear = false;
1540 break;
1541 case ixgbe_sfp_type_unknown:
1542 case ixgbe_sfp_type_1g_cu_core0:
1543 case ixgbe_sfp_type_1g_cu_core1:
1544 default:
1545 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1546 }
1547
1548 return IXGBE_SUCCESS;
1549 }
1550
1551 /**
1552 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1553 * @hw: pointer to hardware structure
1554 *
1555 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1556 **/
1557 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1558 {
1559 s32 status;
1560 bool linear;
1561
1562 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1563
1564 status = ixgbe_identify_module_generic(hw);
1565
1566 if (status != IXGBE_SUCCESS)
1567 return status;
1568
1569 /* Check if SFP module is supported */
1570 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1571
1572 return status;
1573 }
1574
1575 /**
1576 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1577 * @hw: pointer to hardware structure
1578 */
1579 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1580 {
1581 s32 status;
1582 bool linear;
1583
1584 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1585
1586 /* Check if SFP module is supported */
1587 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1588
1589 if (status != IXGBE_SUCCESS)
1590 return status;
1591
1592 ixgbe_init_mac_link_ops_X550em(hw);
1593 hw->phy.ops.reset = NULL;
1594
1595 return IXGBE_SUCCESS;
1596 }
1597
1598 /**
1599 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1600 * internal PHY
1601 * @hw: pointer to hardware structure
1602 **/
1603 STATIC s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1604 {
1605 s32 status;
1606 u32 link_ctrl;
1607
1608 /* Restart auto-negotiation. */
1609 status = hw->mac.ops.read_iosf_sb_reg(hw,
1610 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1611 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1612
1613 if (status) {
1614 DEBUGOUT("Auto-negotiation did not complete\n");
1615 return status;
1616 }
1617
1618 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1619 status = hw->mac.ops.write_iosf_sb_reg(hw,
1620 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1621 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1622
1623 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1624 u32 flx_mask_st20;
1625
1626 /* Indicate to FW that AN restart has been asserted */
1627 status = hw->mac.ops.read_iosf_sb_reg(hw,
1628 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1629 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1630
1631 if (status) {
1632 DEBUGOUT("Auto-negotiation did not complete\n");
1633 return status;
1634 }
1635
1636 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1637 status = hw->mac.ops.write_iosf_sb_reg(hw,
1638 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1639 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1640 }
1641
1642 return status;
1643 }
1644
1645 /**
1646 * ixgbe_setup_sgmii - Set up link for sgmii
1647 * @hw: pointer to hardware structure
1648 * @speed: new link speed
1649 * @autoneg_wait: true when waiting for completion is needed
1650 */
1651 STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1652 bool autoneg_wait)
1653 {
1654 struct ixgbe_mac_info *mac = &hw->mac;
1655 u32 lval, sval, flx_val;
1656 s32 rc;
1657
1658 rc = mac->ops.read_iosf_sb_reg(hw,
1659 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1660 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1661 if (rc)
1662 return rc;
1663
1664 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1665 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1666 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1667 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1668 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1669 rc = mac->ops.write_iosf_sb_reg(hw,
1670 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1671 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1672 if (rc)
1673 return rc;
1674
1675 rc = mac->ops.read_iosf_sb_reg(hw,
1676 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1677 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1678 if (rc)
1679 return rc;
1680
1681 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1682 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1683 rc = mac->ops.write_iosf_sb_reg(hw,
1684 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1685 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1686 if (rc)
1687 return rc;
1688
1689 rc = mac->ops.read_iosf_sb_reg(hw,
1690 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1691 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1692 if (rc)
1693 return rc;
1694
1695 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1696 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1697 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1698 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1699 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1700
1701 rc = mac->ops.write_iosf_sb_reg(hw,
1702 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1703 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1704 if (rc)
1705 return rc;
1706
1707 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1708 if (rc)
1709 return rc;
1710
1711 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1712 }
1713
1714 /**
1715 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1716 * @hw: pointer to hardware structure
1717 * @speed: new link speed
1718 * @autoneg_wait: true when waiting for completion is needed
1719 */
1720 STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1721 bool autoneg_wait)
1722 {
1723 struct ixgbe_mac_info *mac = &hw->mac;
1724 u32 lval, sval, flx_val;
1725 s32 rc;
1726
1727 rc = mac->ops.read_iosf_sb_reg(hw,
1728 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1729 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1730 if (rc)
1731 return rc;
1732
1733 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1734 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1735 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1736 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1737 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1738 rc = mac->ops.write_iosf_sb_reg(hw,
1739 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1740 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1741 if (rc)
1742 return rc;
1743
1744 rc = mac->ops.read_iosf_sb_reg(hw,
1745 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1746 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1747 if (rc)
1748 return rc;
1749
1750 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1751 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1752 rc = mac->ops.write_iosf_sb_reg(hw,
1753 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1754 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1755 if (rc)
1756 return rc;
1757
1758 rc = mac->ops.write_iosf_sb_reg(hw,
1759 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1760 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1761 if (rc)
1762 return rc;
1763
1764 rc = mac->ops.read_iosf_sb_reg(hw,
1765 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1766 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1767 if (rc)
1768 return rc;
1769
1770 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1771 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1772 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1773 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1774 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1775
1776 rc = mac->ops.write_iosf_sb_reg(hw,
1777 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1778 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1779 if (rc)
1780 return rc;
1781
1782 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1783
1784 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1785 }
1786
1787 /**
1788 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1789 * @hw: pointer to hardware structure
1790 */
1791 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1792 {
1793 struct ixgbe_mac_info *mac = &hw->mac;
1794
1795 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1796
1797 switch (hw->mac.ops.get_media_type(hw)) {
1798 case ixgbe_media_type_fiber:
1799 /* CS4227 does not support autoneg, so disable the laser control
1800 * functions for SFP+ fiber
1801 */
1802 mac->ops.disable_tx_laser = NULL;
1803 mac->ops.enable_tx_laser = NULL;
1804 mac->ops.flap_tx_laser = NULL;
1805 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
1806 mac->ops.set_rate_select_speed =
1807 ixgbe_set_soft_rate_select_speed;
1808
1809 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
1810 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
1811 mac->ops.setup_mac_link =
1812 ixgbe_setup_mac_link_sfp_x550a;
1813 else
1814 mac->ops.setup_mac_link =
1815 ixgbe_setup_mac_link_sfp_x550em;
1816 break;
1817 case ixgbe_media_type_copper:
1818 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
1819 break;
1820 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1821 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
1822 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
1823 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
1824 mac->ops.check_link =
1825 ixgbe_check_mac_link_generic;
1826 } else {
1827 mac->ops.setup_link =
1828 ixgbe_setup_mac_link_t_X550em;
1829 }
1830 } else {
1831 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
1832 mac->ops.check_link = ixgbe_check_link_t_X550em;
1833 }
1834 break;
1835 case ixgbe_media_type_backplane:
1836 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
1837 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
1838 mac->ops.setup_link = ixgbe_setup_sgmii;
1839 break;
1840 default:
1841 break;
1842 }
1843 }
1844
1845 /**
1846 * ixgbe_get_link_capabilities_X550em - Determines link capabilities
1847 * @hw: pointer to hardware structure
1848 * @speed: pointer to link speed
1849 * @autoneg: true when autoneg or autotry is enabled
1850 */
1851 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
1852 ixgbe_link_speed *speed,
1853 bool *autoneg)
1854 {
1855 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
1856
1857
1858 if (hw->phy.type == ixgbe_phy_fw) {
1859 *autoneg = true;
1860 *speed = hw->phy.speeds_supported;
1861 return 0;
1862 }
1863
1864 /* SFP */
1865 if (hw->phy.media_type == ixgbe_media_type_fiber) {
1866
1867 /* CS4227 SFP must not enable auto-negotiation */
1868 *autoneg = false;
1869
1870 /* Check if 1G SFP module. */
1871 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1872 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
1873 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core0 ||
1874 hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core1
1875 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1876 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
1877 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1878 return IXGBE_SUCCESS;
1879 }
1880
1881 /* Link capabilities are based on SFP */
1882 if (hw->phy.multispeed_fiber)
1883 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1884 IXGBE_LINK_SPEED_1GB_FULL;
1885 else
1886 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1887 } else {
1888 *autoneg = true;
1889
1890 switch (hw->phy.type) {
1891 case ixgbe_phy_x550em_xfi:
1892 *speed = IXGBE_LINK_SPEED_1GB_FULL |
1893 IXGBE_LINK_SPEED_10GB_FULL;
1894 *autoneg = false;
1895 break;
1896 case ixgbe_phy_ext_1g_t:
1897 case ixgbe_phy_sgmii:
1898 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1899 break;
1900 case ixgbe_phy_x550em_kr:
1901 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1902 /* check different backplane modes */
1903 if (hw->phy.nw_mng_if_sel &
1904 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
1905 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
1906 break;
1907 } else if (hw->device_id ==
1908 IXGBE_DEV_ID_X550EM_A_KR_L) {
1909 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1910 break;
1911 }
1912 }
1913 /* fall through */
1914 default:
1915 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1916 IXGBE_LINK_SPEED_1GB_FULL;
1917 break;
1918 }
1919 }
1920
1921 return IXGBE_SUCCESS;
1922 }
1923
1924 /**
1925 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
1926 * @hw: pointer to hardware structure
1927 * @lsc: pointer to boolean flag which indicates whether external Base T
1928 * PHY interrupt is lsc
1929 *
1930 * Determine if external Base T PHY interrupt cause is high temperature
1931 * failure alarm or link status change.
1932 *
1933 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
1934 * failure alarm, else return PHY access status.
1935 */
1936 STATIC s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
1937 {
1938 u32 status;
1939 u16 reg;
1940
1941 *lsc = false;
1942
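/* The checks below walk the alarm hierarchy from the chip-wide standard flag
 * down to the specific sources: chip vendor flag, global alarm 1 (high
 * temperature / device fault), then standard alarm 2 for the link status
 * change indication, returning early whenever a level reports nothing.
 */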
1943 /* Vendor alarm triggered */
1944 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
1945 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1946 &reg);
1947
1948 if (status != IXGBE_SUCCESS ||
1949 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
1950 return status;
1951
1952 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
1953 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
1954 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1955 &reg);
1956
1957 if (status != IXGBE_SUCCESS ||
1958 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
1959 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
1960 return status;
1961
1962 /* Global alarm triggered */
1963 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
1964 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1965 &reg);
1966
1967 if (status != IXGBE_SUCCESS)
1968 return status;
1969
1970 /* If high temperature failure, then return over temp error and exit */
1971 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
1972 /* power down the PHY in case the PHY FW didn't already */
1973 ixgbe_set_copper_phy_power(hw, false);
1974 return IXGBE_ERR_OVERTEMP;
1975 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
1976 /* device fault alarm triggered */
1977 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
1978 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1979 &reg);
1980
1981 if (status != IXGBE_SUCCESS)
1982 return status;
1983
1984 /* if device fault was due to high temp alarm handle and exit */
1985 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
1986 /* power down the PHY in case the PHY FW didn't */
1987 ixgbe_set_copper_phy_power(hw, false);
1988 return IXGBE_ERR_OVERTEMP;
1989 }
1990 }
1991
1992 /* Vendor alarm 2 triggered */
1993 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
1994 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
1995
1996 if (status != IXGBE_SUCCESS ||
1997 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
1998 return status;
1999
2000 /* link connect/disconnect event occurred */
2001 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2002 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2003
2004 if (status != IXGBE_SUCCESS)
2005 return status;
2006
2007 /* Indicate LSC */
2008 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2009 *lsc = true;
2010
2011 return IXGBE_SUCCESS;
2012 }
2013
2014 /**
2015 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2016 * @hw: pointer to hardware structure
2017 *
2018 * Enable link status change and temperature failure alarm for the external
2019 * Base T PHY
2020 *
2021 * Returns PHY access status
2022 */
2023 STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2024 {
2025 u32 status;
2026 u16 reg;
2027 bool lsc;
2028
2029 /* Clear interrupt flags */
2030 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2031
2032 /* Enable link status change alarm */
2033
2034 /* Enable the LASI interrupts on X552 devices to receive notifications
2035 * of the link configuration of the external PHY and, in turn, support
2036 * configuration of the internal iXFI link, since iXFI does not support
2037 * auto-negotiation. This is not required for X553 devices, whose KR
2038 * internal link to the external PHY does perform auto-negotiation.
2039 * Hence the check here to avoid enabling LASI interrupts on X553
2040 * devices.
2041 */
2042 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2043 status = hw->phy.ops.read_reg(hw,
2044 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2045 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2046
2047 if (status != IXGBE_SUCCESS)
2048 return status;
2049
2050 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2051
2052 status = hw->phy.ops.write_reg(hw,
2053 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2054 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2055
2056 if (status != IXGBE_SUCCESS)
2057 return status;
2058 }
2059
2060 /* Enable high temperature failure and global fault alarms */
2061 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2062 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2063 &reg);
2064
2065 if (status != IXGBE_SUCCESS)
2066 return status;
2067
2068 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2069 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2070
2071 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2072 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2073 reg);
2074
2075 if (status != IXGBE_SUCCESS)
2076 return status;
2077
2078 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2079 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2080 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2081 &reg);
2082
2083 if (status != IXGBE_SUCCESS)
2084 return status;
2085
2086 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2087 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2088
2089 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2090 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2091 reg);
2092
2093 if (status != IXGBE_SUCCESS)
2094 return status;
2095
2096 /* Enable chip-wide vendor alarm */
2097 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2098 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2099 &reg);
2100
2101 if (status != IXGBE_SUCCESS)
2102 return status;
2103
2104 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2105
2106 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2107 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2108 reg);
2109
2110 return status;
2111 }
2112
2113 /**
2114 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2115 * @hw: pointer to hardware structure
2116 * @speed: link speed
2117 *
2118 * Configures the integrated KR PHY.
2119 **/
2120 STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2121 ixgbe_link_speed speed)
2122 {
2123 s32 status;
2124 u32 reg_val;
2125
2126 status = hw->mac.ops.read_iosf_sb_reg(hw,
2127 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2128 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2129 if (status)
2130 return status;
2131
2132 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2133 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2134 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2135
2136 /* Advertise 10G support. */
2137 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2138 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2139
2140 /* Advertise 1G support. */
2141 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2142 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2143
2144 status = hw->mac.ops.write_iosf_sb_reg(hw,
2145 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2146 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2147
2148 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2149 /* Set lane mode to KR auto negotiation */
2150 status = hw->mac.ops.read_iosf_sb_reg(hw,
2151 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2152 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2153
2154 if (status)
2155 return status;
2156
2157 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2158 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2159 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2160 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2161 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2162
2163 status = hw->mac.ops.write_iosf_sb_reg(hw,
2164 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2165 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2166 }
2167
2168 return ixgbe_restart_an_internal_phy_x550em(hw);
2169 }
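/* Illustrative call: passing IXGBE_LINK_SPEED_10GB_FULL |
 * IXGBE_LINK_SPEED_1GB_FULL (as ixgbe_setup_internal_phy_t_x550em() does for
 * the KR case) advertises both AN_CAP_KR and AN_CAP_KX before restarting
 * auto-negotiation; passing a single speed advertises only that capability.
 */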
2170
2171 /**
2172 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2173 * @hw: pointer to hardware structure
2174 */
2175 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2176 {
2177 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2178 s32 rc;
2179
2180 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2181 return IXGBE_SUCCESS;
2182
2183 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2184 if (rc)
2185 return rc;
2186 memset(store, 0, sizeof(store));
2187
2188 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2189 if (rc)
2190 return rc;
2191
2192 return ixgbe_setup_fw_link(hw);
2193 }
2194
2195 /**
2196 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2197 * @hw: pointer to hardware structure
2198 */
2199 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2200 {
2201 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2202 s32 rc;
2203
2204 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2205 if (rc)
2206 return rc;
2207
2208 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2209 ixgbe_shutdown_fw_phy(hw);
2210 return IXGBE_ERR_OVERTEMP;
2211 }
2212 return IXGBE_SUCCESS;
2213 }
2214
2215 /**
2216 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2217 * @hw: pointer to hardware structure
2218 *
2219 * Read the NW_MNG_IF_SEL register, save the relevant field values, and check
2220 * that they are valid.
2221 **/
2222 STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2223 {
2224 /* Save NW management interface connected on board. This is used
2225 * to determine internal PHY mode.
2226 */
2227 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2228
2229 /* If X550EM_a and MDIO is connected to an external PHY, then set the
2230 * PHY address. This register field has only been used on X550EM_a devices.
2231 */
2232 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2233 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2234 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2235 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2236 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2237 }
2238
2239 return IXGBE_SUCCESS;
2240 }
2241
2242 /**
2243 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2244 * @hw: pointer to hardware structure
2245 *
2246 * Initialize any function pointers that were not able to be
2247 * set during init_shared_code because the PHY/SFP type was
2248 * not known. Perform the SFP init if necessary.
2249 */
2250 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2251 {
2252 struct ixgbe_phy_info *phy = &hw->phy;
2253 s32 ret_val;
2254
2255 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2256
2257 hw->mac.ops.set_lan_id(hw);
2258 ixgbe_read_mng_if_sel_x550em(hw);
2259
2260 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2261 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2262 ixgbe_setup_mux_ctl(hw);
2263 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2264 }
2265
2266 switch (hw->device_id) {
2267 case IXGBE_DEV_ID_X550EM_A_1G_T:
2268 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2269 phy->ops.read_reg_mdi = NULL;
2270 phy->ops.write_reg_mdi = NULL;
2271 hw->phy.ops.read_reg = NULL;
2272 hw->phy.ops.write_reg = NULL;
2273 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2274 if (hw->bus.lan_id)
2275 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2276 else
2277 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2278
2279 break;
2280 case IXGBE_DEV_ID_X550EM_A_10G_T:
2281 case IXGBE_DEV_ID_X550EM_A_SFP:
2282 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2283 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2284 if (hw->bus.lan_id)
2285 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2286 else
2287 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2288 break;
2289 case IXGBE_DEV_ID_X550EM_X_SFP:
2290 /* set up for CS4227 usage */
2291 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2292 break;
2293 case IXGBE_DEV_ID_X550EM_X_1G_T:
2294 phy->ops.read_reg_mdi = NULL;
2295 phy->ops.write_reg_mdi = NULL;
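/* fall through */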
2296 default:
2297 break;
2298 }
2299
2300 /* Identify the PHY or SFP module */
2301 ret_val = phy->ops.identify(hw);
2302 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2303 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2304 return ret_val;
2305
2306 /* Setup function pointers based on detected hardware */
2307 ixgbe_init_mac_link_ops_X550em(hw);
2308 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2309 phy->ops.reset = NULL;
2310
2311 /* Set functions pointers based on phy type */
2312 switch (hw->phy.type) {
2313 case ixgbe_phy_x550em_kx4:
2314 phy->ops.setup_link = NULL;
2315 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2316 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2317 break;
2318 case ixgbe_phy_x550em_kr:
2319 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2320 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2321 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2322 break;
2323 case ixgbe_phy_ext_1g_t:
2324 /* link is managed by FW */
2325 phy->ops.setup_link = NULL;
2326 phy->ops.reset = NULL;
2327 break;
2328 case ixgbe_phy_x550em_xfi:
2329 /* link is managed by HW */
2330 phy->ops.setup_link = NULL;
2331 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2332 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2333 break;
2334 case ixgbe_phy_x550em_ext_t:
2335 /* If internal link mode is XFI, then setup iXFI internal link,
2336 * else setup KR now.
2337 */
2338 phy->ops.setup_internal_link =
2339 ixgbe_setup_internal_phy_t_x550em;
2340
2341 /* setup SW LPLU only for first revision of X550EM_x */
2342 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2343 !(IXGBE_FUSES0_REV_MASK &
2344 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2345 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2346
2347 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2348 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2349 break;
2350 case ixgbe_phy_sgmii:
2351 phy->ops.setup_link = NULL;
2352 break;
2353 case ixgbe_phy_fw:
2354 phy->ops.setup_link = ixgbe_setup_fw_link;
2355 phy->ops.reset = ixgbe_reset_phy_fw;
2356 break;
2357 default:
2358 break;
2359 }
2360 return ret_val;
2361 }
2362
2363 /**
2364 * ixgbe_set_mdio_speed - Set MDIO clock speed
2365 * @hw: pointer to hardware structure
2366 */
2367 STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2368 {
2369 u32 hlreg0;
2370
2371 switch (hw->device_id) {
2372 case IXGBE_DEV_ID_X550EM_X_10G_T:
2373 case IXGBE_DEV_ID_X550EM_A_SGMII:
2374 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2375 case IXGBE_DEV_ID_X550EM_A_10G_T:
2376 case IXGBE_DEV_ID_X550EM_A_SFP:
2377 case IXGBE_DEV_ID_X550EM_A_QSFP:
2378 /* Config MDIO clock speed before the first MDIO PHY access */
2379 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2380 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2381 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2382 break;
2383 case IXGBE_DEV_ID_X550EM_A_1G_T:
2384 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2385 /* Select fast MDIO clock speed for these devices */
2386 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2387 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2388 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2389 break;
2390 default:
2391 break;
2392 }
2393 }
2394
2395 /**
2396 * ixgbe_reset_hw_X550em - Perform hardware reset
2397 * @hw: pointer to hardware structure
2398 *
2399 * Resets the hardware by resetting the transmit and receive units, masks
2400 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2401 * reset.
2402 */
2403 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2404 {
2405 ixgbe_link_speed link_speed;
2406 s32 status;
2407 u32 ctrl = 0;
2408 u32 i;
2409 bool link_up = false;
2410 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2411
2412 DEBUGFUNC("ixgbe_reset_hw_X550em");
2413
2414 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2415 status = hw->mac.ops.stop_adapter(hw);
2416 if (status != IXGBE_SUCCESS) {
2417 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2418 return status;
2419 }
2420 /* flush pending Tx transactions */
2421 ixgbe_clear_tx_pending(hw);
2422
2423 ixgbe_set_mdio_speed(hw);
2424
2425 /* PHY ops must be identified and initialized prior to reset */
2426 status = hw->phy.ops.init(hw);
2427
2428 if (status)
2429 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2430 status);
2431
2432 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2433 status == IXGBE_ERR_PHY_ADDR_INVALID) {
2434 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2435 return status;
2436 }
2437
2438 /* start the external PHY */
2439 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2440 status = ixgbe_init_ext_t_x550em(hw);
2441 if (status) {
2442 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2443 status);
2444 return status;
2445 }
2446 }
2447
2448 /* Setup SFP module if there is one present. */
2449 if (hw->phy.sfp_setup_needed) {
2450 status = hw->mac.ops.setup_sfp(hw);
2451 hw->phy.sfp_setup_needed = false;
2452 }
2453
2454 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2455 return status;
2456
2457 /* Reset PHY */
2458 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2459 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2460 return IXGBE_ERR_OVERTEMP;
2461 }
2462
2463 mac_reset_top:
2464 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2465 * If link reset is used when link is up, it might reset the PHY when
2466 * mng is using it. If link is down or the flag to force full link
2467 * reset is set, then perform link reset.
2468 */
2469 ctrl = IXGBE_CTRL_LNK_RST;
2470 if (!hw->force_full_reset) {
2471 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2472 if (link_up)
2473 ctrl = IXGBE_CTRL_RST;
2474 }
2475
2476 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2477 if (status != IXGBE_SUCCESS) {
2478 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2479 "semaphore failed with %d", status);
2480 return IXGBE_ERR_SWFW_SYNC;
2481 }
2482 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2483 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2484 IXGBE_WRITE_FLUSH(hw);
2485 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2486
2487 /* Poll for reset bit to self-clear meaning reset is complete */
2488 for (i = 0; i < 10; i++) {
2489 usec_delay(1);
2490 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2491 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2492 break;
2493 }
2494
2495 if (ctrl & IXGBE_CTRL_RST_MASK) {
2496 status = IXGBE_ERR_RESET_FAILED;
2497 DEBUGOUT("Reset polling failed to complete.\n");
2498 }
2499
2500 msec_delay(50);
2501
2502 /* Double resets are required for recovery from certain error
2503 * conditions. Between resets, it is necessary to stall to
2504 * allow time for any pending HW events to complete.
2505 */
2506 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2507 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2508 goto mac_reset_top;
2509 }
2510
2511 /* Store the permanent mac address */
2512 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2513
2514 /* Store MAC address from RAR0, clear receive address registers, and
2515 * clear the multicast table. Also reset num_rar_entries to 128,
2516 * since we modify this value when programming the SAN MAC address.
2517 */
2518 hw->mac.num_rar_entries = 128;
2519 hw->mac.ops.init_rx_addrs(hw);
2520
2521 ixgbe_set_mdio_speed(hw);
2522
2523 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2524 ixgbe_setup_mux_ctl(hw);
2525
2526 if (status != IXGBE_SUCCESS)
2527 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2528
2529 return status;
2530 }
2531
2532 /**
2533 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2534 * @hw: pointer to hardware structure
2535 */
2536 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2537 {
2538 u32 status;
2539 u16 reg;
2540
2541 status = hw->phy.ops.read_reg(hw,
2542 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2543 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2544 &reg);
2545
2546 if (status != IXGBE_SUCCESS)
2547 return status;
2548
2549 /* If the PHY FW reset-completed bit is set, then this is the first
2550 * SW instance after a power-on, so the PHY FW must be un-stalled.
2551 */
2552 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2553 status = hw->phy.ops.read_reg(hw,
2554 IXGBE_MDIO_GLOBAL_RES_PR_10,
2555 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2556 &reg);
2557
2558 if (status != IXGBE_SUCCESS)
2559 return status;
2560
2561 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2562
2563 status = hw->phy.ops.write_reg(hw,
2564 IXGBE_MDIO_GLOBAL_RES_PR_10,
2565 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2566 reg);
2567
2568 if (status != IXGBE_SUCCESS)
2569 return status;
2570 }
2571
2572 return status;
2573 }
2574
2575 /**
2576 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2577 * @hw: pointer to hardware structure
2578 **/
2579 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2580 {
2581 /* leave link alone for 2.5G */
2582 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2583 return IXGBE_SUCCESS;
2584
2585 if (ixgbe_check_reset_blocked(hw))
2586 return 0;
2587
2588 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2589 }
2590
2591 /**
2592 * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2593 * @hw: pointer to hardware structure
2594 * @speed: new link speed
2595 * @autoneg_wait_to_complete: unused
2596 *
2597 * Configure the external PHY and the integrated KR PHY for SFP support.
2598 **/
2599 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2600 ixgbe_link_speed speed,
2601 bool autoneg_wait_to_complete)
2602 {
2603 s32 ret_val;
2604 u16 reg_slice, reg_val;
2605 bool setup_linear = false;
2606 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2607
2608 /* Check if SFP module is supported and linear */
2609 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2610
2611 /* If no SFP module is present, return success: there is no reason to
2612 * configure the CS4227, and an SFP-not-present condition is not treated
2613 * as an error in the setup MAC link flow.
2614 */
2615 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2616 return IXGBE_SUCCESS;
2617
2618 if (ret_val != IXGBE_SUCCESS)
2619 return ret_val;
2620
2621 /* Configure internal PHY for KR/KX. */
2622 ixgbe_setup_kr_speed_x550em(hw, speed);
2623
2624 /* Configure CS4227 LINE side to proper mode. */
2625 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2626 (hw->bus.lan_id << 12);
2627 if (setup_linear)
2628 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2629 else
2630 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2631 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2632 reg_val);
2633 return ret_val;
2634 }
2635
2636 /**
2637 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2638 * @hw: pointer to hardware structure
2639 * @speed: the link speed to force
2640 *
2641 * Configures the integrated PHY for native SFI mode. Used to connect the
2642 * internal PHY directly to an SFP cage, without autonegotiation.
2643 **/
2644 STATIC s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2645 {
2646 struct ixgbe_mac_info *mac = &hw->mac;
2647 s32 status;
2648 u32 reg_val;
2649
2650 /* Disable all AN and force speed to 10G Serial. */
2651 status = mac->ops.read_iosf_sb_reg(hw,
2652 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2653 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2654 if (status != IXGBE_SUCCESS)
2655 return status;
2656
2657 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2658 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2659 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2660 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2661
2662 /* Select forced link speed for internal PHY. */
2663 switch (*speed) {
2664 case IXGBE_LINK_SPEED_10GB_FULL:
2665 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2666 break;
2667 case IXGBE_LINK_SPEED_1GB_FULL:
2668 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2669 break;
2670 default:
2671 /* Other link speeds are not supported by internal PHY. */
2672 return IXGBE_ERR_LINK_SETUP;
2673 }
2674
2675 status = mac->ops.write_iosf_sb_reg(hw,
2676 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2677 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2678
2679 /* Toggle port SW reset by AN reset. */
2680 status = ixgbe_restart_an_internal_phy_x550em(hw);
2681
2682 return status;
2683 }
2684
2685 /**
2686 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2687 * @hw: pointer to hardware structure
2688 * @speed: new link speed
2689 * @autoneg_wait_to_complete: unused
2690 *
2691 * Configure the integrated PHY for SFP support.
2692 **/
2693 s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2694 ixgbe_link_speed speed,
2695 bool autoneg_wait_to_complete)
2696 {
2697 s32 ret_val;
2698 u16 reg_phy_ext;
2699 bool setup_linear = false;
2700 u32 reg_slice, reg_phy_int, slice_offset;
2701
2702 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2703
2704 /* Check if SFP module is supported and linear */
2705 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2706
2707 /* If no SFP module is present, return success: an SFP-not-present
2708 * condition is not treated as an error in the setup MAC link flow.
2709 */
2710 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2711 return IXGBE_SUCCESS;
2712
2713 if (ret_val != IXGBE_SUCCESS)
2714 return ret_val;
2715
2716 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2717 /* Configure internal PHY for native SFI based on module type */
2718 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2719 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2720 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2721
2722 if (ret_val != IXGBE_SUCCESS)
2723 return ret_val;
2724
2725 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2726 if (!setup_linear)
2727 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2728
2729 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2730 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2731 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2732
2733 if (ret_val != IXGBE_SUCCESS)
2734 return ret_val;
2735
2736 /* Setup SFI internal link. */
2737 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2738 } else {
2739 /* Configure internal PHY for KR/KX. */
2740 ixgbe_setup_kr_speed_x550em(hw, speed);
2741
2742 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2743 /* Find Address */
2744 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2745 return IXGBE_ERR_PHY_ADDR_INVALID;
2746 }
2747
2748 /* Get external PHY SKU id */
2749 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2750 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2751
2752 if (ret_val != IXGBE_SUCCESS)
2753 return ret_val;
2754
2755 /* When configuring quad port CS4223, the MAC instance is part
2756 * of the slice offset.
2757 */
2758 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2759 slice_offset = (hw->bus.lan_id +
2760 (hw->bus.instance_id << 1)) << 12;
2761 else
2762 slice_offset = hw->bus.lan_id << 12;
2763
2764 /* Configure CS4227/CS4223 LINE side to proper mode. */
2765 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2766
2767 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2768 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2769
2770 if (ret_val != IXGBE_SUCCESS)
2771 return ret_val;
2772
2773 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2774 (IXGBE_CS4227_EDC_MODE_SR << 1));
2775
2776 if (setup_linear)
2777 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2778 else
2779 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2780 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2781 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2782
2783 /* Flush previous write with a read */
2784 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2785 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2786 }
2787 return ret_val;
2788 }
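/* Illustrative CS4223 slice addressing (values hypothetical): on a quad port
 * part (IXGBE_CS4223_SKU_ID) with bus.instance_id == 1 and bus.lan_id == 0,
 * slice_offset = (0 + (1 << 1)) << 12 = 0x2000, so the LINE side register
 * written is IXGBE_CS4227_LINE_SPARE24_LSB + 0x2000. Otherwise only lan_id
 * selects the slice.
 */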
2789
2790 /**
2791 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2792 * @hw: pointer to hardware structure
2793 *
2794 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
2795 **/
2796 STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
2797 {
2798 struct ixgbe_mac_info *mac = &hw->mac;
2799 s32 status;
2800 u32 reg_val;
2801
2802 /* Disable training protocol FSM. */
2803 status = mac->ops.read_iosf_sb_reg(hw,
2804 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2805 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2806 if (status != IXGBE_SUCCESS)
2807 return status;
2808 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
2809 status = mac->ops.write_iosf_sb_reg(hw,
2810 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2811 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2812 if (status != IXGBE_SUCCESS)
2813 return status;
2814
2815 /* Disable Flex from training TXFFE. */
2816 status = mac->ops.read_iosf_sb_reg(hw,
2817 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
2818 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2819 if (status != IXGBE_SUCCESS)
2820 return status;
2821 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
2822 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
2823 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
2824 status = mac->ops.write_iosf_sb_reg(hw,
2825 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
2826 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2827 if (status != IXGBE_SUCCESS)
2828 return status;
2829 status = mac->ops.read_iosf_sb_reg(hw,
2830 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
2831 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2832 if (status != IXGBE_SUCCESS)
2833 return status;
2834 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
2835 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
2836 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
2837 status = mac->ops.write_iosf_sb_reg(hw,
2838 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
2839 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2840 if (status != IXGBE_SUCCESS)
2841 return status;
2842
2843 /* Enable override for coefficients. */
2844 status = mac->ops.read_iosf_sb_reg(hw,
2845 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
2846 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2847 if (status != IXGBE_SUCCESS)
2848 return status;
2849 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
2850 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
2851 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
2852 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
2853 status = mac->ops.write_iosf_sb_reg(hw,
2854 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
2855 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2856 return status;
2857 }
2858
2859 /**
2860 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
2861 * @hw: pointer to hardware structure
2862 * @speed: the link speed to force
2863 *
2864 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
2865 * internal and external PHY at a specific speed, without autonegotiation.
2866 **/
2867 STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2868 {
2869 struct ixgbe_mac_info *mac = &hw->mac;
2870 s32 status;
2871 u32 reg_val;
2872
2873 /* iXFI is only supported with X552 */
2874 if (mac->type != ixgbe_mac_X550EM_x)
2875 return IXGBE_ERR_LINK_SETUP;
2876
2877 /* Disable AN and force speed to 10G Serial. */
2878 status = mac->ops.read_iosf_sb_reg(hw,
2879 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2880 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2881 if (status != IXGBE_SUCCESS)
2882 return status;
2883
2884 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2885 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
2886
2887 /* Select forced link speed for internal PHY. */
2888 switch (*speed) {
2889 case IXGBE_LINK_SPEED_10GB_FULL:
2890 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
2891 break;
2892 case IXGBE_LINK_SPEED_1GB_FULL:
2893 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
2894 break;
2895 default:
2896 /* Other link speeds are not supported by internal KR PHY. */
2897 return IXGBE_ERR_LINK_SETUP;
2898 }
2899
2900 status = mac->ops.write_iosf_sb_reg(hw,
2901 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2902 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2903 if (status != IXGBE_SUCCESS)
2904 return status;
2905
2906 /* Additional configuration needed for x550em_x */
2907 if (hw->mac.type == ixgbe_mac_X550EM_x) {
2908 status = ixgbe_setup_ixfi_x550em_x(hw);
2909 if (status != IXGBE_SUCCESS)
2910 return status;
2911 }
2912
2913 /* Toggle port SW reset by AN reset. */
2914 status = ixgbe_restart_an_internal_phy_x550em(hw);
2915
2916 return status;
2917 }
2918
2919 /**
2920 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
2921 * @hw: address of hardware structure
2922 * @link_up: address of boolean to indicate link status
2923 *
2924 * Returns error code if unable to get link status.
2925 */
2926 STATIC s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
2927 {
2928 u32 ret;
2929 u16 autoneg_status;
2930
2931 *link_up = false;
2932
2933 /* read twice back to back; the link status bit is latched, so the second read reflects the current state */
2934 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
2935 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2936 &autoneg_status);
2937 if (ret != IXGBE_SUCCESS)
2938 return ret;
2939
2940 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
2941 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2942 &autoneg_status);
2943 if (ret != IXGBE_SUCCESS)
2944 return ret;
2945
2946 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
2947
2948 return IXGBE_SUCCESS;
2949 }
2950
2951 /**
2952 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
2953 * @hw: pointer to hardware structure
2954 *
2955 * Configures the link between the integrated KR PHY and the external X557 PHY
2956 * The driver will call this function when it gets a link status change
2957 * interrupt from the X557 PHY. This function configures the link speed
2958 * between the PHYs to match the link speed of the BASE-T link.
2959 *
2960 * A return of a non-zero value indicates an error, and the base driver should
2961 * not report link up.
2962 */
2963 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
2964 {
2965 ixgbe_link_speed force_speed;
2966 bool link_up;
2967 u32 status;
2968 u16 speed;
2969
2970 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2971 return IXGBE_ERR_CONFIG;
2972
2973 if (hw->mac.type == ixgbe_mac_X550EM_x &&
2974 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
2975 /* If link is down, there is no setup necessary so return */
2976 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2977 if (status != IXGBE_SUCCESS)
2978 return status;
2979
2980 if (!link_up)
2981 return IXGBE_SUCCESS;
2982
2983 status = hw->phy.ops.read_reg(hw,
2984 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
2985 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2986 &speed);
2987 if (status != IXGBE_SUCCESS)
2988 return status;
2989
2990 /* If link is still down - no setup is required so return */
2991 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2992 if (status != IXGBE_SUCCESS)
2993 return status;
2994 if (!link_up)
2995 return IXGBE_SUCCESS;
2996
2997 /* clear everything but the speed and duplex bits */
2998 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
2999
3000 switch (speed) {
3001 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3002 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3003 break;
3004 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3005 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3006 break;
3007 default:
3008 /* Internal PHY does not support anything else */
3009 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3010 }
3011
3012 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3013 } else {
3014 speed = IXGBE_LINK_SPEED_10GB_FULL |
3015 IXGBE_LINK_SPEED_1GB_FULL;
3016 return ixgbe_setup_kr_speed_x550em(hw, speed);
3017 }
3018 }
3019
3020 /**
3021 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3022 * @hw: pointer to hardware structure
3023 *
3024 * Configures the integrated KR PHY to use internal loopback mode.
3025 **/
3026 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3027 {
3028 s32 status;
3029 u32 reg_val;
3030
3031 /* Disable AN and force speed to 10G Serial. */
3032 status = hw->mac.ops.read_iosf_sb_reg(hw,
3033 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3034 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3035 if (status != IXGBE_SUCCESS)
3036 return status;
3037 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3038 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3039 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3040 status = hw->mac.ops.write_iosf_sb_reg(hw,
3041 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3042 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3043 if (status != IXGBE_SUCCESS)
3044 return status;
3045
3046 /* Set near-end loopback clocks. */
3047 status = hw->mac.ops.read_iosf_sb_reg(hw,
3048 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3049 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3050 if (status != IXGBE_SUCCESS)
3051 return status;
3052 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3053 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3054 status = hw->mac.ops.write_iosf_sb_reg(hw,
3055 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3056 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3057 if (status != IXGBE_SUCCESS)
3058 return status;
3059
3060 /* Set loopback enable. */
3061 status = hw->mac.ops.read_iosf_sb_reg(hw,
3062 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3063 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3064 if (status != IXGBE_SUCCESS)
3065 return status;
3066 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3067 status = hw->mac.ops.write_iosf_sb_reg(hw,
3068 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3069 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3070 if (status != IXGBE_SUCCESS)
3071 return status;
3072
3073 /* Training bypass. */
3074 status = hw->mac.ops.read_iosf_sb_reg(hw,
3075 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3076 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3077 if (status != IXGBE_SUCCESS)
3078 return status;
3079 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3080 status = hw->mac.ops.write_iosf_sb_reg(hw,
3081 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3082 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3083
3084 return status;
3085 }
3086
3087 /**
3088 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
3089 * The SW/FW semaphore is acquired and released within this function.
3090 * @hw: pointer to hardware structure
3091 * @offset: offset of word in the EEPROM to read
3092 * @data: word read from the EEPROM
3093 *
3094 * Reads a 16 bit word from the EEPROM using the hostif.
3095 **/
3096 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3097 {
3098 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3099 struct ixgbe_hic_read_shadow_ram buffer;
3100 s32 status;
3101
3102 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3103 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3104 buffer.hdr.req.buf_lenh = 0;
3105 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3106 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3107
3108 /* convert offset from words to bytes */
3109 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3110 /* one word */
3111 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3112 buffer.pad2 = 0;
3113 buffer.data = 0;
3114 buffer.pad3 = 0;
3115
3116 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3117 if (status)
3118 return status;
3119
3120 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3121 IXGBE_HI_COMMAND_TIMEOUT);
3122 if (!status) {
3123 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3124 FW_NVM_DATA_OFFSET);
3125 }
3126
3127 hw->mac.ops.release_swfw_sync(hw, mask);
3128 return status;
3129 }
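/* Usage sketch (offset value hypothetical): read one shadow RAM word.
 *
 *	u16 word;
 *	s32 err = ixgbe_read_ee_hostif_X550(hw, 0x10, &word);
 *
 * The offset is in 16-bit words; it is converted to a byte address before
 * FW_READ_SHADOW_RAM_CMD is issued over the host interface.
 */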
3130
3131 /**
3132 * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3133 * @hw: pointer to hardware structure
3134 * @offset: offset of word in the EEPROM to read
3135 * @words: number of words
3136 * @data: word(s) read from the EEPROM
3137 *
3138 * Reads 16-bit word(s) from the EEPROM using the hostif.
3139 **/
3140 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3141 u16 offset, u16 words, u16 *data)
3142 {
3143 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3144 struct ixgbe_hic_read_shadow_ram buffer;
3145 u32 current_word = 0;
3146 u16 words_to_read;
3147 s32 status;
3148 u32 i;
3149
3150 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3151
3152 /* Take semaphore for the entire operation. */
3153 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3154 if (status) {
3155 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3156 return status;
3157 }
3158
3159 while (words) {
3160 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3161 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3162 else
3163 words_to_read = words;
3164
3165 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3166 buffer.hdr.req.buf_lenh = 0;
3167 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3168 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3169
3170 /* convert offset from words to bytes */
3171 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3172 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3173 buffer.pad2 = 0;
3174 buffer.data = 0;
3175 buffer.pad3 = 0;
3176
3177 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3178 IXGBE_HI_COMMAND_TIMEOUT);
3179
3180 if (status) {
3181 DEBUGOUT("Host interface command failed\n");
3182 goto out;
3183 }
3184
3185 for (i = 0; i < words_to_read; i++) {
3186 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3187 2 * i;
3188 u32 value = IXGBE_READ_REG(hw, reg);
3189
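/* Each 32-bit FLEX_MNG read returns two 16-bit EEPROM words: store the
 * low half now and, if more words remain in this chunk, the high half as
 * well (hence the extra i++ below).
 */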
3190 data[current_word] = (u16)(value & 0xffff);
3191 current_word++;
3192 i++;
3193 if (i < words_to_read) {
3194 value >>= 16;
3195 data[current_word] = (u16)(value & 0xffff);
3196 current_word++;
3197 }
3198 }
3199 words -= words_to_read;
3200 }
3201
3202 out:
3203 hw->mac.ops.release_swfw_sync(hw, mask);
3204 return status;
3205 }
3206
3207 /**
3208 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3209 * @hw: pointer to hardware structure
3210 * @offset: offset of word in the EEPROM to write
3211 * @data: word to write to the EEPROM
3212 *
3213 * Write a 16 bit word to the EEPROM using the hostif.
3214 **/
3215 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3216 u16 data)
3217 {
3218 s32 status;
3219 struct ixgbe_hic_write_shadow_ram buffer;
3220
3221 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3222
3223 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3224 buffer.hdr.req.buf_lenh = 0;
3225 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3226 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3227
3228 /* one word */
3229 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3230 buffer.data = data;
3231 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3232
3233 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3234 sizeof(buffer),
3235 IXGBE_HI_COMMAND_TIMEOUT, true);
3236 if (status != IXGBE_SUCCESS) {
3237 DEBUGOUT2("for offset %04x failed with status %d\n",
3238 offset, status);
3239 return status;
3240 }
3241
3242 if (buffer.hdr.rsp.buf_lenh_status != FW_CEM_RESP_STATUS_SUCCESS) {
3243 DEBUGOUT2("for offset %04x host interface return status %02x\n",
3244 offset, buffer.hdr.rsp.buf_lenh_status);
3245 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3246 }
3247
3248 return status;
3249 }
3250
3251 /**
3252 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3253 * @hw: pointer to hardware structure
3254 * @offset: offset of word in the EEPROM to write
3255 * @data: word to write to the EEPROM
3256 *
3257 * Write a 16 bit word to the EEPROM using the hostif.
3258 **/
3259 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3260 u16 data)
3261 {
3262 s32 status = IXGBE_SUCCESS;
3263
3264 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3265
3266 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3267 IXGBE_SUCCESS) {
3268 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3269 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3270 } else {
3271 DEBUGOUT("write ee hostif failed to get semaphore");
3272 status = IXGBE_ERR_SWFW_SYNC;
3273 }
3274
3275 return status;
3276 }
3277
3278 /**
3279 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3280 * @hw: pointer to hardware structure
3281 * @offset: offset of word in the EEPROM to write
3282 * @words: number of words
3283 * @data: word(s) to write to the EEPROM
3284 *
3285 * Writes 16-bit word(s) to the EEPROM using the hostif.
3286 **/
3287 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3288 u16 offset, u16 words, u16 *data)
3289 {
3290 s32 status = IXGBE_SUCCESS;
3291 u32 i = 0;
3292
3293 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3294
3295 /* Take semaphore for the entire operation. */
3296 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3297 if (status != IXGBE_SUCCESS) {
3298 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3299 goto out;
3300 }
3301
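/* One host interface command per word; the EEP_SM semaphore acquired above
 * is held across the whole loop so the writes are not interleaved with
 * other software accesses to the interface.
 */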
3302 for (i = 0; i < words; i++) {
3303 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3304 data[i]);
3305
3306 if (status != IXGBE_SUCCESS) {
3307 DEBUGOUT("Eeprom buffered write failed\n");
3308 break;
3309 }
3310 }
3311
3312 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3313 out:
3314
3315 return status;
3316 }
3317
3318 /**
3319 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3320 * @hw: pointer to hardware structure
3321 * @ptr: pointer offset in eeprom
3322 * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
3323 * @csum: address of checksum to update
3324 * @buffer: pointer to a buffer holding the EEPROM image, or NULL to read from the EEPROM
3325 * @buffer_size: size of buffer
3326 *
3327 * Returns error status for any failure
3328 */
3329 STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3330 u16 size, u16 *csum, u16 *buffer,
3331 u32 buffer_size)
3332 {
3333 u16 buf[256];
3334 s32 status;
3335 u16 length, bufsz, i, start;
3336 u16 *local_buffer;
3337
3338 bufsz = sizeof(buf) / sizeof(buf[0]);
3339
3340 /* Read a chunk at the pointer location */
3341 if (!buffer) {
3342 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3343 if (status) {
3344 DEBUGOUT("Failed to read EEPROM image\n");
3345 return status;
3346 }
3347 local_buffer = buf;
3348 } else {
3349 if (buffer_size < ptr)
3350 return IXGBE_ERR_PARAM;
3351 local_buffer = &buffer[ptr];
3352 }
3353
3354 if (size) {
3355 start = 0;
3356 length = size;
3357 } else {
3358 start = 1;
3359 length = local_buffer[0];
3360
3361 /* Skip pointer section if length is invalid. */
3362 if (length == 0xFFFF || length == 0 ||
3363 (ptr + length) >= hw->eeprom.word_size)
3364 return IXGBE_SUCCESS;
3365 }
3366
3367 if (buffer && ((u32)start + (u32)length > buffer_size))
3368 return IXGBE_ERR_PARAM;
3369
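/* Sum the section word by word. When no caller buffer was supplied, buf[]
 * holds at most bufsz words, so it is refilled from the EEPROM each time
 * the index runs past the current chunk.
 */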
3370 for (i = start; length; i++, length--) {
3371 if (i == bufsz && !buffer) {
3372 ptr += bufsz;
3373 i = 0;
3374 if (length < bufsz)
3375 bufsz = length;
3376
3377 /* Read a chunk at the pointer location */
3378 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3379 bufsz, buf);
3380 if (status) {
3381 DEBUGOUT("Failed to read EEPROM image\n");
3382 return status;
3383 }
3384 }
3385 *csum += local_buffer[i];
3386 }
3387 return IXGBE_SUCCESS;
3388 }
3389
3390 /**
3391 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3392 * @hw: pointer to hardware structure
3393 * @buffer: pointer to a buffer holding the EEPROM image, or NULL to read from the EEPROM
3394 * @buffer_size: size of buffer
3395 *
3396 * Returns a negative error code on error, or the 16-bit checksum
3397 **/
3398 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3399 {
3400 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3401 u16 *local_buffer;
3402 s32 status;
3403 u16 checksum = 0;
3404 u16 pointer, i, size;
3405
3406 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3407
3408 hw->eeprom.ops.init_params(hw);
3409
3410 if (!buffer) {
3411 /* Read pointer area */
3412 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3413 IXGBE_EEPROM_LAST_WORD + 1,
3414 eeprom_ptrs);
3415 if (status) {
3416 DEBUGOUT("Failed to read EEPROM image\n");
3417 return status;
3418 }
3419 local_buffer = eeprom_ptrs;
3420 } else {
3421 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3422 return IXGBE_ERR_PARAM;
3423 local_buffer = buffer;
3424 }
3425
3426 /*
3427 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3428 * checksum word itself
3429 */
3430 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3431 if (i != IXGBE_EEPROM_CHECKSUM)
3432 checksum += local_buffer[i];
3433
3434 /*
3435 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3436 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3437 */
3438 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3439 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3440 continue;
3441
3442 pointer = local_buffer[i];
3443
3444 /* Skip pointer section if the pointer is invalid. */
3445 if (pointer == 0xFFFF || pointer == 0 ||
3446 pointer >= hw->eeprom.word_size)
3447 continue;
3448
3449 switch (i) {
3450 case IXGBE_PCIE_GENERAL_PTR:
3451 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3452 break;
3453 case IXGBE_PCIE_CONFIG0_PTR:
3454 case IXGBE_PCIE_CONFIG1_PTR:
3455 size = IXGBE_PCIE_CONFIG_SIZE;
3456 break;
3457 default:
3458 size = 0;
3459 break;
3460 }
3461
3462 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3463 buffer, buffer_size);
3464 if (status)
3465 return status;
3466 }
3467
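/* Store the complement so that the sum of the checksummed words plus the
 * checksum word itself equals IXGBE_EEPROM_SUM; validation recomputes this
 * value and compares it against the word stored at IXGBE_EEPROM_CHECKSUM.
 */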
3468 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3469
3470 return (s32)checksum;
3471 }
3472
3473 /**
3474 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3475 * @hw: pointer to hardware structure
3476 *
3477 * Returns a negative error code on error, or the 16-bit checksum
3478 **/
3479 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3480 {
3481 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3482 }
3483
3484 /**
3485 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3486 * @hw: pointer to hardware structure
3487 * @checksum_val: calculated checksum
3488 *
3489 * Performs checksum calculation and validates the EEPROM checksum. If the
3490 * caller does not need checksum_val, the value can be NULL.
3491 **/
3492 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3493 {
3494 s32 status;
3495 u16 checksum;
3496 u16 read_checksum = 0;
3497
3498 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3499
3500 /* Read the first word from the EEPROM. If this times out or fails, do
3501 * not continue or we could be in for a very long wait while every
3502 * EEPROM read fails
3503 */
3504 status = hw->eeprom.ops.read(hw, 0, &checksum);
3505 if (status) {
3506 DEBUGOUT("EEPROM read failed\n");
3507 return status;
3508 }
3509
3510 status = hw->eeprom.ops.calc_checksum(hw);
3511 if (status < 0)
3512 return status;
3513
3514 checksum = (u16)(status & 0xffff);
3515
3516 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3517 &read_checksum);
3518 if (status)
3519 return status;
3520
3521 /* Verify read checksum from EEPROM is the same as
3522 * calculated checksum
3523 */
3524 if (read_checksum != checksum) {
3525 status = IXGBE_ERR_EEPROM_CHECKSUM;
3526 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3527 "Invalid EEPROM checksum");
3528 }
3529
3530 /* If the user cares, return the calculated checksum */
3531 if (checksum_val)
3532 *checksum_val = checksum;
3533
3534 return status;
3535 }
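
/*
 * Example (not part of the original source): a minimal usage sketch of the
 * validation routine above, logging the computed value on a mismatch.  The
 * surrounding helper is hypothetical.
 */
static s32 ixgbe_example_check_nvm(struct ixgbe_hw *hw)
{
	u16 checksum;
	s32 status;

	/* checksum_val may be NULL if the caller does not need the value */
	status = ixgbe_validate_eeprom_checksum_X550(hw, &checksum);
	if (status == IXGBE_ERR_EEPROM_CHECKSUM)
		DEBUGOUT1("NVM checksum mismatch, calculated 0x%04x\n",
			  checksum);

	return status;
}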
3536
3537 /**
3538 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3539 * @hw: pointer to hardware structure
3540 *
3541 * After writing EEPROM to shadow RAM using EEWR register, software calculates
3542 * checksum and updates the EEPROM and instructs the hardware to update
3543 * the flash.
3544 **/
3545 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3546 {
3547 s32 status;
3548 u16 checksum = 0;
3549
3550 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3551
3552 /* Read the first word from the EEPROM. If this times out or fails, do
3553 * not continue or we could be in for a very long wait while every
3554 * EEPROM read fails
3555 */
3556 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3557 if (status) {
3558 DEBUGOUT("EEPROM read failed\n");
3559 return status;
3560 }
3561
3562 status = ixgbe_calc_eeprom_checksum_X550(hw);
3563 if (status < 0)
3564 return status;
3565
3566 checksum = (u16)(status & 0xffff);
3567
3568 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3569 checksum);
3570 if (status)
3571 return status;
3572
3573 status = ixgbe_update_flash_X550(hw);
3574
3575 return status;
3576 }
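
/*
 * Example (not part of the original source): the usual sequence after
 * modifying a shadow-RAM word - write the word, then recompute the checksum
 * and let firmware commit shadow RAM to flash.  The helper name and its
 * offset/data parameters are placeholders.
 */
static s32 ixgbe_example_write_nvm_word(struct ixgbe_hw *hw, u16 offset,
					u16 data)
{
	s32 status;

	status = ixgbe_write_ee_hostif_X550(hw, offset, data);
	if (status)
		return status;

	/* Recalculates the checksum, writes it, and triggers a flash update */
	return ixgbe_update_eeprom_checksum_X550(hw);
}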
3577
3578 /**
3579 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3580 * @hw: pointer to hardware structure
3581 *
3582 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3583 **/
3584 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3585 {
3586 s32 status = IXGBE_SUCCESS;
3587 union ixgbe_hic_hdr2 buffer;
3588
3589 DEBUGFUNC("ixgbe_update_flash_X550");
3590
3591 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3592 buffer.req.buf_lenh = 0;
3593 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3594 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3595
3596 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3597 sizeof(buffer),
3598 IXGBE_HI_COMMAND_TIMEOUT, false);
3599
3600 return status;
3601 }
3602
3603 /**
3604 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3605 * @hw: pointer to hardware structure
3606 *
3607 * Determines physical layer capabilities of the current configuration.
3608 **/
3609 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3610 {
3611 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3612 u16 ext_ability = 0;
3613
3614 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3615
3616 hw->phy.ops.identify(hw);
3617
3618 switch (hw->phy.type) {
3619 case ixgbe_phy_x550em_kr:
3620 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3621 if (hw->phy.nw_mng_if_sel &
3622 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3623 physical_layer =
3624 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3625 break;
3626 } else if (hw->device_id ==
3627 IXGBE_DEV_ID_X550EM_A_KR_L) {
3628 physical_layer =
3629 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3630 break;
3631 }
3632 }
3633 /* fall through */
3634 case ixgbe_phy_x550em_xfi:
3635 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3636 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3637 break;
3638 case ixgbe_phy_x550em_kx4:
3639 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3640 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3641 break;
3642 case ixgbe_phy_x550em_ext_t:
3643 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3644 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3645 &ext_ability);
3646 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3647 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3648 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3649 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3650 break;
3651 case ixgbe_phy_fw:
3652 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3653 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3654 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3655 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3656 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3657 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3658 break;
3659 case ixgbe_phy_sgmii:
3660 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3661 break;
3662 case ixgbe_phy_ext_1g_t:
3663 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3664 break;
3665 default:
3666 break;
3667 }
3668
3669 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3670 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3671
3672 return physical_layer;
3673 }
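
/*
 * Example (not part of the original source): decoding a few bits of the
 * physical-layer bitmask returned above.  Only a subset of the
 * IXGBE_PHYSICAL_LAYER_* flags is shown; the helper is illustrative only.
 */
static void ixgbe_example_log_phy_layer(struct ixgbe_hw *hw)
{
	u64 layer = ixgbe_get_supported_physical_layer_X550em(hw);

	if (layer == IXGBE_PHYSICAL_LAYER_UNKNOWN) {
		DEBUGOUT("physical layer unknown\n");
		return;
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		DEBUGOUT("10GBASE-T supported\n");
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		DEBUGOUT("1000BASE-KX supported\n");
}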
3674
3675 /**
3676  * ixgbe_get_bus_info_X550em - Set PCI bus info
3677 * @hw: pointer to hardware structure
3678 *
3679 * Sets bus link width and speed to unknown because X550em is
3680 * not a PCI device.
3681 **/
3682 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3683 {
3684
3685 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3686
3687 hw->bus.width = ixgbe_bus_width_unknown;
3688 hw->bus.speed = ixgbe_bus_speed_unknown;
3689
3690 hw->mac.ops.set_lan_id(hw);
3691
3692 return IXGBE_SUCCESS;
3693 }
3694
3695 /**
3696 * ixgbe_disable_rx_x550 - Disable RX unit
3697 * @hw: pointer to hardware structure
3698 *
3699  * Disables the Rx unit for x550, using the firmware command with a register fallback
3700 **/
3701 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3702 {
3703 u32 rxctrl, pfdtxgswc;
3704 s32 status;
3705 struct ixgbe_hic_disable_rxen fw_cmd;
3706
3707 DEBUGFUNC("ixgbe_enable_rx_dma_x550");
3708
3709 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3710 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3711 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3712 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3713 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3714 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3715 hw->mac.set_lben = true;
3716 } else {
3717 hw->mac.set_lben = false;
3718 }
3719
3720 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3721 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3722 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3723 fw_cmd.port_number = (u8)hw->bus.lan_id;
3724
3725 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3726 sizeof(struct ixgbe_hic_disable_rxen),
3727 IXGBE_HI_COMMAND_TIMEOUT, true);
3728
3729 /* If we fail - disable RX using register write */
3730 if (status) {
3731 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3732 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3733 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3734 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3735 }
3736 }
3737 }
3738 }
3739
3740 /**
3741 * ixgbe_enter_lplu_x550em - Transition to low power states
3742 * @hw: pointer to hardware structure
3743 *
3744 * Configures Low Power Link Up on transition to low power states
3745 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3746 * X557 PHY immediately prior to entering LPLU.
3747 **/
3748 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3749 {
3750 u16 an_10g_cntl_reg, autoneg_reg, speed;
3751 s32 status;
3752 ixgbe_link_speed lcd_speed;
3753 u32 save_autoneg;
3754 bool link_up;
3755
3756 /* SW LPLU not required on later HW revisions. */
3757 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3758 (IXGBE_FUSES0_REV_MASK &
3759 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3760 return IXGBE_SUCCESS;
3761
3762 /* If blocked by MNG FW, then don't restart AN */
3763 if (ixgbe_check_reset_blocked(hw))
3764 return IXGBE_SUCCESS;
3765
3766 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3767 if (status != IXGBE_SUCCESS)
3768 return status;
3769
3770 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3771
3772 if (status != IXGBE_SUCCESS)
3773 return status;
3774
3775 	/* If link is down, LPLU is disabled in NVM, or both WoL and manageability
3776 	 * are disabled, then force link down by entering low power mode.
3777 */
3778 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3779 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3780 return ixgbe_set_copper_phy_power(hw, FALSE);
3781
3782 /* Determine LCD */
3783 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3784
3785 if (status != IXGBE_SUCCESS)
3786 return status;
3787
3788 /* If no valid LCD link speed, then force link down and exit. */
3789 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3790 return ixgbe_set_copper_phy_power(hw, FALSE);
3791
3792 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3793 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3794 &speed);
3795
3796 if (status != IXGBE_SUCCESS)
3797 return status;
3798
3799 /* If no link now, speed is invalid so take link down */
3800 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3801 if (status != IXGBE_SUCCESS)
3802 return ixgbe_set_copper_phy_power(hw, false);
3803
3804 /* clear everything but the speed bits */
3805 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3806
3807 /* If current speed is already LCD, then exit. */
3808 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3809 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3810 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3811 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
3812 return status;
3813
3814 /* Clear AN completed indication */
3815 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
3816 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3817 &autoneg_reg);
3818
3819 if (status != IXGBE_SUCCESS)
3820 return status;
3821
3822 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
3823 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3824 &an_10g_cntl_reg);
3825
3826 if (status != IXGBE_SUCCESS)
3827 return status;
3828
3829 status = hw->phy.ops.read_reg(hw,
3830 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
3831 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3832 &autoneg_reg);
3833
3834 if (status != IXGBE_SUCCESS)
3835 return status;
3836
3837 save_autoneg = hw->phy.autoneg_advertised;
3838
3839 	/* Set up link at the lowest common (LCD) link speed */
3840 status = hw->mac.ops.setup_link(hw, lcd_speed, false);
3841
3842 /* restore autoneg from before setting lplu speed */
3843 hw->phy.autoneg_advertised = save_autoneg;
3844
3845 return status;
3846 }
3847
3848 /**
3849  * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
3850 * @hw: pointer to hardware structure
3851 * @lcd_speed: pointer to lowest common link speed
3852 *
3853 * Determine lowest common link speed with link partner.
3854 **/
3855 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
3856 {
3857 u16 an_lp_status;
3858 s32 status;
3859 u16 word = hw->eeprom.ctrl_word_3;
3860
3861 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
3862
3863 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
3864 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3865 &an_lp_status);
3866
3867 if (status != IXGBE_SUCCESS)
3868 return status;
3869
3870 /* If link partner advertised 1G, return 1G */
3871 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
3872 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
3873 return status;
3874 }
3875
3876 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
3877 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
3878 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
3879 return status;
3880
3881 /* Link partner not capable of lower speeds, return 10G */
3882 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
3883 return status;
3884 }
3885
3886 /**
3887 * ixgbe_setup_fc_X550em - Set up flow control
3888 * @hw: pointer to hardware structure
3889 *
3890 * Called at init time to set up flow control.
3891 **/
3892 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
3893 {
3894 s32 ret_val = IXGBE_SUCCESS;
3895 u32 pause, asm_dir, reg_val;
3896
3897 DEBUGFUNC("ixgbe_setup_fc_X550em");
3898
3899 /* Validate the requested mode */
3900 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
3901 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3902 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
3903 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3904 goto out;
3905 }
3906
3907 /* 10gig parts do not have a word in the EEPROM to determine the
3908 * default flow control setting, so we explicitly set it to full.
3909 */
3910 if (hw->fc.requested_mode == ixgbe_fc_default)
3911 hw->fc.requested_mode = ixgbe_fc_full;
3912
3913 /* Determine PAUSE and ASM_DIR bits. */
3914 switch (hw->fc.requested_mode) {
3915 case ixgbe_fc_none:
3916 pause = 0;
3917 asm_dir = 0;
3918 break;
3919 case ixgbe_fc_tx_pause:
3920 pause = 0;
3921 asm_dir = 1;
3922 break;
3923 case ixgbe_fc_rx_pause:
3924 /* Rx Flow control is enabled and Tx Flow control is
3925 * disabled by software override. Since there really
3926 * isn't a way to advertise that we are capable of RX
3927 * Pause ONLY, we will advertise that we support both
3928 * symmetric and asymmetric Rx PAUSE, as such we fall
3929 * through to the fc_full statement. Later, we will
3930 * disable the adapter's ability to send PAUSE frames.
3931 */
3932 case ixgbe_fc_full:
3933 pause = 1;
3934 asm_dir = 1;
3935 break;
3936 default:
3937 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
3938 "Flow control param set incorrectly\n");
3939 ret_val = IXGBE_ERR_CONFIG;
3940 goto out;
3941 }
3942
3943 switch (hw->device_id) {
3944 case IXGBE_DEV_ID_X550EM_X_KR:
3945 case IXGBE_DEV_ID_X550EM_A_KR:
3946 case IXGBE_DEV_ID_X550EM_A_KR_L:
3947 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
3948 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3949 			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3950 if (ret_val != IXGBE_SUCCESS)
3951 goto out;
3952 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
3953 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
3954 if (pause)
3955 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
3956 if (asm_dir)
3957 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
3958 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
3959 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3960 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3961
3962 /* This device does not fully support AN. */
3963 hw->fc.disable_fc_autoneg = true;
3964 break;
3965 case IXGBE_DEV_ID_X550EM_X_XFI:
3966 hw->fc.disable_fc_autoneg = true;
3967 break;
3968 default:
3969 break;
3970 }
3971
3972 out:
3973 return ret_val;
3974 }
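
/*
 * Example (not part of the original source): the PAUSE/ASM_DIR encoding used
 * above, factored into a small helper for clarity.  The helper name is
 * hypothetical; it assumes the shared enum ixgbe_fc_mode from ixgbe_type.h.
 */
static bool ixgbe_example_fc_to_pause_bits(enum ixgbe_fc_mode mode,
					   u32 *pause, u32 *asm_dir)
{
	switch (mode) {
	case ixgbe_fc_none:
		*pause = 0;
		*asm_dir = 0;
		return true;
	case ixgbe_fc_tx_pause:
		*pause = 0;
		*asm_dir = 1;
		return true;
	case ixgbe_fc_rx_pause:
		/* Advertised as symmetric + asymmetric, same as fc_full */
	case ixgbe_fc_full:
		*pause = 1;
		*asm_dir = 1;
		return true;
	default:
		return false;
	}
}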
3975
3976 /**
3977 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
3978 * @hw: pointer to hardware structure
3979 *
3980 * Enable flow control according to IEEE clause 37.
3981 **/
3982 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
3983 {
3984 u32 link_s1, lp_an_page_low, an_cntl_1;
3985 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
3986 ixgbe_link_speed speed;
3987 bool link_up;
3988
3989 /* AN should have completed when the cable was plugged in.
3990 * Look for reasons to bail out. Bail out if:
3991 * - FC autoneg is disabled, or if
3992 * - link is not up.
3993 */
3994 if (hw->fc.disable_fc_autoneg) {
3995 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3996 "Flow control autoneg is disabled");
3997 goto out;
3998 }
3999
4000 hw->mac.ops.check_link(hw, &speed, &link_up, false);
4001 if (!link_up) {
4002 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4003 goto out;
4004 }
4005
4006 	/* Check if auto-negotiation has completed */
4007 status = hw->mac.ops.read_iosf_sb_reg(hw,
4008 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4009 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4010
4011 if (status != IXGBE_SUCCESS ||
4012 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4013 DEBUGOUT("Auto-Negotiation did not complete\n");
4014 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4015 goto out;
4016 }
4017
4018 /* Read the 10g AN autoc and LP ability registers and resolve
4019 * local flow control settings accordingly
4020 */
4021 status = hw->mac.ops.read_iosf_sb_reg(hw,
4022 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4023 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4024
4025 if (status != IXGBE_SUCCESS) {
4026 DEBUGOUT("Auto-Negotiation did not complete\n");
4027 goto out;
4028 }
4029
4030 status = hw->mac.ops.read_iosf_sb_reg(hw,
4031 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4032 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4033
4034 if (status != IXGBE_SUCCESS) {
4035 DEBUGOUT("Auto-Negotiation did not complete\n");
4036 goto out;
4037 }
4038
4039 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4040 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4041 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4042 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4043 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4044
4045 out:
4046 if (status == IXGBE_SUCCESS) {
4047 hw->fc.fc_was_autonegged = true;
4048 } else {
4049 hw->fc.fc_was_autonegged = false;
4050 hw->fc.current_mode = hw->fc.requested_mode;
4051 }
4052 }
4053
4054 /**
4055 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4056 * @hw: pointer to hardware structure
4057 *
4058 **/
4059 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4060 {
4061 hw->fc.fc_was_autonegged = false;
4062 hw->fc.current_mode = hw->fc.requested_mode;
4063 }
4064
4065 /**
4066 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4067 * @hw: pointer to hardware structure
4068 *
4069 * Enable flow control according to IEEE clause 37.
4070 **/
4071 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4072 {
4073 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4074 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4075 ixgbe_link_speed speed;
4076 bool link_up;
4077
4078 /* AN should have completed when the cable was plugged in.
4079 * Look for reasons to bail out. Bail out if:
4080 * - FC autoneg is disabled, or if
4081 * - link is not up.
4082 */
4083 if (hw->fc.disable_fc_autoneg) {
4084 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4085 "Flow control autoneg is disabled");
4086 goto out;
4087 }
4088
4089 hw->mac.ops.check_link(hw, &speed, &link_up, false);
4090 if (!link_up) {
4091 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4092 goto out;
4093 }
4094
4095 /* Check if auto-negotiation has completed */
4096 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4097 if (status != IXGBE_SUCCESS ||
4098 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4099 DEBUGOUT("Auto-Negotiation did not complete\n");
4100 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4101 goto out;
4102 }
4103
4104 /* Negotiate the flow control */
4105 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4106 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4107 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4108 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4109 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4110
4111 out:
4112 if (status == IXGBE_SUCCESS) {
4113 hw->fc.fc_was_autonegged = true;
4114 } else {
4115 hw->fc.fc_was_autonegged = false;
4116 hw->fc.current_mode = hw->fc.requested_mode;
4117 }
4118 }
4119
4120 /**
4121 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4122 * @hw: pointer to hardware structure
4123 *
4124 * Called at init time to set up flow control.
4125 **/
4126 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4127 {
4128 s32 status = IXGBE_SUCCESS;
4129 u32 an_cntl = 0;
4130
4131 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4132
4133 /* Validate the requested mode */
4134 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4135 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4136 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4137 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4138 }
4139
4140 if (hw->fc.requested_mode == ixgbe_fc_default)
4141 hw->fc.requested_mode = ixgbe_fc_full;
4142
4143 /* Set up the 1G and 10G flow control advertisement registers so the
4144 * HW will be able to do FC autoneg once the cable is plugged in. If
4145 * we link at 10G, the 1G advertisement is harmless and vice versa.
4146 */
4147 status = hw->mac.ops.read_iosf_sb_reg(hw,
4148 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4149 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4150
4151 if (status != IXGBE_SUCCESS) {
4152 DEBUGOUT("Auto-Negotiation did not complete\n");
4153 return status;
4154 }
4155
4156 /* The possible values of fc.requested_mode are:
4157 * 0: Flow control is completely disabled
4158 * 1: Rx flow control is enabled (we can receive pause frames,
4159 * but not send pause frames).
4160 * 2: Tx flow control is enabled (we can send pause frames but
4161 * we do not support receiving pause frames).
4162 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4163 * other: Invalid.
4164 */
4165 switch (hw->fc.requested_mode) {
4166 case ixgbe_fc_none:
4167 /* Flow control completely disabled by software override. */
4168 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4169 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4170 break;
4171 case ixgbe_fc_tx_pause:
4172 /* Tx Flow control is enabled, and Rx Flow control is
4173 * disabled by software override.
4174 */
4175 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4176 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4177 break;
4178 case ixgbe_fc_rx_pause:
4179 /* Rx Flow control is enabled and Tx Flow control is
4180 * disabled by software override. Since there really
4181 * isn't a way to advertise that we are capable of RX
4182 * Pause ONLY, we will advertise that we support both
4183 * symmetric and asymmetric Rx PAUSE, as such we fall
4184 * through to the fc_full statement. Later, we will
4185 * disable the adapter's ability to send PAUSE frames.
4186 */
4187 case ixgbe_fc_full:
4188 /* Flow control (both Rx and Tx) is enabled by SW override. */
4189 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4190 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4191 break;
4192 default:
4193 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4194 "Flow control param set incorrectly\n");
4195 return IXGBE_ERR_CONFIG;
4196 }
4197
4198 status = hw->mac.ops.write_iosf_sb_reg(hw,
4199 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4200 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4201
4202 /* Restart auto-negotiation. */
4203 status = ixgbe_restart_an_internal_phy_x550em(hw);
4204
4205 return status;
4206 }
4207
4208 /**
4209 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4210 * @hw: pointer to hardware structure
4211 * @state: set mux if 1, clear if 0
4212 */
4213 STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4214 {
4215 u32 esdp;
4216
4217 if (!hw->bus.lan_id)
4218 return;
4219 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4220 if (state)
4221 esdp |= IXGBE_ESDP_SDP1;
4222 else
4223 esdp &= ~IXGBE_ESDP_SDP1;
4224 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4225 IXGBE_WRITE_FLUSH(hw);
4226 }
4227
4228 /**
4229 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4230 * @hw: pointer to hardware structure
4231 * @mask: Mask to specify which semaphore to acquire
4232 *
4233 * Acquires the SWFW semaphore and sets the I2C MUX
4234 **/
4235 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4236 {
4237 s32 status;
4238
4239 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4240
4241 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4242 if (status)
4243 return status;
4244
4245 if (mask & IXGBE_GSSR_I2C_MASK)
4246 ixgbe_set_mux(hw, 1);
4247
4248 return IXGBE_SUCCESS;
4249 }
4250
4251 /**
4252 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4253 * @hw: pointer to hardware structure
4254 * @mask: Mask to specify which semaphore to release
4255 *
4256 * Releases the SWFW semaphore and sets the I2C MUX
4257 **/
4258 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4259 {
4260 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4261
4262 if (mask & IXGBE_GSSR_I2C_MASK)
4263 ixgbe_set_mux(hw, 0);
4264
4265 ixgbe_release_swfw_sync_X540(hw, mask);
4266 }
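
/*
 * Example (not part of the original source): the acquire/release pattern for
 * an I2C access on these parts, so the CS4227 mux stays set for the whole
 * transaction.  The transaction body is left as a placeholder comment.
 */
static s32 ixgbe_example_i2c_transaction(struct ixgbe_hw *hw)
{
	u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_I2C_MASK;
	s32 status;

	status = hw->mac.ops.acquire_swfw_sync(hw, mask);
	if (status)
		return status;

	/* ... perform the I2C reads/writes here ... */

	hw->mac.ops.release_swfw_sync(hw, mask);
	return IXGBE_SUCCESS;
}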
4267
4268 /**
4269 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4270 * @hw: pointer to hardware structure
4271 * @mask: Mask to specify which semaphore to acquire
4272 *
4273 * Acquires the SWFW semaphore and get the shared phy token as needed
4274 */
4275 STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4276 {
4277 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4278 int retries = FW_PHY_TOKEN_RETRIES;
4279 s32 status = IXGBE_SUCCESS;
4280
4281 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4282
4283 while (--retries) {
4284 status = IXGBE_SUCCESS;
4285 if (hmask)
4286 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4287 if (status) {
4288 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4289 status);
4290 return status;
4291 }
4292 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4293 return IXGBE_SUCCESS;
4294
4295 status = ixgbe_get_phy_token(hw);
4296 if (status == IXGBE_ERR_TOKEN_RETRY)
4297 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4298 status);
4299
4300 if (status == IXGBE_SUCCESS)
4301 return IXGBE_SUCCESS;
4302
4303 if (hmask)
4304 ixgbe_release_swfw_sync_X540(hw, hmask);
4305
4306 if (status != IXGBE_ERR_TOKEN_RETRY) {
4307 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4308 status);
4309 return status;
4310 }
4311 }
4312
4313 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4314 hw->phy.id);
4315 return status;
4316 }
4317
4318 /**
4319 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4320 * @hw: pointer to hardware structure
4321 * @mask: Mask to specify which semaphore to release
4322 *
4323 * Releases the SWFW semaphore and puts the shared phy token as needed
4324 */
4325 STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4326 {
4327 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4328
4329 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4330
4331 if (mask & IXGBE_GSSR_TOKEN_SM)
4332 ixgbe_put_phy_token(hw);
4333
4334 if (hmask)
4335 ixgbe_release_swfw_sync_X540(hw, hmask);
4336 }
4337
4338 /**
4339 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4340 * @hw: pointer to hardware structure
4341 * @reg_addr: 32 bit address of PHY register to read
4342 * @device_type: 5 bit device type
4343 * @phy_data: Pointer to read data from PHY register
4344 *
4345 * Reads a value from a specified PHY register using the SWFW lock and PHY
4346 * Token. The PHY Token is needed since the MDIO is shared between to MAC
4347 * instances.
4348 **/
4349 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4350 u32 device_type, u16 *phy_data)
4351 {
4352 s32 status;
4353 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4354
4355 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4356
4357 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4358 return IXGBE_ERR_SWFW_SYNC;
4359
4360 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4361
4362 hw->mac.ops.release_swfw_sync(hw, mask);
4363
4364 return status;
4365 }
4366
4367 /**
4368 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4369 * @hw: pointer to hardware structure
4370 * @reg_addr: 32 bit PHY register to write
4371 * @device_type: 5 bit device type
4372 * @phy_data: Data to write to the PHY register
4373 *
4374 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4375  * The PHY Token is needed since the MDIO is shared between two MAC instances.
4376 **/
4377 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4378 u32 device_type, u16 phy_data)
4379 {
4380 s32 status;
4381 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4382
4383 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4384
4385 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4386 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4387 phy_data);
4388 hw->mac.ops.release_swfw_sync(hw, mask);
4389 } else {
4390 status = IXGBE_ERR_SWFW_SYNC;
4391 }
4392
4393 return status;
4394 }
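
/*
 * Example (not part of the original source): a read-modify-write of a
 * shared-MDIO PHY register using the token-aware wrappers above.  The
 * reg/dev_type/set_bits parameters are placeholders from a hypothetical
 * caller.
 */
static s32 ixgbe_example_phy_rmw(struct ixgbe_hw *hw, u32 reg, u32 dev_type,
				 u16 set_bits)
{
	u16 val;
	s32 status;

	status = ixgbe_read_phy_reg_x550a(hw, reg, dev_type, &val);
	if (status)
		return status;

	val |= set_bits;

	return ixgbe_write_phy_reg_x550a(hw, reg, dev_type, val);
}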
4395
4396 /**
4397 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4398 * @hw: pointer to hardware structure
4399 *
4400  * Handle external Base T PHY interrupt. If a high temperature
4401  * failure alarm is raised, return an error; else, if the link status
4402  * changed, set up the internal/external PHY link.
4403 *
4404 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
4405 * failure alarm, else return PHY access status.
4406 */
4407 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4408 {
4409 bool lsc;
4410 u32 status;
4411
4412 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4413
4414 if (status != IXGBE_SUCCESS)
4415 return status;
4416
4417 if (lsc)
4418 return ixgbe_setup_internal_phy(hw);
4419
4420 return IXGBE_SUCCESS;
4421 }
4422
4423 /**
4424 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4425 * @hw: pointer to hardware structure
4426 * @speed: new link speed
4427 * @autoneg_wait_to_complete: true when waiting for completion is needed
4428 *
4429 * Setup internal/external PHY link speed based on link speed, then set
4430 * external PHY auto advertised link speed.
4431 *
4432 * Returns error status for any failure
4433 **/
4434 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4435 ixgbe_link_speed speed,
4436 bool autoneg_wait_to_complete)
4437 {
4438 s32 status;
4439 ixgbe_link_speed force_speed;
4440 u32 i;
4441 bool link_up = false;
4442
4443 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4444
4445 	/* Set up the internal/external PHY link speed to iXFI (10G), unless
4446 	 * only 1G is auto advertised, in which case set up the KX link.
4447 */
4448 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4449 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4450 else
4451 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4452
4453 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4454 */
4455 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4456 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4457 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4458
4459 if (status != IXGBE_SUCCESS)
4460 return status;
4461
4462 /* Wait for the controller to acquire link */
4463 for (i = 0; i < 10; i++) {
4464 msec_delay(100);
4465
4466 status = ixgbe_check_link(hw, &force_speed, &link_up,
4467 false);
4468 if (status != IXGBE_SUCCESS)
4469 return status;
4470
4471 if (link_up)
4472 break;
4473 }
4474 }
4475
4476 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4477 }
4478
4479 /**
4480 * ixgbe_check_link_t_X550em - Determine link and speed status
4481 * @hw: pointer to hardware structure
4482 * @speed: pointer to link speed
4483 * @link_up: true when link is up
4484 * @link_up_wait_to_complete: bool used to wait for link up or not
4485 *
4486 * Check that both the MAC and X557 external PHY have link.
4487 **/
4488 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4489 bool *link_up, bool link_up_wait_to_complete)
4490 {
4491 u32 status;
4492 u16 i, autoneg_status = 0;
4493
4494 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4495 return IXGBE_ERR_CONFIG;
4496
4497 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4498 link_up_wait_to_complete);
4499
4500 /* If check link fails or MAC link is not up, then return */
4501 if (status != IXGBE_SUCCESS || !(*link_up))
4502 return status;
4503
4504 	/* MAC link is up, so check the external X557 PHY link.
4505 	 * Link status is latching low and can only be used to detect a link
4506 	 * drop, not the current state of the link, without performing
4507 	 * back-to-back reads.
4508 */
4509 for (i = 0; i < 2; i++) {
4510 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4511 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4512 &autoneg_status);
4513
4514 if (status != IXGBE_SUCCESS)
4515 return status;
4516 }
4517
4518 /* If external PHY link is not up, then indicate link not up */
4519 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4520 *link_up = false;
4521
4522 return IXGBE_SUCCESS;
4523 }
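
/*
 * Example (not part of the original source): the latched-low read pattern
 * used above, as a stand-alone helper - read the status register twice and
 * report only the second value, which reflects the current link state.
 */
static s32 ixgbe_example_read_latched_an_status(struct ixgbe_hw *hw,
						u16 *autoneg_status)
{
	s32 status;
	u16 i;

	for (i = 0; i < 2; i++) {
		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
					      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
					      autoneg_status);
		if (status != IXGBE_SUCCESS)
			return status;
	}

	return IXGBE_SUCCESS;
}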
4524
4525 /**
4526 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4527 * @hw: pointer to hardware structure
4528 **/
4529 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4530 {
4531 s32 status;
4532
4533 status = ixgbe_reset_phy_generic(hw);
4534
4535 if (status != IXGBE_SUCCESS)
4536 return status;
4537
4538 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4539 return ixgbe_enable_lasi_ext_t_x550em(hw);
4540 }
4541
4542 /**
4543 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4544 * @hw: pointer to hardware structure
4545 * @led_idx: led number to turn on
4546 **/
4547 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4548 {
4549 u16 phy_data;
4550
4551 DEBUGFUNC("ixgbe_led_on_t_X550em");
4552
4553 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4554 return IXGBE_ERR_PARAM;
4555
4556 /* To turn on the LED, set mode to ON. */
4557 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4558 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4559 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4560 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4561 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4562
4563 /* Some designs have the LEDs wired to the MAC */
4564 return ixgbe_led_on_generic(hw, led_idx);
4565 }
4566
4567 /**
4568 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4569 * @hw: pointer to hardware structure
4570 * @led_idx: led number to turn off
4571 **/
4572 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4573 {
4574 u16 phy_data;
4575
4576 DEBUGFUNC("ixgbe_led_off_t_X550em");
4577
4578 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4579 return IXGBE_ERR_PARAM;
4580
4581 	/* To turn off the LED, set mode to OFF. */
4582 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4583 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4584 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4585 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4586 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4587
4588 /* Some designs have the LEDs wired to the MAC */
4589 return ixgbe_led_off_generic(hw, led_idx);
4590 }
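
/*
 * Example (not part of the original source): blinking one of the X557-driven
 * LEDs to identify a port, using the on/off helpers above.  The helper name,
 * loop count and 500 ms period are arbitrary.
 */
static s32 ixgbe_example_blink_led(struct ixgbe_hw *hw, u32 led_idx, u32 times)
{
	u32 i;
	s32 status;

	for (i = 0; i < times; i++) {
		status = ixgbe_led_on_t_X550em(hw, led_idx);
		if (status)
			return status;
		msec_delay(500);

		status = ixgbe_led_off_t_X550em(hw, led_idx);
		if (status)
			return status;
		msec_delay(500);
	}

	return IXGBE_SUCCESS;
}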
4591
4592 /**
4593 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4594 * @hw: pointer to the HW structure
4595 * @maj: driver version major number
4596 * @min: driver version minor number
4597 * @build: driver version build number
4598 * @sub: driver version sub build number
4599 * @len: length of driver_ver string
4600 * @driver_ver: driver string
4601 *
4602 * Sends driver version number to firmware through the manageability
4603 * block. On success return IXGBE_SUCCESS
4604 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4605 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4606 **/
4607 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4608 u8 build, u8 sub, u16 len, const char *driver_ver)
4609 {
4610 struct ixgbe_hic_drv_info2 fw_cmd;
4611 s32 ret_val = IXGBE_SUCCESS;
4612 int i;
4613
4614 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4615
4616 if ((len == 0) || (driver_ver == NULL) ||
4617 (len > sizeof(fw_cmd.driver_string)))
4618 return IXGBE_ERR_INVALID_ARGUMENT;
4619
4620 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4621 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4622 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4623 fw_cmd.port_num = (u8)hw->bus.func;
4624 fw_cmd.ver_maj = maj;
4625 fw_cmd.ver_min = min;
4626 fw_cmd.ver_build = build;
4627 fw_cmd.ver_sub = sub;
4628 fw_cmd.hdr.checksum = 0;
4629 memcpy(fw_cmd.driver_string, driver_ver, len);
4630 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4631 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4632
4633 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4634 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4635 sizeof(fw_cmd),
4636 IXGBE_HI_COMMAND_TIMEOUT,
4637 true);
4638 if (ret_val != IXGBE_SUCCESS)
4639 continue;
4640
4641 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4642 FW_CEM_RESP_STATUS_SUCCESS)
4643 ret_val = IXGBE_SUCCESS;
4644 else
4645 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4646
4647 break;
4648 }
4649
4650 return ret_val;
4651 }
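
/*
 * Example (not part of the original source): reporting a driver version to
 * firmware with the routine above.  The version numbers and the string are
 * placeholders; the length passed includes the terminating NUL and must fit
 * the firmware buffer.
 */
static s32 ixgbe_example_report_drv_ver(struct ixgbe_hw *hw)
{
	const char drv_ver[] = "example-driver 1.0.0";

	return ixgbe_set_fw_drv_ver_x550(hw, 1, 0, 0, 0,
					 (u16)sizeof(drv_ver), drv_ver);
}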
4652
4653 /**
4654 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4655  * @hw: pointer to hardware structure
4656 *
4657 * Returns true if in FW NVM recovery mode.
4658 **/
4659 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4660 {
4661 u32 fwsm;
4662
4663 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4664
4665 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4666 }
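
/*
 * Example (not part of the original source): guarding an NVM update with the
 * recovery-mode check above, through the fw_recovery_mode mac op.  The error
 * code returned here is an arbitrary illustrative choice.
 */
static s32 ixgbe_example_safe_update_checksum(struct ixgbe_hw *hw)
{
	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw))
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;

	return ixgbe_update_eeprom_checksum_X550(hw);
}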
4667