1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4 
5 #include "ixgbe_type.h"
6 #include "ixgbe_82599.h"
7 #include "ixgbe_api.h"
8 #include "ixgbe_common.h"
9 #include "ixgbe_phy.h"
10 
11 #define IXGBE_82599_MAX_TX_QUEUES 128
12 #define IXGBE_82599_MAX_RX_QUEUES 128
13 #define IXGBE_82599_RAR_ENTRIES   128
14 #define IXGBE_82599_MC_TBL_SIZE   128
15 #define IXGBE_82599_VFT_TBL_SIZE  128
16 #define IXGBE_82599_RX_PB_SIZE	  512
17 
18 STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
19 					 ixgbe_link_speed speed,
20 					 bool autoneg_wait_to_complete);
21 STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
22 STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
23 				   u16 offset, u16 *data);
24 STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
25 					  u16 words, u16 *data);
26 STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
27 					u8 dev_addr, u8 *data);
28 STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
29 					u8 dev_addr, u8 data);
30 
ixgbe_init_mac_link_ops_82599(struct ixgbe_hw * hw)31 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
32 {
33 	struct ixgbe_mac_info *mac = &hw->mac;
34 
35 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
36 
37 	/*
38 	 * enable the laser control functions for SFP+ fiber
39 	 * and MNG not enabled
40 	 */
41 	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
42 	    !ixgbe_mng_enabled(hw)) {
43 		mac->ops.disable_tx_laser =
44 				       ixgbe_disable_tx_laser_multispeed_fiber;
45 		mac->ops.enable_tx_laser =
46 					ixgbe_enable_tx_laser_multispeed_fiber;
47 		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
48 
49 	} else {
50 		mac->ops.disable_tx_laser = NULL;
51 		mac->ops.enable_tx_laser = NULL;
52 		mac->ops.flap_tx_laser = NULL;
53 	}
54 
55 	if (hw->phy.multispeed_fiber) {
56 		/* Set up dual speed SFP+ support */
57 		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
58 		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
59 		mac->ops.set_rate_select_speed =
60 					       ixgbe_set_hard_rate_select_speed;
61 	} else {
62 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
63 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
64 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
65 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
66 			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
67 		} else {
68 			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
69 		}
70 	}
71 }
72 
73 /**
74  * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
75  * @hw: pointer to hardware structure
76  *
77  * Initialize any function pointers that were not able to be
78  * set during init_shared_code because the PHY/SFP type was
79  * not known.  Perform the SFP init if necessary.
80  *
81  **/
ixgbe_init_phy_ops_82599(struct ixgbe_hw * hw)82 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
83 {
84 	struct ixgbe_mac_info *mac = &hw->mac;
85 	struct ixgbe_phy_info *phy = &hw->phy;
86 	s32 ret_val = IXGBE_SUCCESS;
87 	u32 esdp;
88 
89 	DEBUGFUNC("ixgbe_init_phy_ops_82599");
90 
91 	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
92 		/* Store flag indicating I2C bus access control unit. */
93 		hw->phy.qsfp_shared_i2c_bus = TRUE;
94 
95 		/* Initialize access to QSFP+ I2C bus */
96 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
97 		esdp |= IXGBE_ESDP_SDP0_DIR;
98 		esdp &= ~IXGBE_ESDP_SDP1_DIR;
99 		esdp &= ~IXGBE_ESDP_SDP0;
100 		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
101 		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
102 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
103 		IXGBE_WRITE_FLUSH(hw);
104 
105 		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
106 		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
107 	}
108 	/* Identify the PHY or SFP module */
109 	ret_val = phy->ops.identify(hw);
110 	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
111 		goto init_phy_ops_out;
112 
113 	/* Setup function pointers based on detected SFP module and speeds */
114 	ixgbe_init_mac_link_ops_82599(hw);
115 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
116 		hw->phy.ops.reset = NULL;
117 
118 	/* If copper media, overwrite with copper function pointers */
119 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
120 		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
121 		mac->ops.get_link_capabilities =
122 				  ixgbe_get_copper_link_capabilities_generic;
123 	}
124 
125 	/* Set necessary function pointers based on PHY type */
126 	switch (hw->phy.type) {
127 	case ixgbe_phy_tn:
128 		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
129 		phy->ops.check_link = ixgbe_check_phy_link_tnx;
130 		phy->ops.get_firmware_version =
131 			     ixgbe_get_phy_firmware_version_tnx;
132 		break;
133 	default:
134 		break;
135 	}
136 init_phy_ops_out:
137 	return ret_val;
138 }
139 
ixgbe_setup_sfp_modules_82599(struct ixgbe_hw * hw)140 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
141 {
142 	s32 ret_val = IXGBE_SUCCESS;
143 	u16 list_offset, data_offset, data_value;
144 
145 	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
146 
147 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
148 		ixgbe_init_mac_link_ops_82599(hw);
149 
150 		hw->phy.ops.reset = NULL;
151 
152 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
153 							      &data_offset);
154 		if (ret_val != IXGBE_SUCCESS)
155 			goto setup_sfp_out;
156 
157 		/* PHY config will finish before releasing the semaphore */
158 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
159 							IXGBE_GSSR_MAC_CSR_SM);
160 		if (ret_val != IXGBE_SUCCESS) {
161 			ret_val = IXGBE_ERR_SWFW_SYNC;
162 			goto setup_sfp_out;
163 		}
164 
165 		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
166 			goto setup_sfp_err;
167 		while (data_value != 0xffff) {
168 			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
169 			IXGBE_WRITE_FLUSH(hw);
170 			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
171 				goto setup_sfp_err;
172 		}
173 
174 		/* Release the semaphore */
175 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
176 		/* Delay obtaining semaphore again to allow FW access
177 		 * prot_autoc_write uses the semaphore too.
178 		 */
179 		msec_delay(hw->eeprom.semaphore_delay);
180 
181 		/* Restart DSP and set SFI mode */
182 		ret_val = hw->mac.ops.prot_autoc_write(hw,
183 			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
184 			false);
185 
186 		if (ret_val) {
187 			DEBUGOUT("sfp module setup not complete\n");
188 			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
189 			goto setup_sfp_out;
190 		}
191 
192 	}
193 
194 setup_sfp_out:
195 	return ret_val;
196 
197 setup_sfp_err:
198 	/* Release the semaphore */
199 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
200 	/* Delay obtaining semaphore again to allow FW access */
201 	msec_delay(hw->eeprom.semaphore_delay);
202 	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
203 		      "eeprom read at offset %d failed", data_offset);
204 	return IXGBE_ERR_PHY;
205 }
206 
207 /**
208  * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
209  * @hw: pointer to hardware structure
210  * @locked: Return the if we locked for this read.
211  * @reg_val: Value we read from AUTOC
212  *
213  * For this part (82599) we need to wrap read-modify-writes with a possible
214  * FW/SW lock.  It is assumed this lock will be freed with the next
215  * prot_autoc_write_82599().
216  */
prot_autoc_read_82599(struct ixgbe_hw * hw,bool * locked,u32 * reg_val)217 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
218 {
219 	s32 ret_val;
220 
221 	*locked = false;
222 	 /* If LESM is on then we need to hold the SW/FW semaphore. */
223 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
224 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
225 					IXGBE_GSSR_MAC_CSR_SM);
226 		if (ret_val != IXGBE_SUCCESS)
227 			return IXGBE_ERR_SWFW_SYNC;
228 
229 		*locked = true;
230 	}
231 
232 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
233 	return IXGBE_SUCCESS;
234 }
235 
236 /**
237  * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
238  * @hw: pointer to hardware structure
239  * @autoc: value to write to AUTOC
240  * @locked: bool to indicate whether the SW/FW lock was already taken by
241  *          previous proc_autoc_read_82599.
242  *
243  * This part (82599) may need to hold the SW/FW lock around all writes to
244  * AUTOC. Likewise after a write we need to do a pipeline reset.
245  */
prot_autoc_write_82599(struct ixgbe_hw * hw,u32 autoc,bool locked)246 s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
247 {
248 	s32 ret_val = IXGBE_SUCCESS;
249 
250 	/* Blocked by MNG FW so bail */
251 	if (ixgbe_check_reset_blocked(hw))
252 		goto out;
253 
254 	/* We only need to get the lock if:
255 	 *  - We didn't do it already (in the read part of a read-modify-write)
256 	 *  - LESM is enabled.
257 	 */
258 	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
259 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
260 					IXGBE_GSSR_MAC_CSR_SM);
261 		if (ret_val != IXGBE_SUCCESS)
262 			return IXGBE_ERR_SWFW_SYNC;
263 
264 		locked = true;
265 	}
266 
267 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
268 	ret_val = ixgbe_reset_pipeline_82599(hw);
269 
270 out:
271 	/* Free the SW/FW semaphore as we either grabbed it here or
272 	 * already had it when this function was called.
273 	 */
274 	if (locked)
275 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
276 
277 	return ret_val;
278 }
279 
280 /**
281  * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
282  * @hw: pointer to hardware structure
283  *
284  * Initialize the function pointers and assign the MAC type for 82599.
285  * Does not touch the hardware.
286  **/
287 
ixgbe_init_ops_82599(struct ixgbe_hw * hw)288 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
289 {
290 	struct ixgbe_mac_info *mac = &hw->mac;
291 	struct ixgbe_phy_info *phy = &hw->phy;
292 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
293 	s32 ret_val;
294 
295 	DEBUGFUNC("ixgbe_init_ops_82599");
296 
297 	ixgbe_init_phy_ops_generic(hw);
298 	ret_val = ixgbe_init_ops_generic(hw);
299 
300 	/* PHY */
301 	phy->ops.identify = ixgbe_identify_phy_82599;
302 	phy->ops.init = ixgbe_init_phy_ops_82599;
303 
304 	/* MAC */
305 	mac->ops.reset_hw = ixgbe_reset_hw_82599;
306 	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
307 	mac->ops.get_media_type = ixgbe_get_media_type_82599;
308 	mac->ops.get_supported_physical_layer =
309 				    ixgbe_get_supported_physical_layer_82599;
310 	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
311 	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
312 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
313 	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
314 	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
315 	mac->ops.start_hw = ixgbe_start_hw_82599;
316 	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
317 	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
318 	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
319 	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
320 	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
321 	mac->ops.prot_autoc_read = prot_autoc_read_82599;
322 	mac->ops.prot_autoc_write = prot_autoc_write_82599;
323 
324 	/* RAR, Multicast, VLAN */
325 	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
326 	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
327 	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
328 	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
329 	mac->rar_highwater = 1;
330 	mac->ops.set_vfta = ixgbe_set_vfta_generic;
331 	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
332 	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
333 	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
334 	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
335 	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
336 	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
337 
338 	/* Link */
339 	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
340 	mac->ops.check_link = ixgbe_check_mac_link_generic;
341 	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
342 	ixgbe_init_mac_link_ops_82599(hw);
343 
344 	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
345 	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
346 	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
347 	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
348 	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
349 	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
350 	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
351 
352 	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
353 				      & IXGBE_FWSM_MODE_MASK);
354 
355 	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
356 
357 	/* EEPROM */
358 	eeprom->ops.read = ixgbe_read_eeprom_82599;
359 	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;
360 
361 	/* Manageability interface */
362 	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
363 
364 	mac->ops.get_thermal_sensor_data =
365 					 ixgbe_get_thermal_sensor_data_generic;
366 	mac->ops.init_thermal_sensor_thresh =
367 				      ixgbe_init_thermal_sensor_thresh_generic;
368 
369 	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
370 
371 	return ret_val;
372 }
373 
374 /**
375  * ixgbe_get_link_capabilities_82599 - Determines link capabilities
376  * @hw: pointer to hardware structure
377  * @speed: pointer to link speed
378  * @autoneg: true when autoneg or autotry is enabled
379  *
380  * Determines the link capabilities by reading the AUTOC register.
381  **/
ixgbe_get_link_capabilities_82599(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * autoneg)382 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
383 				      ixgbe_link_speed *speed,
384 				      bool *autoneg)
385 {
386 	s32 status = IXGBE_SUCCESS;
387 	u32 autoc = 0;
388 
389 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
390 
391 
392 	/* Check if 1G SFP module. */
393 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
394 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
395 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core0 ||
396 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core1 ||
397 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
398 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
399 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
400 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
401 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
402 		*autoneg = true;
403 		goto out;
404 	}
405 
406 	/*
407 	 * Determine link capabilities based on the stored value of AUTOC,
408 	 * which represents EEPROM defaults.  If AUTOC value has not
409 	 * been stored, use the current register values.
410 	 */
411 	if (hw->mac.orig_link_settings_stored)
412 		autoc = hw->mac.orig_autoc;
413 	else
414 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
415 
416 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
417 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
418 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
419 		*autoneg = false;
420 		break;
421 
422 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
423 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
424 		*autoneg = false;
425 		break;
426 
427 	case IXGBE_AUTOC_LMS_1G_AN:
428 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
429 		*autoneg = true;
430 		break;
431 
432 	case IXGBE_AUTOC_LMS_10G_SERIAL:
433 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
434 		*autoneg = false;
435 		break;
436 
437 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
438 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
439 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
440 		if (autoc & IXGBE_AUTOC_KR_SUPP)
441 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
442 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
443 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
444 		if (autoc & IXGBE_AUTOC_KX_SUPP)
445 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
446 		*autoneg = true;
447 		break;
448 
449 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
450 		*speed = IXGBE_LINK_SPEED_100_FULL;
451 		if (autoc & IXGBE_AUTOC_KR_SUPP)
452 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
453 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
454 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
455 		if (autoc & IXGBE_AUTOC_KX_SUPP)
456 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
457 		*autoneg = true;
458 		break;
459 
460 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
461 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
462 		*autoneg = false;
463 		break;
464 
465 	default:
466 		status = IXGBE_ERR_LINK_SETUP;
467 		goto out;
468 		break;
469 	}
470 
471 	if (hw->phy.multispeed_fiber) {
472 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
473 			  IXGBE_LINK_SPEED_1GB_FULL;
474 
475 		/* QSFP must not enable full auto-negotiation
476 		 * Limited autoneg is enabled at 1G
477 		 */
478 		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
479 			*autoneg = false;
480 		else
481 			*autoneg = true;
482 	}
483 
484 out:
485 	return status;
486 }
487 
488 /**
489  * ixgbe_get_media_type_82599 - Get media type
490  * @hw: pointer to hardware structure
491  *
492  * Returns the media type (fiber, copper, backplane)
493  **/
ixgbe_get_media_type_82599(struct ixgbe_hw * hw)494 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
495 {
496 	enum ixgbe_media_type media_type;
497 
498 	DEBUGFUNC("ixgbe_get_media_type_82599");
499 
500 	/* Detect if there is a copper PHY attached. */
501 	switch (hw->phy.type) {
502 	case ixgbe_phy_cu_unknown:
503 	case ixgbe_phy_tn:
504 		media_type = ixgbe_media_type_copper;
505 		goto out;
506 	default:
507 		break;
508 	}
509 
510 	switch (hw->device_id) {
511 	case IXGBE_DEV_ID_82599_KX4:
512 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
513 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
514 	case IXGBE_DEV_ID_82599_KR:
515 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
516 	case IXGBE_DEV_ID_82599_XAUI_LOM:
517 		/* Default device ID is mezzanine card KX/KX4 */
518 		media_type = ixgbe_media_type_backplane;
519 		break;
520 	case IXGBE_DEV_ID_82599_SFP:
521 	case IXGBE_DEV_ID_82599_SFP_FCOE:
522 	case IXGBE_DEV_ID_82599_SFP_EM:
523 	case IXGBE_DEV_ID_82599_SFP_SF2:
524 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
525 	case IXGBE_DEV_ID_82599EN_SFP:
526 		media_type = ixgbe_media_type_fiber;
527 		break;
528 	case IXGBE_DEV_ID_82599_CX4:
529 		media_type = ixgbe_media_type_cx4;
530 		break;
531 	case IXGBE_DEV_ID_82599_T3_LOM:
532 		media_type = ixgbe_media_type_copper;
533 		break;
534 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
535 		media_type = ixgbe_media_type_fiber_qsfp;
536 		break;
537 	default:
538 		media_type = ixgbe_media_type_unknown;
539 		break;
540 	}
541 out:
542 	return media_type;
543 }
544 
545 /**
546  * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
547  * @hw: pointer to hardware structure
548  *
549  * Disables link during D3 power down sequence.
550  *
551  **/
ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw * hw)552 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
553 {
554 	u32 autoc2_reg;
555 	u16 ee_ctrl_2 = 0;
556 
557 	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
558 	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
559 
560 	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
561 	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
562 		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
563 		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
564 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
565 	}
566 }
567 
568 /**
569  * ixgbe_start_mac_link_82599 - Setup MAC link settings
570  * @hw: pointer to hardware structure
571  * @autoneg_wait_to_complete: true when waiting for completion is needed
572  *
573  * Configures link settings based on values in the ixgbe_hw struct.
574  * Restarts the link.  Performs autonegotiation if needed.
575  **/
ixgbe_start_mac_link_82599(struct ixgbe_hw * hw,bool autoneg_wait_to_complete)576 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
577 			       bool autoneg_wait_to_complete)
578 {
579 	u32 autoc_reg;
580 	u32 links_reg;
581 	u32 i;
582 	s32 status = IXGBE_SUCCESS;
583 	bool got_lock = false;
584 
585 	DEBUGFUNC("ixgbe_start_mac_link_82599");
586 
587 
588 	/*  reset_pipeline requires us to hold this lock as it writes to
589 	 *  AUTOC.
590 	 */
591 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
592 		status = hw->mac.ops.acquire_swfw_sync(hw,
593 						       IXGBE_GSSR_MAC_CSR_SM);
594 		if (status != IXGBE_SUCCESS)
595 			goto out;
596 
597 		got_lock = true;
598 	}
599 
600 	/* Restart link */
601 	ixgbe_reset_pipeline_82599(hw);
602 
603 	if (got_lock)
604 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
605 
606 	/* Only poll for autoneg to complete if specified to do so */
607 	if (autoneg_wait_to_complete) {
608 		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
609 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
610 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
611 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
612 		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
613 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
614 		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
615 			links_reg = 0; /* Just in case Autoneg time = 0 */
616 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
617 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
618 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
619 					break;
620 				msec_delay(100);
621 			}
622 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
623 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
624 				DEBUGOUT("Autoneg did not complete.\n");
625 			}
626 		}
627 	}
628 
629 	/* Add delay to filter out noises during initial link setup */
630 	msec_delay(50);
631 
632 out:
633 	return status;
634 }
635 
636 /**
637  * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
638  * @hw: pointer to hardware structure
639  *
640  * The base drivers may require better control over SFP+ module
641  * PHY states.  This includes selectively shutting down the Tx
642  * laser on the PHY, effectively halting physical link.
643  **/
ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw * hw)644 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
645 {
646 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
647 
648 	/* Blocked by MNG FW so bail */
649 	if (ixgbe_check_reset_blocked(hw))
650 		return;
651 
652 	/* Disable Tx laser; allow 100us to go dark per spec */
653 	esdp_reg |= IXGBE_ESDP_SDP3;
654 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
655 	IXGBE_WRITE_FLUSH(hw);
656 	usec_delay(100);
657 }
658 
659 /**
660  * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
661  * @hw: pointer to hardware structure
662  *
663  * The base drivers may require better control over SFP+ module
664  * PHY states.  This includes selectively turning on the Tx
665  * laser on the PHY, effectively starting physical link.
666  **/
ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw * hw)667 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
668 {
669 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
670 
671 	/* Enable Tx laser; allow 100ms to light up */
672 	esdp_reg &= ~IXGBE_ESDP_SDP3;
673 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
674 	IXGBE_WRITE_FLUSH(hw);
675 	msec_delay(100);
676 }
677 
678 /**
679  * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
680  * @hw: pointer to hardware structure
681  *
682  * When the driver changes the link speeds that it can support,
683  * it sets autotry_restart to true to indicate that we need to
684  * initiate a new autotry session with the link partner.  To do
685  * so, we set the speed then disable and re-enable the Tx laser, to
686  * alert the link partner that it also needs to restart autotry on its
687  * end.  This is consistent with true clause 37 autoneg, which also
688  * involves a loss of signal.
689  **/
ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw * hw)690 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
691 {
692 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
693 
694 	/* Blocked by MNG FW so bail */
695 	if (ixgbe_check_reset_blocked(hw))
696 		return;
697 
698 	if (hw->mac.autotry_restart) {
699 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
700 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
701 		hw->mac.autotry_restart = false;
702 	}
703 }
704 
705 /**
706  * ixgbe_set_hard_rate_select_speed - Set module link speed
707  * @hw: pointer to hardware structure
708  * @speed: link speed to set
709  *
710  * Set module link speed via RS0/RS1 rate select pins.
711  */
ixgbe_set_hard_rate_select_speed(struct ixgbe_hw * hw,ixgbe_link_speed speed)712 void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
713 					ixgbe_link_speed speed)
714 {
715 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
716 
717 	switch (speed) {
718 	case IXGBE_LINK_SPEED_10GB_FULL:
719 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
720 		break;
721 	case IXGBE_LINK_SPEED_1GB_FULL:
722 		esdp_reg &= ~IXGBE_ESDP_SDP5;
723 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
724 		break;
725 	default:
726 		DEBUGOUT("Invalid fixed module speed\n");
727 		return;
728 	}
729 
730 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
731 	IXGBE_WRITE_FLUSH(hw);
732 }
733 
734 /**
735  * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
736  * @hw: pointer to hardware structure
737  * @speed: new link speed
738  * @autoneg_wait_to_complete: true when waiting for completion is needed
739  *
740  * Implements the Intel SmartSpeed algorithm.
741  **/
ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait_to_complete)742 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
743 				    ixgbe_link_speed speed,
744 				    bool autoneg_wait_to_complete)
745 {
746 	s32 status = IXGBE_SUCCESS;
747 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
748 	s32 i, j;
749 	bool link_up = false;
750 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
751 
752 	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
753 
754 	 /* Set autoneg_advertised value based on input link speed */
755 	hw->phy.autoneg_advertised = 0;
756 
757 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
758 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
759 
760 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
761 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
762 
763 	if (speed & IXGBE_LINK_SPEED_100_FULL)
764 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
765 
766 	/*
767 	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
768 	 * autoneg advertisement if link is unable to be established at the
769 	 * highest negotiated rate.  This can sometimes happen due to integrity
770 	 * issues with the physical media connection.
771 	 */
772 
773 	/* First, try to get link with full advertisement */
774 	hw->phy.smart_speed_active = false;
775 	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
776 		status = ixgbe_setup_mac_link_82599(hw, speed,
777 						    autoneg_wait_to_complete);
778 		if (status != IXGBE_SUCCESS)
779 			goto out;
780 
781 		/*
782 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
783 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
784 		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
785 		 * Table 9 in the AN MAS.
786 		 */
787 		for (i = 0; i < 5; i++) {
788 			msec_delay(100);
789 
790 			/* If we have link, just jump out */
791 			status = ixgbe_check_link(hw, &link_speed, &link_up,
792 						  false);
793 			if (status != IXGBE_SUCCESS)
794 				goto out;
795 
796 			if (link_up)
797 				goto out;
798 		}
799 	}
800 
801 	/*
802 	 * We didn't get link.  If we advertised KR plus one of KX4/KX
803 	 * (or BX4/BX), then disable KR and try again.
804 	 */
805 	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
806 	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
807 		goto out;
808 
809 	/* Turn SmartSpeed on to disable KR support */
810 	hw->phy.smart_speed_active = true;
811 	status = ixgbe_setup_mac_link_82599(hw, speed,
812 					    autoneg_wait_to_complete);
813 	if (status != IXGBE_SUCCESS)
814 		goto out;
815 
816 	/*
817 	 * Wait for the controller to acquire link.  600ms will allow for
818 	 * the AN link_fail_inhibit_timer as well for multiple cycles of
819 	 * parallel detect, both 10g and 1g. This allows for the maximum
820 	 * connect attempts as defined in the AN MAS table 73-7.
821 	 */
822 	for (i = 0; i < 6; i++) {
823 		msec_delay(100);
824 
825 		/* If we have link, just jump out */
826 		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
827 		if (status != IXGBE_SUCCESS)
828 			goto out;
829 
830 		if (link_up)
831 			goto out;
832 	}
833 
834 	/* We didn't get link.  Turn SmartSpeed back off. */
835 	hw->phy.smart_speed_active = false;
836 	status = ixgbe_setup_mac_link_82599(hw, speed,
837 					    autoneg_wait_to_complete);
838 
839 out:
840 	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
841 		DEBUGOUT("Smartspeed has downgraded the link speed "
842 		"from the maximum advertised\n");
843 	return status;
844 }
845 
846 /**
847  * ixgbe_setup_mac_link_82599 - Set MAC link speed
848  * @hw: pointer to hardware structure
849  * @speed: new link speed
850  * @autoneg_wait_to_complete: true when waiting for completion is needed
851  *
852  * Set the link speed in the AUTOC register and restarts link.
853  **/
ixgbe_setup_mac_link_82599(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait_to_complete)854 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
855 			       ixgbe_link_speed speed,
856 			       bool autoneg_wait_to_complete)
857 {
858 	bool autoneg = false;
859 	s32 status = IXGBE_SUCCESS;
860 	u32 pma_pmd_1g, link_mode;
861 	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
862 	u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
863 	u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
864 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
865 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
866 	u32 links_reg;
867 	u32 i;
868 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
869 
870 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
871 
872 	/* Check to see if speed passed in is supported. */
873 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
874 	if (status)
875 		goto out;
876 
877 	speed &= link_capabilities;
878 
879 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
880 		status = IXGBE_ERR_LINK_SETUP;
881 		goto out;
882 	}
883 
884 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
885 	if (hw->mac.orig_link_settings_stored)
886 		orig_autoc = hw->mac.orig_autoc;
887 	else
888 		orig_autoc = autoc;
889 
890 	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
891 	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
892 
893 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
894 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
895 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
896 		/* Set KX4/KX/KR support according to speed requested */
897 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
898 		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
899 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
900 				autoc |= IXGBE_AUTOC_KX4_SUPP;
901 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
902 			    (hw->phy.smart_speed_active == false))
903 				autoc |= IXGBE_AUTOC_KR_SUPP;
904 		}
905 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
906 			autoc |= IXGBE_AUTOC_KX_SUPP;
907 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
908 		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
909 		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
910 		/* Switch from 1G SFI to 10G SFI if requested */
911 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
912 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
913 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
914 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
915 		}
916 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
917 		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
918 		/* Switch from 10G SFI to 1G SFI if requested */
919 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
920 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
921 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
922 			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
923 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
924 			else
925 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
926 		}
927 	}
928 
929 	if (autoc != current_autoc) {
930 		/* Restart link */
931 		status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
932 		if (status != IXGBE_SUCCESS)
933 			goto out;
934 
935 		/* Only poll for autoneg to complete if specified to do so */
936 		if (autoneg_wait_to_complete) {
937 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
938 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
939 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
940 				links_reg = 0; /*Just in case Autoneg time=0*/
941 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
942 					links_reg =
943 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
944 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
945 						break;
946 					msec_delay(100);
947 				}
948 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
949 					status =
950 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
951 					DEBUGOUT("Autoneg did not complete.\n");
952 				}
953 			}
954 		}
955 
956 		/* Add delay to filter out noises during initial link setup */
957 		msec_delay(50);
958 	}
959 
960 out:
961 	return status;
962 }
963 
964 /**
965  * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
966  * @hw: pointer to hardware structure
967  * @speed: new link speed
968  * @autoneg_wait_to_complete: true if waiting is needed to complete
969  *
970  * Restarts link on PHY and MAC based on settings passed in.
971  **/
ixgbe_setup_copper_link_82599(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait_to_complete)972 STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
973 					 ixgbe_link_speed speed,
974 					 bool autoneg_wait_to_complete)
975 {
976 	s32 status;
977 
978 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
979 
980 	/* Setup the PHY according to input speed */
981 	status = hw->phy.ops.setup_link_speed(hw, speed,
982 					      autoneg_wait_to_complete);
983 	/* Set up MAC */
984 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
985 
986 	return status;
987 }
988 
989 /**
990  * ixgbe_reset_hw_82599 - Perform hardware reset
991  * @hw: pointer to hardware structure
992  *
993  * Resets the hardware by resetting the transmit and receive units, masks
994  * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
995  * reset.
996  **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = false;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	/* Only an unsupported SFP module is fatal at this point; other
	 * init statuses may be superseded by setup_sfp() below.
	 */
	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		/* run SFP setup only once per (re)identification */
		hw->phy.sfp_setup_needed = false;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear meaning reset is complete */
	/* up to 10 iterations at 1 us each */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = true;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg with out driver support we need to
		 * leave LMS in the state it was before we MAC reset.
		 * Likewise if we support WoL we don't want change the
		 * LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							false);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}
1170 
1171 /**
1172  * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
1173  * @hw: pointer to hardware structure
1174  * @fdircmd: current value of FDIRCMD register
1175  */
ixgbe_fdir_check_cmd_complete(struct ixgbe_hw * hw,u32 * fdircmd)1176 STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
1177 {
1178 	int i;
1179 
1180 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1181 		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1182 		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1183 			return IXGBE_SUCCESS;
1184 		usec_delay(10);
1185 	}
1186 
1187 	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
1188 }
1189 
1190 /**
1191  * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1192  * @hw: pointer to hardware structure
1193  **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	/* clear INIT_DONE so the rewrite below re-arms the init-done
	 * indication that is polled for at the end of this function
	 */
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}
1262 
1263 /**
1264  * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1265  * @hw: pointer to hardware structure
1266  * @fdirctrl: value to write to flow director control register
1267  **/
ixgbe_fdir_enable_82599(struct ixgbe_hw * hw,u32 fdirctrl)1268 STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1269 {
1270 	int i;
1271 
1272 	DEBUGFUNC("ixgbe_fdir_enable_82599");
1273 
1274 	/* Prime the keys for hashing */
1275 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1276 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1277 
1278 	/*
1279 	 * Poll init-done after we write the register.  Estimated times:
1280 	 *      10G: PBALLOC = 11b, timing is 60us
1281 	 *       1G: PBALLOC = 11b, timing is 600us
1282 	 *     100M: PBALLOC = 11b, timing is 6ms
1283 	 *
1284 	 *     Multiple these timings by 4 if under full Rx load
1285 	 *
1286 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1287 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1288 	 * this might not finish in our poll time, but we can live with that
1289 	 * for now.
1290 	 */
1291 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1292 	IXGBE_WRITE_FLUSH(hw);
1293 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1294 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1295 				   IXGBE_FDIRCTRL_INIT_DONE)
1296 			break;
1297 		msec_delay(1);
1298 	}
1299 
1300 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1301 		DEBUGOUT("Flow Director poll time exceeded!\n");
1302 }
1303 
1304 /**
1305  * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1306  * @hw: pointer to hardware structure
1307  * @fdirctrl: value to write to flow director control register, initially
1308  *	     contains just the value of the Rx packet buffer allocation
1309  **/
ixgbe_init_fdir_signature_82599(struct ixgbe_hw * hw,u32 fdirctrl)1310 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1311 {
1312 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1313 
1314 	/*
1315 	 * Continue setup of fdirctrl register bits:
1316 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1317 	 *  Set the maximum length per hash bucket to 0xA filters
1318 	 *  Send interrupt when 64 filters are left
1319 	 */
1320 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1321 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1322 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1323 
1324 	/* write hashes and fdirctrl register, poll for completion */
1325 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1326 
1327 	return IXGBE_SUCCESS;
1328 }
1329 
1330 /**
1331  * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1332  * @hw: pointer to hardware structure
1333  * @fdirctrl: value to write to flow director control register, initially
1334  *	     contains just the value of the Rx packet buffer allocation
1335  * @cloud_mode: true - cloud mode, false - other mode
1336  **/
ixgbe_init_fdir_perfect_82599(struct ixgbe_hw * hw,u32 fdirctrl,bool cloud_mode)1337 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
1338 			bool cloud_mode)
1339 {
1340 	UNREFERENCED_1PARAMETER(cloud_mode);
1341 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1342 
1343 	/*
1344 	 * Continue setup of fdirctrl register bits:
1345 	 *  Turn perfect match filtering on
1346 	 *  Report hash in RSS field of Rx wb descriptor
1347 	 *  Initialize the drop queue to queue 127
1348 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1349 	 *  Set the maximum length per hash bucket to 0xA filters
1350 	 *  Send interrupt when 64 (0x4 * 16) filters are left
1351 	 */
1352 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1353 		    IXGBE_FDIRCTRL_REPORT_STATUS |
1354 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1355 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1356 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1357 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1358 
1359 	if (cloud_mode)
1360 		fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
1361 					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
1362 
1363 	/* write hashes and fdirctrl register, poll for completion */
1364 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1365 
1366 	return IXGBE_SUCCESS;
1367 }
1368 
1369 /**
1370  * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
1371  * @hw: pointer to hardware structure
1372  * @dropqueue: Rx queue index used for the dropped packets
1373  **/
void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
{
	u32 ctrl;

	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");

	/* Clear init done bit and drop queue field */
	ctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	ctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);

	/* Set drop queue */
	ctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		ctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
		break;
	default:
		break;
	}

	/* Toggle FDIRCMD.CLEARHT (set then clear) before re-enabling */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, ctrl);
}
1402 
/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15.
 *
 * The macro expects u32 locals named common_hash, bucket_hash, sig_hash,
 * lo_hash_dword and hi_hash_dword to be in scope at the expansion site;
 * bit n of the keys drives the lo dword, bit n+16 the hi dword.
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
1426 
/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 *
 * Returns the combined signature (upper 16 bits) and bucket (lower 16
 * bits) hash.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	/* these local names are referenced directly by the
	 * IXGBE_COMPUTE_SIG_HASH_ITERATION macro expansions below
	 */
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}
1493 
1494 /**
1495  * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1496  * @hw: pointer to hardware structure
1497  * @input: unique input dword
1498  * @common: compressed common input dword
1499  * @queue: queue index to direct traffic to
1500  *
1501  * Note that the tunnel bit in input must not be set when the hardware
1502  * tunneling support does not exist.
1503  **/
ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw * hw,union ixgbe_atr_hash_dword input,union ixgbe_atr_hash_dword common,u8 queue)1504 void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1505 					   union ixgbe_atr_hash_dword input,
1506 					   union ixgbe_atr_hash_dword common,
1507 					   u8 queue)
1508 {
1509 	u64 fdirhashcmd;
1510 	u8 flow_type;
1511 	bool tunnel;
1512 	u32 fdircmd;
1513 
1514 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1515 
1516 	/*
1517 	 * Get the flow_type in order to program FDIRCMD properly
1518 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1519 	 * fifth is FDIRCMD.TUNNEL_FILTER
1520 	 */
1521 	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
1522 	flow_type = input.formatted.flow_type &
1523 		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
1524 	switch (flow_type) {
1525 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1526 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1527 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1528 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
1529 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
1530 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1531 		break;
1532 	default:
1533 		DEBUGOUT(" Error on flow type input\n");
1534 		return;
1535 	}
1536 
1537 	/* configure FDIRCMD register */
1538 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1539 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1540 	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1541 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1542 	if (tunnel)
1543 		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1544 
1545 	/*
1546 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1547 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1548 	 */
1549 	fdirhashcmd = (u64)fdircmd << 32;
1550 	fdirhashcmd |= (u64)ixgbe_atr_compute_sig_hash_82599(input, common);
1551 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1552 
1553 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1554 
1555 	return;
1556 }
1557 
/* Fold key bit n (lo dword) and bit n+16 (hi dword) into bucket_hash.
 * Expects u32 locals named bucket_hash, lo_hash_dword and hi_hash_dword
 * to be in scope at the expansion site.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
1566 
/**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes.  First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream.  This way it will be available for
 * future use without needing to recompute the hash.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{

	/* these local names are referenced directly by the
	 * IXGBE_COMPUTE_BKT_HASH_ITERATION macro expansions below
	 */
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	u32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to input data */
	/* NOTE(review): assumes dwords 0-13 are the hashed portion of the
	 * stream and the trailing dword(s) hold bkt_hash - confirm against
	 * the union ixgbe_atr_input layout
	 */
	for (i = 0; i < 14; i++)
		input->dword_stream[i]  &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 13; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = IXGBE_NTOHL(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}
1625 
1626 /**
1627  * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
1628  * @input_mask: mask to be bit swapped
1629  *
1630  * The source and destination port masks for flow director are bit swapped
1631  * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
1632  * generate a correctly swapped value we need to bit swap the mask and that
1633  * is what is accomplished by this function.
1634  **/
ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input * input_mask)1635 STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1636 {
1637 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1638 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1639 	mask |= (u32)IXGBE_NTOHS(input_mask->formatted.src_port);
1640 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1641 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1642 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1643 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1644 }
1645 
/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* byte-swap then NTOHL: the double swap cancels on big-endian hosts */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

/* 16-bit counterpart of the double-swap trick above */
#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1662 
ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw * hw,union ixgbe_atr_input * input_mask,bool cloud_mode)1663 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1664 				    union ixgbe_atr_input *input_mask, bool cloud_mode)
1665 {
1666 	/* mask IPv6 since it is currently not supported */
1667 	u32 fdirm = IXGBE_FDIRM_DIPv6;
1668 	u32 fdirtcpm;
1669 	u32 fdirip6m;
1670 	UNREFERENCED_1PARAMETER(cloud_mode);
1671 	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1672 
1673 	/*
1674 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
1675 	 * are zero, then assume a full mask for that field.  Also assume that
1676 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
1677 	 * cannot be masked out in this implementation.
1678 	 *
1679 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
1680 	 * point in time.
1681 	 */
1682 
1683 	/* verify bucket hash is cleared on hash generation */
1684 	if (input_mask->formatted.bkt_hash)
1685 		DEBUGOUT(" bucket hash should always be 0 in mask\n");
1686 
1687 	/* Program FDIRM and verify partial masks */
1688 	switch (input_mask->formatted.vm_pool & 0x7F) {
1689 	case 0x0:
1690 		fdirm |= IXGBE_FDIRM_POOL;
1691 	case 0x7F:
1692 		break;
1693 	default:
1694 		DEBUGOUT(" Error on vm pool mask\n");
1695 		return IXGBE_ERR_CONFIG;
1696 	}
1697 
1698 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1699 	case 0x0:
1700 		fdirm |= IXGBE_FDIRM_L4P;
1701 		if (input_mask->formatted.dst_port ||
1702 		    input_mask->formatted.src_port) {
1703 			DEBUGOUT(" Error on src/dst port mask\n");
1704 			return IXGBE_ERR_CONFIG;
1705 		}
1706 	case IXGBE_ATR_L4TYPE_MASK:
1707 		break;
1708 	default:
1709 		DEBUGOUT(" Error on flow type mask\n");
1710 		return IXGBE_ERR_CONFIG;
1711 	}
1712 
1713 	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1714 	case 0x0000:
1715 		/* mask VLAN ID */
1716 		fdirm |= IXGBE_FDIRM_VLANID;
1717 		/* fall through */
1718 	case 0x0FFF:
1719 		/* mask VLAN priority */
1720 		fdirm |= IXGBE_FDIRM_VLANP;
1721 		break;
1722 	case 0xE000:
1723 		/* mask VLAN ID only */
1724 		fdirm |= IXGBE_FDIRM_VLANID;
1725 		/* fall through */
1726 	case 0xEFFF:
1727 		/* no VLAN fields masked */
1728 		break;
1729 	default:
1730 		DEBUGOUT(" Error on VLAN mask\n");
1731 		return IXGBE_ERR_CONFIG;
1732 	}
1733 
1734 	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1735 	case 0x0000:
1736 		/* Mask Flex Bytes */
1737 		fdirm |= IXGBE_FDIRM_FLEX;
1738 		/* fall through */
1739 	case 0xFFFF:
1740 		break;
1741 	default:
1742 		DEBUGOUT(" Error on flexible byte mask\n");
1743 		return IXGBE_ERR_CONFIG;
1744 	}
1745 
1746 	if (cloud_mode) {
1747 		fdirm |= IXGBE_FDIRM_L3P;
1748 		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
1749 		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
1750 
1751 		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
1752 		case 0x00:
1753 			/* Mask inner MAC, fall through */
1754 			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
1755 		case 0xFF:
1756 			break;
1757 		default:
1758 			DEBUGOUT(" Error on inner_mac byte mask\n");
1759 			return IXGBE_ERR_CONFIG;
1760 		}
1761 
1762 		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
1763 		case 0x0:
1764 			/* Mask vxlan id */
1765 			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
1766 			break;
1767 		case 0x00FFFFFF:
1768 			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
1769 			break;
1770 		case 0xFFFFFFFF:
1771 			break;
1772 		default:
1773 			DEBUGOUT(" Error on TNI/VNI byte mask\n");
1774 			return IXGBE_ERR_CONFIG;
1775 		}
1776 
1777 		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
1778 		case 0x0:
1779 			/* Mask turnnel type, fall through */
1780 			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
1781 		case 0xFFFF:
1782 			break;
1783 		default:
1784 			DEBUGOUT(" Error on tunnel type byte mask\n");
1785 			return IXGBE_ERR_CONFIG;
1786 		}
1787 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
1788 
1789 		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
1790 		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
1791 		 * L3/L3 packets to tunnel.
1792 		 */
1793 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
1794 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
1795 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
1796 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
1797 		switch (hw->mac.type) {
1798 		case ixgbe_mac_X550:
1799 		case ixgbe_mac_X550EM_x:
1800 		case ixgbe_mac_X550EM_a:
1801 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
1802 			break;
1803 		default:
1804 			break;
1805 		}
1806 	}
1807 
1808 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1809 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1810 
1811 	if (!cloud_mode) {
1812 		/* store the TCP/UDP port masks, bit reversed from port
1813 		 * layout */
1814 		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1815 
1816 		/* write both the same so that UDP and TCP use the same mask */
1817 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1818 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1819 		/* also use it for SCTP */
1820 		switch (hw->mac.type) {
1821 		case ixgbe_mac_X550:
1822 		case ixgbe_mac_X550EM_x:
1823 		case ixgbe_mac_X550EM_a:
1824 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
1825 			break;
1826 		default:
1827 			break;
1828 		}
1829 
		/* store source and destination IP masks (big-endian) */
1831 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1832 				     ~input_mask->formatted.src_ip[0]);
1833 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1834 				     ~input_mask->formatted.dst_ip[0]);
1835 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xFFFFFFFF);
1836 	}
1837 	return IXGBE_SUCCESS;
1838 }
1839 
/**
 * ixgbe_fdir_write_perfect_filter_82599 - Write a perfect filter into hardware
 * @hw: pointer to hardware structure
 * @input: input bitstream to program into the filter registers
 * @soft_id: software index the driver uses to identify this filter
 * @queue: Rx queue index to direct matching packets to
 * @cloud_mode: true when programming a tunnel (cloud) filter
 *
 * Programs the flow director filter registers (addresses, ports, VLAN,
 * flex bytes and hash), then issues the ADD_FLOW command and polls until
 * hardware reports completion.
 *
 * Returns IXGBE_SUCCESS, or the error from the completion poll if the
 * FDIRCMD write does not complete.
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue, bool cloud_mode)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	u32 addr_low, addr_high;
	u32 cloud_type = 0;
	s32 err;
	UNREFERENCED_1PARAMETER(cloud_mode);

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
	if (!cloud_mode) {
		/* currently IPv6 is not supported, must be programmed with 0 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
				     input->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
				     input->formatted.src_ip[1]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.src_ip[2]);

		/* record the source address (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
			input->formatted.src_ip[0]);

		/* record the first 32 bits of the destination address
		 * (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
			input->formatted.dst_ip[0]);

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= (u32)IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	}

	/* record VLAN (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= (u32)IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	if (cloud_mode) {
		/* a non-zero tunnel type is flagged in the top bit of the
		 * word that also carries the upper two inner MAC bytes */
		if (input->formatted.tunnel_type != 0)
			cloud_type = 0x80000000;

		/* pack the 6-byte inner MAC into two 32-bit words */
		addr_low = ((u32)input->formatted.inner_mac[0] |
				((u32)input->formatted.inner_mac[1] << 8) |
				((u32)input->formatted.inner_mac[2] << 16) |
				((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
				((u32)input->formatted.inner_mac[5] << 8));
		cloud_type |= addr_high;
		/* cloud filters reuse the SIPv6 registers for inner MAC
		 * and TNI/VNI instead of an IPv6 source address */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
	}

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	return IXGBE_SUCCESS;
}
1929 
/**
 * ixgbe_fdir_erase_perfect_filter_82599 - Remove a perfect filter from hardware
 * @hw: pointer to hardware structure
 * @input: input bitstream; only the bucket hash is used to locate the filter
 * @soft_id: software index of the filter to remove
 *
 * Queries hardware for the filter identified by @soft_id and the bucket
 * hash, and issues the REMOVE_FLOW command only when hardware reports the
 * filter as valid.
 *
 * Returns IXGBE_SUCCESS, or the error from the completion poll if the
 * query command does not complete.
 **/
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	s32 err;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return IXGBE_SUCCESS;
}
1965 
/**
 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @input_mask: mask for the input bitstream; flow_type is overwritten here
 * @soft_id: software index for the filters
 * @queue: queue index to direct traffic to
 * @cloud_mode: true when adding a tunnel (cloud) filter; forwarded to the
 *	mask-programming and filter-write helpers
 *
 * Validates the flow type, programs the input mask into hardware, computes
 * the perfect hash and writes the filter to filter memory.
 *
 * Note that the caller to this function must lock before calling, since the
 * hardware writes must be protected from one another.
 **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					union ixgbe_atr_input *input_mask,
					u16 soft_id, u8 queue, bool cloud_mode)
{
	s32 err = IXGBE_ERR_CONFIG;
	UNREFERENCED_1PARAMETER(cloud_mode);

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
		/* raw IPv4 filters must not carry L4 ports.
		 * NOTE(review): input_mask->formatted.flow_type is modified
		 * before the port validation below, so an error return still
		 * leaves the caller's mask changed -- confirm callers don't
		 * reuse the mask after a failure. */
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
		/* SCTP filters must not carry L4 ports either */
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
						  IXGBE_ATR_L4TYPE_MASK;
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return err;
	}

	/* program input mask into the HW */
	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
	if (err)
		return err;

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);

	/* program filters to filter memory */
	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
						     soft_id, queue, cloud_mode);
}
2032 
/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 *
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32  core_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82599");

	/* Latch the register address in CORECTL bits 15:8.
	 * NOTE(review): the read path also asserts IXGBE_CORECTL_WRITE_CMD;
	 * presumably this arms the address for the read-back below --
	 * confirm against the 82599 datasheet. */
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	/* give the analog block time to respond before reading back */
	usec_delay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	/* data comes back in the low byte of CORECTL */
	*val = (u8)core_ctl;

	return IXGBE_SUCCESS;
}
2056 
2057 /**
2058  * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2059  * @hw: pointer to hardware structure
2060  * @reg: atlas register to write
2061  * @val: value to write
2062  *
2063  * Performs write operation to Omer analog register specified.
2064  **/
ixgbe_write_analog_reg8_82599(struct ixgbe_hw * hw,u32 reg,u8 val)2065 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2066 {
2067 	u32  core_ctl;
2068 
2069 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2070 
2071 	core_ctl = (reg << 8) | val;
2072 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2073 	IXGBE_WRITE_FLUSH(hw);
2074 	usec_delay(10);
2075 
2076 	return IXGBE_SUCCESS;
2077 }
2078 
2079 /**
2080  * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2081  * @hw: pointer to hardware structure
2082  *
2083  * Starts the hardware using the generic start_hw function
2084  * and the generation start_hw function.
2085  * Then performs revision-specific operations, if any.
2086  **/
ixgbe_start_hw_82599(struct ixgbe_hw * hw)2087 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2088 {
2089 	s32 ret_val = IXGBE_SUCCESS;
2090 
2091 	DEBUGFUNC("ixgbe_start_hw_82599");
2092 
2093 	ret_val = ixgbe_start_hw_generic(hw);
2094 	if (ret_val != IXGBE_SUCCESS)
2095 		goto out;
2096 
2097 	ixgbe_start_hw_gen2(hw);
2098 
2099 	/* We need to run link autotry after the driver loads */
2100 	hw->mac.autotry_restart = true;
2101 
2102 	if (ret_val == IXGBE_SUCCESS)
2103 		ret_val = ixgbe_verify_fw_version_82599(hw);
2104 out:
2105 	return ret_val;
2106 }
2107 
2108 /**
2109  * ixgbe_identify_phy_82599 - Get physical layer module
2110  * @hw: pointer to hardware structure
2111  *
2112  * Determines the physical layer module found on the current adapter.
2113  * If PHY already detected, maintains current PHY type in hw struct,
2114  * otherwise executes the PHY detection routine.
2115  **/
ixgbe_identify_phy_82599(struct ixgbe_hw * hw)2116 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2117 {
2118 	s32 status;
2119 
2120 	DEBUGFUNC("ixgbe_identify_phy_82599");
2121 
2122 	/* Detect PHY if not unknown - returns success if already detected. */
2123 	status = ixgbe_identify_phy_generic(hw);
2124 	if (status != IXGBE_SUCCESS) {
2125 		/* 82599 10GBASE-T requires an external PHY */
2126 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2127 			return status;
2128 		else
2129 			status = ixgbe_identify_module_generic(hw);
2130 	}
2131 
2132 	/* Set PHY type none if no PHY detected */
2133 	if (hw->phy.type == ixgbe_phy_unknown) {
2134 		hw->phy.type = ixgbe_phy_none;
2135 		return IXGBE_SUCCESS;
2136 	}
2137 
2138 	/* Return error if SFP module has been detected but is not supported */
2139 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2140 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
2141 
2142 	return status;
2143 }
2144 
2145 /**
2146  * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2147  * @hw: pointer to hardware structure
2148  *
2149  * Determines physical layer capabilities of the current configuration.
2150  **/
ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw * hw)2151 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2152 {
2153 	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2154 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2155 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2156 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2157 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2158 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2159 	u16 ext_ability = 0;
2160 
2161 	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
2162 
2163 	hw->phy.ops.identify(hw);
2164 
2165 	switch (hw->phy.type) {
2166 	case ixgbe_phy_tn:
2167 	case ixgbe_phy_cu_unknown:
2168 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2169 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2170 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2171 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2172 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2173 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2174 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2175 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2176 		goto out;
2177 	default:
2178 		break;
2179 	}
2180 
2181 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2182 	case IXGBE_AUTOC_LMS_1G_AN:
2183 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2184 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2185 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2186 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2187 			goto out;
2188 		} else
2189 			/* SFI mode so read SFP module */
2190 			goto sfp_check;
2191 		break;
2192 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2193 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2194 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2195 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2196 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2197 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2198 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2199 		goto out;
2200 		break;
2201 	case IXGBE_AUTOC_LMS_10G_SERIAL:
2202 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2203 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2204 			goto out;
2205 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2206 			goto sfp_check;
2207 		break;
2208 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
2209 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2210 		if (autoc & IXGBE_AUTOC_KX_SUPP)
2211 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2212 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
2213 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2214 		if (autoc & IXGBE_AUTOC_KR_SUPP)
2215 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2216 		goto out;
2217 		break;
2218 	default:
2219 		goto out;
2220 		break;
2221 	}
2222 
2223 sfp_check:
2224 	/* SFP check must be done last since DA modules are sometimes used to
2225 	 * test KR mode -  we need to id KR mode correctly before SFP module.
2226 	 * Call identify_sfp because the pluggable module may have changed */
2227 	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2228 out:
2229 	return physical_layer;
2230 }
2231 
2232 /**
2233  * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2234  * @hw: pointer to hardware structure
2235  * @regval: register value to write to RXCTRL
2236  *
2237  * Enables the Rx DMA unit for 82599
2238  **/
ixgbe_enable_rx_dma_82599(struct ixgbe_hw * hw,u32 regval)2239 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2240 {
2241 
2242 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2243 
2244 	/*
2245 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2246 	 * If traffic is incoming before we enable the Rx unit, it could hang
2247 	 * the Rx DMA unit.  Therefore, make sure the security engine is
2248 	 * completely disabled prior to enabling the Rx unit.
2249 	 */
2250 
2251 	hw->mac.ops.disable_sec_rx_path(hw);
2252 
2253 	if (regval & IXGBE_RXCTRL_RXEN)
2254 		ixgbe_enable_rx(hw);
2255 	else
2256 		ixgbe_disable_rx(hw);
2257 
2258 	hw->mac.ops.enable_sec_rx_path(hw);
2259 
2260 	return IXGBE_SUCCESS;
2261 }
2262 
2263 /**
2264  * ixgbe_verify_fw_version_82599 - verify FW version for 82599
2265  * @hw: pointer to hardware structure
2266  *
2267  * Verifies that installed the firmware version is 0.6 or higher
2268  * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2269  *
2270  * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2271  * if the FW version is not supported.
2272  **/
ixgbe_verify_fw_version_82599(struct ixgbe_hw * hw)2273 STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2274 {
2275 	s32 status = IXGBE_ERR_EEPROM_VERSION;
2276 	u16 fw_offset, fw_ptp_cfg_offset;
2277 	u16 fw_version;
2278 
2279 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
2280 
2281 	/* firmware check is only necessary for SFI devices */
2282 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
2283 		status = IXGBE_SUCCESS;
2284 		goto fw_version_out;
2285 	}
2286 
2287 	/* get the offset to the Firmware Module block */
2288 	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2289 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2290 			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
2291 		return IXGBE_ERR_EEPROM_VERSION;
2292 	}
2293 
2294 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2295 		goto fw_version_out;
2296 
2297 	/* get the offset to the Pass Through Patch Configuration block */
2298 	if (hw->eeprom.ops.read(hw, (fw_offset +
2299 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2300 				 &fw_ptp_cfg_offset)) {
2301 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2302 			      "eeprom read at offset %d failed",
2303 			      fw_offset +
2304 			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2305 		return IXGBE_ERR_EEPROM_VERSION;
2306 	}
2307 
2308 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2309 		goto fw_version_out;
2310 
2311 	/* get the firmware version */
2312 	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2313 			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2314 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2315 			      "eeprom read at offset %d failed",
2316 			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2317 		return IXGBE_ERR_EEPROM_VERSION;
2318 	}
2319 
2320 	if (fw_version > 0x5)
2321 		status = IXGBE_SUCCESS;
2322 
2323 fw_version_out:
2324 	return status;
2325 }
2326 
2327 /**
2328  * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2329  * @hw: pointer to hardware structure
2330  *
2331  * Returns true if the LESM FW module is present and enabled. Otherwise
2332  * returns false. Smart Speed must be disabled if LESM FW module is enabled.
2333  **/
ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw * hw)2334 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2335 {
2336 	bool lesm_enabled = false;
2337 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2338 	s32 status;
2339 
2340 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2341 
2342 	/* get the offset to the Firmware Module block */
2343 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2344 
2345 	if ((status != IXGBE_SUCCESS) ||
2346 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
2347 		goto out;
2348 
2349 	/* get the offset to the LESM Parameters block */
2350 	status = hw->eeprom.ops.read(hw, (fw_offset +
2351 				     IXGBE_FW_LESM_PARAMETERS_PTR),
2352 				     &fw_lesm_param_offset);
2353 
2354 	if ((status != IXGBE_SUCCESS) ||
2355 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2356 		goto out;
2357 
2358 	/* get the LESM state word */
2359 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2360 				     IXGBE_FW_LESM_STATE_1),
2361 				     &fw_lesm_state);
2362 
2363 	if ((status == IXGBE_SUCCESS) &&
2364 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2365 		lesm_enabled = true;
2366 
2367 out:
2368 	return lesm_enabled;
2369 }
2370 
2371 /**
2372  * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2373  * fastest available method
2374  *
2375  * @hw: pointer to hardware structure
2376  * @offset: offset of  word in EEPROM to read
2377  * @words: number of words
2378  * @data: word(s) read from the EEPROM
2379  *
2380  * Retrieves 16 bit word(s) read from EEPROM
2381  **/
ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)2382 STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2383 					  u16 words, u16 *data)
2384 {
2385 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2386 	s32 ret_val = IXGBE_ERR_CONFIG;
2387 
2388 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2389 
2390 	/*
2391 	 * If EEPROM is detected and can be addressed using 14 bits,
2392 	 * use EERD otherwise use bit bang
2393 	 */
2394 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2395 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2396 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2397 							 data);
2398 	else
2399 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2400 								    words,
2401 								    data);
2402 
2403 	return ret_val;
2404 }
2405 
2406 /**
2407  * ixgbe_read_eeprom_82599 - Read EEPROM word using
2408  * fastest available method
2409  *
2410  * @hw: pointer to hardware structure
2411  * @offset: offset of  word in the EEPROM to read
2412  * @data: word read from the EEPROM
2413  *
2414  * Reads a 16 bit word from the EEPROM
2415  **/
ixgbe_read_eeprom_82599(struct ixgbe_hw * hw,u16 offset,u16 * data)2416 STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2417 				   u16 offset, u16 *data)
2418 {
2419 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2420 	s32 ret_val = IXGBE_ERR_CONFIG;
2421 
2422 	DEBUGFUNC("ixgbe_read_eeprom_82599");
2423 
2424 	/*
2425 	 * If EEPROM is detected and can be addressed using 14 bits,
2426 	 * use EERD otherwise use bit bang
2427 	 */
2428 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2429 	    (offset <= IXGBE_EERD_MAX_ADDR))
2430 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2431 	else
2432 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2433 
2434 	return ret_val;
2435 }
2436 
/**
 * ixgbe_reset_pipeline_82599 - perform pipeline reset
 *
 * @hw: pointer to hardware structure
 *
 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
 * full pipeline reset.  This function assumes the SW/FW lock is held.
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_RESET_FAILED when auto-negotiation
 * never leaves state 0 within the polling window.  The original AUTOC
 * value is restored on both paths.
 **/
s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 anlp1_reg = 0;
	u32 i, autoc_reg, autoc2_reg;

	/* Enable link if disabled in NVM */
	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
		IXGBE_WRITE_FLUSH(hw);
	}

	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
	/* Wait for AN to leave state 0 (polls up to 10 x 4 ms) */
	for (i = 0; i < 10; i++) {
		msec_delay(4);
		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
			break;
	}

	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
		DEBUGOUT("auto negotiation not completed\n");
		ret_val = IXGBE_ERR_RESET_FAILED;
		goto reset_pipeline_out;
	}

	ret_val = IXGBE_SUCCESS;

reset_pipeline_out:
	/* Write AUTOC register with original LMS field and Restart_AN */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
	IXGBE_WRITE_FLUSH(hw);

	return ret_val;
}
2487 
/**
 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: address to read from
 * @data: value read
 *
 * Performs byte read operation to SFP module's EEPROM over I2C interface at
 * a specified device address.  When the QSFP I2C bus is shared, bus
 * ownership is requested by asserting SDP0 and confirmed when SDP1 reads
 * back set; ownership is released again before returning.
 *
 * Returns IXGBE_ERR_I2C on bus-acquisition timeout, otherwise the status
 * of the generic I2C byte read.
 **/
STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				u8 dev_addr, u8 *data)
{
	u32 esdp;
	s32 status;
	s32 timeout = 200;	/* 200 x 5 ms = 1 s worst-case wait */

	DEBUGFUNC("ixgbe_read_i2c_byte_82599");

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Acquire I2C bus ownership: request it via SDP0 */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		/* wait for the grant to show up on SDP1 */
		while (timeout) {
			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
			if (esdp & IXGBE_ESDP_SDP1)
				break;

			msec_delay(5);
			timeout--;
		}

		if (!timeout) {
			DEBUGOUT("Driver can't access resource,"
				 " acquiring I2C bus timeout.\n");
			status = IXGBE_ERR_I2C;
			/* still deassert SDP0 to withdraw the request */
			goto release_i2c_access;
		}
	}

	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);

release_i2c_access:

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Release I2C bus ownership by deasserting SDP0. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp &= ~IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
	}

	return status;
}
2545 
/**
 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address to write to
 * @data: value to write
 *
 * Performs byte write operation to SFP module's EEPROM over I2C interface at
 * a specified device address.  When the QSFP I2C bus is shared, bus
 * ownership is requested by asserting SDP0 and confirmed when SDP1 reads
 * back set; ownership is released again before returning.
 *
 * Returns IXGBE_ERR_I2C on bus-acquisition timeout, otherwise the status
 * of the generic I2C byte write.
 **/
STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				 u8 dev_addr, u8 data)
{
	u32 esdp;
	s32 status;
	s32 timeout = 200;	/* 200 x 5 ms = 1 s worst-case wait */

	DEBUGFUNC("ixgbe_write_i2c_byte_82599");

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Acquire I2C bus ownership: request it via SDP0 */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		/* wait for the grant to show up on SDP1 */
		while (timeout) {
			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
			if (esdp & IXGBE_ESDP_SDP1)
				break;

			msec_delay(5);
			timeout--;
		}

		if (!timeout) {
			DEBUGOUT("Driver can't access resource,"
				 " acquiring I2C bus timeout.\n");
			status = IXGBE_ERR_I2C;
			/* still deassert SDP0 to withdraw the request */
			goto release_i2c_access;
		}
	}

	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);

release_i2c_access:

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Release I2C bus ownership by deasserting SDP0. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp &= ~IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
	}

	return status;
}
2603