1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4 
5 #include "ixgbe_common.h"
6 #include "ixgbe_phy.h"
7 #include "ixgbe_dcb.h"
8 #include "ixgbe_dcb_82599.h"
9 #include "ixgbe_api.h"
10 
11 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
12 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
13 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
14 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
15 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
16 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
17 					u16 count);
18 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
19 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
20 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
21 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
22 
23 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
24 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
25 					 u16 *san_mac_offset);
26 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
27 					     u16 words, u16 *data);
28 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
29 					      u16 words, u16 *data);
30 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
31 						 u16 offset);
32 
33 /**
34  * ixgbe_init_ops_generic - Inits function ptrs
35  * @hw: pointer to the hardware structure
36  *
37  * Initialize the function pointers.
38  **/
ixgbe_init_ops_generic(struct ixgbe_hw * hw)39 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
40 {
41 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
42 	struct ixgbe_mac_info *mac = &hw->mac;
43 	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
44 
45 	DEBUGFUNC("ixgbe_init_ops_generic");
46 
47 	/* EEPROM */
48 	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
49 	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
50 	if (eec & IXGBE_EEC_PRES) {
51 		eeprom->ops.read = ixgbe_read_eerd_generic;
52 		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
53 	} else {
54 		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
55 		eeprom->ops.read_buffer =
56 				 ixgbe_read_eeprom_buffer_bit_bang_generic;
57 	}
58 	eeprom->ops.write = ixgbe_write_eeprom_generic;
59 	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
60 	eeprom->ops.validate_checksum =
61 				      ixgbe_validate_eeprom_checksum_generic;
62 	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
63 	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
64 
65 	/* MAC */
66 	mac->ops.init_hw = ixgbe_init_hw_generic;
67 	mac->ops.reset_hw = NULL;
68 	mac->ops.start_hw = ixgbe_start_hw_generic;
69 	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
70 	mac->ops.get_media_type = NULL;
71 	mac->ops.get_supported_physical_layer = NULL;
72 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
73 	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
74 	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
75 	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
76 	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
77 	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
78 	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
79 	mac->ops.prot_autoc_read = prot_autoc_read_generic;
80 	mac->ops.prot_autoc_write = prot_autoc_write_generic;
81 
82 	/* LEDs */
83 	mac->ops.led_on = ixgbe_led_on_generic;
84 	mac->ops.led_off = ixgbe_led_off_generic;
85 	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
86 	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
87 	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
88 
89 	/* RAR, Multicast, VLAN */
90 	mac->ops.set_rar = ixgbe_set_rar_generic;
91 	mac->ops.clear_rar = ixgbe_clear_rar_generic;
92 	mac->ops.insert_mac_addr = NULL;
93 	mac->ops.set_vmdq = NULL;
94 	mac->ops.clear_vmdq = NULL;
95 	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
96 	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
97 	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
98 	mac->ops.enable_mc = ixgbe_enable_mc_generic;
99 	mac->ops.disable_mc = ixgbe_disable_mc_generic;
100 	mac->ops.clear_vfta = NULL;
101 	mac->ops.set_vfta = NULL;
102 	mac->ops.set_vlvf = NULL;
103 	mac->ops.init_uta_tables = NULL;
104 	mac->ops.enable_rx = ixgbe_enable_rx_generic;
105 	mac->ops.disable_rx = ixgbe_disable_rx_generic;
106 
107 	/* Flow Control */
108 	mac->ops.fc_enable = ixgbe_fc_enable_generic;
109 	mac->ops.setup_fc = ixgbe_setup_fc_generic;
110 	mac->ops.fc_autoneg = ixgbe_fc_autoneg;
111 
112 	/* Link */
113 	mac->ops.get_link_capabilities = NULL;
114 	mac->ops.setup_link = NULL;
115 	mac->ops.check_link = NULL;
116 	mac->ops.dmac_config = NULL;
117 	mac->ops.dmac_update_tcs = NULL;
118 	mac->ops.dmac_config_tcs = NULL;
119 
120 	return IXGBE_SUCCESS;
121 }
122 
123 /**
124  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
125  * of flow control
126  * @hw: pointer to hardware structure
127  *
128  * This function returns true if the device supports flow control
129  * autonegotiation, and false if it does not.
130  *
131  **/
ixgbe_device_supports_autoneg_fc(struct ixgbe_hw * hw)132 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
133 {
134 	bool supported = false;
135 	ixgbe_link_speed speed;
136 	bool link_up;
137 
138 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
139 
140 	switch (hw->phy.media_type) {
141 	case ixgbe_media_type_fiber_qsfp:
142 	case ixgbe_media_type_fiber:
143 		/* flow control autoneg black list */
144 		switch (hw->device_id) {
145 		case IXGBE_DEV_ID_X550EM_A_SFP:
146 		case IXGBE_DEV_ID_X550EM_A_SFP_N:
147 		case IXGBE_DEV_ID_X550EM_A_QSFP:
148 		case IXGBE_DEV_ID_X550EM_A_QSFP_N:
149 			supported = false;
150 			break;
151 		default:
152 			hw->mac.ops.check_link(hw, &speed, &link_up, false);
153 			/* if link is down, assume supported */
154 			if (link_up)
155 				supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
156 				true : false;
157 			else
158 				supported = true;
159 		}
160 
161 		break;
162 	case ixgbe_media_type_backplane:
163 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
164 			supported = false;
165 		else
166 			supported = true;
167 		break;
168 	case ixgbe_media_type_copper:
169 		/* only some copper devices support flow control autoneg */
170 		switch (hw->device_id) {
171 		case IXGBE_DEV_ID_82599_T3_LOM:
172 		case IXGBE_DEV_ID_X540T:
173 		case IXGBE_DEV_ID_X540T1:
174 		case IXGBE_DEV_ID_X550T:
175 		case IXGBE_DEV_ID_X550T1:
176 		case IXGBE_DEV_ID_X550EM_X_10G_T:
177 		case IXGBE_DEV_ID_X550EM_A_10G_T:
178 		case IXGBE_DEV_ID_X550EM_A_1G_T:
179 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
180 			supported = true;
181 			break;
182 		default:
183 			supported = false;
184 		}
185 	default:
186 		break;
187 	}
188 
189 	return supported;
190 }
191 
192 /**
193  * ixgbe_setup_fc_generic - Set up flow control
194  * @hw: pointer to hardware structure
195  *
196  * Called at init time to set up flow control.
197  **/
ixgbe_setup_fc_generic(struct ixgbe_hw * hw)198 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
199 {
200 	s32 ret_val = IXGBE_SUCCESS;
201 	u32 reg = 0, reg_bp = 0;
202 	u16 reg_cu = 0;
203 	bool locked = false;
204 
205 	DEBUGFUNC("ixgbe_setup_fc_generic");
206 
207 	/* Validate the requested mode */
208 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
209 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
210 			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
211 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
212 		goto out;
213 	}
214 
215 	/*
216 	 * 10gig parts do not have a word in the EEPROM to determine the
217 	 * default flow control setting, so we explicitly set it to full.
218 	 */
219 	if (hw->fc.requested_mode == ixgbe_fc_default)
220 		hw->fc.requested_mode = ixgbe_fc_full;
221 
222 	/*
223 	 * Set up the 1G and 10G flow control advertisement registers so the
224 	 * HW will be able to do fc autoneg once the cable is plugged in.  If
225 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
226 	 */
227 	switch (hw->phy.media_type) {
228 	case ixgbe_media_type_backplane:
229 		/* some MAC's need RMW protection on AUTOC */
230 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
231 		if (ret_val != IXGBE_SUCCESS)
232 			goto out;
233 
234 		/* fall through - only backplane uses autoc */
235 	case ixgbe_media_type_fiber_qsfp:
236 	case ixgbe_media_type_fiber:
237 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
238 
239 		break;
240 	case ixgbe_media_type_copper:
241 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
242 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
243 		break;
244 	default:
245 		break;
246 	}
247 
248 	/*
249 	 * The possible values of fc.requested_mode are:
250 	 * 0: Flow control is completely disabled
251 	 * 1: Rx flow control is enabled (we can receive pause frames,
252 	 *    but not send pause frames).
253 	 * 2: Tx flow control is enabled (we can send pause frames but
254 	 *    we do not support receiving pause frames).
255 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
256 	 * other: Invalid.
257 	 */
258 	switch (hw->fc.requested_mode) {
259 	case ixgbe_fc_none:
260 		/* Flow control completely disabled by software override. */
261 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
262 		if (hw->phy.media_type == ixgbe_media_type_backplane)
263 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
264 				    IXGBE_AUTOC_ASM_PAUSE);
265 		else if (hw->phy.media_type == ixgbe_media_type_copper)
266 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
267 		break;
268 	case ixgbe_fc_tx_pause:
269 		/*
270 		 * Tx Flow control is enabled, and Rx Flow control is
271 		 * disabled by software override.
272 		 */
273 		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
274 		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
275 		if (hw->phy.media_type == ixgbe_media_type_backplane) {
276 			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
277 			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
278 		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
279 			reg_cu |= IXGBE_TAF_ASM_PAUSE;
280 			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
281 		}
282 		break;
283 	case ixgbe_fc_rx_pause:
284 		/*
285 		 * Rx Flow control is enabled and Tx Flow control is
286 		 * disabled by software override. Since there really
287 		 * isn't a way to advertise that we are capable of RX
288 		 * Pause ONLY, we will advertise that we support both
289 		 * symmetric and asymmetric Rx PAUSE, as such we fall
290 		 * through to the fc_full statement.  Later, we will
291 		 * disable the adapter's ability to send PAUSE frames.
292 		 */
293 	case ixgbe_fc_full:
294 		/* Flow control (both Rx and Tx) is enabled by SW override. */
295 		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
296 		if (hw->phy.media_type == ixgbe_media_type_backplane)
297 			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
298 				  IXGBE_AUTOC_ASM_PAUSE;
299 		else if (hw->phy.media_type == ixgbe_media_type_copper)
300 			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
301 		break;
302 	default:
303 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
304 			     "Flow control param set incorrectly\n");
305 		ret_val = IXGBE_ERR_CONFIG;
306 		goto out;
307 		break;
308 	}
309 
310 	if (hw->mac.type < ixgbe_mac_X540) {
311 		/*
312 		 * Enable auto-negotiation between the MAC & PHY;
313 		 * the MAC will advertise clause 37 flow control.
314 		 */
315 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
316 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
317 
318 		/* Disable AN timeout */
319 		if (hw->fc.strict_ieee)
320 			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
321 
322 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
323 		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
324 	}
325 
326 	/*
327 	 * AUTOC restart handles negotiation of 1G and 10G on backplane
328 	 * and copper. There is no need to set the PCS1GCTL register.
329 	 *
330 	 */
331 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
332 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
333 		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
334 		if (ret_val)
335 			goto out;
336 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
337 		    (ixgbe_device_supports_autoneg_fc(hw))) {
338 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
339 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
340 	}
341 
342 	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
343 out:
344 	return ret_val;
345 }
346 
347 /**
348  * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
349  * @hw: pointer to hardware structure
350  *
351  * Starts the hardware by filling the bus info structure and media type, clears
352  * all on chip counters, initializes receive address registers, multicast
353  * table, VLAN filter table, calls routine to set up link and flow control
354  * settings, and leaves transmit and receive units disabled and uninitialized
355  **/
ixgbe_start_hw_generic(struct ixgbe_hw * hw)356 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
357 {
358 	s32 ret_val;
359 	u32 ctrl_ext;
360 	u16 device_caps;
361 
362 	DEBUGFUNC("ixgbe_start_hw_generic");
363 
364 	/* Set the media type */
365 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
366 
367 	/* PHY ops initialization must be done in reset_hw() */
368 
369 	/* Clear the VLAN filter table */
370 	hw->mac.ops.clear_vfta(hw);
371 
372 	/* Clear statistics registers */
373 	hw->mac.ops.clear_hw_cntrs(hw);
374 
375 	/* Set No Snoop Disable */
376 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
377 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
378 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
379 	IXGBE_WRITE_FLUSH(hw);
380 
381 	/* Setup flow control */
382 	ret_val = ixgbe_setup_fc(hw);
383 	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
384 		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
385 		return ret_val;
386 	}
387 
388 	/* Cache bit indicating need for crosstalk fix */
389 	switch (hw->mac.type) {
390 	case ixgbe_mac_82599EB:
391 	case ixgbe_mac_X550EM_x:
392 	case ixgbe_mac_X550EM_a:
393 		hw->mac.ops.get_device_caps(hw, &device_caps);
394 		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
395 			hw->need_crosstalk_fix = false;
396 		else
397 			hw->need_crosstalk_fix = true;
398 		break;
399 	default:
400 		hw->need_crosstalk_fix = false;
401 		break;
402 	}
403 
404 	/* Clear adapter stopped flag */
405 	hw->adapter_stopped = false;
406 
407 	return IXGBE_SUCCESS;
408 }
409 
410 /**
411  * ixgbe_start_hw_gen2 - Init sequence for common device family
412  * @hw: pointer to hw structure
413  *
414  * Performs the init sequence common to the second generation
415  * of 10 GbE devices.
416  * Devices in the second generation:
417  *    82599
418  *    X540
419  **/
ixgbe_start_hw_gen2(struct ixgbe_hw * hw)420 void ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
421 {
422 	u32 i;
423 	u32 regval;
424 
425 	/* Clear the rate limiters */
426 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
427 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
428 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
429 	}
430 	IXGBE_WRITE_FLUSH(hw);
431 
432 	/* Disable relaxed ordering */
433 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
434 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
435 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
436 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
437 	}
438 
439 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
440 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
441 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
442 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
443 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
444 	}
445 }
446 
447 /**
448  * ixgbe_init_hw_generic - Generic hardware initialization
449  * @hw: pointer to hardware structure
450  *
451  * Initialize the hardware by resetting the hardware, filling the bus info
452  * structure and media type, clears all on chip counters, initializes receive
453  * address registers, multicast table, VLAN filter table, calls routine to set
454  * up link and flow control settings, and leaves transmit and receive units
455  * disabled and uninitialized
456  **/
ixgbe_init_hw_generic(struct ixgbe_hw * hw)457 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
458 {
459 	s32 status;
460 
461 	DEBUGFUNC("ixgbe_init_hw_generic");
462 
463 	/* Reset the hardware */
464 	status = hw->mac.ops.reset_hw(hw);
465 
466 	if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
467 		/* Start the HW */
468 		status = hw->mac.ops.start_hw(hw);
469 	}
470 
471 	/* Initialize the LED link active for LED blink support */
472 	if (hw->mac.ops.init_led_link_act)
473 		hw->mac.ops.init_led_link_act(hw);
474 
475 	if (status != IXGBE_SUCCESS)
476 		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
477 
478 	return status;
479 }
480 
481 /**
482  * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
483  * @hw: pointer to hardware structure
484  *
485  * Clears all hardware statistics counters by reading them from the hardware
486  * Statistics counters are clear on read.
487  **/
ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw * hw)488 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
489 {
490 	u16 i = 0;
491 
492 	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
493 
494 	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
495 	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
496 	IXGBE_READ_REG(hw, IXGBE_ERRBC);
497 	IXGBE_READ_REG(hw, IXGBE_MSPDC);
498 	for (i = 0; i < 8; i++)
499 		IXGBE_READ_REG(hw, IXGBE_MPC(i));
500 
501 	IXGBE_READ_REG(hw, IXGBE_MLFC);
502 	IXGBE_READ_REG(hw, IXGBE_MRFC);
503 	IXGBE_READ_REG(hw, IXGBE_RLEC);
504 	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
505 	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
506 	if (hw->mac.type >= ixgbe_mac_82599EB) {
507 		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
508 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
509 	} else {
510 		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
511 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
512 	}
513 
514 	for (i = 0; i < 8; i++) {
515 		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
516 		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
517 		if (hw->mac.type >= ixgbe_mac_82599EB) {
518 			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
519 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
520 		} else {
521 			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
522 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
523 		}
524 	}
525 	if (hw->mac.type >= ixgbe_mac_82599EB)
526 		for (i = 0; i < 8; i++)
527 			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
528 	IXGBE_READ_REG(hw, IXGBE_PRC64);
529 	IXGBE_READ_REG(hw, IXGBE_PRC127);
530 	IXGBE_READ_REG(hw, IXGBE_PRC255);
531 	IXGBE_READ_REG(hw, IXGBE_PRC511);
532 	IXGBE_READ_REG(hw, IXGBE_PRC1023);
533 	IXGBE_READ_REG(hw, IXGBE_PRC1522);
534 	IXGBE_READ_REG(hw, IXGBE_GPRC);
535 	IXGBE_READ_REG(hw, IXGBE_BPRC);
536 	IXGBE_READ_REG(hw, IXGBE_MPRC);
537 	IXGBE_READ_REG(hw, IXGBE_GPTC);
538 	IXGBE_READ_REG(hw, IXGBE_GORCL);
539 	IXGBE_READ_REG(hw, IXGBE_GORCH);
540 	IXGBE_READ_REG(hw, IXGBE_GOTCL);
541 	IXGBE_READ_REG(hw, IXGBE_GOTCH);
542 	if (hw->mac.type == ixgbe_mac_82598EB)
543 		for (i = 0; i < 8; i++)
544 			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
545 	IXGBE_READ_REG(hw, IXGBE_RUC);
546 	IXGBE_READ_REG(hw, IXGBE_RFC);
547 	IXGBE_READ_REG(hw, IXGBE_ROC);
548 	IXGBE_READ_REG(hw, IXGBE_RJC);
549 	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
550 	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
551 	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
552 	IXGBE_READ_REG(hw, IXGBE_TORL);
553 	IXGBE_READ_REG(hw, IXGBE_TORH);
554 	IXGBE_READ_REG(hw, IXGBE_TPR);
555 	IXGBE_READ_REG(hw, IXGBE_TPT);
556 	IXGBE_READ_REG(hw, IXGBE_PTC64);
557 	IXGBE_READ_REG(hw, IXGBE_PTC127);
558 	IXGBE_READ_REG(hw, IXGBE_PTC255);
559 	IXGBE_READ_REG(hw, IXGBE_PTC511);
560 	IXGBE_READ_REG(hw, IXGBE_PTC1023);
561 	IXGBE_READ_REG(hw, IXGBE_PTC1522);
562 	IXGBE_READ_REG(hw, IXGBE_MPTC);
563 	IXGBE_READ_REG(hw, IXGBE_BPTC);
564 	for (i = 0; i < 16; i++) {
565 		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
566 		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
567 		if (hw->mac.type >= ixgbe_mac_82599EB) {
568 			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
569 			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
570 			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
571 			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
572 			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
573 		} else {
574 			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
575 			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
576 		}
577 	}
578 
579 	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
580 		if (hw->phy.id == 0)
581 			ixgbe_identify_phy(hw);
582 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
583 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
584 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
585 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
586 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
587 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
588 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
589 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
590 	}
591 
592 	return IXGBE_SUCCESS;
593 }
594 
595 /**
596  * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
597  * @hw: pointer to hardware structure
598  * @pba_num: stores the part number string from the EEPROM
599  * @pba_num_size: part number string buffer length
600  *
601  * Reads the part number string from the EEPROM.
602  **/
ixgbe_read_pba_string_generic(struct ixgbe_hw * hw,u8 * pba_num,u32 pba_num_size)603 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
604 				  u32 pba_num_size)
605 {
606 	s32 ret_val;
607 	u16 data;
608 	u16 pba_ptr;
609 	u16 offset;
610 	u16 length;
611 
612 	DEBUGFUNC("ixgbe_read_pba_string_generic");
613 
614 	if (pba_num == NULL) {
615 		DEBUGOUT("PBA string buffer was null\n");
616 		return IXGBE_ERR_INVALID_ARGUMENT;
617 	}
618 
619 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
620 	if (ret_val) {
621 		DEBUGOUT("NVM Read Error\n");
622 		return ret_val;
623 	}
624 
625 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
626 	if (ret_val) {
627 		DEBUGOUT("NVM Read Error\n");
628 		return ret_val;
629 	}
630 
631 	/*
632 	 * if data is not ptr guard the PBA must be in legacy format which
633 	 * means pba_ptr is actually our second data word for the PBA number
634 	 * and we can decode it into an ascii string
635 	 */
636 	if (data != IXGBE_PBANUM_PTR_GUARD) {
637 		DEBUGOUT("NVM PBA number is not stored as string\n");
638 
639 		/* we will need 11 characters to store the PBA */
640 		if (pba_num_size < 11) {
641 			DEBUGOUT("PBA string buffer too small\n");
642 			return IXGBE_ERR_NO_SPACE;
643 		}
644 
645 		/* extract hex string from data and pba_ptr */
646 		pba_num[0] = (data >> 12) & 0xF;
647 		pba_num[1] = (data >> 8) & 0xF;
648 		pba_num[2] = (data >> 4) & 0xF;
649 		pba_num[3] = data & 0xF;
650 		pba_num[4] = (pba_ptr >> 12) & 0xF;
651 		pba_num[5] = (pba_ptr >> 8) & 0xF;
652 		pba_num[6] = '-';
653 		pba_num[7] = 0;
654 		pba_num[8] = (pba_ptr >> 4) & 0xF;
655 		pba_num[9] = pba_ptr & 0xF;
656 
657 		/* put a null character on the end of our string */
658 		pba_num[10] = '\0';
659 
660 		/* switch all the data but the '-' to hex char */
661 		for (offset = 0; offset < 10; offset++) {
662 			if (pba_num[offset] < 0xA)
663 				pba_num[offset] += '0';
664 			else if (pba_num[offset] < 0x10)
665 				pba_num[offset] += 'A' - 0xA;
666 		}
667 
668 		return IXGBE_SUCCESS;
669 	}
670 
671 	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
672 	if (ret_val) {
673 		DEBUGOUT("NVM Read Error\n");
674 		return ret_val;
675 	}
676 
677 	if (length == 0xFFFF || length == 0) {
678 		DEBUGOUT("NVM PBA number section invalid length\n");
679 		return IXGBE_ERR_PBA_SECTION;
680 	}
681 
682 	/* check if pba_num buffer is big enough */
683 	if (pba_num_size  < (((u32)length * 2) - 1)) {
684 		DEBUGOUT("PBA string buffer too small\n");
685 		return IXGBE_ERR_NO_SPACE;
686 	}
687 
688 	/* trim pba length from start of string */
689 	pba_ptr++;
690 	length--;
691 
692 	for (offset = 0; offset < length; offset++) {
693 		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
694 		if (ret_val) {
695 			DEBUGOUT("NVM Read Error\n");
696 			return ret_val;
697 		}
698 		pba_num[offset * 2] = (u8)(data >> 8);
699 		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
700 	}
701 	pba_num[offset * 2] = '\0';
702 
703 	return IXGBE_SUCCESS;
704 }
705 
706 /**
707  * ixgbe_read_pba_num_generic - Reads part number from EEPROM
708  * @hw: pointer to hardware structure
709  * @pba_num: stores the part number from the EEPROM
710  *
711  * Reads the part number from the EEPROM.
712  **/
ixgbe_read_pba_num_generic(struct ixgbe_hw * hw,u32 * pba_num)713 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
714 {
715 	s32 ret_val;
716 	u16 data;
717 
718 	DEBUGFUNC("ixgbe_read_pba_num_generic");
719 
720 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
721 	if (ret_val) {
722 		DEBUGOUT("NVM Read Error\n");
723 		return ret_val;
724 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
725 		DEBUGOUT("NVM Not supported\n");
726 		return IXGBE_NOT_IMPLEMENTED;
727 	}
728 	*pba_num = (u32)(data << 16);
729 
730 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
731 	if (ret_val) {
732 		DEBUGOUT("NVM Read Error\n");
733 		return ret_val;
734 	}
735 	*pba_num |= (u32)data;
736 
737 	return IXGBE_SUCCESS;
738 }
739 
740 /**
741  * ixgbe_read_pba_raw
742  * @hw: pointer to the HW structure
743  * @eeprom_buf: optional pointer to EEPROM image
744  * @eeprom_buf_size: size of EEPROM image in words
745  * @max_pba_block_size: PBA block size limit
746  * @pba: pointer to output PBA structure
747  *
748  * Reads PBA from EEPROM image when eeprom_buf is not NULL.
749  * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
750  *
751  **/
ixgbe_read_pba_raw(struct ixgbe_hw * hw,u16 * eeprom_buf,u32 eeprom_buf_size,u16 max_pba_block_size,struct ixgbe_pba * pba)752 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
753 		       u32 eeprom_buf_size, u16 max_pba_block_size,
754 		       struct ixgbe_pba *pba)
755 {
756 	s32 ret_val;
757 	u16 pba_block_size;
758 
759 	if (pba == NULL)
760 		return IXGBE_ERR_PARAM;
761 
762 	if (eeprom_buf == NULL) {
763 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
764 						     &pba->word[0]);
765 		if (ret_val)
766 			return ret_val;
767 	} else {
768 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
769 			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
770 			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
771 		} else {
772 			return IXGBE_ERR_PARAM;
773 		}
774 	}
775 
776 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
777 		if (pba->pba_block == NULL)
778 			return IXGBE_ERR_PARAM;
779 
780 		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
781 						   eeprom_buf_size,
782 						   &pba_block_size);
783 		if (ret_val)
784 			return ret_val;
785 
786 		if (pba_block_size > max_pba_block_size)
787 			return IXGBE_ERR_PARAM;
788 
789 		if (eeprom_buf == NULL) {
790 			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
791 							     pba_block_size,
792 							     pba->pba_block);
793 			if (ret_val)
794 				return ret_val;
795 		} else {
796 			if (eeprom_buf_size > (u32)(pba->word[1] +
797 					      pba_block_size)) {
798 				memcpy(pba->pba_block,
799 				       &eeprom_buf[pba->word[1]],
800 				       pba_block_size * sizeof(u16));
801 			} else {
802 				return IXGBE_ERR_PARAM;
803 			}
804 		}
805 	}
806 
807 	return IXGBE_SUCCESS;
808 }
809 
810 /**
811  * ixgbe_write_pba_raw
812  * @hw: pointer to the HW structure
813  * @eeprom_buf: optional pointer to EEPROM image
814  * @eeprom_buf_size: size of EEPROM image in words
815  * @pba: pointer to PBA structure
816  *
817  * Writes PBA to EEPROM image when eeprom_buf is not NULL.
818  * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
819  *
820  **/
ixgbe_write_pba_raw(struct ixgbe_hw * hw,u16 * eeprom_buf,u32 eeprom_buf_size,struct ixgbe_pba * pba)821 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
822 			u32 eeprom_buf_size, struct ixgbe_pba *pba)
823 {
824 	s32 ret_val;
825 
826 	if (pba == NULL)
827 		return IXGBE_ERR_PARAM;
828 
829 	if (eeprom_buf == NULL) {
830 		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
831 						      &pba->word[0]);
832 		if (ret_val)
833 			return ret_val;
834 	} else {
835 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
836 			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
837 			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
838 		} else {
839 			return IXGBE_ERR_PARAM;
840 		}
841 	}
842 
843 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
844 		if (pba->pba_block == NULL)
845 			return IXGBE_ERR_PARAM;
846 
847 		if (eeprom_buf == NULL) {
848 			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
849 							      pba->pba_block[0],
850 							      pba->pba_block);
851 			if (ret_val)
852 				return ret_val;
853 		} else {
854 			if (eeprom_buf_size > (u32)(pba->word[1] +
855 					      pba->pba_block[0])) {
856 				memcpy(&eeprom_buf[pba->word[1]],
857 				       pba->pba_block,
858 				       pba->pba_block[0] * sizeof(u16));
859 			} else {
860 				return IXGBE_ERR_PARAM;
861 			}
862 		}
863 	}
864 
865 	return IXGBE_SUCCESS;
866 }
867 
868 /**
869  * ixgbe_get_pba_block_size
870  * @hw: pointer to the HW structure
871  * @eeprom_buf: optional pointer to EEPROM image
872  * @eeprom_buf_size: size of EEPROM image in words
873  * @pba_data_size: pointer to output variable
874  *
875  * Returns the size of the PBA block in words. Function operates on EEPROM
876  * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
877  * EEPROM device.
878  *
879  **/
ixgbe_get_pba_block_size(struct ixgbe_hw * hw,u16 * eeprom_buf,u32 eeprom_buf_size,u16 * pba_block_size)880 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
881 			     u32 eeprom_buf_size, u16 *pba_block_size)
882 {
883 	s32 ret_val;
884 	u16 pba_word[2];
885 	u16 length;
886 
887 	DEBUGFUNC("ixgbe_get_pba_block_size");
888 
889 	if (eeprom_buf == NULL) {
890 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
891 						     &pba_word[0]);
892 		if (ret_val)
893 			return ret_val;
894 	} else {
895 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
896 			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
897 			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
898 		} else {
899 			return IXGBE_ERR_PARAM;
900 		}
901 	}
902 
903 	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
904 		if (eeprom_buf == NULL) {
905 			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
906 						      &length);
907 			if (ret_val)
908 				return ret_val;
909 		} else {
910 			if (eeprom_buf_size > pba_word[1])
911 				length = eeprom_buf[pba_word[1] + 0];
912 			else
913 				return IXGBE_ERR_PARAM;
914 		}
915 
916 		if (length == 0xFFFF || length == 0)
917 			return IXGBE_ERR_PBA_SECTION;
918 	} else {
919 		/* PBA number in legacy format, there is no PBA Block. */
920 		length = 0;
921 	}
922 
923 	if (pba_block_size != NULL)
924 		*pba_block_size = length;
925 
926 	return IXGBE_SUCCESS;
927 }
928 
929 /**
930  * ixgbe_get_mac_addr_generic - Generic get MAC address
931  * @hw: pointer to hardware structure
932  * @mac_addr: Adapter MAC address
933  *
934  * Reads the adapter's MAC address from first Receive Address Register (RAR0)
935  * A reset of the adapter must be performed prior to calling this function
936  * in order for the MAC address to have been loaded from the EEPROM into RAR0
937  **/
ixgbe_get_mac_addr_generic(struct ixgbe_hw * hw,u8 * mac_addr)938 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
939 {
940 	u32 rar_high;
941 	u32 rar_low;
942 	u16 i;
943 
944 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
945 
946 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
947 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
948 
949 	for (i = 0; i < 4; i++)
950 		mac_addr[i] = (u8)(rar_low >> (i*8));
951 
952 	for (i = 0; i < 2; i++)
953 		mac_addr[i+4] = (u8)(rar_high >> (i*8));
954 
955 	return IXGBE_SUCCESS;
956 }
957 
958 /**
959  * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
960  * @hw: pointer to hardware structure
961  * @link_status: the link status returned by the PCI config space
962  *
963  * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
964  **/
void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	u16 width_bits = link_status & IXGBE_PCI_LINK_WIDTH;
	u16 speed_bits = link_status & IXGBE_PCI_LINK_SPEED;

	/* Default any unknown bus type to PCI Express */
	if (hw->bus.type == ixgbe_bus_type_unknown)
		hw->bus.type = ixgbe_bus_type_pci_express;

	/* Decode the negotiated link width field */
	if (width_bits == IXGBE_PCI_LINK_WIDTH_1)
		hw->bus.width = ixgbe_bus_width_pcie_x1;
	else if (width_bits == IXGBE_PCI_LINK_WIDTH_2)
		hw->bus.width = ixgbe_bus_width_pcie_x2;
	else if (width_bits == IXGBE_PCI_LINK_WIDTH_4)
		hw->bus.width = ixgbe_bus_width_pcie_x4;
	else if (width_bits == IXGBE_PCI_LINK_WIDTH_8)
		hw->bus.width = ixgbe_bus_width_pcie_x8;
	else
		hw->bus.width = ixgbe_bus_width_unknown;

	/* Decode the negotiated link speed field */
	if (speed_bits == IXGBE_PCI_LINK_SPEED_2500)
		hw->bus.speed = ixgbe_bus_speed_2500;
	else if (speed_bits == IXGBE_PCI_LINK_SPEED_5000)
		hw->bus.speed = ixgbe_bus_speed_5000;
	else if (speed_bits == IXGBE_PCI_LINK_SPEED_8000)
		hw->bus.speed = ixgbe_bus_speed_8000;
	else
		hw->bus.speed = ixgbe_bus_speed_unknown;

	/* Finally record which LAN function this port is */
	mac->ops.set_lan_id(hw);
}
1007 
1008 /**
1009  * ixgbe_get_bus_info_generic - Generic set PCI bus info
1010  * @hw: pointer to hardware structure
1011  *
1012  * Gets the PCI bus info (speed, width, type) then calls helper function to
1013  * store this data within the ixgbe_hw structure.
1014  **/
ixgbe_get_bus_info_generic(struct ixgbe_hw * hw)1015 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1016 {
1017 	u16 link_status;
1018 
1019 	DEBUGFUNC("ixgbe_get_bus_info_generic");
1020 
1021 	/* Get the negotiated link width and speed from PCI config space */
1022 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1023 
1024 	ixgbe_set_pci_config_data_generic(hw, link_status);
1025 
1026 	return IXGBE_SUCCESS;
1027 }
1028 
1029 /**
1030  * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1031  * @hw: pointer to the HW structure
1032  *
1033  * Determines the LAN function id by reading memory-mapped registers and swaps
1034  * the port value if requested, and set MAC instance for devices that share
1035  * CS4227.
1036  **/
ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw * hw)1037 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1038 {
1039 	struct ixgbe_bus_info *bus = &hw->bus;
1040 	u32 reg;
1041 	u16 ee_ctrl_4;
1042 
1043 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1044 
1045 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1046 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1047 	bus->lan_id = (u8)bus->func;
1048 
1049 	/* check for a port swap */
1050 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1051 	if (reg & IXGBE_FACTPS_LFS)
1052 		bus->func ^= 0x1;
1053 
1054 	/* Get MAC instance from EEPROM for configuring CS4227 */
1055 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1056 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1057 		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1058 				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1059 	}
1060 }
1061 
1062 /**
1063  * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1064  * @hw: pointer to hardware structure
1065  *
1066  * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1067  * disables transmit and receive units. The adapter_stopped flag is used by
1068  * the shared code and drivers to determine if the adapter is in a stopped
1069  * state and should not touch the hardware.
1070  **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		/* also request a software flush of queued descriptors */
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	/* brief settle time after the flush before touching the bus master */
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
1115 
1116 /**
1117  * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1118  * @hw: pointer to hardware structure
1119  *
1120  * Store the index for the link active LED. This will be used to support
1121  * blinking the LED.
1122  **/
ixgbe_init_led_link_act_generic(struct ixgbe_hw * hw)1123 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1124 {
1125 	struct ixgbe_mac_info *mac = &hw->mac;
1126 	u32 led_reg, led_mode;
1127 	u8 i;
1128 
1129 	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1130 
1131 	/* Get LED link active from the LEDCTL register */
1132 	for (i = 0; i < 4; i++) {
1133 		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1134 
1135 		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1136 		     IXGBE_LED_LINK_ACTIVE) {
1137 			mac->led_link_act = i;
1138 			return IXGBE_SUCCESS;
1139 		}
1140 	}
1141 
1142 	/*
1143 	 * If LEDCTL register does not have the LED link active set, then use
1144 	 * known MAC defaults.
1145 	 */
1146 	switch (hw->mac.type) {
1147 	case ixgbe_mac_X550EM_a:
1148 	case ixgbe_mac_X550EM_x:
1149 		mac->led_link_act = 1;
1150 		break;
1151 	default:
1152 		mac->led_link_act = 2;
1153 	}
1154 	return IXGBE_SUCCESS;
1155 }
1156 
1157 /**
1158  * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1159  * @hw: pointer to hardware structure
1160  * @index: led number to turn on
1161  **/
ixgbe_led_on_generic(struct ixgbe_hw * hw,u32 index)1162 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1163 {
1164 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1165 
1166 	DEBUGFUNC("ixgbe_led_on_generic");
1167 
1168 	if (index > 3)
1169 		return IXGBE_ERR_PARAM;
1170 
1171 	/* To turn on the LED, set mode to ON. */
1172 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1173 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1174 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1175 	IXGBE_WRITE_FLUSH(hw);
1176 
1177 	return IXGBE_SUCCESS;
1178 }
1179 
1180 /**
1181  * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1182  * @hw: pointer to hardware structure
1183  * @index: led number to turn off
1184  **/
ixgbe_led_off_generic(struct ixgbe_hw * hw,u32 index)1185 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1186 {
1187 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1188 
1189 	DEBUGFUNC("ixgbe_led_off_generic");
1190 
1191 	if (index > 3)
1192 		return IXGBE_ERR_PARAM;
1193 
1194 	/* To turn off the LED, set mode to OFF. */
1195 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1196 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1197 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1198 	IXGBE_WRITE_FLUSH(hw);
1199 
1200 	return IXGBE_SUCCESS;
1201 }
1202 
1203 /**
1204  * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1205  * @hw: pointer to hardware structure
1206  *
1207  * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1208  * ixgbe_hw struct in order to set up EEPROM access.
1209  **/
ixgbe_init_eeprom_params_generic(struct ixgbe_hw * hw)1210 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1211 {
1212 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1213 	u32 eec;
1214 	u16 eeprom_size;
1215 
1216 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1217 
1218 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
1219 		eeprom->type = ixgbe_eeprom_none;
1220 		/* Set default semaphore delay to 10ms which is a well
1221 		 * tested value */
1222 		eeprom->semaphore_delay = 10;
1223 		/* Clear EEPROM page size, it will be initialized as needed */
1224 		eeprom->word_page_size = 0;
1225 
1226 		/*
1227 		 * Check for EEPROM present first.
1228 		 * If not present leave as none
1229 		 */
1230 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1231 		if (eec & IXGBE_EEC_PRES) {
1232 			eeprom->type = ixgbe_eeprom_spi;
1233 
1234 			/*
1235 			 * SPI EEPROM is assumed here.  This code would need to
1236 			 * change if a future EEPROM is not SPI.
1237 			 */
1238 			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1239 					    IXGBE_EEC_SIZE_SHIFT);
1240 			eeprom->word_size = 1 << (eeprom_size +
1241 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
1242 		}
1243 
1244 		if (eec & IXGBE_EEC_ADDR_SIZE)
1245 			eeprom->address_bits = 16;
1246 		else
1247 			eeprom->address_bits = 8;
1248 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1249 			  "%d\n", eeprom->type, eeprom->word_size,
1250 			  eeprom->address_bits);
1251 	}
1252 
1253 	return IXGBE_SUCCESS;
1254 }
1255 
1256 /**
1257  * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1258  * @hw: pointer to hardware structure
1259  * @offset: offset within the EEPROM to write
1260  * @words: number of word(s)
1261  * @data: 16 bit word(s) to write to EEPROM
1262  *
1263  * Reads 16 bit word(s) from EEPROM through bit-bang method
1264  **/
ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)1265 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1266 					       u16 words, u16 *data)
1267 {
1268 	s32 status = IXGBE_SUCCESS;
1269 	u16 i, count;
1270 
1271 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1272 
1273 	hw->eeprom.ops.init_params(hw);
1274 
1275 	if (words == 0) {
1276 		status = IXGBE_ERR_INVALID_ARGUMENT;
1277 		goto out;
1278 	}
1279 
1280 	if (offset + words > hw->eeprom.word_size) {
1281 		status = IXGBE_ERR_EEPROM;
1282 		goto out;
1283 	}
1284 
1285 	/*
1286 	 * The EEPROM page size cannot be queried from the chip. We do lazy
1287 	 * initialization. It is worth to do that when we write large buffer.
1288 	 */
1289 	if ((hw->eeprom.word_page_size == 0) &&
1290 	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1291 		ixgbe_detect_eeprom_page_size_generic(hw, offset);
1292 
1293 	/*
1294 	 * We cannot hold synchronization semaphores for too long
1295 	 * to avoid other entity starvation. However it is more efficient
1296 	 * to read in bursts than synchronizing access for each word.
1297 	 */
1298 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1299 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1300 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1301 		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1302 							    count, &data[i]);
1303 
1304 		if (status != IXGBE_SUCCESS)
1305 			break;
1306 	}
1307 
1308 out:
1309 	return status;
1310 }
1311 
1312 /**
1313  * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1314  * @hw: pointer to hardware structure
1315  * @offset: offset within the EEPROM to be written to
1316  * @words: number of word(s)
1317  * @data: 16 bit word(s) to be written to the EEPROM
1318  *
1319  * If ixgbe_eeprom_update_checksum is not called after this function, the
1320  * EEPROM will most likely contain an invalid checksum.
1321  **/
STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		/* NOTE: the inner do/while below also advances i, so each
		 * outer iteration starts a new SPI write at the next word
		 * not yet sent (one write command per EEPROM page).
		 */
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/*  Send the WRITE ENABLE command (8 bit opcode )  */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			do {
				word = data[i];
				/* byte-swap the word before shifting it out */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* page size 0 = unknown: one word per write */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/* allow the device's internal write cycle to finish */
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1394 
1395 /**
1396  * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1397  * @hw: pointer to hardware structure
1398  * @offset: offset within the EEPROM to be written to
1399  * @data: 16 bit word to be written to the EEPROM
1400  *
1401  * If ixgbe_eeprom_update_checksum is not called after this function, the
1402  * EEPROM will most likely contain an invalid checksum.
1403  **/
ixgbe_write_eeprom_generic(struct ixgbe_hw * hw,u16 offset,u16 data)1404 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1405 {
1406 	s32 status;
1407 
1408 	DEBUGFUNC("ixgbe_write_eeprom_generic");
1409 
1410 	hw->eeprom.ops.init_params(hw);
1411 
1412 	if (offset >= hw->eeprom.word_size) {
1413 		status = IXGBE_ERR_EEPROM;
1414 		goto out;
1415 	}
1416 
1417 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1418 
1419 out:
1420 	return status;
1421 }
1422 
1423 /**
1424  * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1425  * @hw: pointer to hardware structure
1426  * @offset: offset within the EEPROM to be read
1427  * @data: read 16 bit words(s) from EEPROM
1428  * @words: number of word(s)
1429  *
1430  * Reads 16 bit word(s) from EEPROM through bit-bang method
1431  **/
ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)1432 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1433 					      u16 words, u16 *data)
1434 {
1435 	s32 status = IXGBE_SUCCESS;
1436 	u16 i, count;
1437 
1438 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1439 
1440 	hw->eeprom.ops.init_params(hw);
1441 
1442 	if (words == 0) {
1443 		status = IXGBE_ERR_INVALID_ARGUMENT;
1444 		goto out;
1445 	}
1446 
1447 	if (offset + words > hw->eeprom.word_size) {
1448 		status = IXGBE_ERR_EEPROM;
1449 		goto out;
1450 	}
1451 
1452 	/*
1453 	 * We cannot hold synchronization semaphores for too long
1454 	 * to avoid other entity starvation. However it is more efficient
1455 	 * to read in bursts than synchronizing access for each word.
1456 	 */
1457 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1458 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1459 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1460 
1461 		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1462 							   count, &data[i]);
1463 
1464 		if (status != IXGBE_SUCCESS)
1465 			break;
1466 	}
1467 
1468 out:
1469 	return status;
1470 }
1471 
1472 /**
1473  * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1474  * @hw: pointer to hardware structure
1475  * @offset: offset within the EEPROM to be read
1476  * @words: number of word(s)
1477  * @data: read 16 bit word(s) from EEPROM
1478  *
1479  * Reads 16 bit word(s) from EEPROM through bit-bang method
1480  **/
STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		/* One READ command per word */
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			/* byte-swap the word shifted in over SPI */
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1529 
1530 /**
1531  * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1532  * @hw: pointer to hardware structure
1533  * @offset: offset within the EEPROM to be read
1534  * @data: read 16 bit value from EEPROM
1535  *
1536  * Reads 16 bit value from EEPROM through bit-bang method
1537  **/
ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw * hw,u16 offset,u16 * data)1538 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1539 				       u16 *data)
1540 {
1541 	s32 status;
1542 
1543 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1544 
1545 	hw->eeprom.ops.init_params(hw);
1546 
1547 	if (offset >= hw->eeprom.word_size) {
1548 		status = IXGBE_ERR_EEPROM;
1549 		goto out;
1550 	}
1551 
1552 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1553 
1554 out:
1555 	return status;
1556 }
1557 
1558 /**
1559  * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1560  * @hw: pointer to hardware structure
1561  * @offset: offset of word in the EEPROM to read
1562  * @words: number of word(s)
1563  * @data: 16 bit word(s) from the EEPROM
1564  *
1565  * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1566  **/
ixgbe_read_eerd_buffer_generic(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)1567 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1568 				   u16 words, u16 *data)
1569 {
1570 	u32 eerd;
1571 	s32 status = IXGBE_SUCCESS;
1572 	u32 i;
1573 
1574 	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1575 
1576 	hw->eeprom.ops.init_params(hw);
1577 
1578 	if (words == 0) {
1579 		status = IXGBE_ERR_INVALID_ARGUMENT;
1580 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1581 		goto out;
1582 	}
1583 
1584 	if (offset >= hw->eeprom.word_size) {
1585 		status = IXGBE_ERR_EEPROM;
1586 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1587 		goto out;
1588 	}
1589 
1590 	for (i = 0; i < words; i++) {
1591 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1592 		       IXGBE_EEPROM_RW_REG_START;
1593 
1594 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1595 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1596 
1597 		if (status == IXGBE_SUCCESS) {
1598 			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1599 				   IXGBE_EEPROM_RW_REG_DATA);
1600 		} else {
1601 			DEBUGOUT("Eeprom read timed out\n");
1602 			goto out;
1603 		}
1604 	}
1605 out:
1606 	return status;
1607 }
1608 
1609 /**
1610  * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1611  * @hw: pointer to hardware structure
1612  * @offset: offset within the EEPROM to be used as a scratch pad
1613  *
1614  * Discover EEPROM page size by writing marching data at given offset.
1615  * This function is called only when we are writing a new large buffer
1616  * at given offset so the data would be overwritten anyway.
1617  **/
ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw * hw,u16 offset)1618 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1619 						 u16 offset)
1620 {
1621 	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1622 	s32 status = IXGBE_SUCCESS;
1623 	u16 i;
1624 
1625 	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1626 
1627 	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1628 		data[i] = i;
1629 
1630 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1631 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1632 					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1633 	hw->eeprom.word_page_size = 0;
1634 	if (status != IXGBE_SUCCESS)
1635 		goto out;
1636 
1637 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1638 	if (status != IXGBE_SUCCESS)
1639 		goto out;
1640 
1641 	/*
1642 	 * When writing in burst more than the actual page size
1643 	 * EEPROM address wraps around current page.
1644 	 */
1645 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1646 
1647 	DEBUGOUT1("Detected EEPROM page size = %d words.",
1648 		  hw->eeprom.word_page_size);
1649 out:
1650 	return status;
1651 }
1652 
1653 /**
1654  * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1655  * @hw: pointer to hardware structure
1656  * @offset: offset of  word in the EEPROM to read
1657  * @data: word read from the EEPROM
1658  *
1659  * Reads a 16 bit word from the EEPROM using the EERD register.
1660  **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	/* Single-word convenience wrapper around the buffered EERD read */
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1665 
1666 /**
1667  * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1668  * @hw: pointer to hardware structure
1669  * @offset: offset of  word in the EEPROM to write
1670  * @words: number of word(s)
1671  * @data: word(s) write to the EEPROM
1672  *
1673  * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1674  **/
ixgbe_write_eewr_buffer_generic(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)1675 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1676 				    u16 words, u16 *data)
1677 {
1678 	u32 eewr;
1679 	s32 status = IXGBE_SUCCESS;
1680 	u16 i;
1681 
1682 	DEBUGFUNC("ixgbe_write_eewr_generic");
1683 
1684 	hw->eeprom.ops.init_params(hw);
1685 
1686 	if (words == 0) {
1687 		status = IXGBE_ERR_INVALID_ARGUMENT;
1688 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1689 		goto out;
1690 	}
1691 
1692 	if (offset >= hw->eeprom.word_size) {
1693 		status = IXGBE_ERR_EEPROM;
1694 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1695 		goto out;
1696 	}
1697 
1698 	for (i = 0; i < words; i++) {
1699 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1700 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1701 			IXGBE_EEPROM_RW_REG_START;
1702 
1703 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1704 		if (status != IXGBE_SUCCESS) {
1705 			DEBUGOUT("Eeprom write EEWR timed out\n");
1706 			goto out;
1707 		}
1708 
1709 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1710 
1711 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1712 		if (status != IXGBE_SUCCESS) {
1713 			DEBUGOUT("Eeprom write EEWR timed out\n");
1714 			goto out;
1715 		}
1716 	}
1717 
1718 out:
1719 	return status;
1720 }
1721 
1722 /**
1723  * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1724  * @hw: pointer to hardware structure
1725  * @offset: offset of  word in the EEPROM to write
1726  * @data: word write to the EEPROM
1727  *
1728  * Write a 16 bit word to the EEPROM using the EEWR register.
1729  **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	/* Single-word convenience wrapper around the buffered EEWR write */
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
1734 
1735 /**
1736  * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1737  * @hw: pointer to hardware structure
1738  * @ee_reg: EEPROM flag for polling
1739  *
1740  * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1741  * read or write is done respectively.
1742  **/
ixgbe_poll_eerd_eewr_done(struct ixgbe_hw * hw,u32 ee_reg)1743 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1744 {
1745 	u32 i;
1746 	u32 reg;
1747 	s32 status = IXGBE_ERR_EEPROM;
1748 
1749 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1750 
1751 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1752 		if (ee_reg == IXGBE_NVM_POLL_READ)
1753 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1754 		else
1755 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1756 
1757 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1758 			status = IXGBE_SUCCESS;
1759 			break;
1760 		}
1761 		usec_delay(5);
1762 	}
1763 
1764 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1765 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1766 			     "EEPROM read/write done polling timed out");
1767 
1768 	return status;
1769 }
1770 
1771 /**
1772  * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1773  * @hw: pointer to hardware structure
1774  *
1775  * Prepares EEPROM for access using bit-bang method. This function should
1776  * be called before issuing a command to the EEPROM.
1777  **/
STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* First take the SW/FW synchronization semaphore for the EEPROM */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		/* Poll for the hardware grant bit, 5us per attempt */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			/* also drop the semaphore taken above */
			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1825 
1826 /**
1827  * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1828  * @hw: pointer to hardware structure
1829  *
1830  * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1831  **/
STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1916 
1917 /**
1918  * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1919  * @hw: pointer to hardware structure
1920  *
1921  * This function clears hardware semaphore bits.
1922  **/
STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	DEBUGFUNC("ixgbe_release_eeprom_semaphore");

	/* NOTE(review): the acquire path uses IXGBE_SWSM_BY_MAC(hw) while
	 * this release uses the base IXGBE_SWSM offset — confirm this is
	 * intentional for MACs whose SWSM register is relocated.
	 */
	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
	IXGBE_WRITE_FLUSH(hw);
}
1936 
1937 /**
1938  * ixgbe_ready_eeprom - Polls for EEPROM ready
1939  * @hw: pointer to hardware structure
1940  **/
ixgbe_ready_eeprom(struct ixgbe_hw * hw)1941 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1942 {
1943 	s32 status = IXGBE_SUCCESS;
1944 	u16 i;
1945 	u8 spi_stat_reg;
1946 
1947 	DEBUGFUNC("ixgbe_ready_eeprom");
1948 
1949 	/*
1950 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1951 	 * EEPROM will signal that the command has been completed by clearing
1952 	 * bit 0 of the internal status register.  If it's not cleared within
1953 	 * 5 milliseconds, then error out.
1954 	 */
1955 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1956 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1957 					    IXGBE_EEPROM_OPCODE_BITS);
1958 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1959 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1960 			break;
1961 
1962 		usec_delay(5);
1963 		ixgbe_standby_eeprom(hw);
1964 	};
1965 
1966 	/*
1967 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1968 	 * devices (and only 0-5mSec on 5V devices)
1969 	 */
1970 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1971 		DEBUGOUT("SPI EEPROM Status error\n");
1972 		status = IXGBE_ERR_EEPROM;
1973 	}
1974 
1975 	return status;
1976 }
1977 
1978 /**
1979  * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1980  * @hw: pointer to hardware structure
1981  **/
ixgbe_standby_eeprom(struct ixgbe_hw * hw)1982 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1983 {
1984 	u32 eec;
1985 
1986 	DEBUGFUNC("ixgbe_standby_eeprom");
1987 
1988 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1989 
1990 	/* Toggle CS to flush commands */
1991 	eec |= IXGBE_EEC_CS;
1992 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1993 	IXGBE_WRITE_FLUSH(hw);
1994 	usec_delay(1);
1995 	eec &= ~IXGBE_EEC_CS;
1996 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1997 	IXGBE_WRITE_FLUSH(hw);
1998 	usec_delay(1);
1999 }
2000 
2001 /**
2002  * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2003  * @hw: pointer to hardware structure
2004  * @data: data to send to the EEPROM
2005  * @count: number of bits to shift out
2006  **/
ixgbe_shift_out_eeprom_bits(struct ixgbe_hw * hw,u16 data,u16 count)2007 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
2008 					u16 count)
2009 {
2010 	u32 eec;
2011 	u32 mask;
2012 	u32 i;
2013 
2014 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
2015 
2016 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2017 
2018 	/*
2019 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
2020 	 * one bit at a time.  Determine the starting bit based on count
2021 	 */
2022 	mask = 0x01 << (count - 1);
2023 
2024 	for (i = 0; i < count; i++) {
2025 		/*
2026 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
2027 		 * "1", and then raising and then lowering the clock (the SK
2028 		 * bit controls the clock input to the EEPROM).  A "0" is
2029 		 * shifted out to the EEPROM by setting "DI" to "0" and then
2030 		 * raising and then lowering the clock.
2031 		 */
2032 		if (data & mask)
2033 			eec |= IXGBE_EEC_DI;
2034 		else
2035 			eec &= ~IXGBE_EEC_DI;
2036 
2037 		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2038 		IXGBE_WRITE_FLUSH(hw);
2039 
2040 		usec_delay(1);
2041 
2042 		ixgbe_raise_eeprom_clk(hw, &eec);
2043 		ixgbe_lower_eeprom_clk(hw, &eec);
2044 
2045 		/*
2046 		 * Shift mask to signify next bit of data to shift in to the
2047 		 * EEPROM
2048 		 */
2049 		mask = mask >> 1;
2050 	};
2051 
2052 	/* We leave the "DI" bit set to "0" when we leave this routine. */
2053 	eec &= ~IXGBE_EEC_DI;
2054 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2055 	IXGBE_WRITE_FLUSH(hw);
2056 }
2057 
2058 /**
2059  * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2060  * @hw: pointer to hardware structure
2061  * @count: number of bits to shift
2062  **/
ixgbe_shift_in_eeprom_bits(struct ixgbe_hw * hw,u16 count)2063 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2064 {
2065 	u32 eec;
2066 	u32 i;
2067 	u16 data = 0;
2068 
2069 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2070 
2071 	/*
2072 	 * In order to read a register from the EEPROM, we need to shift
2073 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2074 	 * the clock input to the EEPROM (setting the SK bit), and then reading
2075 	 * the value of the "DO" bit.  During this "shifting in" process the
2076 	 * "DI" bit should always be clear.
2077 	 */
2078 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2079 
2080 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2081 
2082 	for (i = 0; i < count; i++) {
2083 		data = data << 1;
2084 		ixgbe_raise_eeprom_clk(hw, &eec);
2085 
2086 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2087 
2088 		eec &= ~(IXGBE_EEC_DI);
2089 		if (eec & IXGBE_EEC_DO)
2090 			data |= 1;
2091 
2092 		ixgbe_lower_eeprom_clk(hw, &eec);
2093 	}
2094 
2095 	return data;
2096 }
2097 
2098 /**
2099  * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2100  * @hw: pointer to hardware structure
2101  * @eec: EEC register's current value
2102  **/
ixgbe_raise_eeprom_clk(struct ixgbe_hw * hw,u32 * eec)2103 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2104 {
2105 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
2106 
2107 	/*
2108 	 * Raise the clock input to the EEPROM
2109 	 * (setting the SK bit), then delay
2110 	 */
2111 	*eec = *eec | IXGBE_EEC_SK;
2112 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2113 	IXGBE_WRITE_FLUSH(hw);
2114 	usec_delay(1);
2115 }
2116 
2117 /**
2118  * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2119  * @hw: pointer to hardware structure
2120  * @eec: EEC's current value
2121  **/
ixgbe_lower_eeprom_clk(struct ixgbe_hw * hw,u32 * eec)2122 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2123 {
2124 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
2125 
2126 	/*
2127 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
2128 	 * delay
2129 	 */
2130 	*eec = *eec & ~IXGBE_EEC_SK;
2131 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2132 	IXGBE_WRITE_FLUSH(hw);
2133 	usec_delay(1);
2134 }
2135 
2136 /**
2137  * ixgbe_release_eeprom - Release EEPROM, release semaphores
2138  * @hw: pointer to hardware structure
2139  **/
ixgbe_release_eeprom(struct ixgbe_hw * hw)2140 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2141 {
2142 	u32 eec;
2143 
2144 	DEBUGFUNC("ixgbe_release_eeprom");
2145 
2146 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2147 
2148 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
2149 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2150 
2151 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2152 	IXGBE_WRITE_FLUSH(hw);
2153 
2154 	usec_delay(1);
2155 
2156 	/* Stop requesting EEPROM access */
2157 	eec &= ~IXGBE_EEC_REQ;
2158 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2159 
2160 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2161 
2162 	/* Delay before attempt to obtain semaphore again to allow FW access */
2163 	msec_delay(hw->eeprom.semaphore_delay);
2164 }
2165 
2166 /**
2167  * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2168  * @hw: pointer to hardware structure
2169  *
2170  * Returns a negative error code on error, or the 16-bit checksum
2171  **/
ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw * hw)2172 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2173 {
2174 	u16 i;
2175 	u16 j;
2176 	u16 checksum = 0;
2177 	u16 length = 0;
2178 	u16 pointer = 0;
2179 	u16 word = 0;
2180 
2181 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2182 
2183 	/* Include 0x0-0x3F in the checksum */
2184 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2185 		if (hw->eeprom.ops.read(hw, i, &word)) {
2186 			DEBUGOUT("EEPROM read failed\n");
2187 			return IXGBE_ERR_EEPROM;
2188 		}
2189 		checksum += word;
2190 	}
2191 
2192 	/* Include all data from pointers except for the fw pointer */
2193 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2194 		if (hw->eeprom.ops.read(hw, i, &pointer)) {
2195 			DEBUGOUT("EEPROM read failed\n");
2196 			return IXGBE_ERR_EEPROM;
2197 		}
2198 
2199 		/* If the pointer seems invalid */
2200 		if (pointer == 0xFFFF || pointer == 0)
2201 			continue;
2202 
2203 		if (hw->eeprom.ops.read(hw, pointer, &length)) {
2204 			DEBUGOUT("EEPROM read failed\n");
2205 			return IXGBE_ERR_EEPROM;
2206 		}
2207 
2208 		if (length == 0xFFFF || length == 0)
2209 			continue;
2210 
2211 		for (j = pointer + 1; j <= pointer + length; j++) {
2212 			if (hw->eeprom.ops.read(hw, j, &word)) {
2213 				DEBUGOUT("EEPROM read failed\n");
2214 				return IXGBE_ERR_EEPROM;
2215 			}
2216 			checksum += word;
2217 		}
2218 	}
2219 
2220 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2221 
2222 	return (s32)checksum;
2223 }
2224 
2225 /**
2226  * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2227  * @hw: pointer to hardware structure
2228  * @checksum_val: calculated checksum
2229  *
2230  * Performs checksum calculation and validates the EEPROM checksum.  If the
2231  * caller does not need checksum_val, the value can be NULL.
2232  **/
ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw * hw,u16 * checksum_val)2233 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2234 					   u16 *checksum_val)
2235 {
2236 	s32 status;
2237 	u16 checksum;
2238 	u16 read_checksum = 0;
2239 
2240 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2241 
2242 	/* Read the first word from the EEPROM. If this times out or fails, do
2243 	 * not continue or we could be in for a very long wait while every
2244 	 * EEPROM read fails
2245 	 */
2246 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2247 	if (status) {
2248 		DEBUGOUT("EEPROM read failed\n");
2249 		return status;
2250 	}
2251 
2252 	status = hw->eeprom.ops.calc_checksum(hw);
2253 	if (status < 0)
2254 		return status;
2255 
2256 	checksum = (u16)(status & 0xffff);
2257 
2258 	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2259 	if (status) {
2260 		DEBUGOUT("EEPROM read failed\n");
2261 		return status;
2262 	}
2263 
2264 	/* Verify read checksum from EEPROM is the same as
2265 	 * calculated checksum
2266 	 */
2267 	if (read_checksum != checksum)
2268 		status = IXGBE_ERR_EEPROM_CHECKSUM;
2269 
2270 	/* If the user cares, return the calculated checksum */
2271 	if (checksum_val)
2272 		*checksum_val = checksum;
2273 
2274 	return status;
2275 }
2276 
2277 /**
2278  * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2279  * @hw: pointer to hardware structure
2280  **/
ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw * hw)2281 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2282 {
2283 	s32 status;
2284 	u16 checksum;
2285 
2286 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2287 
2288 	/* Read the first word from the EEPROM. If this times out or fails, do
2289 	 * not continue or we could be in for a very long wait while every
2290 	 * EEPROM read fails
2291 	 */
2292 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2293 	if (status) {
2294 		DEBUGOUT("EEPROM read failed\n");
2295 		return status;
2296 	}
2297 
2298 	status = hw->eeprom.ops.calc_checksum(hw);
2299 	if (status < 0)
2300 		return status;
2301 
2302 	checksum = (u16)(status & 0xffff);
2303 
2304 	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2305 
2306 	return status;
2307 }
2308 
2309 /**
2310  * ixgbe_validate_mac_addr - Validate MAC address
2311  * @mac_addr: pointer to MAC address.
2312  *
2313  * Tests a MAC address to ensure it is a valid Individual Address.
2314  **/
ixgbe_validate_mac_addr(u8 * mac_addr)2315 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2316 {
2317 	s32 status = IXGBE_SUCCESS;
2318 
2319 	DEBUGFUNC("ixgbe_validate_mac_addr");
2320 
2321 	/* Make sure it is not a multicast address */
2322 	if (IXGBE_IS_MULTICAST(mac_addr)) {
2323 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2324 	/* Not a broadcast address */
2325 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
2326 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2327 	/* Reject the zero address */
2328 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2329 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2330 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2331 	}
2332 	return status;
2333 }
2334 
2335 /**
2336  * ixgbe_set_rar_generic - Set Rx address register
2337  * @hw: pointer to hardware structure
2338  * @index: Receive address register to write
2339  * @addr: Address to put into receive address register
2340  * @vmdq: VMDq "set" or "pool" index
2341  * @enable_addr: set flag that address is active
2342  *
2343  * Puts an ethernet address into a receive address register.
2344  **/
ixgbe_set_rar_generic(struct ixgbe_hw * hw,u32 index,u8 * addr,u32 vmdq,u32 enable_addr)2345 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2346 			  u32 enable_addr)
2347 {
2348 	u32 rar_low, rar_high;
2349 	u32 rar_entries = hw->mac.num_rar_entries;
2350 
2351 	DEBUGFUNC("ixgbe_set_rar_generic");
2352 
2353 	/* Make sure we are using a valid rar index range */
2354 	if (index >= rar_entries) {
2355 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2356 			     "RAR index %d is out of range.\n", index);
2357 		return IXGBE_ERR_INVALID_ARGUMENT;
2358 	}
2359 
2360 	/* setup VMDq pool selection before this RAR gets enabled */
2361 	hw->mac.ops.set_vmdq(hw, index, vmdq);
2362 
2363 	/*
2364 	 * HW expects these in little endian so we reverse the byte
2365 	 * order from network order (big endian) to little endian
2366 	 */
2367 	rar_low = ((u32)addr[0] |
2368 		   ((u32)addr[1] << 8) |
2369 		   ((u32)addr[2] << 16) |
2370 		   ((u32)addr[3] << 24));
2371 	/*
2372 	 * Some parts put the VMDq setting in the extra RAH bits,
2373 	 * so save everything except the lower 16 bits that hold part
2374 	 * of the address and the address valid bit.
2375 	 */
2376 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2377 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2378 	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2379 
2380 	if (enable_addr != 0)
2381 		rar_high |= IXGBE_RAH_AV;
2382 
2383 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2384 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2385 
2386 	return IXGBE_SUCCESS;
2387 }
2388 
2389 /**
2390  * ixgbe_clear_rar_generic - Remove Rx address register
2391  * @hw: pointer to hardware structure
2392  * @index: Receive address register to write
2393  *
2394  * Clears an ethernet address from a receive address register.
2395  **/
ixgbe_clear_rar_generic(struct ixgbe_hw * hw,u32 index)2396 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2397 {
2398 	u32 rar_high;
2399 	u32 rar_entries = hw->mac.num_rar_entries;
2400 
2401 	DEBUGFUNC("ixgbe_clear_rar_generic");
2402 
2403 	/* Make sure we are using a valid rar index range */
2404 	if (index >= rar_entries) {
2405 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2406 			     "RAR index %d is out of range.\n", index);
2407 		return IXGBE_ERR_INVALID_ARGUMENT;
2408 	}
2409 
2410 	/*
2411 	 * Some parts put the VMDq setting in the extra RAH bits,
2412 	 * so save everything except the lower 16 bits that hold part
2413 	 * of the address and the address valid bit.
2414 	 */
2415 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2416 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2417 
2418 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2419 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2420 
2421 	/* clear VMDq pool/queue selection for this RAR */
2422 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2423 
2424 	return IXGBE_SUCCESS;
2425 }
2426 
2427 /**
2428  * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2429  * @hw: pointer to hardware structure
2430  *
2431  * Places the MAC address in receive address register 0 and clears the rest
2432  * of the receive address registers. Clears the multicast table. Assumes
2433  * the receiver is in reset when the routine is called.
2434  **/
ixgbe_init_rx_addrs_generic(struct ixgbe_hw * hw)2435 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2436 {
2437 	u32 i;
2438 	u32 rar_entries = hw->mac.num_rar_entries;
2439 
2440 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2441 
2442 	/*
2443 	 * If the current mac address is valid, assume it is a software override
2444 	 * to the permanent address.
2445 	 * Otherwise, use the permanent address from the eeprom.
2446 	 */
2447 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2448 	    IXGBE_ERR_INVALID_MAC_ADDR) {
2449 		/* Get the MAC address from the RAR0 for later reference */
2450 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2451 
2452 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2453 			  hw->mac.addr[0], hw->mac.addr[1],
2454 			  hw->mac.addr[2]);
2455 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2456 			  hw->mac.addr[4], hw->mac.addr[5]);
2457 	} else {
2458 		/* Setup the receive address. */
2459 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2460 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2461 			  hw->mac.addr[0], hw->mac.addr[1],
2462 			  hw->mac.addr[2]);
2463 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2464 			  hw->mac.addr[4], hw->mac.addr[5]);
2465 
2466 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2467 	}
2468 
2469 	/* clear VMDq pool/queue selection for RAR 0 */
2470 	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2471 
2472 	hw->addr_ctrl.overflow_promisc = 0;
2473 
2474 	hw->addr_ctrl.rar_used_count = 1;
2475 
2476 	/* Zero out the other receive addresses. */
2477 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2478 	for (i = 1; i < rar_entries; i++) {
2479 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2480 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2481 	}
2482 
2483 	/* Clear the MTA */
2484 	hw->addr_ctrl.mta_in_use = 0;
2485 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2486 
2487 	DEBUGOUT(" Clearing MTA\n");
2488 	for (i = 0; i < hw->mac.mcft_size; i++)
2489 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2490 
2491 	ixgbe_init_uta_tables(hw);
2492 
2493 	return IXGBE_SUCCESS;
2494 }
2495 
2496 /**
2497  * ixgbe_add_uc_addr - Adds a secondary unicast address.
2498  * @hw: pointer to hardware structure
2499  * @addr: new address
2500  * @vmdq: VMDq "set" or "pool" index
2501  *
2502  * Adds it to unused receive address register or goes into promiscuous mode.
2503  **/
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	u32 slot;

	DEBUGFUNC("ixgbe_add_uc_addr");

	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/*
	 * Use the next free RAR slot if one exists; otherwise just count
	 * the overflow so the caller can fall back to promiscuous mode.
	 */
	if (hw->addr_ctrl.rar_used_count >= hw->mac.num_rar_entries) {
		hw->addr_ctrl.overflow_promisc++;
	} else {
		slot = hw->addr_ctrl.rar_used_count;
		hw->mac.ops.set_rar(hw, slot, addr, vmdq, IXGBE_RAH_AV);
		DEBUGOUT1("Added a secondary address to RAR[%d]\n", slot);
		hw->addr_ctrl.rar_used_count++;
	}

	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
}
2529 
2530 /**
2531  * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2532  * @hw: pointer to hardware structure
2533  * @addr_list: the list of new addresses
2534  * @addr_count: number of addresses
2535  * @next: iterator function to walk the address list
2536  *
2537  * The given list replaces any existing list.  Clears the secondary addrs from
2538  * receive address registers.  Uses unused receive address registers for the
2539  * first secondary addresses, and falls back to promiscuous mode as needed.
2540  *
2541  * Drivers using secondary unicast addresses must set user_set_promisc when
2542  * manually putting the device into promiscuous mode.
2543  **/
ixgbe_update_uc_addr_list_generic(struct ixgbe_hw * hw,u8 * addr_list,u32 addr_count,ixgbe_mc_addr_itr next)2544 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2545 				      u32 addr_count, ixgbe_mc_addr_itr next)
2546 {
2547 	u8 *addr;
2548 	u32 i;
2549 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2550 	u32 uc_addr_in_use;
2551 	u32 fctrl;
2552 	u32 vmdq;
2553 
2554 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2555 
2556 	/*
2557 	 * Clear accounting of old secondary address list,
2558 	 * don't count RAR[0]
2559 	 */
2560 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2561 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2562 	hw->addr_ctrl.overflow_promisc = 0;
2563 
2564 	/* Zero out the other receive addresses */
2565 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2566 	for (i = 0; i < uc_addr_in_use; i++) {
2567 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2568 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2569 	}
2570 
2571 	/* Add the new addresses */
2572 	for (i = 0; i < addr_count; i++) {
2573 		DEBUGOUT(" Adding the secondary addresses:\n");
2574 		addr = next(hw, &addr_list, &vmdq);
2575 		ixgbe_add_uc_addr(hw, addr, vmdq);
2576 	}
2577 
2578 	if (hw->addr_ctrl.overflow_promisc) {
2579 		/* enable promisc if not already in overflow or set by user */
2580 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2581 			DEBUGOUT(" Entering address overflow promisc mode\n");
2582 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2583 			fctrl |= IXGBE_FCTRL_UPE;
2584 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2585 		}
2586 	} else {
2587 		/* only disable if set by overflow, not by user */
2588 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2589 			DEBUGOUT(" Leaving address overflow promisc mode\n");
2590 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2591 			fctrl &= ~IXGBE_FCTRL_UPE;
2592 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2593 		}
2594 	}
2595 
2596 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2597 	return IXGBE_SUCCESS;
2598 }
2599 
2600 /**
2601  * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2602  * @hw: pointer to hardware structure
2603  * @mc_addr: the multicast address
2604  *
2605  * Extracts the 12 bits, from a multicast address, to determine which
2606  * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2607  * incoming rx multicast addresses, to determine the bit-vector to check in
2608  * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2609  * by the MO field of the MCSTCTRL. The MO field is set during initialization
2610  * to mc_filter_type.
2611  **/
ixgbe_mta_vector(struct ixgbe_hw * hw,u8 * mc_addr)2612 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2613 {
2614 	u32 vector = 0;
2615 
2616 	DEBUGFUNC("ixgbe_mta_vector");
2617 
2618 	switch (hw->mac.mc_filter_type) {
2619 	case 0:   /* use bits [47:36] of the address */
2620 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2621 		break;
2622 	case 1:   /* use bits [46:35] of the address */
2623 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2624 		break;
2625 	case 2:   /* use bits [45:34] of the address */
2626 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2627 		break;
2628 	case 3:   /* use bits [43:32] of the address */
2629 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2630 		break;
2631 	default:  /* Invalid mc_filter_type */
2632 		DEBUGOUT("MC filter type param set incorrectly\n");
2633 		ASSERT(0);
2634 		break;
2635 	}
2636 
2637 	/* vector can only be 12-bits or boundary will be exceeded */
2638 	vector &= 0xFFF;
2639 	return vector;
2640 }
2641 
2642 /**
2643  * ixgbe_set_mta - Set bit-vector in multicast table
2644  * @hw: pointer to hardware structure
2645  * @mc_addr: Multicast address
2646  *
2647  * Sets the bit-vector in the multicast table.
2648  **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 hash;
	u32 reg_idx;
	u32 bit_idx;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	hash = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", hash);

	/*
	 * The MTA shadow is 128 32-bit words treated as a single 4096-bit
	 * array.  The upper 7 bits of the hash select the word and the lower
	 * 5 bits select the bit within that word.
	 */
	reg_idx = (hash >> 5) & 0x7F;
	bit_idx = hash & 0x1F;
	hw->mac.mta_shadow[reg_idx] |= (1 << bit_idx);
}
2675 
2676 /**
2677  * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2678  * @hw: pointer to hardware structure
2679  * @mc_addr_list: the list of new multicast addresses
2680  * @mc_addr_count: number of addresses
2681  * @next: iterator function to walk the multicast address list
2682  * @clear: flag, when set clears the table beforehand
2683  *
2684  * When the clear flag is set, the given list replaces any existing list.
2685  * Hashes the given addresses into the multicast table.
2686  **/
ixgbe_update_mc_addr_list_generic(struct ixgbe_hw * hw,u8 * mc_addr_list,u32 mc_addr_count,ixgbe_mc_addr_itr next,bool clear)2687 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2688 				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
2689 				      bool clear)
2690 {
2691 	u32 i;
2692 	u32 vmdq;
2693 
2694 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2695 
2696 	/*
2697 	 * Set the new number of MC addresses that we are being requested to
2698 	 * use.
2699 	 */
2700 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2701 	hw->addr_ctrl.mta_in_use = 0;
2702 
2703 	/* Clear mta_shadow */
2704 	if (clear) {
2705 		DEBUGOUT(" Clearing MTA\n");
2706 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2707 	}
2708 
2709 	/* Update mta_shadow */
2710 	for (i = 0; i < mc_addr_count; i++) {
2711 		DEBUGOUT(" Adding the multicast addresses:\n");
2712 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2713 	}
2714 
2715 	/* Enable mta */
2716 	for (i = 0; i < hw->mac.mcft_size; i++)
2717 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2718 				      hw->mac.mta_shadow[i]);
2719 
2720 	if (hw->addr_ctrl.mta_in_use > 0)
2721 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2722 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2723 
2724 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2725 	return IXGBE_SUCCESS;
2726 }
2727 
2728 /**
2729  * ixgbe_enable_mc_generic - Enable multicast address in RAR
2730  * @hw: pointer to hardware structure
2731  *
2732  * Enables multicast address in RAR and the use of the multicast hash table.
2733  **/
ixgbe_enable_mc_generic(struct ixgbe_hw * hw)2734 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2735 {
2736 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2737 
2738 	DEBUGFUNC("ixgbe_enable_mc_generic");
2739 
2740 	if (a->mta_in_use > 0)
2741 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2742 				hw->mac.mc_filter_type);
2743 
2744 	return IXGBE_SUCCESS;
2745 }
2746 
2747 /**
2748  * ixgbe_disable_mc_generic - Disable multicast address in RAR
2749  * @hw: pointer to hardware structure
2750  *
2751  * Disables multicast address in RAR and the use of the multicast hash table.
2752  **/
ixgbe_disable_mc_generic(struct ixgbe_hw * hw)2753 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2754 {
2755 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2756 
2757 	DEBUGFUNC("ixgbe_disable_mc_generic");
2758 
2759 	if (a->mta_in_use > 0)
2760 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2761 
2762 	return IXGBE_SUCCESS;
2763 }
2764 
2765 /**
2766  * ixgbe_fc_enable_generic - Enable flow control
2767  * @hw: pointer to hardware structure
2768  *
2769  * Enable flow control according to the current settings.
2770  **/
ixgbe_fc_enable_generic(struct ixgbe_hw * hw)2771 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2772 {
2773 	s32 ret_val = IXGBE_SUCCESS;
2774 	u32 mflcn_reg, fccfg_reg;
2775 	u32 reg;
2776 	u32 fcrtl, fcrth;
2777 	int i;
2778 
2779 	DEBUGFUNC("ixgbe_fc_enable_generic");
2780 
2781 	/* Validate the water mark configuration */
2782 	if (!hw->fc.pause_time) {
2783 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2784 		goto out;
2785 	}
2786 
2787 	/* Low water mark of zero causes XOFF floods */
2788 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2789 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2790 		    hw->fc.high_water[i]) {
2791 			if (!hw->fc.low_water[i] ||
2792 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2793 				DEBUGOUT("Invalid water mark configuration\n");
2794 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2795 				goto out;
2796 			}
2797 		}
2798 	}
2799 
2800 	/* Negotiate the fc mode to use */
2801 	hw->mac.ops.fc_autoneg(hw);
2802 
2803 	/* Disable any previous flow control settings */
2804 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2805 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2806 
2807 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2808 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2809 
2810 	/*
2811 	 * The possible values of fc.current_mode are:
2812 	 * 0: Flow control is completely disabled
2813 	 * 1: Rx flow control is enabled (we can receive pause frames,
2814 	 *    but not send pause frames).
2815 	 * 2: Tx flow control is enabled (we can send pause frames but
2816 	 *    we do not support receiving pause frames).
2817 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2818 	 * other: Invalid.
2819 	 */
2820 	switch (hw->fc.current_mode) {
2821 	case ixgbe_fc_none:
2822 		/*
2823 		 * Flow control is disabled by software override or autoneg.
2824 		 * The code below will actually disable it in the HW.
2825 		 */
2826 		break;
2827 	case ixgbe_fc_rx_pause:
2828 		/*
2829 		 * Rx Flow control is enabled and Tx Flow control is
2830 		 * disabled by software override. Since there really
2831 		 * isn't a way to advertise that we are capable of RX
2832 		 * Pause ONLY, we will advertise that we support both
2833 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2834 		 * disable the adapter's ability to send PAUSE frames.
2835 		 */
2836 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2837 		break;
2838 	case ixgbe_fc_tx_pause:
2839 		/*
2840 		 * Tx Flow control is enabled, and Rx Flow control is
2841 		 * disabled by software override.
2842 		 */
2843 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2844 		break;
2845 	case ixgbe_fc_full:
2846 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2847 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2848 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2849 		break;
2850 	default:
2851 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2852 			     "Flow control param set incorrectly\n");
2853 		ret_val = IXGBE_ERR_CONFIG;
2854 		goto out;
2855 		break;
2856 	}
2857 
2858 	/* Set 802.3x based flow control settings. */
2859 	mflcn_reg |= IXGBE_MFLCN_DPF;
2860 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2861 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2862 
2863 
2864 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2865 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2866 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2867 		    hw->fc.high_water[i]) {
2868 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2869 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2870 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2871 		} else {
2872 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2873 			/*
2874 			 * In order to prevent Tx hangs when the internal Tx
2875 			 * switch is enabled we must set the high water mark
2876 			 * to the Rx packet buffer size - 24KB.  This allows
2877 			 * the Tx switch to function even under heavy Rx
2878 			 * workloads.
2879 			 */
2880 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2881 		}
2882 
2883 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2884 	}
2885 
2886 	/* Configure pause time (2 TCs per register) */
2887 	reg = hw->fc.pause_time * 0x00010001;
2888 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2889 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2890 
2891 	/* Configure flow control refresh threshold value */
2892 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2893 
2894 out:
2895 	return ret_val;
2896 }
2897 
2898 /**
2899  * ixgbe_negotiate_fc - Negotiate flow control
2900  * @hw: pointer to hardware structure
2901  * @adv_reg: flow control advertised settings
2902  * @lp_reg: link partner's flow control settings
2903  * @adv_sym: symmetric pause bit in advertisement
2904  * @adv_asm: asymmetric pause bit in advertisement
2905  * @lp_sym: symmetric pause bit in link partner advertisement
2906  * @lp_asm: asymmetric pause bit in link partner advertisement
2907  *
2908  * Find the intersection between advertised settings and link partner's
2909  * advertised settings
2910  **/
ixgbe_negotiate_fc(struct ixgbe_hw * hw,u32 adv_reg,u32 lp_reg,u32 adv_sym,u32 adv_asm,u32 lp_sym,u32 lp_asm)2911 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2912 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2913 {
2914 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2915 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2916 			     "Local or link partner's advertised flow control "
2917 			     "settings are NULL. Local: %x, link partner: %x\n",
2918 			     adv_reg, lp_reg);
2919 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2920 	}
2921 
2922 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2923 		/*
2924 		 * Now we need to check if the user selected Rx ONLY
2925 		 * of pause frames.  In this case, we had to advertise
2926 		 * FULL flow control because we could not advertise RX
2927 		 * ONLY. Hence, we must now check to see if we need to
2928 		 * turn OFF the TRANSMISSION of PAUSE frames.
2929 		 */
2930 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2931 			hw->fc.current_mode = ixgbe_fc_full;
2932 			DEBUGOUT("Flow Control = FULL.\n");
2933 		} else {
2934 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2935 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2936 		}
2937 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2938 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2939 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2940 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2941 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2942 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2943 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2944 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2945 	} else {
2946 		hw->fc.current_mode = ixgbe_fc_none;
2947 		DEBUGOUT("Flow Control = NONE.\n");
2948 	}
2949 	return IXGBE_SUCCESS;
2950 }
2951 
2952 /**
2953  * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2954  * @hw: pointer to hardware structure
2955  *
2956  * Enable flow control according on 1 gig fiber.
2957  **/
ixgbe_fc_autoneg_fiber(struct ixgbe_hw * hw)2958 STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2959 {
2960 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2961 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2962 
2963 	/*
2964 	 * On multispeed fiber at 1g, bail out if
2965 	 * - link is up but AN did not complete, or if
2966 	 * - link is up and AN completed but timed out
2967 	 */
2968 
2969 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2970 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2971 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2972 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2973 		goto out;
2974 	}
2975 
2976 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2977 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2978 
2979 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2980 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2981 				      IXGBE_PCS1GANA_ASM_PAUSE,
2982 				      IXGBE_PCS1GANA_SYM_PAUSE,
2983 				      IXGBE_PCS1GANA_ASM_PAUSE);
2984 
2985 out:
2986 	return ret_val;
2987 }
2988 
2989 /**
2990  * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2991  * @hw: pointer to hardware structure
2992  *
2993  * Enable flow control according to IEEE clause 37.
2994  **/
ixgbe_fc_autoneg_backplane(struct ixgbe_hw * hw)2995 STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2996 {
2997 	u32 links2, anlp1_reg, autoc_reg, links;
2998 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2999 
3000 	/*
3001 	 * On backplane, bail out if
3002 	 * - backplane autoneg was not completed, or if
3003 	 * - we are 82599 and link partner is not AN enabled
3004 	 */
3005 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3006 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3007 		DEBUGOUT("Auto-Negotiation did not complete\n");
3008 		goto out;
3009 	}
3010 
3011 	if (hw->mac.type == ixgbe_mac_82599EB) {
3012 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3013 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3014 			DEBUGOUT("Link partner is not AN enabled\n");
3015 			goto out;
3016 		}
3017 	}
3018 	/*
3019 	 * Read the 10g AN autoc and LP ability registers and resolve
3020 	 * local flow control settings accordingly
3021 	 */
3022 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3023 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3024 
3025 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3026 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3027 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3028 
3029 out:
3030 	return ret_val;
3031 }
3032 
3033 /**
3034  * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3035  * @hw: pointer to hardware structure
3036  *
3037  * Enable flow control according to IEEE clause 37.
3038  **/
ixgbe_fc_autoneg_copper(struct ixgbe_hw * hw)3039 STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3040 {
3041 	u16 technology_ability_reg = 0;
3042 	u16 lp_technology_ability_reg = 0;
3043 
3044 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3045 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3046 			     &technology_ability_reg);
3047 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3048 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3049 			     &lp_technology_ability_reg);
3050 
3051 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3052 				  (u32)lp_technology_ability_reg,
3053 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3054 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3055 }
3056 
3057 /**
3058  * ixgbe_fc_autoneg - Configure flow control
3059  * @hw: pointer to hardware structure
3060  *
3061  * Compares our advertised flow control capabilities to those advertised by
3062  * our link partner, and determines the proper flow control mode to use.
3063  **/
ixgbe_fc_autoneg(struct ixgbe_hw * hw)3064 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3065 {
3066 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3067 	ixgbe_link_speed speed;
3068 	bool link_up;
3069 
3070 	DEBUGFUNC("ixgbe_fc_autoneg");
3071 
3072 	/*
3073 	 * AN should have completed when the cable was plugged in.
3074 	 * Look for reasons to bail out.  Bail out if:
3075 	 * - FC autoneg is disabled, or if
3076 	 * - link is not up.
3077 	 */
3078 	if (hw->fc.disable_fc_autoneg) {
3079 		/* TODO: This should be just an informative log */
3080 		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
3081 			      "Flow control autoneg is disabled");
3082 		goto out;
3083 	}
3084 
3085 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
3086 	if (!link_up) {
3087 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3088 		goto out;
3089 	}
3090 
3091 	switch (hw->phy.media_type) {
3092 	/* Autoneg flow control on fiber adapters */
3093 	case ixgbe_media_type_fiber_qsfp:
3094 	case ixgbe_media_type_fiber:
3095 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3096 			ret_val = ixgbe_fc_autoneg_fiber(hw);
3097 		break;
3098 
3099 	/* Autoneg flow control on backplane adapters */
3100 	case ixgbe_media_type_backplane:
3101 		ret_val = ixgbe_fc_autoneg_backplane(hw);
3102 		break;
3103 
3104 	/* Autoneg flow control on copper adapters */
3105 	case ixgbe_media_type_copper:
3106 		if (ixgbe_device_supports_autoneg_fc(hw))
3107 			ret_val = ixgbe_fc_autoneg_copper(hw);
3108 		break;
3109 
3110 	default:
3111 		break;
3112 	}
3113 
3114 out:
3115 	if (ret_val == IXGBE_SUCCESS) {
3116 		hw->fc.fc_was_autonegged = true;
3117 	} else {
3118 		hw->fc.fc_was_autonegged = false;
3119 		hw->fc.current_mode = hw->fc.requested_mode;
3120 	}
3121 }
3122 
3123 /*
3124  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3125  * @hw: pointer to hardware structure
3126  *
3127  * System-wide timeout range is encoded in PCIe Device Control2 register.
3128  *
3129  * Add 10% to specified maximum and return the number of times to poll for
3130  * completion timeout, in units of 100 microsec.  Never return less than
3131  * 800 = 80 millisec.
3132  */
ixgbe_pcie_timeout_poll(struct ixgbe_hw * hw)3133 STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3134 {
3135 	s16 devctl2;
3136 	u32 pollcnt;
3137 
3138 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3139 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3140 
3141 	switch (devctl2) {
3142 	case IXGBE_PCIDEVCTRL2_65_130ms:
3143 		pollcnt = 1300;		/* 130 millisec */
3144 		break;
3145 	case IXGBE_PCIDEVCTRL2_260_520ms:
3146 		pollcnt = 5200;		/* 520 millisec */
3147 		break;
3148 	case IXGBE_PCIDEVCTRL2_1_2s:
3149 		pollcnt = 20000;	/* 2 sec */
3150 		break;
3151 	case IXGBE_PCIDEVCTRL2_4_8s:
3152 		pollcnt = 80000;	/* 8 sec */
3153 		break;
3154 	case IXGBE_PCIDEVCTRL2_17_34s:
3155 		pollcnt = 34000;	/* 34 sec */
3156 		break;
3157 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
3158 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
3159 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
3160 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
3161 	default:
3162 		pollcnt = 800;		/* 80 millisec minimum */
3163 		break;
3164 	}
3165 
3166 	/* add 10% to spec maximum */
3167 	return (pollcnt * 11) / 10;
3168 }
3169 
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 * is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i, poll;
	u16 value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer MACs skip the PCIe pending-transaction poll below
	 * and return IXGBE_SUCCESS once the double-reset flag is set.
	 */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		/* Read the PCIe Device Status config word; bail out early if
		 * the device has been surprise-removed (reads all-ones).
		 */
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		     "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
3237 
3238 /**
3239  * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3240  * @hw: pointer to hardware structure
3241  * @mask: Mask to specify which semaphore to acquire
3242  *
3243  * Acquires the SWFW semaphore through the GSSR register for the specified
3244  * function (CSR, PHY0, PHY1, EEPROM, Flash)
3245  **/
ixgbe_acquire_swfw_sync(struct ixgbe_hw * hw,u32 mask)3246 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3247 {
3248 	u32 gssr = 0;
3249 	u32 swmask = mask;
3250 	u32 fwmask = mask << 5;
3251 	u32 timeout = 200;
3252 	u32 i;
3253 
3254 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
3255 
3256 	for (i = 0; i < timeout; i++) {
3257 		/*
3258 		 * SW NVM semaphore bit is used for access to all
3259 		 * SW_FW_SYNC bits (not just NVM)
3260 		 */
3261 		if (ixgbe_get_eeprom_semaphore(hw))
3262 			return IXGBE_ERR_SWFW_SYNC;
3263 
3264 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3265 		if (!(gssr & (fwmask | swmask))) {
3266 			gssr |= swmask;
3267 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3268 			ixgbe_release_eeprom_semaphore(hw);
3269 			return IXGBE_SUCCESS;
3270 		} else {
3271 			/* Resource is currently in use by FW or SW */
3272 			ixgbe_release_eeprom_semaphore(hw);
3273 			msec_delay(5);
3274 		}
3275 	}
3276 
3277 	/* If time expired clear the bits holding the lock and retry */
3278 	if (gssr & (fwmask | swmask))
3279 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3280 
3281 	msec_delay(5);
3282 	return IXGBE_ERR_SWFW_SYNC;
3283 }
3284 
3285 /**
3286  * ixgbe_release_swfw_sync - Release SWFW semaphore
3287  * @hw: pointer to hardware structure
3288  * @mask: Mask to specify which semaphore to release
3289  *
3290  * Releases the SWFW semaphore through the GSSR register for the specified
3291  * function (CSR, PHY0, PHY1, EEPROM, Flash)
3292  **/
ixgbe_release_swfw_sync(struct ixgbe_hw * hw,u32 mask)3293 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3294 {
3295 	u32 gssr;
3296 	u32 swmask = mask;
3297 
3298 	DEBUGFUNC("ixgbe_release_swfw_sync");
3299 
3300 	ixgbe_get_eeprom_semaphore(hw);
3301 
3302 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3303 	gssr &= ~swmask;
3304 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3305 
3306 	ixgbe_release_eeprom_semaphore(hw);
3307 }
3308 
3309 /**
3310  * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3311  * @hw: pointer to hardware structure
3312  *
3313  * Stops the receive data path and waits for the HW to internally empty
3314  * the Rx security block
3315  **/
ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw * hw)3316 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3317 {
3318 #define IXGBE_MAX_SECRX_POLL 4000
3319 
3320 	int i;
3321 	int secrxreg;
3322 
3323 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3324 
3325 
3326 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3327 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3328 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3329 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3330 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3331 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3332 			break;
3333 		else
3334 			/* Use interrupt-safe sleep just in case */
3335 			usec_delay(10);
3336 	}
3337 
3338 	/* For informational purposes only */
3339 	if (i >= IXGBE_MAX_SECRX_POLL)
3340 		DEBUGOUT("Rx unit being enabled before security "
3341 			 "path fully disabled.  Continuing with init.\n");
3342 
3343 	return IXGBE_SUCCESS;
3344 }
3345 
3346 /**
3347  * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3348  * @hw: pointer to hardware structure
3349  * @locked: bool to indicate whether the SW/FW lock was taken
3350  * @reg_val: Value we read from AUTOC
3351  *
3352  * The default case requires no protection so just to the register read.
3353  */
prot_autoc_read_generic(struct ixgbe_hw * hw,bool * locked,u32 * reg_val)3354 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3355 {
3356 	*locked = false;
3357 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3358 	return IXGBE_SUCCESS;
3359 }
3360 
3361 /**
3362  * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3363  * @hw: pointer to hardware structure
3364  * @reg_val: value to write to AUTOC
3365  * @locked: bool to indicate whether the SW/FW lock was already taken by
3366  *          previous read.
3367  *
3368  * The default case requires no protection so just to the register write.
3369  */
prot_autoc_write_generic(struct ixgbe_hw * hw,u32 reg_val,bool locked)3370 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3371 {
3372 	UNREFERENCED_1PARAMETER(locked);
3373 
3374 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3375 	return IXGBE_SUCCESS;
3376 }
3377 
3378 /**
3379  * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3380  * @hw: pointer to hardware structure
3381  *
3382  * Enables the receive data path.
3383  **/
ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw * hw)3384 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3385 {
3386 	u32 secrxreg;
3387 
3388 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3389 
3390 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3391 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3392 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3393 	IXGBE_WRITE_FLUSH(hw);
3394 
3395 	return IXGBE_SUCCESS;
3396 }
3397 
3398 /**
3399  * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3400  * @hw: pointer to hardware structure
3401  * @regval: register value to write to RXCTRL
3402  *
3403  * Enables the Rx DMA unit
3404  **/
ixgbe_enable_rx_dma_generic(struct ixgbe_hw * hw,u32 regval)3405 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3406 {
3407 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3408 
3409 	if (regval & IXGBE_RXCTRL_RXEN)
3410 		ixgbe_enable_rx(hw);
3411 	else
3412 		ixgbe_disable_rx(hw);
3413 
3414 	return IXGBE_SUCCESS;
3415 }
3416 
3417 /**
3418  * ixgbe_blink_led_start_generic - Blink LED based on index.
3419  * @hw: pointer to hardware structure
3420  * @index: led number to blink
3421  **/
ixgbe_blink_led_start_generic(struct ixgbe_hw * hw,u32 index)3422 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3423 {
3424 	ixgbe_link_speed speed = 0;
3425 	bool link_up = 0;
3426 	u32 autoc_reg = 0;
3427 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3428 	s32 ret_val = IXGBE_SUCCESS;
3429 	bool locked = false;
3430 
3431 	DEBUGFUNC("ixgbe_blink_led_start_generic");
3432 
3433 	if (index > 3)
3434 		return IXGBE_ERR_PARAM;
3435 
3436 	/*
3437 	 * Link must be up to auto-blink the LEDs;
3438 	 * Force it if link is down.
3439 	 */
3440 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
3441 
3442 	if (!link_up) {
3443 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3444 		if (ret_val != IXGBE_SUCCESS)
3445 			goto out;
3446 
3447 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3448 		autoc_reg |= IXGBE_AUTOC_FLU;
3449 
3450 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3451 		if (ret_val != IXGBE_SUCCESS)
3452 			goto out;
3453 
3454 		IXGBE_WRITE_FLUSH(hw);
3455 		msec_delay(10);
3456 	}
3457 
3458 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3459 	led_reg |= IXGBE_LED_BLINK(index);
3460 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3461 	IXGBE_WRITE_FLUSH(hw);
3462 
3463 out:
3464 	return ret_val;
3465 }
3466 
3467 /**
3468  * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3469  * @hw: pointer to hardware structure
3470  * @index: led number to stop blinking
3471  **/
ixgbe_blink_led_stop_generic(struct ixgbe_hw * hw,u32 index)3472 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3473 {
3474 	u32 autoc_reg = 0;
3475 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3476 	s32 ret_val = IXGBE_SUCCESS;
3477 	bool locked = false;
3478 
3479 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
3480 
3481 	if (index > 3)
3482 		return IXGBE_ERR_PARAM;
3483 
3484 
3485 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3486 	if (ret_val != IXGBE_SUCCESS)
3487 		goto out;
3488 
3489 	autoc_reg &= ~IXGBE_AUTOC_FLU;
3490 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3491 
3492 	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3493 	if (ret_val != IXGBE_SUCCESS)
3494 		goto out;
3495 
3496 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3497 	led_reg &= ~IXGBE_LED_BLINK(index);
3498 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3499 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3500 	IXGBE_WRITE_FLUSH(hw);
3501 
3502 out:
3503 	return ret_val;
3504 }
3505 
3506 /**
3507  * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3508  * @hw: pointer to hardware structure
3509  * @san_mac_offset: SAN MAC address offset
3510  *
3511  * This function will read the EEPROM location for the SAN MAC address
3512  * pointer, and returns the value at that location.  This is used in both
3513  * get and set mac_addr routines.
3514  **/
ixgbe_get_san_mac_addr_offset(struct ixgbe_hw * hw,u16 * san_mac_offset)3515 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3516 					 u16 *san_mac_offset)
3517 {
3518 	s32 ret_val;
3519 
3520 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3521 
3522 	/*
3523 	 * First read the EEPROM pointer to see if the MAC addresses are
3524 	 * available.
3525 	 */
3526 	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3527 				      san_mac_offset);
3528 	if (ret_val) {
3529 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3530 			      "eeprom at offset %d failed",
3531 			      IXGBE_SAN_MAC_ADDR_PTR);
3532 	}
3533 
3534 	return ret_val;
3535 }
3536 
3537 /**
3538  * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3539  * @hw: pointer to hardware structure
3540  * @san_mac_addr: SAN MAC address
3541  *
3542  * Reads the SAN MAC address from the EEPROM, if it's available.  This is
3543  * per-port, so set_lan_id() must be called before reading the addresses.
3544  * set_lan_id() is called by identify_sfp(), but this cannot be relied
3545  * upon for non-SFP connections, so we must call it here.
3546  **/
ixgbe_get_san_mac_addr_generic(struct ixgbe_hw * hw,u8 * san_mac_addr)3547 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3548 {
3549 	u16 san_mac_data, san_mac_offset;
3550 	u8 i;
3551 	s32 ret_val;
3552 
3553 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3554 
3555 	/*
3556 	 * First read the EEPROM pointer to see if the MAC addresses are
3557 	 * available.  If they're not, no point in calling set_lan_id() here.
3558 	 */
3559 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3560 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3561 		goto san_mac_addr_out;
3562 
3563 	/* make sure we know which port we need to program */
3564 	hw->mac.ops.set_lan_id(hw);
3565 	/* apply the port offset to the address offset */
3566 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3567 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3568 	for (i = 0; i < 3; i++) {
3569 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3570 					      &san_mac_data);
3571 		if (ret_val) {
3572 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3573 				      "eeprom read at offset %d failed",
3574 				      san_mac_offset);
3575 			goto san_mac_addr_out;
3576 		}
3577 		san_mac_addr[i * 2] = (u8)(san_mac_data);
3578 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3579 		san_mac_offset++;
3580 	}
3581 	return IXGBE_SUCCESS;
3582 
3583 san_mac_addr_out:
3584 	/*
3585 	 * No addresses available in this EEPROM.  It's not an
3586 	 * error though, so just wipe the local address and return.
3587 	 */
3588 	for (i = 0; i < 6; i++)
3589 		san_mac_addr[i] = 0xFF;
3590 	return IXGBE_SUCCESS;
3591 }
3592 
3593 /**
3594  * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3595  * @hw: pointer to hardware structure
3596  * @san_mac_addr: SAN MAC address
3597  *
3598  * Write a SAN MAC address to the EEPROM.
3599  **/
ixgbe_set_san_mac_addr_generic(struct ixgbe_hw * hw,u8 * san_mac_addr)3600 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3601 {
3602 	s32 ret_val;
3603 	u16 san_mac_data, san_mac_offset;
3604 	u8 i;
3605 
3606 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3607 
3608 	/* Look for SAN mac address pointer.  If not defined, return */
3609 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3610 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3611 		return IXGBE_ERR_NO_SAN_ADDR_PTR;
3612 
3613 	/* Make sure we know which port we need to write */
3614 	hw->mac.ops.set_lan_id(hw);
3615 	/* Apply the port offset to the address offset */
3616 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3617 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3618 
3619 	for (i = 0; i < 3; i++) {
3620 		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3621 		san_mac_data |= (u16)(san_mac_addr[i * 2]);
3622 		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3623 		san_mac_offset++;
3624 	}
3625 
3626 	return IXGBE_SUCCESS;
3627 }
3628 
3629 /**
3630  * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3631  * @hw: pointer to hardware structure
3632  *
3633  * Read PCIe configuration space, and get the MSI-X vector count from
3634  * the capabilities table.
3635  **/
ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw * hw)3636 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3637 {
3638 	u16 msix_count = 1;
3639 	u16 max_msix_count;
3640 	u16 pcie_offset;
3641 
3642 	switch (hw->mac.type) {
3643 	case ixgbe_mac_82598EB:
3644 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3645 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3646 		break;
3647 	case ixgbe_mac_82599EB:
3648 	case ixgbe_mac_X540:
3649 	case ixgbe_mac_X550:
3650 	case ixgbe_mac_X550EM_x:
3651 	case ixgbe_mac_X550EM_a:
3652 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3653 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3654 		break;
3655 	default:
3656 		return msix_count;
3657 	}
3658 
3659 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3660 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3661 	if (IXGBE_REMOVED(hw->hw_addr))
3662 		msix_count = 0;
3663 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3664 
3665 	/* MSI-X count is zero-based in HW */
3666 	msix_count++;
3667 
3668 	if (msix_count > max_msix_count)
3669 		msix_count = max_msix_count;
3670 
3671 	return msix_count;
3672 }
3673 
/**
 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is already in; adds to the pool list.
 *
 * Note the mixed return semantics: on success the RAR index used is
 * returned (a non-negative value), while IXGBE_ERR_INVALID_MAC_ADDR is
 * returned when no RAR slot is available.
 **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low  = addr[0] | (addr[1] << 8)
			    | (addr[2] << 16)
			    | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		/* RAH holds the top 16 address bits plus the valid (AV) bit */
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			/* Remember the first disabled slot as a fallback */
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* Upper half matches; confirm with the lower 32 bits */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;    /* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		/* No match, no empty slot, and the table is full */
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	/* Success: return the RAR index that now holds the address */
	return rar;
}
3742 
3743 /**
3744  * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3745  * @hw: pointer to hardware struct
3746  * @rar: receive address register index to disassociate
3747  * @vmdq: VMDq pool index to remove from the rar
3748  **/
ixgbe_clear_vmdq_generic(struct ixgbe_hw * hw,u32 rar,u32 vmdq)3749 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3750 {
3751 	u32 mpsar_lo, mpsar_hi;
3752 	u32 rar_entries = hw->mac.num_rar_entries;
3753 
3754 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3755 
3756 	/* Make sure we are using a valid rar index range */
3757 	if (rar >= rar_entries) {
3758 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3759 			     "RAR index %d is out of range.\n", rar);
3760 		return IXGBE_ERR_INVALID_ARGUMENT;
3761 	}
3762 
3763 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3764 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3765 
3766 	if (IXGBE_REMOVED(hw->hw_addr))
3767 		goto done;
3768 
3769 	if (!mpsar_lo && !mpsar_hi)
3770 		goto done;
3771 
3772 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3773 		if (mpsar_lo) {
3774 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3775 			mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3776 		}
3777 		if (mpsar_hi) {
3778 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3779 			mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3780 		}
3781 	} else if (vmdq < 32) {
3782 		mpsar_lo &= ~(1 << vmdq);
3783 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3784 	} else {
3785 		mpsar_hi &= ~(1 << (vmdq - 32));
3786 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3787 	}
3788 
3789 	/* was that the last pool using this rar? */
3790 	if (mpsar_lo == 0 && mpsar_hi == 0 &&
3791 	    rar != 0 && rar != hw->mac.san_mac_rar_index)
3792 		hw->mac.ops.clear_rar(hw, rar);
3793 done:
3794 	return IXGBE_SUCCESS;
3795 }
3796 
3797 /**
3798  * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3799  * @hw: pointer to hardware struct
3800  * @rar: receive address register index to associate with a VMDq index
3801  * @vmdq: VMDq pool index
3802  **/
ixgbe_set_vmdq_generic(struct ixgbe_hw * hw,u32 rar,u32 vmdq)3803 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3804 {
3805 	u32 mpsar;
3806 	u32 rar_entries = hw->mac.num_rar_entries;
3807 
3808 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3809 
3810 	/* Make sure we are using a valid rar index range */
3811 	if (rar >= rar_entries) {
3812 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3813 			     "RAR index %d is out of range.\n", rar);
3814 		return IXGBE_ERR_INVALID_ARGUMENT;
3815 	}
3816 
3817 	if (vmdq < 32) {
3818 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3819 		mpsar |= 1 << vmdq;
3820 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3821 	} else {
3822 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3823 		mpsar |= 1 << (vmdq - 32);
3824 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3825 	}
3826 	return IXGBE_SUCCESS;
3827 }
3828 
3829 /**
3830  * This function should only be involved in the IOV mode.
3831  * In IOV mode, Default pool is next pool after the number of
3832  * VFs advertized and not 0.
3833  * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3834  *
3835  * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3836  * @hw: pointer to hardware struct
3837  * @vmdq: VMDq pool index
3838  **/
ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw * hw,u32 vmdq)3839 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3840 {
3841 	u32 rar = hw->mac.san_mac_rar_index;
3842 
3843 	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3844 
3845 	if (vmdq < 32) {
3846 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3847 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3848 	} else {
3849 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3850 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3851 	}
3852 
3853 	return IXGBE_SUCCESS;
3854 }
3855 
3856 /**
3857  * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3858  * @hw: pointer to hardware structure
3859  **/
ixgbe_init_uta_tables_generic(struct ixgbe_hw * hw)3860 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3861 {
3862 	int i;
3863 
3864 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3865 	DEBUGOUT(" Clearing UTA\n");
3866 
3867 	for (i = 0; i < 128; i++)
3868 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3869 
3870 	return IXGBE_SUCCESS;
3871 }
3872 
3873 /**
3874  * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3875  * @hw: pointer to hardware structure
3876  * @vlan: VLAN id to write to VLAN filter
3877  * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
3878  *		  vlanid not found
3879  *
3880  *
3881  * return the VLVF index where this VLAN id should be placed
3882  *
3883  **/
ixgbe_find_vlvf_slot(struct ixgbe_hw * hw,u32 vlan,bool vlvf_bypass)3884 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3885 {
3886 	s32 regindex, first_empty_slot;
3887 	u32 bits;
3888 
3889 	/* short cut the special case */
3890 	if (vlan == 0)
3891 		return 0;
3892 
3893 	/* if vlvf_bypass is set we don't want to use an empty slot, we
3894 	 * will simply bypass the VLVF if there are no entries present in the
3895 	 * VLVF that contain our VLAN
3896 	 */
3897 	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3898 
3899 	/* add VLAN enable bit for comparison */
3900 	vlan |= IXGBE_VLVF_VIEN;
3901 
3902 	/* Search for the vlan id in the VLVF entries. Save off the first empty
3903 	 * slot found along the way.
3904 	 *
3905 	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3906 	 */
3907 	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3908 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3909 		if (bits == vlan)
3910 			return regindex;
3911 		if (!first_empty_slot && !bits)
3912 			first_empty_slot = regindex;
3913 	}
3914 
3915 	/* If we are here then we didn't find the VLAN.  Return first empty
3916 	 * slot we found during our search, else error.
3917 	 */
3918 	if (!first_empty_slot)
3919 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3920 
3921 	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3922 }
3923 
3924 /**
3925  * ixgbe_set_vfta_generic - Set VLAN filter table
3926  * @hw: pointer to hardware structure
3927  * @vlan: VLAN id to write to VLAN filter
3928  * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3929  * @vlan_on: boolean flag to turn on/off VLAN
3930  * @vlvf_bypass: boolean flag indicating updating default pool is okay
3931  *
3932  * Turn on/off specified VLAN in the VLAN filter table.
3933  **/
ixgbe_set_vfta_generic(struct ixgbe_hw * hw,u32 vlan,u32 vind,bool vlan_on,bool vlvf_bypass)3934 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3935 			   bool vlan_on, bool vlvf_bypass)
3936 {
3937 	u32 regidx, vfta_delta, vfta;
3938 	s32 ret_val;
3939 
3940 	DEBUGFUNC("ixgbe_set_vfta_generic");
3941 
3942 	if (vlan > 4095 || vind > 63)
3943 		return IXGBE_ERR_PARAM;
3944 
3945 	/*
3946 	 * this is a 2 part operation - first the VFTA, then the
3947 	 * VLVF and VLVFB if VT Mode is set
3948 	 * We don't write the VFTA until we know the VLVF part succeeded.
3949 	 */
3950 
3951 	/* Part 1
3952 	 * The VFTA is a bitstring made up of 128 32-bit registers
3953 	 * that enable the particular VLAN id, much like the MTA:
3954 	 *    bits[11-5]: which register
3955 	 *    bits[4-0]:  which bit in the register
3956 	 */
3957 	regidx = vlan / 32;
3958 	vfta_delta = 1 << (vlan % 32);
3959 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3960 
3961 	/*
3962 	 * vfta_delta represents the difference between the current value
3963 	 * of vfta and the value we want in the register.  Since the diff
3964 	 * is an XOR mask we can just update the vfta using an XOR
3965 	 */
3966 	vfta_delta &= vlan_on ? ~vfta : vfta;
3967 	vfta ^= vfta_delta;
3968 
3969 	/* Part 2
3970 	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3971 	 */
3972 	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
3973 					 vfta, vlvf_bypass);
3974 	if (ret_val != IXGBE_SUCCESS) {
3975 		if (vlvf_bypass)
3976 			goto vfta_update;
3977 		return ret_val;
3978 	}
3979 
3980 vfta_update:
3981 	/* Update VFTA now that we are ready for traffic */
3982 	if (vfta_delta)
3983 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
3984 
3985 	return IXGBE_SUCCESS;
3986 }
3987 
3988 /**
3989  * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3990  * @hw: pointer to hardware structure
3991  * @vlan: VLAN id to write to VLAN filter
3992  * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3993  * @vlan_on: boolean flag to turn on/off VLAN in VLVF
3994  * @vfta_delta: pointer to the difference between the current value of VFTA
3995  *		 and the desired value
3996  * @vfta: the desired value of the VFTA
3997  * @vlvf_bypass: boolean flag indicating updating default pool is okay
3998  *
3999  * Turn on/off specified bit in VLVF table.
4000  **/
ixgbe_set_vlvf_generic(struct ixgbe_hw * hw,u32 vlan,u32 vind,bool vlan_on,u32 * vfta_delta,u32 vfta,bool vlvf_bypass)4001 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4002 			   bool vlan_on, u32 *vfta_delta, u32 vfta,
4003 			   bool vlvf_bypass)
4004 {
4005 	u32 bits;
4006 	s32 vlvf_index;
4007 
4008 	DEBUGFUNC("ixgbe_set_vlvf_generic");
4009 
4010 	if (vlan > 4095 || vind > 63)
4011 		return IXGBE_ERR_PARAM;
4012 
4013 	/* If VT Mode is set
4014 	 *   Either vlan_on
4015 	 *     make sure the vlan is in VLVF
4016 	 *     set the vind bit in the matching VLVFB
4017 	 *   Or !vlan_on
4018 	 *     clear the pool bit and possibly the vind
4019 	 */
4020 	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
4021 		return IXGBE_SUCCESS;
4022 
4023 	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
4024 	if (vlvf_index < 0)
4025 		return vlvf_index;
4026 
4027 	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
4028 
4029 	/* set the pool bit */
4030 	bits |= 1 << (vind % 32);
4031 	if (vlan_on)
4032 		goto vlvf_update;
4033 
4034 	/* clear the pool bit */
4035 	bits ^= 1 << (vind % 32);
4036 
4037 	if (!bits &&
4038 	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
4039 		/* Clear VFTA first, then disable VLVF.  Otherwise
4040 		 * we run the risk of stray packets leaking into
4041 		 * the PF via the default pool
4042 		 */
4043 		if (*vfta_delta)
4044 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
4045 
4046 		/* disable VLVF and clear remaining bit from pool */
4047 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4048 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
4049 
4050 		return IXGBE_SUCCESS;
4051 	}
4052 
4053 	/* If there are still bits set in the VLVFB registers
4054 	 * for the VLAN ID indicated we need to see if the
4055 	 * caller is requesting that we clear the VFTA entry bit.
4056 	 * If the caller has requested that we clear the VFTA
4057 	 * entry bit but there are still pools/VFs using this VLAN
4058 	 * ID entry then ignore the request.  We're not worried
4059 	 * about the case where we're turning the VFTA VLAN ID
4060 	 * entry bit on, only when requested to turn it off as
4061 	 * there may be multiple pools and/or VFs using the
4062 	 * VLAN ID entry.  In that case we cannot clear the
4063 	 * VFTA bit until all pools/VFs using that VLAN ID have also
4064 	 * been cleared.  This will be indicated by "bits" being
4065 	 * zero.
4066 	 */
4067 	*vfta_delta = 0;
4068 
4069 vlvf_update:
4070 	/* record pool change and enable VLAN ID if not already enabled */
4071 	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
4072 	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4073 
4074 	return IXGBE_SUCCESS;
4075 }
4076 
4077 /**
4078  * ixgbe_clear_vfta_generic - Clear VLAN filter table
4079  * @hw: pointer to hardware structure
4080  *
4081  * Clears the VLAN filer table, and the VMDq index associated with the filter
4082  **/
ixgbe_clear_vfta_generic(struct ixgbe_hw * hw)4083 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4084 {
4085 	u32 offset;
4086 
4087 	DEBUGFUNC("ixgbe_clear_vfta_generic");
4088 
4089 	for (offset = 0; offset < hw->mac.vft_size; offset++)
4090 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4091 
4092 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4093 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4094 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4095 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4096 	}
4097 
4098 	return IXGBE_SUCCESS;
4099 }
4100 
4101 /**
4102  * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4103  * @hw: pointer to hardware structure
4104  *
4105  * Contains the logic to identify if we need to verify link for the
4106  * crosstalk fix
4107  **/
ixgbe_need_crosstalk_fix(struct ixgbe_hw * hw)4108 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4109 {
4110 
4111 	/* Does FW say we need the fix */
4112 	if (!hw->need_crosstalk_fix)
4113 		return false;
4114 
4115 	/* Only consider SFP+ PHYs i.e. media type fiber */
4116 	switch (hw->mac.ops.get_media_type(hw)) {
4117 	case ixgbe_media_type_fiber:
4118 	case ixgbe_media_type_fiber_qsfp:
4119 		break;
4120 	default:
4121 		return false;
4122 	}
4123 
4124 	return true;
4125 }
4126 
/**
 * ixgbe_check_mac_link_generic - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		u32 sfp_cage_full;

		/* The SDP pin that signals module presence differs per MAC */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP0;
			break;
		default:
			/* sanity check - No SFP+ devices here */
			sfp_cage_full = false;
			break;
		}

		/* empty cage: report link down without consulting LINKS */
		if (!sfp_cage_full) {
			*link_up = false;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return IXGBE_SUCCESS;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* poll in 100 ms steps, up to max_link_up_time iterations */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	/* Decode the speed field; the NON_STD bit distinguishes 2.5G (from
	 * the 10G encoding) and 5G (from the 100M encoding) on X550 parts.
	 */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type == ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* only the 1G copper X550EM_A devices support 10 Mb here */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
			*speed = IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}
4231 
/**
 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 * the EEPROM
 * @hw: pointer to hardware structure
 * @wwnn_prefix: the alternative WWNN prefix
 * @wwpn_prefix: the alternative WWPN prefix
 *
 * This function will read the EEPROM from the alternative SAN MAC address
 * block to check the support for the alternative WWNN/WWPN prefix support.
 *
 * Always returns IXGBE_SUCCESS; read failures only log an error and leave
 * the corresponding output at 0xFFFF.
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	/* a zero or all-ones pointer means the block is absent */
	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/* get the corresponding prefix for WWNN/WWPN */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	/* NOTE: a failed WWNN read is reported inline (rather than via
	 * wwn_prefix_err) so that the WWPN read below is still attempted
	 */
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", offset);
	}

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

wwn_prefix_out:
	return IXGBE_SUCCESS;

wwn_prefix_err:
	/* best effort: log the failure but still report success */
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_SUCCESS;
}
4289 
4290 /**
4291  * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4292  * @hw: pointer to hardware structure
4293  * @bs: the fcoe boot status
4294  *
4295  * This function will read the FCOE boot status from the iSCSI FCOE block
4296  **/
ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw * hw,u16 * bs)4297 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4298 {
4299 	u16 offset, caps, flags;
4300 	s32 status;
4301 
4302 	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4303 
4304 	/* clear output first */
4305 	*bs = ixgbe_fcoe_bootstatus_unavailable;
4306 
4307 	/* check if FCOE IBA block is present */
4308 	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4309 	status = hw->eeprom.ops.read(hw, offset, &caps);
4310 	if (status != IXGBE_SUCCESS)
4311 		goto out;
4312 
4313 	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4314 		goto out;
4315 
4316 	/* check if iSCSI FCOE block is populated */
4317 	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4318 	if (status != IXGBE_SUCCESS)
4319 		goto out;
4320 
4321 	if ((offset == 0) || (offset == 0xFFFF))
4322 		goto out;
4323 
4324 	/* read fcoe flags in iSCSI FCOE block */
4325 	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4326 	status = hw->eeprom.ops.read(hw, offset, &flags);
4327 	if (status != IXGBE_SUCCESS)
4328 		goto out;
4329 
4330 	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4331 		*bs = ixgbe_fcoe_bootstatus_enabled;
4332 	else
4333 		*bs = ixgbe_fcoe_bootstatus_disabled;
4334 
4335 out:
4336 	return status;
4337 }
4338 
4339 /**
4340  * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4341  * @hw: pointer to hardware structure
4342  * @enable: enable or disable switch for MAC anti-spoofing
4343  * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4344  *
4345  **/
ixgbe_set_mac_anti_spoofing(struct ixgbe_hw * hw,bool enable,int vf)4346 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4347 {
4348 	int vf_target_reg = vf >> 3;
4349 	int vf_target_shift = vf % 8;
4350 	u32 pfvfspoof;
4351 
4352 	if (hw->mac.type == ixgbe_mac_82598EB)
4353 		return;
4354 
4355 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4356 	if (enable)
4357 		pfvfspoof |= (1 << vf_target_shift);
4358 	else
4359 		pfvfspoof &= ~(1 << vf_target_shift);
4360 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4361 }
4362 
4363 /**
4364  * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4365  * @hw: pointer to hardware structure
4366  * @enable: enable or disable switch for VLAN anti-spoofing
4367  * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4368  *
4369  **/
ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw * hw,bool enable,int vf)4370 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4371 {
4372 	int vf_target_reg = vf >> 3;
4373 	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4374 	u32 pfvfspoof;
4375 
4376 	if (hw->mac.type == ixgbe_mac_82598EB)
4377 		return;
4378 
4379 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4380 	if (enable)
4381 		pfvfspoof |= (1 << vf_target_shift);
4382 	else
4383 		pfvfspoof &= ~(1 << vf_target_shift);
4384 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4385 }
4386 
/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_generic");

	/* NOTE(review): the read status is ignored, so *device_caps may be
	 * left unmodified on a failed read while IXGBE_SUCCESS is still
	 * returned -- confirm callers tolerate this.
	 */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}
4403 
4404 /**
4405  * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4406  * @hw: pointer to hardware structure
4407  *
4408  **/
ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw * hw)4409 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4410 {
4411 	u32 regval;
4412 	u32 i;
4413 
4414 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4415 
4416 	/* Enable relaxed ordering */
4417 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
4418 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4419 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4420 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4421 	}
4422 
4423 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
4424 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4425 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4426 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4427 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4428 	}
4429 
4430 }
4431 
4432 /**
4433  * ixgbe_calculate_checksum - Calculate checksum for buffer
4434  * @buffer: pointer to EEPROM
4435  * @length: size of EEPROM to calculate a checksum for
4436  * Calculates the checksum for some buffer on a specified length.  The
4437  * checksum calculated is returned.
4438  **/
ixgbe_calculate_checksum(u8 * buffer,u32 length)4439 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4440 {
4441 	u32 i;
4442 	u8 sum = 0;
4443 
4444 	DEBUGFUNC("ixgbe_calculate_checksum");
4445 
4446 	if (!buffer)
4447 		return 0;
4448 
4449 	for (i = 0; i < length; i++)
4450 		sum += buffer[i];
4451 
4452 	return (u8) (0 - sum);
4453 }
4454 
/**
 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
 * @hw: pointer to the HW structure
 * @buffer: command to write and where the return status will be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 *
 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 * by the caller.
 **/
s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
		       u32 timeout)
{
	u32 hicr, i, fwsts;
	u16 dword_len;

	DEBUGFUNC("ixgbe_hic_unlocked");

	/* reject empty or oversized command buffers up front */
	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(u32)) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* poll for the ARC to clear the command bit, 1 ms per iteration */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* For each command except "Apply Update" perform
	 * status checks in the HICR registry.
	 */
	if ((buffer[0] & IXGBE_HOST_INTERFACE_MASK_CMD) ==
	    IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD)
		return IXGBE_SUCCESS;

	/* Check command completion: either the poll above timed out, or the
	 * status-valid bit never came up.
	 */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			      "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	return IXGBE_SUCCESS;
}
4535 
/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (true) or not (false)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	/* response header is read back into the front of the same buffer */
	struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
	u16 buf_len;
	s32 status;
	u32 bi;
	u32 dword_len;

	DEBUGFUNC("ixgbe_host_interface_command");

	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
	}

	/*
	 * If there is any thing in data position pull it in
	 * Read Flash command requires reading buffer length from
	 * two byes instead of one byte
	 */
	if (resp->cmd == IXGBE_HOST_INTERFACE_FLASH_READ_CMD ||
	    resp->cmd == IXGBE_HOST_INTERFACE_SHADOW_RAM_READ_CMD) {
		/* pull in the two extra header dwords these commands carry */
		for (; bi < dword_len + 2; bi++) {
			buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
							  bi);
			/* NOTE(review): no (uintptr_t) cast here, unlike the
			 * other two IXGBE_LE32_TO_CPUS call sites -- confirm
			 * the macro accepts both forms on all platforms
			 */
			IXGBE_LE32_TO_CPUS(&buffer[bi]);
		}
		/* 12-bit length: high nibble from ret_status, low byte from
		 * buf_len
		 */
		buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
				  & 0xF00) | resp->buf_len;
		hdr_size += (2 << 2);
	} else {
		buf_len = resp->buf_len;
	}
	if (!buf_len)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	/* NOTE(review): the "<=" bound reads one dword past dword_len from
	 * where bi resumes -- matches the upstream shared code; verify
	 * intentional before changing
	 */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}
4633 
4634 /**
4635  * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4636  * @hw: pointer to the HW structure
4637  * @maj: driver version major number
4638  * @min: driver version minor number
4639  * @build: driver version build number
4640  * @sub: driver version sub build number
4641  * @len: unused
4642  * @driver_ver: unused
4643  *
4644  * Sends driver version number to firmware through the manageability
4645  * block.  On success return IXGBE_SUCCESS
4646  * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4647  * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4648  **/
ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw * hw,u8 maj,u8 min,u8 build,u8 sub,u16 len,const char * driver_ver)4649 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4650 				 u8 build, u8 sub, u16 len,
4651 				 const char *driver_ver)
4652 {
4653 	struct ixgbe_hic_drv_info fw_cmd;
4654 	int i;
4655 	s32 ret_val = IXGBE_SUCCESS;
4656 
4657 	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4658 	UNREFERENCED_2PARAMETER(len, driver_ver);
4659 
4660 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4661 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4662 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4663 	fw_cmd.port_num = (u8)hw->bus.func;
4664 	fw_cmd.ver_maj = maj;
4665 	fw_cmd.ver_min = min;
4666 	fw_cmd.ver_build = build;
4667 	fw_cmd.ver_sub = sub;
4668 	fw_cmd.hdr.checksum = 0;
4669 	fw_cmd.pad = 0;
4670 	fw_cmd.pad2 = 0;
4671 	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4672 				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4673 
4674 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4675 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4676 						       sizeof(fw_cmd),
4677 						       IXGBE_HI_COMMAND_TIMEOUT,
4678 						       true);
4679 		if (ret_val != IXGBE_SUCCESS)
4680 			continue;
4681 
4682 		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4683 		    FW_CEM_RESP_STATUS_SUCCESS)
4684 			ret_val = IXGBE_SUCCESS;
4685 		else
4686 			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4687 
4688 		break;
4689 	}
4690 
4691 	return ret_val;
4692 }
4693 
/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	/* guard against division by zero below */
	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 * Per buffer: pbsize * (5/8) / (num_pb/2) = pbsize*5/(num_pb*4)
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		/* register takes the size in KB shifted into position */
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* fall through - configure remaining packet buffers */
	case PBA_STRATEGY_EQUAL:
		/* split what is left evenly across the remaining buffers
		 * (i is 0 here for pure EQUAL, num_pb/2 after WEIGHTED)
		 */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
4752 
/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.  Poll in 100us steps; bail out early if the
	 * device appears removed or no transactions remain pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
4813 
/* EMC temperature-data register per ETS sensor index
 * (0 = internal, 1-3 = external diodes 1-3)
 */
STATIC const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
/* EMC thermal-limit register per ETS sensor index (same ordering) */
STATIC const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};
4826 
/**
 * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Reads the ETS configuration from the EEPROM and, for each described
 * sensor, queries the EMC over I2C and stores the temperature in
 * hw->mac.thermal_sensor_data.
 **/
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8  num_sensors;
	u8  sensor_index;
	u8  sensor_location;
	u8  i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");

	/* Only support thermal sensors attached to 82599 physical port 0 */
	if ((hw->mac.type != ixgbe_mac_82599EB) ||
	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	/* Locate the ETS block via its EEPROM pointer word */
	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
	if (status)
		goto out;

	/* A zero or all-ones pointer means no ETS block is present */
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
	if (status)
		goto out;

	/* Only the EMC sensor type is supported here */
	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
		!= IXGBE_ETS_TYPE_EMC) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	/* Per-sensor records follow the config word at ets_offset + 1 */
	for (i = 0; i < num_sensors; i++) {
		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
					     &ets_sensor);
		if (status)
			goto out;

		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);

		/* Only query sensors that report a non-zero location */
		if (sensor_location != 0) {
			status = hw->phy.ops.read_i2c_byte(hw,
					ixgbe_emc_temp_data[sensor_index],
					IXGBE_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
			if (status)
				goto out;
		}
	}
out:
	return status;
}
4900 
/**
 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Inits the thermal sensor thresholds according to the NVM map
 * and save off the threshold and location values into mac.thermal_sensor_data
 **/
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 offset;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8  low_thresh_delta;
	u8  num_sensors;
	u8  sensor_index;
	u8  sensor_location;
	u8  therm_limit;
	u8  i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");

	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));

	/* Only support thermal sensors attached to 82599 physical port 0 */
	if ((hw->mac.type != ixgbe_mac_82599EB) ||
	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return IXGBE_NOT_IMPLEMENTED;

	/* Locate the ETS block via its EEPROM pointer word */
	offset = IXGBE_ETS_CFG;
	if (hw->eeprom.ops.read(hw, offset, &ets_offset))
		goto eeprom_err;
	/* A zero or all-ones pointer means no ETS block is present */
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return IXGBE_NOT_IMPLEMENTED;

	offset = ets_offset;
	if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
		goto eeprom_err;
	/* Only the EMC sensor type is supported here */
	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
		!= IXGBE_ETS_TYPE_EMC)
		return IXGBE_NOT_IMPLEMENTED;

	/* Delta subtracted from the high threshold to form the low one */
	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
			     IXGBE_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);

	/* Per-sensor records follow the config word at ets_offset + 1;
	 * a failed read skips that sensor rather than aborting.
	 */
	for (i = 0; i < num_sensors; i++) {
		offset = ets_offset + 1 + i;
		if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
				      "eeprom read at offset %d failed",
				      offset);
			continue;
		}
		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;

		/* Program the high threshold into the EMC over I2C */
		hw->phy.ops.write_i2c_byte(hw,
			ixgbe_emc_therm_limit[sensor_index],
			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);

		/* Cache only the first IXGBE_MAX_SENSORS populated entries */
		if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
			data->sensor[i].location = sensor_location;
			data->sensor[i].caution_thresh = therm_limit;
			data->sensor[i].max_op_thresh = therm_limit -
							low_thresh_delta;
		}
	}
	return status;

eeprom_err:
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_NOT_IMPLEMENTED;
}
4981 
4982 /**
4983  * ixgbe_get_orom_version - Return option ROM from EEPROM
4984  *
4985  * @hw: pointer to hardware structure
4986  * @nvm_ver: pointer to output structure
4987  *
4988  * if valid option ROM version, nvm_ver->or_valid set to true
4989  * else nvm_ver->or_valid is false.
4990  **/
ixgbe_get_orom_version(struct ixgbe_hw * hw,struct ixgbe_nvm_version * nvm_ver)4991 void ixgbe_get_orom_version(struct ixgbe_hw *hw,
4992 			    struct ixgbe_nvm_version *nvm_ver)
4993 {
4994 	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
4995 
4996 	nvm_ver->or_valid = false;
4997 	/* Option Rom may or may not be present.  Start with pointer */
4998 	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
4999 
5000 	/* make sure offset is valid */
5001 	if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
5002 		return;
5003 
5004 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
5005 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
5006 
5007 	/* option rom exists and is valid */
5008 	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
5009 	    eeprom_cfg_blkl == NVM_VER_INVALID ||
5010 	    eeprom_cfg_blkh == NVM_VER_INVALID)
5011 		return;
5012 
5013 	nvm_ver->or_valid = true;
5014 	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
5015 	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
5016 			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
5017 	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
5018 }
5019 
/**
 * ixgbe_get_oem_prod_version - Return OEM Product version
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * if valid OEM product version, nvm_ver->oem_valid set to true
 * else nvm_ver->oem_valid is false.
 **/
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
				struct ixgbe_nvm_version *nvm_ver)
{
	u16 rel_num, prod_ver, mod_len, cap, offset;

	nvm_ver->oem_valid = false;
	/* The block is optional; follow its EEPROM pointer word */
	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);

	/* Return if offset to OEM Product Version block is invalid */
	if (offset == 0x0 || offset == NVM_INVALID_PTR)
		return;

	/* Read product version block */
	hw->eeprom.ops.read(hw, offset, &mod_len);
	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);

	/* Return if OEM product version block is invalid */
	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
		return;

	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);

	/* Return if version is invalid (zero or erased words) */
	if ((rel_num | prod_ver) == 0x0 ||
	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
		return;

	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
	nvm_ver->oem_release = rel_num;
	nvm_ver->oem_valid = true;
}
5063 
5064 /**
5065  * ixgbe_get_etk_id - Return Etrack ID from EEPROM
5066  *
5067  * @hw: pointer to hardware structure
5068  * @nvm_ver: pointer to output structure
5069  *
5070  * word read errors will return 0xFFFF
5071  **/
ixgbe_get_etk_id(struct ixgbe_hw * hw,struct ixgbe_nvm_version * nvm_ver)5072 void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
5073 {
5074 	u16 etk_id_l, etk_id_h;
5075 
5076 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
5077 		etk_id_l = NVM_VER_INVALID;
5078 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
5079 		etk_id_h = NVM_VER_INVALID;
5080 
5081 	/* The word order for the version format is determined by high order
5082 	 * word bit 15.
5083 	 */
5084 	if ((etk_id_h & NVM_ETK_VALID) == 0) {
5085 		nvm_ver->etk_id = etk_id_h;
5086 		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
5087 	} else {
5088 		nvm_ver->etk_id = etk_id_l;
5089 		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
5090 	}
5091 }
5092 
5093 
5094 /**
5095  * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
5096  * @hw: pointer to hardware structure
5097  * @map: pointer to u8 arr for returning map
5098  *
5099  * Read the rtrup2tc HW register and resolve its content into map
5100  **/
ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw * hw,u8 * map)5101 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
5102 {
5103 	u32 reg, i;
5104 
5105 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
5106 	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
5107 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
5108 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5109 	return;
5110 }
5111 
ixgbe_disable_rx_generic(struct ixgbe_hw * hw)5112 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5113 {
5114 	u32 pfdtxgswc;
5115 	u32 rxctrl;
5116 
5117 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5118 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
5119 		if (hw->mac.type != ixgbe_mac_82598EB) {
5120 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5121 			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5122 				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5123 				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5124 				hw->mac.set_lben = true;
5125 			} else {
5126 				hw->mac.set_lben = false;
5127 			}
5128 		}
5129 		rxctrl &= ~IXGBE_RXCTRL_RXEN;
5130 		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
5131 	}
5132 }
5133 
ixgbe_enable_rx_generic(struct ixgbe_hw * hw)5134 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5135 {
5136 	u32 pfdtxgswc;
5137 	u32 rxctrl;
5138 
5139 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5140 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5141 
5142 	if (hw->mac.type != ixgbe_mac_82598EB) {
5143 		if (hw->mac.set_lben) {
5144 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5145 			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5146 			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5147 			hw->mac.set_lben = false;
5148 		}
5149 	}
5150 }
5151 
5152 /**
5153  * ixgbe_mng_present - returns true when management capability is present
5154  * @hw: pointer to hardware structure
5155  */
ixgbe_mng_present(struct ixgbe_hw * hw)5156 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5157 {
5158 	u32 fwsm;
5159 
5160 	if (hw->mac.type < ixgbe_mac_82599EB)
5161 		return false;
5162 
5163 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5164 
5165 	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
5166 }
5167 
5168 /**
5169  * ixgbe_mng_enabled - Is the manageability engine enabled?
5170  * @hw: pointer to hardware structure
5171  *
5172  * Returns true if the manageability engine is enabled.
5173  **/
ixgbe_mng_enabled(struct ixgbe_hw * hw)5174 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5175 {
5176 	u32 fwsm, manc, factps;
5177 
5178 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5179 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
5180 		return false;
5181 
5182 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5183 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
5184 		return false;
5185 
5186 	if (hw->mac.type <= ixgbe_mac_X540) {
5187 		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5188 		if (factps & IXGBE_FACTPS_MNGCG)
5189 			return false;
5190 	}
5191 
5192 	return true;
5193 }
5194 
/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the MAC and/or PHY register and restarts link.
 * Tries each requested speed from highest to lowest; if none links, it
 * recurses once with only the highest speed that was attempted.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = false;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 1000ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 10; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, false);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						      highest_link_speed,
						      autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
5339 
5340 /**
5341  * ixgbe_set_soft_rate_select_speed - Set module link speed
5342  * @hw: pointer to hardware structure
5343  * @speed: link speed to set
5344  *
5345  * Set module link speed via the soft rate select.
5346  */
ixgbe_set_soft_rate_select_speed(struct ixgbe_hw * hw,ixgbe_link_speed speed)5347 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5348 					ixgbe_link_speed speed)
5349 {
5350 	s32 status;
5351 	u8 rs, eeprom_data;
5352 
5353 	switch (speed) {
5354 	case IXGBE_LINK_SPEED_10GB_FULL:
5355 		/* one bit mask same as setting on */
5356 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5357 		break;
5358 	case IXGBE_LINK_SPEED_1GB_FULL:
5359 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5360 		break;
5361 	default:
5362 		DEBUGOUT("Invalid fixed module speed\n");
5363 		return;
5364 	}
5365 
5366 	/* Set RS0 */
5367 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5368 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5369 					   &eeprom_data);
5370 	if (status) {
5371 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5372 		goto out;
5373 	}
5374 
5375 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5376 
5377 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5378 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5379 					    eeprom_data);
5380 	if (status) {
5381 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5382 		goto out;
5383 	}
5384 
5385 	/* Set RS1 */
5386 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5387 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5388 					   &eeprom_data);
5389 	if (status) {
5390 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5391 		goto out;
5392 	}
5393 
5394 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5395 
5396 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5397 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5398 					    eeprom_data);
5399 	if (status) {
5400 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
5401 		goto out;
5402 	}
5403 out:
5404 	return;
5405 }
5406