1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4 
5 #include "igc_api.h"
6 
7 static s32 igc_init_nvm_params_i225(struct igc_hw *hw);
8 static s32 igc_init_mac_params_i225(struct igc_hw *hw);
9 static s32 igc_init_phy_params_i225(struct igc_hw *hw);
10 static s32 igc_reset_hw_i225(struct igc_hw *hw);
11 static s32 igc_acquire_nvm_i225(struct igc_hw *hw);
12 static void igc_release_nvm_i225(struct igc_hw *hw);
13 static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw);
14 static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
15 				  u16 *data);
16 static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw);
17 static s32 igc_valid_led_default_i225(struct igc_hw *hw, u16 *data);
18 
19 /**
20  *  igc_init_nvm_params_i225 - Init NVM func ptrs.
21  *  @hw: pointer to the HW structure
22  **/
23 static s32 igc_init_nvm_params_i225(struct igc_hw *hw)
24 {
25 	struct igc_nvm_info *nvm = &hw->nvm;
26 	u32 eecd = IGC_READ_REG(hw, IGC_EECD);
27 	u16 size;
28 
29 	DEBUGFUNC("igc_init_nvm_params_i225");
30 
31 	size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
32 		     IGC_EECD_SIZE_EX_SHIFT);
33 	/*
34 	 * Added to a constant, "size" becomes the left-shift value
35 	 * for setting word_size.
36 	 */
37 	size += NVM_WORD_SIZE_BASE_SHIFT;
38 
39 	/* Just in case size is out of range, cap it to the largest
40 	 * EEPROM size supported
41 	 */
42 	if (size > 15)
43 		size = 15;
44 
45 	nvm->word_size = 1 << size;
46 	nvm->opcode_bits = 8;
47 	nvm->delay_usec = 1;
48 	nvm->type = igc_nvm_eeprom_spi;
49 
50 
51 	nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
52 	nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
53 			    16 : 8;
54 
55 	if (nvm->word_size == (1 << 15))
56 		nvm->page_size = 128;
57 
58 	nvm->ops.acquire = igc_acquire_nvm_i225;
59 	nvm->ops.release = igc_release_nvm_i225;
60 	nvm->ops.valid_led_default = igc_valid_led_default_i225;
61 	if (igc_get_flash_presence_i225(hw)) {
62 		hw->nvm.type = igc_nvm_flash_hw;
63 		nvm->ops.read    = igc_read_nvm_srrd_i225;
64 		nvm->ops.write   = igc_write_nvm_srwr_i225;
65 		nvm->ops.validate = igc_validate_nvm_checksum_i225;
66 		nvm->ops.update   = igc_update_nvm_checksum_i225;
67 	} else {
68 		hw->nvm.type = igc_nvm_invm;
69 		nvm->ops.write    = igc_null_write_nvm;
70 		nvm->ops.validate = igc_null_ops_generic;
71 		nvm->ops.update   = igc_null_ops_generic;
72 	}
73 
74 	return IGC_SUCCESS;
75 }
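
/* Worked example (illustrative, assuming NVM_WORD_SIZE_BASE_SHIFT is 6 as in
 * other igc/e1000 base code): if the EECD size field reads back as 3, then
 *
 *	size = 3 + 6 = 9;
 *	nvm->word_size = 1 << 9;	(512 16-bit words, i.e. 1 KB)
 *
 * The cap at 15 keeps word_size at or below 1 << 15 = 32768 words (64 KB),
 * the largest EEPROM size this code supports.
 */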
76 
77 /**
78  *  igc_init_mac_params_i225 - Init MAC func ptrs.
79  *  @hw: pointer to the HW structure
80  **/
81 static s32 igc_init_mac_params_i225(struct igc_hw *hw)
82 {
83 	struct igc_mac_info *mac = &hw->mac;
84 	struct igc_dev_spec_i225 *dev_spec = &hw->dev_spec._i225;
85 
86 	DEBUGFUNC("igc_init_mac_params_i225");
87 
88 	/* Initialize function pointer */
89 	igc_init_mac_ops_generic(hw);
90 
91 	/* Set media type */
92 	hw->phy.media_type = igc_media_type_copper;
93 	/* Set mta register count */
94 	mac->mta_reg_count = 128;
95 	/* Set rar entry count */
96 	mac->rar_entry_count = IGC_RAR_ENTRIES_BASE;
97 
98 	/* reset */
99 	mac->ops.reset_hw = igc_reset_hw_i225;
100 	/* hw initialization */
101 	mac->ops.init_hw = igc_init_hw_i225;
102 	/* link setup */
103 	mac->ops.setup_link = igc_setup_link_generic;
104 	/* check for link */
105 	mac->ops.check_for_link = igc_check_for_link_i225;
106 	/* link info */
107 	mac->ops.get_link_up_info = igc_get_speed_and_duplex_copper_generic;
108 	/* acquire SW_FW sync */
109 	mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
110 	/* release SW_FW sync */
111 	mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;
112 
113 	/* Allow a single clear of the SW semaphore on I225 */
114 	dev_spec->clear_semaphore_once = true;
115 	mac->ops.setup_physical_interface = igc_setup_copper_link_i225;
116 
117 	/* Set if part includes ASF firmware */
118 	mac->asf_firmware_present = true;
119 
120 	/* multicast address update */
121 	mac->ops.update_mc_addr_list = igc_update_mc_addr_list_generic;
122 
123 	mac->ops.write_vfta = igc_write_vfta_generic;
124 
125 	return IGC_SUCCESS;
126 }
127 
128 /**
129  *  igc_init_phy_params_i225 - Init PHY func ptrs.
130  *  @hw: pointer to the HW structure
131  **/
132 static s32 igc_init_phy_params_i225(struct igc_hw *hw)
133 {
134 	struct igc_phy_info *phy = &hw->phy;
135 	s32 ret_val = IGC_SUCCESS;
136 	u32 ctrl_ext;
137 
138 	DEBUGFUNC("igc_init_phy_params_i225");
139 
140 	phy->ops.read_i2c_byte = igc_read_i2c_byte_generic;
141 	phy->ops.write_i2c_byte = igc_write_i2c_byte_generic;
142 
143 	if (hw->phy.media_type != igc_media_type_copper) {
144 		phy->type = igc_phy_none;
145 		goto out;
146 	}
147 
148 	phy->ops.power_up   = igc_power_up_phy_copper;
149 	phy->ops.power_down = igc_power_down_phy_copper_base;
150 
151 	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
152 
153 	phy->reset_delay_us	= 100;
154 
155 	phy->ops.acquire	= igc_acquire_phy_base;
156 	phy->ops.check_reset_block = igc_check_reset_block_generic;
157 	phy->ops.commit		= igc_phy_sw_reset_generic;
158 	phy->ops.release	= igc_release_phy_base;
159 
160 	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
161 
162 	/* Make sure the PHY is in a good state. Several people have reported
163 	 * firmware leaving the PHY's page select register set to something
164 	 * other than the default of zero, which causes the PHY ID read to
165 	 * access something other than the intended register.
166 	 */
167 	ret_val = hw->phy.ops.reset(hw);
168 	if (ret_val)
169 		goto out;
170 
171 	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext);
172 	phy->ops.read_reg = igc_read_phy_reg_gpy;
173 	phy->ops.write_reg = igc_write_phy_reg_gpy;
174 
175 	ret_val = igc_get_phy_id(hw);
176 	/* Verify phy id and set remaining function pointers */
177 	switch (phy->id) {
178 	case I225_I_PHY_ID:
179 		phy->type		= igc_phy_i225;
180 		phy->ops.set_d0_lplu_state = igc_set_d0_lplu_state_i225;
181 		phy->ops.set_d3_lplu_state = igc_set_d3_lplu_state_i225;
182 		/* TODO - complete with GPY PHY information */
183 		break;
184 	default:
185 		ret_val = -IGC_ERR_PHY;
186 		goto out;
187 	}
188 
189 out:
190 	return ret_val;
191 }
192 
193 /**
194  *  igc_reset_hw_i225 - Reset hardware
195  *  @hw: pointer to the HW structure
196  *
197  *  This resets the hardware into a known state.
198  **/
199 static s32 igc_reset_hw_i225(struct igc_hw *hw)
200 {
201 	u32 ctrl;
202 	s32 ret_val;
203 
204 	DEBUGFUNC("igc_reset_hw_i225");
205 
206 	/*
207 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
208 	 * on the last TLP read/write transaction when MAC is reset.
209 	 */
210 	ret_val = igc_disable_pcie_master_generic(hw);
211 	if (ret_val)
212 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
213 
214 	DEBUGOUT("Masking off all interrupts\n");
215 	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
216 
217 	IGC_WRITE_REG(hw, IGC_RCTL, 0);
218 	IGC_WRITE_REG(hw, IGC_TCTL, IGC_TCTL_PSP);
219 	IGC_WRITE_FLUSH(hw);
220 
221 	msec_delay(10);
222 
223 	ctrl = IGC_READ_REG(hw, IGC_CTRL);
224 
225 	DEBUGOUT("Issuing a global reset to MAC\n");
226 	IGC_WRITE_REG(hw, IGC_CTRL, ctrl | IGC_CTRL_RST);
227 
228 	ret_val = igc_get_auto_rd_done_generic(hw);
229 	if (ret_val) {
230 		/*
231 		 * When auto config read does not complete, do not
232 		 * return with an error. This can happen when there is
233 		 * no EEPROM, and returning an error would prevent link.
234 		 */
235 		DEBUGOUT("Auto Read Done did not complete\n");
236 	}
237 
238 	/* Clear any pending interrupt events. */
239 	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
240 	IGC_READ_REG(hw, IGC_ICR);
241 
242 	/* Install any alternate MAC address into RAR0 */
243 	ret_val = igc_check_alt_mac_addr_generic(hw);
244 
245 	return ret_val;
246 }
247 
248 /* igc_acquire_nvm_i225 - Request for access to EEPROM
249  * @hw: pointer to the HW structure
250  *
251  * Acquire the necessary semaphores for exclusive access to the EEPROM.
252  * Set the EEPROM access request bit and wait for EEPROM access grant bit.
253  * Return successful if access grant bit set, else clear the request for
254  * EEPROM access and return -IGC_ERR_NVM (-1).
255  */
256 static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
257 {
258 	s32 ret_val;
259 
260 	DEBUGFUNC("igc_acquire_nvm_i225");
261 
262 	ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
263 
264 	return ret_val;
265 }
266 
267 /* igc_release_nvm_i225 - Release exclusive access to EEPROM
268  * @hw: pointer to the HW structure
269  *
270  * Stop any current commands to the EEPROM and clear the EEPROM request bit,
271  * then release the semaphores acquired.
272  */
273 static void igc_release_nvm_i225(struct igc_hw *hw)
274 {
275 	DEBUGFUNC("igc_release_nvm_i225");
276 
277 	igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
278 }
279 
280 /* igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
281  * @hw: pointer to the HW structure
282  * @mask: specifies which semaphore to acquire
283  *
284  * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
285  * will also specify which port we're acquiring the lock for.
286  */
287 s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
288 {
289 	u32 swfw_sync;
290 	u32 swmask = mask;
291 	u32 fwmask = mask << 16;
292 	s32 ret_val = IGC_SUCCESS;
293 	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
294 
295 	DEBUGFUNC("igc_acquire_swfw_sync_i225");
296 
297 	while (i < timeout) {
298 		if (igc_get_hw_semaphore_i225(hw)) {
299 			ret_val = -IGC_ERR_SWFW_SYNC;
300 			goto out;
301 		}
302 
303 		swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
304 		if (!(swfw_sync & (fwmask | swmask)))
305 			break;
306 
307 		/* Firmware currently using resource (fwmask)
308 		 * or other software thread using resource (swmask)
309 		 */
310 		igc_put_hw_semaphore_generic(hw);
311 		msec_delay_irq(5);
312 		i++;
313 	}
314 
315 	if (i == timeout) {
316 		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
317 		ret_val = -IGC_ERR_SWFW_SYNC;
318 		goto out;
319 	}
320 
321 	swfw_sync |= swmask;
322 	IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);
323 
324 	igc_put_hw_semaphore_generic(hw);
325 
326 out:
327 	return ret_val;
328 }
329 
330 /* igc_release_swfw_sync_i225 - Release SW/FW semaphore
331  * @hw: pointer to the HW structure
332  * @mask: specifies which semaphore to acquire
333  *
334  * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
335  * will also specify which port we're releasing the lock for.
336  */
337 void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
338 {
339 	u32 swfw_sync;
340 
341 	DEBUGFUNC("igc_release_swfw_sync_i225");
342 
343 	while (igc_get_hw_semaphore_i225(hw) != IGC_SUCCESS)
344 		; /* Empty */
345 
346 	swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
347 	swfw_sync &= ~mask;
348 	IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);
349 
350 	igc_put_hw_semaphore_generic(hw);
351 }
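
/* Illustrative sketch (hypothetical caller, not taken from the driver):
 * PHY/NVM register access is expected to be bracketed by the acquire/release
 * pair above, using one of the IGC_SWFW_*_SM masks, e.g.:
 *
 *	s32 ret_val;
 *
 *	ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
 *	if (ret_val != IGC_SUCCESS)
 *		return ret_val;
 *	... access the shared EEPROM resource ...
 *	igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
 */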
352 
353 /*
354  * igc_setup_copper_link_i225 - Configure copper link settings
355  * @hw: pointer to the HW structure
356  *
357  * Configures the link for auto-neg or forced speed and duplex.  Then we check
358  * for link; once link is established, collision distance and flow control
359  * are configured.
360  */
361 s32 igc_setup_copper_link_i225(struct igc_hw *hw)
362 {
363 	u32 phpm_reg;
364 	s32 ret_val;
365 	u32 ctrl;
366 
367 	DEBUGFUNC("igc_setup_copper_link_i225");
368 
369 	ctrl = IGC_READ_REG(hw, IGC_CTRL);
370 	ctrl |= IGC_CTRL_SLU;
371 	ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
372 	IGC_WRITE_REG(hw, IGC_CTRL, ctrl);
373 
374 	phpm_reg = IGC_READ_REG(hw, IGC_I225_PHPM);
375 	phpm_reg &= ~IGC_I225_PHPM_GO_LINKD;
376 	IGC_WRITE_REG(hw, IGC_I225_PHPM, phpm_reg);
377 
378 	ret_val = igc_setup_copper_link_generic(hw);
379 
380 	return ret_val;
381 }
382 
383 /* igc_get_hw_semaphore_i225 - Acquire hardware semaphore
384  * @hw: pointer to the HW structure
385  *
386  * Acquire the HW semaphore to access the PHY or NVM
387  */
388 static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
389 {
390 	u32 swsm;
391 	s32 timeout = hw->nvm.word_size + 1;
392 	s32 i = 0;
393 
394 	DEBUGFUNC("igc_get_hw_semaphore_i225");
395 
396 	/* Get the SW semaphore */
397 	while (i < timeout) {
398 		swsm = IGC_READ_REG(hw, IGC_SWSM);
399 		if (!(swsm & IGC_SWSM_SMBI))
400 			break;
401 
402 		usec_delay(50);
403 		i++;
404 	}
405 
406 	if (i == timeout) {
407 		/* In rare circumstances, the SW semaphore may already be held
408 		 * unintentionally. Clear the semaphore once before giving up.
409 		 */
410 		if (hw->dev_spec._i225.clear_semaphore_once) {
411 			hw->dev_spec._i225.clear_semaphore_once = false;
412 			igc_put_hw_semaphore_generic(hw);
413 			for (i = 0; i < timeout; i++) {
414 				swsm = IGC_READ_REG(hw, IGC_SWSM);
415 				if (!(swsm & IGC_SWSM_SMBI))
416 					break;
417 
418 				usec_delay(50);
419 			}
420 		}
421 
422 		/* If we do not have the semaphore here, we have to give up. */
423 		if (i == timeout) {
424 			DEBUGOUT("Driver can't access device -\n");
425 			DEBUGOUT("SMBI bit is set.\n");
426 			return -IGC_ERR_NVM;
427 		}
428 	}
429 
430 	/* Get the FW semaphore. */
431 	for (i = 0; i < timeout; i++) {
432 		swsm = IGC_READ_REG(hw, IGC_SWSM);
433 		IGC_WRITE_REG(hw, IGC_SWSM, swsm | IGC_SWSM_SWESMBI);
434 
435 		/* Semaphore acquired if bit latched */
436 		if (IGC_READ_REG(hw, IGC_SWSM) & IGC_SWSM_SWESMBI)
437 			break;
438 
439 		usec_delay(50);
440 	}
441 
442 	if (i == timeout) {
443 		/* Release semaphores */
444 		igc_put_hw_semaphore_generic(hw);
445 		DEBUGOUT("Driver can't access the NVM\n");
446 		return -IGC_ERR_NVM;
447 	}
448 
449 	return IGC_SUCCESS;
450 }
451 
452 /* igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register
453  * @hw: pointer to the HW structure
454  * @offset: offset of word in the Shadow Ram to read
455  * @words: number of words to read
456  * @data: word read from the Shadow Ram
457  *
458  * Reads a 16 bit word from the Shadow Ram using the EERD register.
459  * Uses necessary synchronization semaphores.
460  */
461 s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
462 			     u16 *data)
463 {
464 	s32 status = IGC_SUCCESS;
465 	u16 i, count;
466 
467 	DEBUGFUNC("igc_read_nvm_srrd_i225");
468 
469 	/* We cannot hold the synchronization semaphores for too long because
470 	 * of the forceful takeover procedure. However, it is more efficient
471 	 * to read in bursts than to synchronize access for each word.
472 	 */
473 	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
474 		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
475 			IGC_EERD_EEWR_MAX_COUNT : (words - i);
476 		if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
477 			status = igc_read_nvm_eerd(hw, offset, count,
478 						     data + i);
479 			hw->nvm.ops.release(hw);
480 		} else {
481 			status = IGC_ERR_SWFW_SYNC;
482 		}
483 
484 		if (status != IGC_SUCCESS)
485 			break;
486 	}
487 
488 	return status;
489 }
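
/* Illustrative sketch (hypothetical caller, not taken from the driver):
 * reading a small block from the Shadow RAM through the ops table set up in
 * igc_init_nvm_params_i225().  The offset and length are arbitrary example
 * values:
 *
 *	u16 words[4];
 *	s32 status = hw->nvm.ops.read(hw, 0x10, 4, words);
 *
 * On IGC_SUCCESS, words[0..3] hold Shadow RAM words 0x10..0x13; the
 * synchronization semaphore is taken and released internally per burst.
 */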
490 
491 /* igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
492  * @hw: pointer to the HW structure
493  * @offset: offset within the Shadow RAM to be written to
494  * @words: number of words to write
495  * @data: 16 bit word(s) to be written to the Shadow RAM
496  *
497  * Writes data to Shadow RAM at offset using EEWR register.
498  *
499  * If igc_update_nvm_checksum is not called after this function, the
500  * data will not be committed to FLASH and also Shadow RAM will most likely
501  * contain an invalid checksum.
502  *
503  * If error code is returned, data and Shadow RAM may be inconsistent - buffer
504  * partially written.
505  */
506 s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
507 			      u16 *data)
508 {
509 	s32 status = IGC_SUCCESS;
510 	u16 i, count;
511 
512 	DEBUGFUNC("igc_write_nvm_srwr_i225");
513 
514 	/* We cannot hold the synchronization semaphores for too long because
515 	 * of the forceful takeover procedure. However, it is more efficient
516 	 * to write in bursts than to synchronize access for each word.
517 	 */
518 	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
519 		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
520 			IGC_EERD_EEWR_MAX_COUNT : (words - i);
521 		if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
522 			status = __igc_write_nvm_srwr(hw, offset, count,
523 							data + i);
524 			hw->nvm.ops.release(hw);
525 		} else {
526 			status = IGC_ERR_SWFW_SYNC;
527 		}
528 
529 		if (status != IGC_SUCCESS)
530 			break;
531 	}
532 
533 	return status;
534 }
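
/* Illustrative sketch (hypothetical caller, not taken from the driver): as
 * the comment above notes, a Shadow RAM write must be followed by a checksum
 * update before the data is committed to flash, roughly:
 *
 *	u16 word = 0x1234;	(arbitrary example offset and value)
 *	s32 status;
 *
 *	status = igc_write_nvm_srwr_i225(hw, 0x20, 1, &word);
 *	if (status == IGC_SUCCESS)
 *		status = igc_update_nvm_checksum_i225(hw);
 *
 * Only after the checksum update does the flash contain the new word.
 */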
535 
536 /* __igc_write_nvm_srwr - Write to Shadow Ram using EEWR
537  * @hw: pointer to the HW structure
538  * @offset: offset within the Shadow Ram to be written to
539  * @words: number of words to write
540  * @data: 16 bit word(s) to be written to the Shadow Ram
541  *
542  * Writes data to Shadow Ram at offset using EEWR register.
543  *
544  * If igc_update_nvm_checksum is not called after this function, the
545  * Shadow Ram will most likely contain an invalid checksum.
546  */
547 static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
548 				  u16 *data)
549 {
550 	struct igc_nvm_info *nvm = &hw->nvm;
551 	u32 i, k, eewr = 0;
552 	u32 attempts = 100000;
553 	s32 ret_val = IGC_SUCCESS;
554 
555 	DEBUGFUNC("__igc_write_nvm_srwr");
556 
557 	/* A check for invalid values:  offset too large, too many words,
558 	 * too many words for the offset, and not enough words.
559 	 */
560 	if (offset >= nvm->word_size || words > (nvm->word_size - offset) ||
561 			words == 0) {
562 		DEBUGOUT("nvm parameter(s) out of bounds\n");
563 		ret_val = -IGC_ERR_NVM;
564 		goto out;
565 	}
566 
567 	for (i = 0; i < words; i++) {
568 		eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
569 			(data[i] << IGC_NVM_RW_REG_DATA) |
570 			IGC_NVM_RW_REG_START;
571 
572 		IGC_WRITE_REG(hw, IGC_SRWR, eewr);
573 
574 		for (k = 0; k < attempts; k++) {
575 			if (IGC_NVM_RW_REG_DONE &
576 			    IGC_READ_REG(hw, IGC_SRWR)) {
577 				ret_val = IGC_SUCCESS;
578 				break;
579 			}
580 			usec_delay(5);
581 		}
582 
583 		if (ret_val != IGC_SUCCESS) {
584 			DEBUGOUT("Shadow RAM write EEWR timed out\n");
585 			break;
586 		}
587 	}
588 
589 out:
590 	return ret_val;
591 }
592 
593 /* igc_read_invm_version_i225 - Reads iNVM version and image type
594  * @hw: pointer to the HW structure
595  * @invm_ver: version structure for the version read
596  *
597  * Reads iNVM version and image type.
598  */
599 s32 igc_read_invm_version_i225(struct igc_hw *hw,
600 				 struct igc_fw_version *invm_ver)
601 {
602 	u32 *record = NULL;
603 	u32 *next_record = NULL;
604 	u32 i = 0;
605 	u32 invm_dword = 0;
606 	u32 invm_blocks = IGC_INVM_SIZE - (IGC_INVM_ULT_BYTES_SIZE /
607 					     IGC_INVM_RECORD_SIZE_IN_BYTES);
608 	u32 buffer[IGC_INVM_SIZE];
609 	s32 status = -IGC_ERR_INVM_VALUE_NOT_FOUND;
610 	u16 version = 0;
611 
612 	DEBUGFUNC("igc_read_invm_version_i225");
613 
614 	/* Read iNVM memory */
615 	for (i = 0; i < IGC_INVM_SIZE; i++) {
616 		invm_dword = IGC_READ_REG(hw, IGC_INVM_DATA_REG(i));
617 		buffer[i] = invm_dword;
618 	}
619 
620 	/* Read version number */
621 	for (i = 1; i < invm_blocks; i++) {
622 		record = &buffer[invm_blocks - i];
623 		next_record = &buffer[invm_blocks - i + 1];
624 
625 		/* Check if we have first version location used */
626 		if (i == 1 && (*record & IGC_INVM_VER_FIELD_ONE) == 0) {
627 			version = 0;
628 			status = IGC_SUCCESS;
629 			break;
630 		}
631 		/* Check if we have second version location used */
632 		else if ((i == 1) &&
633 			 ((*record & IGC_INVM_VER_FIELD_TWO) == 0)) {
634 			version = (*record & IGC_INVM_VER_FIELD_ONE) >> 3;
635 			status = IGC_SUCCESS;
636 			break;
637 		}
638 		/* Check if we have odd version location
639 		 * used and it is the last one used
640 		 */
641 		else if ((((*record & IGC_INVM_VER_FIELD_ONE) == 0) &&
642 			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
643 			   (i != 1))) {
644 			version = (*next_record & IGC_INVM_VER_FIELD_TWO)
645 				  >> 13;
646 			status = IGC_SUCCESS;
647 			break;
648 		}
649 		/* Check if we have even version location
650 		 * used and it is the last one used
651 		 */
652 		else if (((*record & IGC_INVM_VER_FIELD_TWO) == 0) &&
653 			 ((*record & 0x3) == 0)) {
654 			version = (*record & IGC_INVM_VER_FIELD_ONE) >> 3;
655 			status = IGC_SUCCESS;
656 			break;
657 		}
658 	}
659 
660 	if (status == IGC_SUCCESS) {
661 		invm_ver->invm_major = (version & IGC_INVM_MAJOR_MASK)
662 					>> IGC_INVM_MAJOR_SHIFT;
663 		invm_ver->invm_minor = version & IGC_INVM_MINOR_MASK;
664 	}
665 	/* Read Image Type */
666 	for (i = 1; i < invm_blocks; i++) {
667 		record = &buffer[invm_blocks - i];
668 		next_record = &buffer[invm_blocks - i + 1];
669 
670 		/* Check if we have image type in first location used */
671 		if (i == 1 && (*record & IGC_INVM_IMGTYPE_FIELD) == 0) {
672 			invm_ver->invm_img_type = 0;
673 			status = IGC_SUCCESS;
674 			break;
675 		}
676 		/* Check if we have image type in the last location used */
677 		else if ((((*record & 0x3) == 0) &&
678 			  ((*record & IGC_INVM_IMGTYPE_FIELD) == 0)) ||
679 			    ((((*record & 0x3) != 0) && (i != 1)))) {
680 			invm_ver->invm_img_type =
681 				(*next_record & IGC_INVM_IMGTYPE_FIELD) >> 23;
682 			status = IGC_SUCCESS;
683 			break;
684 		}
685 	}
686 	return status;
687 }
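
/* Worked example (illustrative, assuming the usual base-code field layout in
 * which IGC_INVM_MAJOR_MASK covers bits 9:4 and IGC_INVM_MINOR_MASK covers
 * bits 3:0): a raw version value of 0x25 decodes as
 *
 *	invm_major = (0x25 & IGC_INVM_MAJOR_MASK) >> IGC_INVM_MAJOR_SHIFT = 2
 *	invm_minor =  0x25 & IGC_INVM_MINOR_MASK = 5
 *
 * i.e. an iNVM image version of 2.5.
 */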
688 
689 /* igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
690  * @hw: pointer to the HW structure
691  *
692  * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
693  * and then verifies that the sum of the EEPROM is equal to 0xBABA.
694  */
695 s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
696 {
697 	s32 status = IGC_SUCCESS;
698 	s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset,
699 			u16 count, u16 *data);
700 
701 	DEBUGFUNC("igc_validate_nvm_checksum_i225");
702 
703 	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
704 		/* Replace the semaphore-grabbing read function with one
705 		 * that skips the semaphore, since we have already taken
706 		 * it here.
707 		 */
708 		read_op_ptr = hw->nvm.ops.read;
709 		hw->nvm.ops.read = igc_read_nvm_eerd;
710 
711 		status = igc_validate_nvm_checksum_generic(hw);
712 
713 		/* Revert original read operation. */
714 		hw->nvm.ops.read = read_op_ptr;
715 
716 		hw->nvm.ops.release(hw);
717 	} else {
718 		status = IGC_ERR_SWFW_SYNC;
719 	}
720 
721 	return status;
722 }
723 
724 /* igc_update_nvm_checksum_i225 - Update EEPROM checksum
725  * @hw: pointer to the HW structure
726  *
727  * Updates the EEPROM checksum by reading/adding each word of the EEPROM
728  * up to the checksum.  Then calculates the EEPROM checksum and writes the
729  * value to the EEPROM. Next commit EEPROM data onto the Flash.
730  */
731 s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
732 {
733 	s32 ret_val;
734 	u16 checksum = 0;
735 	u16 i, nvm_data;
736 
737 	DEBUGFUNC("igc_update_nvm_checksum_i225");
738 
739 	/* Read the first word from the EEPROM. If this times out or fails, do
740 	 * not continue or we could be in for a very long wait while every
741 	 * EEPROM read fails
742 	 */
743 	ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
744 	if (ret_val != IGC_SUCCESS) {
745 		DEBUGOUT("EEPROM read failed\n");
746 		goto out;
747 	}
748 
749 	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
750 		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
751 		 * because we do not want to take the synchronization
752 		 * semaphores twice here.
753 		 */
754 
755 		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
756 			ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
757 			if (ret_val) {
758 				hw->nvm.ops.release(hw);
759 				DEBUGOUT("NVM Read Error while updating\n");
760 				DEBUGOUT("checksum.\n");
761 				goto out;
762 			}
763 			checksum += nvm_data;
764 		}
765 		checksum = (u16)NVM_SUM - checksum;
766 		ret_val = __igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
767 						 &checksum);
768 		if (ret_val != IGC_SUCCESS) {
769 			hw->nvm.ops.release(hw);
770 			DEBUGOUT("NVM Write Error while updating checksum.\n");
771 			goto out;
772 		}
773 
774 		hw->nvm.ops.release(hw);
775 
776 		ret_val = igc_update_flash_i225(hw);
777 	} else {
778 		ret_val = IGC_ERR_SWFW_SYNC;
779 	}
780 out:
781 	return ret_val;
782 }
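
/* Worked example (illustrative): the rule enforced here and in
 * igc_validate_nvm_checksum_i225() is that the 16-bit words from offset 0 up
 * to and including NVM_CHECKSUM_REG sum to NVM_SUM (0xBABA) modulo 0x10000.
 * If words 0 .. NVM_CHECKSUM_REG - 1 sum to 0xA000, the checksum word written
 * above is
 *
 *	checksum = (u16)NVM_SUM - 0xA000 = 0xBABA - 0xA000 = 0x1ABA
 *
 * so that 0xA000 + 0x1ABA == 0xBABA.
 */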
783 
784 /* igc_get_flash_presence_i225 - Check if flash device is detected.
785  * @hw: pointer to the HW structure
786  */
787 bool igc_get_flash_presence_i225(struct igc_hw *hw)
788 {
789 	u32 eec = 0;
790 	bool ret_val = false;
791 
792 	DEBUGFUNC("igc_get_flash_presence_i225");
793 
794 	eec = IGC_READ_REG(hw, IGC_EECD);
795 
796 	if (eec & IGC_EECD_FLASH_DETECTED_I225)
797 		ret_val = true;
798 
799 	return ret_val;
800 }
801 
802 /* igc_set_flsw_flash_burst_counter_i225 - sets FLSW NVM Burst
803  * Counter in FLSWCNT register.
804  *
805  * @hw: pointer to the HW structure
806  * @burst_counter: size in bytes of the Flash burst to read or write
807  */
808 s32 igc_set_flsw_flash_burst_counter_i225(struct igc_hw *hw,
809 					    u32 burst_counter)
810 {
811 	s32 ret_val = IGC_SUCCESS;
812 
813 	DEBUGFUNC("igc_set_flsw_flash_burst_counter_i225");
814 
815 	/* Validate input data */
816 	if (burst_counter < IGC_I225_SHADOW_RAM_SIZE) {
817 		/* Write FLSWCNT - burst counter */
818 		IGC_WRITE_REG(hw, IGC_I225_FLSWCNT, burst_counter);
819 	} else {
820 		ret_val = IGC_ERR_INVALID_ARGUMENT;
821 	}
822 
823 	return ret_val;
824 }
825 
826 /* igc_write_erase_flash_command_i225 - write/erase to a sector
827  * region on a given address.
828  *
829  * @hw: pointer to the HW structure
830  * @opcode: opcode to be used for the write command
831  * @address: the offset to write into the FLASH image
832  */
833 s32 igc_write_erase_flash_command_i225(struct igc_hw *hw, u32 opcode,
834 					 u32 address)
835 {
836 	u32 flswctl = 0;
837 	s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
838 	s32 ret_val = IGC_SUCCESS;
839 
840 	DEBUGFUNC("igc_write_erase_flash_command_i225");
841 
842 	flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
843 	/* Polling done bit on FLSWCTL register */
844 	while (timeout) {
845 		if (flswctl & IGC_FLSWCTL_DONE)
846 			break;
847 		usec_delay(5);
848 		flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
849 		timeout--;
850 	}
851 
852 	if (!timeout) {
853 		DEBUGOUT("Flash transaction was not done\n");
854 		return -IGC_ERR_NVM;
855 	}
856 
857 	/* Build and issue command on FLSWCTL register */
858 	flswctl = address | opcode;
859 	IGC_WRITE_REG(hw, IGC_I225_FLSWCTL, flswctl);
860 
861 	/* Check if issued command is valid on FLSWCTL register */
862 	flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
863 	if (!(flswctl & IGC_FLSWCTL_CMDV)) {
864 		DEBUGOUT("Write flash command failed\n");
865 		ret_val = IGC_ERR_INVALID_ARGUMENT;
866 	}
867 
868 	return ret_val;
869 }
870 
871 /* igc_update_flash_i225 - Commit EEPROM to the flash
872  * If fw_valid_bit is set, FW is active, and setting the FLUPD bit in the EEC
873  * register makes the FW load the internal shadow RAM into the flash.
874  * Otherwise, fw_valid_bit is 0. If FL_SECU.block_protected_sw = 0,
875  * then FW is not active and the SW is responsible for the shadow RAM dump.
876  *
877  * @hw: pointer to the HW structure
878  */
879 s32 igc_update_flash_i225(struct igc_hw *hw)
880 {
881 	u16 current_offset_data = 0;
882 	u32 block_sw_protect = 1;
883 	u16 base_address = 0x0;
884 	u32 i, fw_valid_bit;
885 	u16 current_offset;
886 	s32 ret_val = 0;
887 	u32 flup;
888 
889 	DEBUGFUNC("igc_update_flash_i225");
890 
891 	block_sw_protect = IGC_READ_REG(hw, IGC_I225_FLSECU) &
892 					  IGC_FLSECU_BLK_SW_ACCESS_I225;
893 	fw_valid_bit = IGC_READ_REG(hw, IGC_FWSM) &
894 				      IGC_FWSM_FW_VALID_I225;
895 	if (fw_valid_bit) {
896 		ret_val = igc_pool_flash_update_done_i225(hw);
897 		if (ret_val == -IGC_ERR_NVM) {
898 			DEBUGOUT("Flash update time out\n");
899 			goto out;
900 		}
901 
902 		flup = IGC_READ_REG(hw, IGC_EECD) | IGC_EECD_FLUPD_I225;
903 		IGC_WRITE_REG(hw, IGC_EECD, flup);
904 
905 		ret_val = igc_pool_flash_update_done_i225(hw);
906 		if (ret_val == IGC_SUCCESS)
907 			DEBUGOUT("Flash update complete\n");
908 		else
909 			DEBUGOUT("Flash update time out\n");
910 	} else if (!block_sw_protect) {
911 		/* FW is not active and security protection is disabled.
912 		 * Therefore, SW is in charge of the shadow RAM dump.
913 		 * Check which sector is valid. If sector 0 is valid,
914 		 * the base address remains 0x0; otherwise, sector 1 is
915 		 * valid and its base address is 0x1000.
916 		 */
917 		if (IGC_READ_REG(hw, IGC_EECD) & IGC_EECD_SEC1VAL_I225)
918 			base_address = 0x1000;
919 
920 		/* Valid sector erase */
921 		ret_val = igc_write_erase_flash_command_i225(hw,
922 						  IGC_I225_ERASE_CMD_OPCODE,
923 						  base_address);
924 		if (ret_val != IGC_SUCCESS) {
925 			DEBUGOUT("Sector erase failed\n");
926 			goto out;
927 		}
928 
929 		current_offset = base_address;
930 
931 		/* Write */
932 		for (i = 0; i < IGC_I225_SHADOW_RAM_SIZE / 2; i++) {
933 			/* Set burst write length */
934 			ret_val = igc_set_flsw_flash_burst_counter_i225(hw,
935 									  0x2);
936 			if (ret_val != IGC_SUCCESS)
937 				break;
938 
939 			/* Set address and opcode */
940 			ret_val = igc_write_erase_flash_command_i225(hw,
941 						IGC_I225_WRITE_CMD_OPCODE,
942 						2 * current_offset);
943 			if (ret_val != IGC_SUCCESS)
944 				break;
945 
946 			ret_val = igc_read_nvm_eerd(hw, current_offset,
947 						      1, &current_offset_data);
948 			if (ret_val) {
949 				DEBUGOUT("Failed to read from EEPROM\n");
950 				goto out;
951 			}
952 
953 			/* Write current_offset_data to the FLSWDATA register */
954 			IGC_WRITE_REG(hw, IGC_I225_FLSWDATA,
955 					current_offset_data);
956 			current_offset++;
957 
958 			/* Wait till operation has finished */
959 			ret_val = igc_poll_eerd_eewr_done(hw,
960 						IGC_NVM_POLL_READ);
961 			if (ret_val)
962 				break;
963 
964 			usec_delay(1000);
965 		}
966 	}
967 out:
968 	return ret_val;
969 }
970 
971 /* igc_pool_flash_update_done_i225 - Poll FLUDONE status.
972  * @hw: pointer to the HW structure
973  */
974 s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
975 {
976 	s32 ret_val = -IGC_ERR_NVM;
977 	u32 i, reg;
978 
979 	DEBUGFUNC("igc_pool_flash_update_done_i225");
980 
981 	for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
982 		reg = IGC_READ_REG(hw, IGC_EECD);
983 		if (reg & IGC_EECD_FLUDONE_I225) {
984 			ret_val = IGC_SUCCESS;
985 			break;
986 		}
987 		usec_delay(5);
988 	}
989 
990 	return ret_val;
991 }
992 
993 /* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds.
994  * @hw: pointer to the HW structure
995  * @link: bool indicating link status
996  *
997  * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
998  * settings, otherwise specify that there is no LTR requirement.
999  */
1000 static s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
1001 {
1002 	u16 speed, duplex;
1003 	u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
1004 	s32 size;
1005 
1006 	DEBUGFUNC("igc_set_ltr_i225");
1007 
1008 	/* If we do not have link, LTR thresholds are zero. */
1009 	if (link) {
1010 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1011 
1012 		/* Check if using copper interface with EEE enabled or if the
1013 		 * link speed is 10 Mbps.
1014 		 */
1015 		if (hw->phy.media_type == igc_media_type_copper &&
1016 				!hw->dev_spec._i225.eee_disable &&
1017 				speed != SPEED_10) {
1018 			/* EEE enabled, so send LTRMAX threshold. */
1019 			ltrc = IGC_READ_REG(hw, IGC_LTRC) |
1020 				IGC_LTRC_EEEMS_EN;
1021 			IGC_WRITE_REG(hw, IGC_LTRC, ltrc);
1022 
1023 			/* Calculate tw_system (nsec). */
1024 			if (speed == SPEED_100)
1025 				tw_system = ((IGC_READ_REG(hw, IGC_EEE_SU) &
1026 					IGC_TW_SYSTEM_100_MASK) >>
1027 					IGC_TW_SYSTEM_100_SHIFT) * 500;
1028 			else
1029 				tw_system = (IGC_READ_REG(hw, IGC_EEE_SU) &
1030 					IGC_TW_SYSTEM_1000_MASK) * 500;
1031 		} else {
1032 			tw_system = 0;
1033 		}
1034 
1035 		/* Get the Rx packet buffer size. */
1036 		size = IGC_READ_REG(hw, IGC_RXPBS) &
1037 			IGC_RXPBS_SIZE_I225_MASK;
1038 
1039 		/* Calculations vary based on DMAC settings. */
1040 		if (IGC_READ_REG(hw, IGC_DMACR) & IGC_DMACR_DMAC_EN) {
1041 			size -= (IGC_READ_REG(hw, IGC_DMACR) &
1042 				 IGC_DMACR_DMACTHR_MASK) >>
1043 				 IGC_DMACR_DMACTHR_SHIFT;
1044 			/* Convert size to bits. */
1045 			size *= 1024 * 8;
1046 		} else {
1047 			/* Convert size to bytes, subtract the MTU, and then
1048 			 * convert the size to bits.
1049 			 */
1050 			size *= 1024;
1051 			size -= hw->dev_spec._i225.mtu;
1052 			size *= 8;
1053 		}
1054 
1055 		if (size < 0) {
1056 			DEBUGOUT1("Invalid effective Rx buffer size %d\n",
1057 				  size);
1058 			return -IGC_ERR_CONFIG;
1059 		}
1060 
1061 		/* Calculate the thresholds. Since speed is in Mbps, simplify
1062 		 * the calculation by multiplying size/speed by 1000 for result
1063 		 * to be in nsec before dividing by the scale in nsec. Set the
1064 		 * scale such that the LTR threshold fits in the register.
1065 		 */
1066 		ltr_min = (1000 * size) / speed;
1067 		ltr_max = ltr_min + tw_system;
1068 		scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
1069 			    IGC_LTRMINV_SCALE_32768;
1070 		scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
1071 			    IGC_LTRMAXV_SCALE_32768;
1072 		ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
1073 		ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
1074 
1075 		/* Only write the LTR thresholds if they differ from before. */
1076 		ltrv = IGC_READ_REG(hw, IGC_LTRMINV);
1077 		if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
1078 			ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
1079 			      (scale_min << IGC_LTRMINV_SCALE_SHIFT);
1080 			IGC_WRITE_REG(hw, IGC_LTRMINV, ltrv);
1081 		}
1082 
1083 		ltrv = IGC_READ_REG(hw, IGC_LTRMAXV);
1084 		if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
1085 			ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
1086 			      (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
1087 			IGC_WRITE_REG(hw, IGC_LTRMAXV, ltrv);
1088 		}
1089 	}
1090 
1091 	return IGC_SUCCESS;
1092 }
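
/* Worked example (illustrative, with round numbers): assume a 1000 Mbps link,
 * EEE disabled (so tw_system = 0) and an effective Rx buffer of 250000 bits.
 * Then
 *
 *	ltr_min = (1000 * 250000) / 1000 = 250000 nsec
 *	ltr_max = 250000 + 0 = 250000 nsec
 *
 * Since 250000 / 1024 is below 1024, the 1024 nsec scale is selected and the
 * value programmed into the LTR register fields is 250000 / 1024 = 244.
 */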
1093 
1094 /* igc_check_for_link_i225 - Check for link
1095  * @hw: pointer to the HW structure
1096  *
1097  * Checks to see if the link status of the hardware has changed.  If a
1098  * change in link status has been detected, then we read the PHY registers
1099  * to get the current speed/duplex if link exists.
1100  */
1101 s32 igc_check_for_link_i225(struct igc_hw *hw)
1102 {
1103 	struct igc_mac_info *mac = &hw->mac;
1104 	s32 ret_val;
1105 	bool link = false;
1106 
1107 	DEBUGFUNC("igc_check_for_link_i225");
1108 
1109 	/* We only want to go out to the PHY registers to see if
1110 	 * Auto-Neg has completed and/or if our link status has
1111 	 * changed.  The get_link_status flag is set upon receiving
1112 	 * a Link Status Change or Rx Sequence Error interrupt.
1113 	 */
1114 	if (!mac->get_link_status) {
1115 		ret_val = IGC_SUCCESS;
1116 		goto out;
1117 	}
1118 
1119 	/* First we want to see if the MII Status Register reports
1120 	 * link.  If so, then we want to get the current speed/duplex
1121 	 * of the PHY.
1122 	 */
1123 	ret_val = igc_phy_has_link_generic(hw, 1, 0, &link);
1124 	if (ret_val)
1125 		goto out;
1126 
1127 	if (!link)
1128 		goto out; /* No link detected */
1129 
1130 	mac->get_link_status = false;
1131 
1132 	/* Check if there was DownShift, must be checked
1133 	 * immediately after link-up
1134 	 */
1135 	igc_check_downshift_generic(hw);
1136 
1137 	/* If we are forcing speed/duplex, then we simply return since
1138 	 * we have already determined whether we have link or not.
1139 	 */
1140 	if (!mac->autoneg)
1141 		goto out;
1142 
1143 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1144 	 * of MAC speed/duplex configuration.  So we only need to
1145 	 * configure Collision Distance in the MAC.
1146 	 */
1147 	mac->ops.config_collision_dist(hw);
1148 
1149 	/* Configure Flow Control now that Auto-Neg has completed.
1150 	 * First, we need to restore the desired flow control
1151 	 * settings because we may have had to re-autoneg with a
1152 	 * different link partner.
1153 	 */
1154 	ret_val = igc_config_fc_after_link_up_generic(hw);
1155 	if (ret_val)
1156 		DEBUGOUT("Error configuring flow control\n");
1157 out:
1158 	/* Now that we are aware of our link settings, we can set the LTR
1159 	 * thresholds.
1160 	 */
1161 	ret_val = igc_set_ltr_i225(hw, link);
1162 
1163 	return ret_val;
1164 }
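
/* Illustrative sketch (hypothetical caller, not taken from the driver): a
 * typical user reacts to a Link Status Change interrupt by marking the link
 * state stale and then calling the routine above through the ops table:
 *
 *	u16 speed, duplex;
 *
 *	hw->mac.get_link_status = true;
 *	if (hw->mac.ops.check_for_link(hw) == IGC_SUCCESS &&
 *	    !hw->mac.get_link_status)
 *		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 */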
1165 
1166 /* igc_init_function_pointers_i225 - Init func ptrs.
1167  * @hw: pointer to the HW structure
1168  *
1169  * Called to initialize all function pointers and parameters.
1170  */
1171 void igc_init_function_pointers_i225(struct igc_hw *hw)
1172 {
1173 	igc_init_mac_ops_generic(hw);
1174 	igc_init_phy_ops_generic(hw);
1175 	igc_init_nvm_ops_generic(hw);
1176 	hw->mac.ops.init_params = igc_init_mac_params_i225;
1177 	hw->nvm.ops.init_params = igc_init_nvm_params_i225;
1178 	hw->phy.ops.init_params = igc_init_phy_params_i225;
1179 }
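
/* Illustrative sketch (hypothetical usage, not taken from the driver): the
 * expected initialization order is to install the function pointers first
 * and then call the per-block init_params hooks, roughly:
 *
 *	s32 ret_val;
 *
 *	igc_init_function_pointers_i225(hw);
 *	ret_val = hw->mac.ops.init_params(hw);
 *	if (ret_val == IGC_SUCCESS)
 *		ret_val = hw->nvm.ops.init_params(hw);
 *	if (ret_val == IGC_SUCCESS)
 *		ret_val = hw->phy.ops.init_params(hw);
 *
 * In the real driver this sequencing is handled by the generic setup code.
 */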
1180 
1181 /* igc_valid_led_default_i225 - Verify a valid default LED config
1182  * @hw: pointer to the HW structure
1183  * @data: pointer to the NVM (EEPROM)
1184  *
1185  * Read the EEPROM for the current default LED configuration.  If the
1186  * LED configuration is not valid, set to a valid LED configuration.
1187  */
1188 static s32 igc_valid_led_default_i225(struct igc_hw *hw, u16 *data)
1189 {
1190 	s32 ret_val;
1191 
1192 	DEBUGFUNC("igc_valid_led_default_i225");
1193 
1194 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1195 	if (ret_val) {
1196 		DEBUGOUT("NVM Read Error\n");
1197 		goto out;
1198 	}
1199 
1200 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1201 		switch (hw->phy.media_type) {
1202 		case igc_media_type_internal_serdes:
1203 			*data = ID_LED_DEFAULT_I225_SERDES;
1204 			break;
1205 		case igc_media_type_copper:
1206 		default:
1207 			*data = ID_LED_DEFAULT_I225;
1208 			break;
1209 		}
1210 	}
1211 out:
1212 	return ret_val;
1213 }
1214 
1215 /* igc_get_cfg_done_i225 - Read config done bit
1216  * @hw: pointer to the HW structure
1217  *
1218  * Read the management control register for the config done bit for
1219  * completion status.  NOTE: EEPROM-less silicon will fail trying to read
1220  * the config done bit, so the error is *ONLY* logged and IGC_SUCCESS is
1221  * returned.  If we were to return an error, EEPROM-less silicon would not
1222  * be able to be reset or to change link.
1223  */
1224 static s32 igc_get_cfg_done_i225(struct igc_hw *hw)
1225 {
1226 	s32 timeout = PHY_CFG_TIMEOUT;
1227 	u32 mask = IGC_NVM_CFG_DONE_PORT_0;
1228 
1229 	DEBUGFUNC("igc_get_cfg_done_i225");
1230 
1231 	while (timeout) {
1232 		if (IGC_READ_REG(hw, IGC_EEMNGCTL_I225) & mask)
1233 			break;
1234 		msec_delay(1);
1235 		timeout--;
1236 	}
1237 	if (!timeout)
1238 		DEBUGOUT("MNG configuration cycle has not completed.\n");
1239 
1240 	return IGC_SUCCESS;
1241 }
1242 
1243 /* igc_init_hw_i225 - Init hw for I225
1244  * @hw: pointer to the HW structure
1245  *
1246  * Called to initialize hw for i225 hw family.
1247  */
1248 s32 igc_init_hw_i225(struct igc_hw *hw)
1249 {
1250 	s32 ret_val;
1251 
1252 	DEBUGFUNC("igc_init_hw_i225");
1253 
1254 	hw->phy.ops.get_cfg_done = igc_get_cfg_done_i225;
1255 	ret_val = igc_init_hw_base(hw);
1256 	return ret_val;
1257 }
1258 
1259 /*
1260  * igc_set_d0_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D0 state
1261  * @hw: pointer to the HW structure
1262  * @active: true to enable LPLU, false to disable
1263  *
1264  * Note: since I225 does not actually support LPLU, this function
1265  * simply enables/disables 1G and 2.5G speeds in D0.
1266  */
1267 s32 igc_set_d0_lplu_state_i225(struct igc_hw *hw, bool active)
1268 {
1269 	u32 data;
1270 
1271 	DEBUGFUNC("igc_set_d0_lplu_state_i225");
1272 
1273 	data = IGC_READ_REG(hw, IGC_I225_PHPM);
1274 
1275 	if (active) {
1276 		data |= IGC_I225_PHPM_DIS_1000;
1277 		data |= IGC_I225_PHPM_DIS_2500;
1278 	} else {
1279 		data &= ~IGC_I225_PHPM_DIS_1000;
1280 		data &= ~IGC_I225_PHPM_DIS_2500;
1281 	}
1282 
1283 	IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
1284 	return IGC_SUCCESS;
1285 }
1286 
1287 /*
1288  * igc_set_d3_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D3 state
1289  * @hw: pointer to the HW structure
1290  * @active: true to enable LPLU, false to disable
1291  *
1292  * Note: since I225 does not actually support LPLU, this function
1293  * simply enables/disables 100M, 1G and 2.5G speeds in D3.
1294  */
1295 s32 igc_set_d3_lplu_state_i225(struct igc_hw *hw, bool active)
1296 {
1297 	u32 data;
1298 
1299 	DEBUGFUNC("igc_set_d3_lplu_state_i225");
1300 
1301 	data = IGC_READ_REG(hw, IGC_I225_PHPM);
1302 
1303 	if (active) {
1304 		data |= IGC_I225_PHPM_DIS_100_D3;
1305 		data |= IGC_I225_PHPM_DIS_1000_D3;
1306 		data |= IGC_I225_PHPM_DIS_2500_D3;
1307 	} else {
1308 		data &= ~IGC_I225_PHPM_DIS_100_D3;
1309 		data &= ~IGC_I225_PHPM_DIS_1000_D3;
1310 		data &= ~IGC_I225_PHPM_DIS_2500_D3;
1311 	}
1312 
1313 	IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
1314 	return IGC_SUCCESS;
1315 }
1316 
1317 /**
1318  *  igc_set_eee_i225 - Enable/disable EEE support
1319  *  @hw: pointer to the HW structure
1320  *  @adv2p5G: boolean flag enabling 2.5G EEE advertisement
1321  *  @adv1G: boolean flag enabling 1G EEE advertisement
1322  *  @adv100M: boolean flag enabling 100M EEE advertisement
1323  *
1324  *  Enable/disable EEE based on setting in dev_spec structure.
1325  *
1326  **/
1327 s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
1328 		       bool adv100M)
1329 {
1330 	u32 ipcnfg, eeer;
1331 
1332 	DEBUGFUNC("igc_set_eee_i225");
1333 
1334 	if (hw->mac.type != igc_i225 ||
1335 	    hw->phy.media_type != igc_media_type_copper)
1336 		goto out;
1337 	ipcnfg = IGC_READ_REG(hw, IGC_IPCNFG);
1338 	eeer = IGC_READ_REG(hw, IGC_EEER);
1339 
1340 	/* enable or disable per user setting */
1341 	if (!(hw->dev_spec._i225.eee_disable)) {
1342 		u32 eee_su = IGC_READ_REG(hw, IGC_EEE_SU);
1343 
1344 		if (adv100M)
1345 			ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
1346 		else
1347 			ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;
1348 
1349 		if (adv1G)
1350 			ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
1351 		else
1352 			ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;
1353 
1354 		if (adv2p5G)
1355 			ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
1356 		else
1357 			ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;
1358 
1359 		eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
1360 			IGC_EEER_LPI_FC);
1361 
1362 		/* This bit should not be set in normal operation. */
1363 		if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
1364 			DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
1365 	} else {
1366 		ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
1367 			IGC_IPCNFG_EEE_100M_AN);
1368 		eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
1369 			IGC_EEER_LPI_FC);
1370 	}
1371 	IGC_WRITE_REG(hw, IGC_IPCNFG, ipcnfg);
1372 	IGC_WRITE_REG(hw, IGC_EEER, eeer);
1373 	IGC_READ_REG(hw, IGC_IPCNFG);
1374 	IGC_READ_REG(hw, IGC_EEER);
1375 out:
1376 
1377 	return IGC_SUCCESS;
1378 }
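
/* Illustrative sketch (hypothetical caller, not taken from the driver):
 * enabling EEE advertisement for all supported speeds, assuming the caller
 * has already cleared the eee_disable flag in the device specific area:
 *
 *	s32 ret_val;
 *
 *	hw->dev_spec._i225.eee_disable = false;
 *	ret_val = igc_set_eee_i225(hw, true, true, true);
 *
 * Callers normally keep the advertisement flags consistent with the speeds
 * they actually intend to advertise.
 */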
1379