xref: /dpdk/drivers/net/e1000/base/e1000_i210.c (revision 4a8ab48e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4 
5 #include "e1000_api.h"
6 
7 
8 STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
9 STATIC void e1000_release_nvm_i210(struct e1000_hw *hw);
10 STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
11 STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
12 				u16 *data);
13 STATIC s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
14 STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
15 
16 /**
17  *  e1000_acquire_nvm_i210 - Request for access to EEPROM
18  *  @hw: pointer to the HW structure
19  *
20  *  Acquire the necessary semaphores for exclusive access to the EEPROM.
21  *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
22  *  Return successful if access grant bit set, else clear the request for
23  *  EEPROM access and return -E1000_ERR_NVM (-1).
24  **/
e1000_acquire_nvm_i210(struct e1000_hw * hw)25 STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
26 {
27 	s32 ret_val;
28 
29 	DEBUGFUNC("e1000_acquire_nvm_i210");
30 
31 	ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
32 
33 	return ret_val;
34 }
35 
36 /**
37  *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
38  *  @hw: pointer to the HW structure
39  *
40  *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
41  *  then release the semaphores acquired.
42  **/
e1000_release_nvm_i210(struct e1000_hw * hw)43 STATIC void e1000_release_nvm_i210(struct e1000_hw *hw)
44 {
45 	DEBUGFUNC("e1000_release_nvm_i210");
46 
47 	e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
48 }
49 
/**
 *  e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	/* Firmware ownership bits occupy the upper 16 bits of SW_FW_SYNC. */
	u32 fwmask = mask << 16;
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_i210");

	while (i < timeout) {
		/* The SW_FW_SYNC register itself is guarded by the HW
		 * semaphore; take it before every read-modify-write cycle.
		 */
		if (e1000_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		/* Resource free when neither FW nor SW owns any masked bit.
		 * Note: the HW semaphore is deliberately kept held on break
		 * so the claim below is race-free.
		 */
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim the resource while the HW semaphore is still held. */
	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}
101 
/**
 *  e1000_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_i210");

	/* Release has no failure path, so busy-wait until the HW semaphore
	 * guarding SW_FW_SYNC is ours.
	 */
	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
		; /* Empty */

	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	/* Clear only this port's ownership bit(s); leave FW bits intact. */
	swfw_sync &= (u32)~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}
125 
/**
 *  e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM.  This is a
 *  two-stage handshake in the SWSM register: first wait for the SW
 *  semaphore bit (SMBI) to clear, then set the SW/FW semaphore bit
 *  (SWESMBI) and read it back until it latches.
 **/
STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	/* Wait time is scaled by NVM size; each poll delays 50 us. */
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("e1000_get_hw_semaphore_i210");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			/* One-shot recovery: never force-clear twice. */
			hw->dev_spec._82575.clear_semaphore_once = false;
			e1000_put_hw_semaphore_generic(hw);
			for (i = 0; i < timeout; i++) {
				swsm = E1000_READ_REG(hw, E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}
194 
195 /**
196  *  e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
197  *  @hw: pointer to the HW structure
198  *  @offset: offset of word in the Shadow Ram to read
199  *  @words: number of words to read
200  *  @data: word read from the Shadow Ram
201  *
202  *  Reads a 16 bit word from the Shadow Ram using the EERD register.
203  *  Uses necessary synchronization semaphores.
204  **/
e1000_read_nvm_srrd_i210(struct e1000_hw * hw,u16 offset,u16 words,u16 * data)205 s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
206 			     u16 *data)
207 {
208 	s32 status = E1000_SUCCESS;
209 	u16 i, count;
210 
211 	DEBUGFUNC("e1000_read_nvm_srrd_i210");
212 
213 	/* We cannot hold synchronization semaphores for too long,
214 	 * because of forceful takeover procedure. However it is more efficient
215 	 * to read in bursts than synchronizing access for each word. */
216 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
217 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
218 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
219 		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
220 			status = e1000_read_nvm_eerd(hw, offset, count,
221 						     data + i);
222 			hw->nvm.ops.release(hw);
223 		} else {
224 			status = E1000_ERR_SWFW_SYNC;
225 		}
226 
227 		if (status != E1000_SUCCESS)
228 			break;
229 	}
230 
231 	return status;
232 }
233 
234 /**
235  *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
236  *  @hw: pointer to the HW structure
237  *  @offset: offset within the Shadow RAM to be written to
238  *  @words: number of words to write
239  *  @data: 16 bit word(s) to be written to the Shadow RAM
240  *
241  *  Writes data to Shadow RAM at offset using EEWR register.
242  *
243  *  If e1000_update_nvm_checksum is not called after this function , the
244  *  data will not be committed to FLASH and also Shadow RAM will most likely
245  *  contain an invalid checksum.
246  *
247  *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
248  *  partially written.
249  **/
e1000_write_nvm_srwr_i210(struct e1000_hw * hw,u16 offset,u16 words,u16 * data)250 s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
251 			      u16 *data)
252 {
253 	s32 status = E1000_SUCCESS;
254 	u16 i, count;
255 
256 	DEBUGFUNC("e1000_write_nvm_srwr_i210");
257 
258 	/* We cannot hold synchronization semaphores for too long,
259 	 * because of forceful takeover procedure. However it is more efficient
260 	 * to write in bursts than synchronizing access for each word. */
261 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
262 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
263 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
264 		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
265 			status = e1000_write_nvm_srwr(hw, offset, count,
266 						      data + i);
267 			hw->nvm.ops.release(hw);
268 		} else {
269 			status = E1000_ERR_SWFW_SYNC;
270 		}
271 
272 		if (status != E1000_SUCCESS)
273 			break;
274 	}
275 
276 	return status;
277 }
278 
/**
 *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register.  The caller
 *  is expected to hold the NVM semaphore (both wrappers in this file
 *  acquire it before calling here).
 *
 *  If e1000_update_nvm_checksum is not called after this function , the
 *  Shadow Ram will most likely contain an invalid checksum.
 **/
STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000; /* x 5 us delay = up to ~500 ms per word */
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_write_nvm_srwr");

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Assume failure until the DONE bit latches below. */
		ret_val = -E1000_ERR_NVM;

		/* Compose one SRWR command: word address, data payload,
		 * and the START bit to kick off the write.
		 */
		eewr = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		E1000_WRITE_REG(hw, E1000_SRWR, eewr);

		/* Poll for completion of this word's write. */
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    E1000_READ_REG(hw, E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
339 
/** e1000_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP.
 **/
STATIC s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_word_i210");

	/* Walk the iNVM record stream; each dword carries a record type. */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		/* An uninitialized record marks the end of programmed OTP. */
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		/* Skip the payload dwords of multi-dword structures so
		 * they are not misinterpreted as record headers.
		 */
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
382 
/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: the word address (aka eeprom offset) to read
 *  @words: unused; a single word is read per call
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.  Words
 *  missing from the iNVM are substituted with documented defaults or
 *  with identifiers latched in the hw structure.
 **/
STATIC s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
				u16 E1000_UNUSEDARG words, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;
	UNREFERENCED_1PARAMETER(words);

	DEBUGFUNC("e1000_read_invm_i210");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		/* Three consecutive words; OR the statuses so the result
		 * is nonzero if any of the three reads failed.
		 */
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset + 1,
						     &data[1]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset + 2,
						     &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	/* The configuration words below are optional: fall back to the
	 * i211 defaults when not programmed, and report success.
	 */
	case NVM_INIT_CTRL_2:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	/* PCI identifiers are served from the hw structure rather than
	 * from the iNVM.
	 */
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
463 
/**
 *  e1000_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.  The whole iNVM is snapshotted
 *  into a local buffer, then the version/image-type records are scanned
 *  from the highest non-ULT block downward.
 **/
s32 e1000_read_invm_version(struct e1000_hw *hw,
			    struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	/* Number of scannable blocks: total iNVM minus the trailing
	 * ULT area, expressed in record-sized units.
	 */
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	DEBUGFUNC("e1000_read_invm_version");

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
	}

	if (status == E1000_SUCCESS) {
		/* Split the raw version word into major/minor fields. */
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have image type in first location used */
		else if ((((*record & 0x3) == 0) &&
			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = E1000_SUCCESS;
			break;
		}
	}
	return status;
}
562 
/**
 *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 *
 *  NOTE(review): the temporary swap of hw->nvm.ops.read below is not
 *  re-entrant; presumably callers serialize NVM access — confirm before
 *  invoking from concurrent contexts.
 **/
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = E1000_SUCCESS;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	DEBUGFUNC("e1000_validate_nvm_checksum_i210");

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {

		/*
		 * Replace the read function with semaphore grabbing with
		 * the one that skips this for a while.
		 * We have semaphore taken already here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = e1000_read_nvm_eerd;

		status = e1000_validate_nvm_checksum_generic(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}
599 
600 
/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		/* Sum every word preceding the checksum word. */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				/* Release before bailing out - every exit
				 * path from this branch must drop the
				 * semaphore exactly once.
				 */
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		/* The checksum word makes the total sum equal NVM_SUM. */
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		/* Commit the updated Shadow RAM contents to flash. */
		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
662 
663 /**
664  *  e1000_get_flash_presence_i210 - Check if flash device is detected.
665  *  @hw: pointer to the HW structure
666  *
667  **/
e1000_get_flash_presence_i210(struct e1000_hw * hw)668 bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
669 {
670 	u32 eec = 0;
671 	bool ret_val = false;
672 
673 	DEBUGFUNC("e1000_get_flash_presence_i210");
674 
675 	eec = E1000_READ_REG(hw, E1000_EECD);
676 
677 	if (eec & E1000_EECD_FLASH_DETECTED_I210)
678 		ret_val = true;
679 
680 	return ret_val;
681 }
682 
683 /**
684  *  e1000_update_flash_i210 - Commit EEPROM to the flash
685  *  @hw: pointer to the HW structure
686  *
687  **/
e1000_update_flash_i210(struct e1000_hw * hw)688 s32 e1000_update_flash_i210(struct e1000_hw *hw)
689 {
690 	s32 ret_val;
691 	u32 flup;
692 
693 	DEBUGFUNC("e1000_update_flash_i210");
694 
695 	ret_val = e1000_pool_flash_update_done_i210(hw);
696 	if (ret_val == -E1000_ERR_NVM) {
697 		DEBUGOUT("Flash update time out\n");
698 		goto out;
699 	}
700 
701 	flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
702 	E1000_WRITE_REG(hw, E1000_EECD, flup);
703 
704 	ret_val = e1000_pool_flash_update_done_i210(hw);
705 	if (ret_val == E1000_SUCCESS)
706 		DEBUGOUT("Flash update complete\n");
707 	else
708 		DEBUGOUT("Flash update time out\n");
709 
710 out:
711 	return ret_val;
712 }
713 
714 /**
715  *  e1000_pool_flash_update_done_i210 - Pool FLUDONE status.
716  *  @hw: pointer to the HW structure
717  *
718  **/
e1000_pool_flash_update_done_i210(struct e1000_hw * hw)719 s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
720 {
721 	s32 ret_val = -E1000_ERR_NVM;
722 	u32 i, reg;
723 
724 	DEBUGFUNC("e1000_pool_flash_update_done_i210");
725 
726 	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
727 		reg = E1000_READ_REG(hw, E1000_EECD);
728 		if (reg & E1000_EECD_FLUDONE_I210) {
729 			ret_val = E1000_SUCCESS;
730 			break;
731 		}
732 		usec_delay(5);
733 	}
734 
735 	return ret_val;
736 }
737 
738 /**
739  *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
740  *  @hw: pointer to the HW structure
741  *
742  *  Initialize the i210/i211 NVM parameters and function pointers.
743  **/
e1000_init_nvm_params_i210(struct e1000_hw * hw)744 STATIC s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
745 {
746 	s32 ret_val;
747 	struct e1000_nvm_info *nvm = &hw->nvm;
748 
749 	DEBUGFUNC("e1000_init_nvm_params_i210");
750 
751 	ret_val = e1000_init_nvm_params_82575(hw);
752 	nvm->ops.acquire = e1000_acquire_nvm_i210;
753 	nvm->ops.release = e1000_release_nvm_i210;
754 	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
755 	if (e1000_get_flash_presence_i210(hw)) {
756 		hw->nvm.type = e1000_nvm_flash_hw;
757 		nvm->ops.read    = e1000_read_nvm_srrd_i210;
758 		nvm->ops.write   = e1000_write_nvm_srwr_i210;
759 		nvm->ops.validate = e1000_validate_nvm_checksum_i210;
760 		nvm->ops.update   = e1000_update_nvm_checksum_i210;
761 	} else {
762 		hw->nvm.type = e1000_nvm_invm;
763 		nvm->ops.read     = e1000_read_invm_i210;
764 		nvm->ops.write    = e1000_null_write_nvm;
765 		nvm->ops.validate = e1000_null_ops_generic;
766 		nvm->ops.update   = e1000_null_ops_generic;
767 	}
768 	return ret_val;
769 }
770 
/**
 *  e1000_init_function_pointers_i210 - Init func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_i210(struct e1000_hw *hw)
{
	/* Inherit the 82575 function pointers first, then override the
	 * NVM init hook with the i210-specific version.
	 */
	e1000_init_function_pointers_82575(hw);
	hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
}
782 
783 /**
784  *  e1000_valid_led_default_i210 - Verify a valid default LED config
785  *  @hw: pointer to the HW structure
786  *  @data: pointer to the NVM (EEPROM)
787  *
788  *  Read the EEPROM for the current default LED configuration.  If the
789  *  LED configuration is not valid, set to a valid LED configuration.
790  **/
e1000_valid_led_default_i210(struct e1000_hw * hw,u16 * data)791 STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
792 {
793 	s32 ret_val;
794 
795 	DEBUGFUNC("e1000_valid_led_default_i210");
796 
797 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
798 	if (ret_val) {
799 		DEBUGOUT("NVM Read Error\n");
800 		goto out;
801 	}
802 
803 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
804 		switch (hw->phy.media_type) {
805 		case e1000_media_type_internal_serdes:
806 			*data = ID_LED_DEFAULT_I210_SERDES;
807 			break;
808 		case e1000_media_type_copper:
809 		default:
810 			*data = ID_LED_DEFAULT_I210;
811 			break;
812 		}
813 	}
814 out:
815 	return ret_val;
816 }
817 
/**
 * e1000_pll_workaround_i210
 * @hw: pointer to the HW structure
 *
 * Works around an errata in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power up.  The internal PHY
 * is reset and power-cycled (via PCI PM D3) until its PLL frequency
 * register no longer reads back as unconfigured, up to
 * E1000_MAX_PLL_TRIES attempts.
 **/
STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get PHY semaphore */
	/* NOTE(review): the acquire() status is ignored here — presumably
	 * acceptable during early init, but confirm before relying on it.
	 */
	hw->phy.ops.acquire(hw);
	/* Get and set needed register values */
	wuc = E1000_READ_REG(hw, E1000_WUC);
	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
	/* Force the internal MDIO interface while the workaround runs;
	 * the original MDICNFG is restored at the end.
	 */
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					    &nvm_word);
	if (ret_val != E1000_SUCCESS)
		nvm_word = E1000_INVM_DEFAULT_AL;
	/* Patched autoload word used while the PHY is being cycled. */
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	phy_word = E1000_PHY_PLL_UNCONF;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, 0xFC);
		usec_delay(20);
		e1000_read_phy_reg_mdic(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		usec_delay(20);
		e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, 0);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			/* PLL configured - workaround succeeded. */
			ret_val = E1000_SUCCESS;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

		E1000_WRITE_REG(hw, E1000_WUC, 0);
		/* Feed the patched autoload word to the EEPROM arbiter. */
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* Power-cycle the function: drop to D3 and back via PMCSR. */
		e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		msec_delay(1);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		/* Restore the original (unpatched) autoload word. */
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		E1000_WRITE_REG(hw, E1000_WUC, wuc);
	}
	/* restore MDICNFG setting */
	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
	/* Release PHY semaphore */
	hw->phy.ops.release(hw);
	return ret_val;
}
891 
892 /**
893  *  e1000_get_cfg_done_i210 - Read config done bit
894  *  @hw: pointer to the HW structure
895  *
896  *  Read the management control register for the config done bit for
897  *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
898  *  to read the config done bit, so an error is *ONLY* logged and returns
899  *  E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
900  *  would not be able to be reset or change link.
901  **/
e1000_get_cfg_done_i210(struct e1000_hw * hw)902 STATIC s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
903 {
904 	s32 timeout = PHY_CFG_TIMEOUT;
905 	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
906 
907 	DEBUGFUNC("e1000_get_cfg_done_i210");
908 
909 	while (timeout) {
910 		if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
911 			break;
912 		msec_delay(1);
913 		timeout--;
914 	}
915 	if (!timeout)
916 		DEBUGOUT("MNG configuration cycle has not completed.\n");
917 
918 	return E1000_SUCCESS;
919 }
920 
/**
 *  e1000_init_hw_i210 - Init hw for I210/I211
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize hw for i210 hw family.
 **/
s32 e1000_init_hw_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_init_hw_i210");
	/* Flash-less parts need the PLL errata workaround applied before
	 * any further hardware initialization.
	 */
	if ((hw->mac.type >= e1000_i210) &&
	    !(e1000_get_flash_presence_i210(hw))) {
		ret_val = e1000_pll_workaround_i210(hw);
		if (ret_val != E1000_SUCCESS)
			return ret_val;
	}
	hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	/* NOTE(review): the id_led_init() status is overwritten below and
	 * thus ignored — presumably LED init failure is non-fatal; confirm
	 * against the other e1000 base files before changing.
	 */

	ret_val = e1000_init_hw_base(hw);
	return ret_val;
}
947