xref: /f-stack/dpdk/drivers/net/e1000/base/e1000_i210.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4 
5 #include "e1000_api.h"
6 
7 
8 STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
9 STATIC void e1000_release_nvm_i210(struct e1000_hw *hw);
10 STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
11 STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
12 				u16 *data);
13 STATIC s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
14 STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
15 
16 /**
17  *  e1000_acquire_nvm_i210 - Request for access to EEPROM
18  *  @hw: pointer to the HW structure
19  *
20  *  Acquire the necessary semaphores for exclusive access to the EEPROM.
21  *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
22  *  Return successful if access grant bit set, else clear the request for
23  *  EEPROM access and return -E1000_ERR_NVM (-1).
24  **/
e1000_acquire_nvm_i210(struct e1000_hw * hw)25 STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
26 {
27 	s32 ret_val;
28 
29 	DEBUGFUNC("e1000_acquire_nvm_i210");
30 
31 	ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
32 
33 	return ret_val;
34 }
35 
36 /**
37  *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
38  *  @hw: pointer to the HW structure
39  *
40  *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
41  *  then release the semaphores acquired.
42  **/
e1000_release_nvm_i210(struct e1000_hw * hw)43 STATIC void e1000_release_nvm_i210(struct e1000_hw *hw)
44 {
45 	DEBUGFUNC("e1000_release_nvm_i210");
46 
47 	e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
48 }
49 
/**
 *  e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 *
 *  Returns E1000_SUCCESS when the requested bit has been set in
 *  SW_FW_SYNC, or -E1000_ERR_SWFW_SYNC when the HW semaphore cannot be
 *  taken or the resource stays busy for the whole timeout.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16; /* FW ownership bits are the upper 16 bits */
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_i210");

	while (i < timeout) {
		/* SW_FW_SYNC may only be read/modified while holding the
		 * HW (SWSM) semaphore.
		 */
		if (e1000_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		/* Resource is free when neither FW nor SW holds our bits. */
		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		/* The HW semaphore was already released at the end of the
		 * last loop iteration, so only report the failure here.
		 */
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim the resource; the HW semaphore is still held at this point. */
	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}
101 
102 /**
103  *  e1000_release_swfw_sync_i210 - Release SW/FW semaphore
104  *  @hw: pointer to the HW structure
105  *  @mask: specifies which semaphore to acquire
106  *
107  *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
108  *  will also specify which port we're releasing the lock for.
109  **/
e1000_release_swfw_sync_i210(struct e1000_hw * hw,u16 mask)110 void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
111 {
112 	u32 swfw_sync;
113 
114 	DEBUGFUNC("e1000_release_swfw_sync_i210");
115 
116 	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
117 		; /* Empty */
118 
119 	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
120 	swfw_sync &= (u32)~mask;
121 	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
122 
123 	e1000_put_hw_semaphore_generic(hw);
124 }
125 
/**
 *  e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 *
 *  Two-stage acquisition: first wait for the SW semaphore bit (SMBI) to
 *  clear, then set the SW/FW semaphore bit (SWESMBI) and verify that it
 *  latched.  Returns E1000_SUCCESS, or -E1000_ERR_NVM if either stage
 *  times out.
 **/
STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	/* Timeout scales with the NVM size; each retry waits 50 usec. */
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("e1000_get_hw_semaphore_i210");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			e1000_put_hw_semaphore_generic(hw);
			/* Poll SMBI one more time after the forced release. */
			for (i = 0; i < timeout; i++) {
				swsm = E1000_READ_REG(hw, E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}
194 
195 /**
196  *  e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
197  *  @hw: pointer to the HW structure
198  *  @offset: offset of word in the Shadow Ram to read
199  *  @words: number of words to read
200  *  @data: word read from the Shadow Ram
201  *
202  *  Reads a 16 bit word from the Shadow Ram using the EERD register.
203  *  Uses necessary synchronization semaphores.
204  **/
e1000_read_nvm_srrd_i210(struct e1000_hw * hw,u16 offset,u16 words,u16 * data)205 s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
206 			     u16 *data)
207 {
208 	s32 status = E1000_SUCCESS;
209 	u16 i, count;
210 
211 	DEBUGFUNC("e1000_read_nvm_srrd_i210");
212 
213 	/* We cannot hold synchronization semaphores for too long,
214 	 * because of forceful takeover procedure. However it is more efficient
215 	 * to read in bursts than synchronizing access for each word. */
216 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
217 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
218 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
219 		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
220 			status = e1000_read_nvm_eerd(hw, offset, count,
221 						     data + i);
222 			hw->nvm.ops.release(hw);
223 		} else {
224 			status = E1000_ERR_SWFW_SYNC;
225 		}
226 
227 		if (status != E1000_SUCCESS)
228 			break;
229 	}
230 
231 	return status;
232 }
233 
234 /**
235  *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
236  *  @hw: pointer to the HW structure
237  *  @offset: offset within the Shadow RAM to be written to
238  *  @words: number of words to write
239  *  @data: 16 bit word(s) to be written to the Shadow RAM
240  *
241  *  Writes data to Shadow RAM at offset using EEWR register.
242  *
243  *  If e1000_update_nvm_checksum is not called after this function , the
244  *  data will not be committed to FLASH and also Shadow RAM will most likely
245  *  contain an invalid checksum.
246  *
247  *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
248  *  partially written.
249  **/
e1000_write_nvm_srwr_i210(struct e1000_hw * hw,u16 offset,u16 words,u16 * data)250 s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
251 			      u16 *data)
252 {
253 	s32 status = E1000_SUCCESS;
254 	u16 i, count;
255 
256 	DEBUGFUNC("e1000_write_nvm_srwr_i210");
257 
258 	/* We cannot hold synchronization semaphores for too long,
259 	 * because of forceful takeover procedure. However it is more efficient
260 	 * to write in bursts than synchronizing access for each word. */
261 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
262 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
263 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
264 		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
265 			status = e1000_write_nvm_srwr(hw, offset, count,
266 						      data + i);
267 			hw->nvm.ops.release(hw);
268 		} else {
269 			status = E1000_ERR_SWFW_SYNC;
270 		}
271 
272 		if (status != E1000_SUCCESS)
273 			break;
274 	}
275 
276 	return status;
277 }
278 
279 /**
280  *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
281  *  @hw: pointer to the HW structure
282  *  @offset: offset within the Shadow Ram to be written to
283  *  @words: number of words to write
284  *  @data: 16 bit word(s) to be written to the Shadow Ram
285  *
286  *  Writes data to Shadow Ram at offset using EEWR register.
287  *
288  *  If e1000_update_nvm_checksum is not called after this function , the
289  *  Shadow Ram will most likely contain an invalid checksum.
290  **/
e1000_write_nvm_srwr(struct e1000_hw * hw,u16 offset,u16 words,u16 * data)291 STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
292 				u16 *data)
293 {
294 	struct e1000_nvm_info *nvm = &hw->nvm;
295 	u32 i, k, eewr = 0;
296 	u32 attempts = 100000;
297 	s32 ret_val = E1000_SUCCESS;
298 
299 	DEBUGFUNC("e1000_write_nvm_srwr");
300 
301 	/*
302 	 * A check for invalid values:  offset too large, too many words,
303 	 * too many words for the offset, and not enough words.
304 	 */
305 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
306 	    (words == 0)) {
307 		DEBUGOUT("nvm parameter(s) out of bounds\n");
308 		ret_val = -E1000_ERR_NVM;
309 		goto out;
310 	}
311 
312 	for (i = 0; i < words; i++) {
313 		eewr = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
314 			(data[i] << E1000_NVM_RW_REG_DATA) |
315 			E1000_NVM_RW_REG_START;
316 
317 		E1000_WRITE_REG(hw, E1000_SRWR, eewr);
318 
319 		for (k = 0; k < attempts; k++) {
320 			if (E1000_NVM_RW_REG_DONE &
321 			    E1000_READ_REG(hw, E1000_SRWR)) {
322 				ret_val = E1000_SUCCESS;
323 				break;
324 			}
325 			usec_delay(5);
326 		}
327 
328 		if (ret_val != E1000_SUCCESS) {
329 			DEBUGOUT("Shadow RAM write EEWR timed out\n");
330 			break;
331 		}
332 	}
333 
334 out:
335 	return ret_val;
336 }
337 
338 /** e1000_read_invm_word_i210 - Reads OTP
339  *  @hw: pointer to the HW structure
340  *  @address: the word address (aka eeprom offset) to read
341  *  @data: pointer to the data read
342  *
343  *  Reads 16-bit words from the OTP. Return error when the word is not
344  *  stored in OTP.
345  **/
e1000_read_invm_word_i210(struct e1000_hw * hw,u8 address,u16 * data)346 STATIC s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
347 {
348 	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
349 	u32 invm_dword;
350 	u16 i;
351 	u8 record_type, word_address;
352 
353 	DEBUGFUNC("e1000_read_invm_word_i210");
354 
355 	for (i = 0; i < E1000_INVM_SIZE; i++) {
356 		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
357 		/* Get record type */
358 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
359 		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
360 			break;
361 		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
362 			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
363 		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
364 			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
365 		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
366 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
367 			if (word_address == address) {
368 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
369 				DEBUGOUT2("Read INVM Word 0x%02x = %x",
370 					  address, *data);
371 				status = E1000_SUCCESS;
372 				break;
373 			}
374 		}
375 	}
376 	if (status != E1000_SUCCESS)
377 		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
378 	return status;
379 }
380 
381 /** e1000_read_invm_i210 - Read invm wrapper function for I210/I211
382  *  @hw: pointer to the HW structure
383  *  @address: the word address (aka eeprom offset) to read
384  *  @data: pointer to the data read
385  *
386  *  Wrapper function to return data formerly found in the NVM.
387  **/
e1000_read_invm_i210(struct e1000_hw * hw,u16 offset,u16 E1000_UNUSEDARG words,u16 * data)388 STATIC s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
389 				u16 E1000_UNUSEDARG words, u16 *data)
390 {
391 	s32 ret_val = E1000_SUCCESS;
392 	UNREFERENCED_1PARAMETER(words);
393 
394 	DEBUGFUNC("e1000_read_invm_i210");
395 
396 	/* Only the MAC addr is required to be present in the iNVM */
397 	switch (offset) {
398 	case NVM_MAC_ADDR:
399 		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
400 		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset + 1,
401 						     &data[1]);
402 		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset + 2,
403 						     &data[2]);
404 		if (ret_val != E1000_SUCCESS)
405 			DEBUGOUT("MAC Addr not found in iNVM\n");
406 		break;
407 	case NVM_INIT_CTRL_2:
408 		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
409 		if (ret_val != E1000_SUCCESS) {
410 			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
411 			ret_val = E1000_SUCCESS;
412 		}
413 		break;
414 	case NVM_INIT_CTRL_4:
415 		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
416 		if (ret_val != E1000_SUCCESS) {
417 			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
418 			ret_val = E1000_SUCCESS;
419 		}
420 		break;
421 	case NVM_LED_1_CFG:
422 		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
423 		if (ret_val != E1000_SUCCESS) {
424 			*data = NVM_LED_1_CFG_DEFAULT_I211;
425 			ret_val = E1000_SUCCESS;
426 		}
427 		break;
428 	case NVM_LED_0_2_CFG:
429 		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
430 		if (ret_val != E1000_SUCCESS) {
431 			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
432 			ret_val = E1000_SUCCESS;
433 		}
434 		break;
435 	case NVM_ID_LED_SETTINGS:
436 		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
437 		if (ret_val != E1000_SUCCESS) {
438 			*data = ID_LED_RESERVED_FFFF;
439 			ret_val = E1000_SUCCESS;
440 		}
441 		break;
442 	case NVM_SUB_DEV_ID:
443 		*data = hw->subsystem_device_id;
444 		break;
445 	case NVM_SUB_VEN_ID:
446 		*data = hw->subsystem_vendor_id;
447 		break;
448 	case NVM_DEV_ID:
449 		*data = hw->device_id;
450 		break;
451 	case NVM_VEN_ID:
452 		*data = hw->vendor_id;
453 		break;
454 	default:
455 		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
456 		*data = NVM_RESERVED_WORD;
457 		break;
458 	}
459 	return ret_val;
460 }
461 
/**
 *  e1000_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 *  The whole iNVM is snapshotted into a local buffer, then the version
 *  and image-type fields are located by scanning the record blocks
 *  backwards (buffer[invm_blocks - i]) from the end of the record area.
 *  Returns -E1000_ERR_INVM_VALUE_NOT_FOUND if no matching field is found.
 **/
s32 e1000_read_invm_version(struct e1000_hw *hw,
			    struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	/* Number of dwords usable for records: the trailing
	 * E1000_INVM_ULT_BYTES_SIZE bytes are excluded.
	 */
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	DEBUGFUNC("e1000_read_invm_version");

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			/* Version lives in FIELD_TWO of the following block. */
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
	}

	if (status == E1000_SUCCESS) {
		/* Split the raw version field into major/minor components. */
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have image type in first location used */
		else if ((((*record & 0x3) == 0) &&
			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = E1000_SUCCESS;
			break;
		}
	}
	return status;
}
560 
561 /**
562  *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
563  *  @hw: pointer to the HW structure
564  *
565  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
566  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
567  **/
e1000_validate_nvm_checksum_i210(struct e1000_hw * hw)568 s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
569 {
570 	s32 status = E1000_SUCCESS;
571 	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
572 
573 	DEBUGFUNC("e1000_validate_nvm_checksum_i210");
574 
575 	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
576 
577 		/*
578 		 * Replace the read function with semaphore grabbing with
579 		 * the one that skips this for a while.
580 		 * We have semaphore taken already here.
581 		 */
582 		read_op_ptr = hw->nvm.ops.read;
583 		hw->nvm.ops.read = e1000_read_nvm_eerd;
584 
585 		status = e1000_validate_nvm_checksum_generic(hw);
586 
587 		/* Revert original read operation. */
588 		hw->nvm.ops.read = read_op_ptr;
589 
590 		hw->nvm.ops.release(hw);
591 	} else {
592 		status = E1000_ERR_SWFW_SYNC;
593 	}
594 
595 	return status;
596 }
597 
598 
/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 *
 *  Returns E1000_SUCCESS, E1000_ERR_SWFW_SYNC if the semaphore cannot be
 *  taken, or the error from the failing read/write/flash-commit step.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		/* Sum every word before the checksum word. */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				/* Release the semaphore on every error path. */
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		/* Checksum word makes the total sum equal NVM_SUM (0xBABA). */
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		/* Commit the updated Shadow RAM contents to flash. */
		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
660 
661 /**
662  *  e1000_get_flash_presence_i210 - Check if flash device is detected.
663  *  @hw: pointer to the HW structure
664  *
665  **/
e1000_get_flash_presence_i210(struct e1000_hw * hw)666 bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
667 {
668 	u32 eec = 0;
669 	bool ret_val = false;
670 
671 	DEBUGFUNC("e1000_get_flash_presence_i210");
672 
673 	eec = E1000_READ_REG(hw, E1000_EECD);
674 
675 	if (eec & E1000_EECD_FLASH_DETECTED_I210)
676 		ret_val = true;
677 
678 	return ret_val;
679 }
680 
681 /**
682  *  e1000_update_flash_i210 - Commit EEPROM to the flash
683  *  @hw: pointer to the HW structure
684  *
685  **/
e1000_update_flash_i210(struct e1000_hw * hw)686 s32 e1000_update_flash_i210(struct e1000_hw *hw)
687 {
688 	s32 ret_val;
689 	u32 flup;
690 
691 	DEBUGFUNC("e1000_update_flash_i210");
692 
693 	ret_val = e1000_pool_flash_update_done_i210(hw);
694 	if (ret_val == -E1000_ERR_NVM) {
695 		DEBUGOUT("Flash update time out\n");
696 		goto out;
697 	}
698 
699 	flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
700 	E1000_WRITE_REG(hw, E1000_EECD, flup);
701 
702 	ret_val = e1000_pool_flash_update_done_i210(hw);
703 	if (ret_val == E1000_SUCCESS)
704 		DEBUGOUT("Flash update complete\n");
705 	else
706 		DEBUGOUT("Flash update time out\n");
707 
708 out:
709 	return ret_val;
710 }
711 
712 /**
713  *  e1000_pool_flash_update_done_i210 - Pool FLUDONE status.
714  *  @hw: pointer to the HW structure
715  *
716  **/
e1000_pool_flash_update_done_i210(struct e1000_hw * hw)717 s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
718 {
719 	s32 ret_val = -E1000_ERR_NVM;
720 	u32 i, reg;
721 
722 	DEBUGFUNC("e1000_pool_flash_update_done_i210");
723 
724 	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
725 		reg = E1000_READ_REG(hw, E1000_EECD);
726 		if (reg & E1000_EECD_FLUDONE_I210) {
727 			ret_val = E1000_SUCCESS;
728 			break;
729 		}
730 		usec_delay(5);
731 	}
732 
733 	return ret_val;
734 }
735 
736 /**
737  *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
738  *  @hw: pointer to the HW structure
739  *
740  *  Initialize the i210/i211 NVM parameters and function pointers.
741  **/
e1000_init_nvm_params_i210(struct e1000_hw * hw)742 STATIC s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
743 {
744 	s32 ret_val;
745 	struct e1000_nvm_info *nvm = &hw->nvm;
746 
747 	DEBUGFUNC("e1000_init_nvm_params_i210");
748 
749 	ret_val = e1000_init_nvm_params_82575(hw);
750 	nvm->ops.acquire = e1000_acquire_nvm_i210;
751 	nvm->ops.release = e1000_release_nvm_i210;
752 	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
753 	if (e1000_get_flash_presence_i210(hw)) {
754 		hw->nvm.type = e1000_nvm_flash_hw;
755 		nvm->ops.read    = e1000_read_nvm_srrd_i210;
756 		nvm->ops.write   = e1000_write_nvm_srwr_i210;
757 		nvm->ops.validate = e1000_validate_nvm_checksum_i210;
758 		nvm->ops.update   = e1000_update_nvm_checksum_i210;
759 	} else {
760 		hw->nvm.type = e1000_nvm_invm;
761 		nvm->ops.read     = e1000_read_invm_i210;
762 		nvm->ops.write    = e1000_null_write_nvm;
763 		nvm->ops.validate = e1000_null_ops_generic;
764 		nvm->ops.update   = e1000_null_ops_generic;
765 	}
766 	return ret_val;
767 }
768 
769 /**
770  *  e1000_init_function_pointers_i210 - Init func ptrs.
771  *  @hw: pointer to the HW structure
772  *
773  *  Called to initialize all function pointers and parameters.
774  **/
e1000_init_function_pointers_i210(struct e1000_hw * hw)775 void e1000_init_function_pointers_i210(struct e1000_hw *hw)
776 {
777 	e1000_init_function_pointers_82575(hw);
778 	hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
779 }
780 
781 /**
782  *  e1000_valid_led_default_i210 - Verify a valid default LED config
783  *  @hw: pointer to the HW structure
784  *  @data: pointer to the NVM (EEPROM)
785  *
786  *  Read the EEPROM for the current default LED configuration.  If the
787  *  LED configuration is not valid, set to a valid LED configuration.
788  **/
e1000_valid_led_default_i210(struct e1000_hw * hw,u16 * data)789 STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
790 {
791 	s32 ret_val;
792 
793 	DEBUGFUNC("e1000_valid_led_default_i210");
794 
795 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
796 	if (ret_val) {
797 		DEBUGOUT("NVM Read Error\n");
798 		goto out;
799 	}
800 
801 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
802 		switch (hw->phy.media_type) {
803 		case e1000_media_type_internal_serdes:
804 			*data = ID_LED_DEFAULT_I210_SERDES;
805 			break;
806 		case e1000_media_type_copper:
807 		default:
808 			*data = ID_LED_DEFAULT_I210;
809 			break;
810 		}
811 	}
812 out:
813 	return ret_val;
814 }
815 
/**
 * e1000_pll_workaround_i210
 * @hw: pointer to the HW structure
 *
 * Works around an errata in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power up.
 *
 * Polls the internal PHY's PLL-frequency register and, while it reads as
 * unconfigured, resets the PHY and bounces the device through D3/D0 with
 * a patched autoload word, up to E1000_MAX_PLL_TRIES times.  Returns
 * E1000_SUCCESS once the PLL reads configured, -E1000_ERR_PHY otherwise.
 **/
STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get PHY semaphore */
	hw->phy.ops.acquire(hw);
	/* Get and set needed register values */
	wuc = E1000_READ_REG(hw, E1000_WUC);
	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
	/* Force internal MDIO access for the duration of the workaround. */
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					    &nvm_word);
	if (ret_val != E1000_SUCCESS)
		nvm_word = E1000_INVM_DEFAULT_AL;
	/* Autoload word used while the PHY is being power-cycled. */
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	phy_word = E1000_PHY_PLL_UNCONF;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, 0xFC);
		usec_delay(20);
		e1000_read_phy_reg_mdic(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		usec_delay(20);
		e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, 0);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			/* PLL is configured; done. */
			ret_val = E1000_SUCCESS;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

		E1000_WRITE_REG(hw, E1000_WUC, 0);
		/* Load the patched autoload word for the power cycle. */
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* Bounce the device through D3 and back to D0. */
		e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		msec_delay(1);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		/* Restore the original autoload word. */
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		E1000_WRITE_REG(hw, E1000_WUC, wuc);
	}
	/* restore MDICNFG setting */
	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
	/* Release PHY semaphore */
	hw->phy.ops.release(hw);
	return ret_val;
}
889 
890 /**
891  *  e1000_get_cfg_done_i210 - Read config done bit
892  *  @hw: pointer to the HW structure
893  *
894  *  Read the management control register for the config done bit for
895  *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
896  *  to read the config done bit, so an error is *ONLY* logged and returns
897  *  E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
898  *  would not be able to be reset or change link.
899  **/
e1000_get_cfg_done_i210(struct e1000_hw * hw)900 STATIC s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
901 {
902 	s32 timeout = PHY_CFG_TIMEOUT;
903 	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
904 
905 	DEBUGFUNC("e1000_get_cfg_done_i210");
906 
907 	while (timeout) {
908 		if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
909 			break;
910 		msec_delay(1);
911 		timeout--;
912 	}
913 	if (!timeout)
914 		DEBUGOUT("MNG configuration cycle has not completed.\n");
915 
916 	return E1000_SUCCESS;
917 }
918 
919 /**
920  *  e1000_init_hw_i210 - Init hw for I210/I211
921  *  @hw: pointer to the HW structure
922  *
923  *  Called to initialize hw for i210 hw family.
924  **/
e1000_init_hw_i210(struct e1000_hw * hw)925 s32 e1000_init_hw_i210(struct e1000_hw *hw)
926 {
927 	s32 ret_val;
928 	struct e1000_mac_info *mac = &hw->mac;
929 
930 	DEBUGFUNC("e1000_init_hw_i210");
931 	if ((hw->mac.type >= e1000_i210) &&
932 	    !(e1000_get_flash_presence_i210(hw))) {
933 		ret_val = e1000_pll_workaround_i210(hw);
934 		if (ret_val != E1000_SUCCESS)
935 			return ret_val;
936 	}
937 	hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;
938 
939 	/* Initialize identification LED */
940 	ret_val = mac->ops.id_led_init(hw);
941 
942 	ret_val = e1000_init_hw_base(hw);
943 	return ret_val;
944 }
945