xref: /f-stack/dpdk/drivers/net/i40e/base/i40e_nvm.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4 
5 #include <inttypes.h>
6 
7 #include "i40e_prototype.h"
8 
9 /**
10  * i40e_init_nvm_ops - Initialize NVM function pointers
11  * @hw: pointer to the HW structure
12  *
13  * Setup the function pointers and the NVM info structure. Should be called
14  * once per NVM initialization, e.g. inside the i40e_init_shared_code().
15  * Please notice that the NVM term is used here (& in all methods covered
16  * in this file) as an equivalent of the FLASH part mapped into the SR.
17  * We are accessing FLASH always through the Shadow RAM.
18  **/
i40e_init_nvm(struct i40e_hw * hw)19 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
20 {
21 	struct i40e_nvm_info *nvm = &hw->nvm;
22 	enum i40e_status_code ret_code = I40E_SUCCESS;
23 	u32 fla, gens;
24 	u8 sr_size;
25 
26 	DEBUGFUNC("i40e_init_nvm");
27 
28 	/* The SR size is stored regardless of the nvm programming mode
29 	 * as the blank mode may be used in the factory line.
30 	 */
31 	gens = rd32(hw, I40E_GLNVM_GENS);
32 	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
33 			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
34 	/* Switching to words (sr_size contains power of 2KB) */
35 	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
36 
37 	/* Check if we are in the normal or blank NVM programming mode */
38 	fla = rd32(hw, I40E_GLNVM_FLA);
39 	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
40 		/* Max NVM timeout */
41 		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
42 		nvm->blank_nvm_mode = false;
43 	} else { /* Blank programming mode */
44 		nvm->blank_nvm_mode = true;
45 		ret_code = I40E_ERR_NVM_BLANK_MODE;
46 		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
47 	}
48 
49 	return ret_code;
50 }
51 
/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership for reading
 * via the proper Admin Command. If ownership is currently held by another
 * function, it polls (10 ms steps) until either the current owner's
 * reservation expires or the overall I40E_MAX_NVM_TIMEOUT window elapses.
 * On success, hw->nvm.hw_semaphore_timeout is set to the GTIME deadline of
 * our reservation; on final failure it is cleared to 0.
 **/
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
				       enum i40e_aq_resource_access_type access)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 gtime, timeout;
	u64 time_left = 0;

	DEBUGFUNC("i40e_acquire_nvm");

	/* No resource arbitration exists in blank programming mode */
	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	/* On failure, time_left reports how long the current owner holds it */
	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%" PRIu64 " ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner timeouts */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			i40e_msec_delay(10);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							I40E_NVM_RESOURCE_ID,
							access, 0, &time_left,
							NULL);
			if (ret_code == I40E_SUCCESS) {
				/* Acquired: record our own reservation deadline */
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code != I40E_SUCCESS) {
			/* Gave up: clear the deadline so callers can retry later */
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %" PRIu64 " ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_i40e_acquire_nvm_exit:
	return ret_code;
}
112 
113 /**
114  * i40e_release_nvm - Generic request for releasing the NVM ownership
115  * @hw: pointer to the HW structure
116  *
117  * This function will release NVM resource via the proper Admin Command.
118  **/
i40e_release_nvm(struct i40e_hw * hw)119 void i40e_release_nvm(struct i40e_hw *hw)
120 {
121 	enum i40e_status_code ret_code = I40E_SUCCESS;
122 	u32 total_delay = 0;
123 
124 	DEBUGFUNC("i40e_release_nvm");
125 
126 	if (hw->nvm.blank_nvm_mode)
127 		return;
128 
129 	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
130 
131 	/* there are some rare cases when trying to release the resource
132 	 * results in an admin Q timeout, so handle them correctly
133 	 */
134 	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
135 	       (total_delay < hw->aq.asq_cmd_timeout)) {
136 			i40e_msec_delay(1);
137 			ret_code = i40e_aq_release_resource(hw,
138 						I40E_NVM_RESOURCE_ID, 0, NULL);
139 			total_delay++;
140 	}
141 }
142 
143 /**
144  * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
145  * @hw: pointer to the HW structure
146  *
147  * Polls the SRCTL Shadow RAM register done bit.
148  **/
i40e_poll_sr_srctl_done_bit(struct i40e_hw * hw)149 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
150 {
151 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
152 	u32 srctl, wait_cnt;
153 
154 	DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
155 
156 	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
157 	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
158 		srctl = rd32(hw, I40E_GLNVM_SRCTL);
159 		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
160 			ret_code = I40E_SUCCESS;
161 			break;
162 		}
163 		i40e_usec_delay(5);
164 	}
165 	if (ret_code == I40E_ERR_TIMEOUT)
166 		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
167 	return ret_code;
168 }
169 
170 /**
171  * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
172  * @hw: pointer to the HW structure
173  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
174  * @data: word read from the Shadow RAM
175  *
176  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
177  **/
i40e_read_nvm_word_srctl(struct i40e_hw * hw,u16 offset,u16 * data)178 STATIC enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw,
179 						      u16 offset,
180 						      u16 *data)
181 {
182 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
183 	u32 sr_reg;
184 
185 	DEBUGFUNC("i40e_read_nvm_word_srctl");
186 
187 	if (offset >= hw->nvm.sr_size) {
188 		i40e_debug(hw, I40E_DEBUG_NVM,
189 			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
190 			   offset, hw->nvm.sr_size);
191 		ret_code = I40E_ERR_PARAM;
192 		goto read_nvm_exit;
193 	}
194 
195 	/* Poll the done bit first */
196 	ret_code = i40e_poll_sr_srctl_done_bit(hw);
197 	if (ret_code == I40E_SUCCESS) {
198 		/* Write the address and start reading */
199 		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
200 			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
201 		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
202 
203 		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
204 		ret_code = i40e_poll_sr_srctl_done_bit(hw);
205 		if (ret_code == I40E_SUCCESS) {
206 			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
207 			*data = (u16)((sr_reg &
208 				       I40E_GLNVM_SRDATA_RDDATA_MASK)
209 				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
210 		}
211 	}
212 	if (ret_code != I40E_SUCCESS)
213 		i40e_debug(hw, I40E_DEBUG_NVM,
214 			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
215 			   offset);
216 
217 read_nvm_exit:
218 	return ret_code;
219 }
220 
221 /**
222  * i40e_read_nvm_aq - Read Shadow RAM.
223  * @hw: pointer to the HW structure.
224  * @module_pointer: module pointer location in words from the NVM beginning
225  * @offset: offset in words from module start
226  * @words: number of words to write
227  * @data: buffer with words to write to the Shadow RAM
228  * @last_command: tells the AdminQ that this is the last command
229  *
230  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
231  **/
i40e_read_nvm_aq(struct i40e_hw * hw,u8 module_pointer,u32 offset,u16 words,void * data,bool last_command)232 STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
233 					      u8 module_pointer, u32 offset,
234 					      u16 words, void *data,
235 					      bool last_command)
236 {
237 	enum i40e_status_code ret_code = I40E_ERR_NVM;
238 	struct i40e_asq_cmd_details cmd_details;
239 
240 	DEBUGFUNC("i40e_read_nvm_aq");
241 
242 	memset(&cmd_details, 0, sizeof(cmd_details));
243 	cmd_details.wb_desc = &hw->nvm_wb_desc;
244 
245 	/* Here we are checking the SR limit only for the flat memory model.
246 	 * We cannot do it for the module-based model, as we did not acquire
247 	 * the NVM resource yet (we cannot get the module pointer value).
248 	 * Firmware will check the module-based model.
249 	 */
250 	if ((offset + words) > hw->nvm.sr_size)
251 		i40e_debug(hw, I40E_DEBUG_NVM,
252 			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
253 			   (offset + words), hw->nvm.sr_size);
254 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
255 		/* We can write only up to 4KB (one sector), in one AQ write */
256 		i40e_debug(hw, I40E_DEBUG_NVM,
257 			   "NVM write fail error: tried to write %d words, limit is %d.\n",
258 			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
259 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
260 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
261 		/* A single write cannot spread over two sectors */
262 		i40e_debug(hw, I40E_DEBUG_NVM,
263 			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
264 			   offset, words);
265 	else
266 		ret_code = i40e_aq_read_nvm(hw, module_pointer,
267 					    2 * offset,  /*bytes*/
268 					    2 * words,   /*bytes*/
269 					    data, last_command, &cmd_details);
270 
271 	return ret_code;
272 }
273 
274 /**
275  * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
276  * @hw: pointer to the HW structure
277  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
278  * @data: word read from the Shadow RAM
279  *
280  * Reads one 16 bit word from the Shadow RAM using the AdminQ
281  **/
i40e_read_nvm_word_aq(struct i40e_hw * hw,u16 offset,u16 * data)282 STATIC enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
283 						   u16 *data)
284 {
285 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
286 
287 	DEBUGFUNC("i40e_read_nvm_word_aq");
288 
289 	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
290 	*data = LE16_TO_CPU(*(__le16 *)data);
291 
292 	return ret_code;
293 }
294 
295 /**
296  * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
297  * @hw: pointer to the HW structure
298  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
299  * @data: word read from the Shadow RAM
300  *
301  * Reads one 16 bit word from the Shadow RAM.
302  *
303  * Do not use this function except in cases where the nvm lock is already
304  * taken via i40e_acquire_nvm().
305  **/
__i40e_read_nvm_word(struct i40e_hw * hw,u16 offset,u16 * data)306 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
307 					   u16 offset,
308 					   u16 *data)
309 {
310 
311 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
312 		return i40e_read_nvm_word_aq(hw, offset, data);
313 
314 	return i40e_read_nvm_word_srctl(hw, offset, data);
315 }
316 
317 /**
318  * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
319  * @hw: pointer to the HW structure
320  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
321  * @data: word read from the Shadow RAM
322  *
323  * Reads one 16 bit word from the Shadow RAM.
324  **/
i40e_read_nvm_word(struct i40e_hw * hw,u16 offset,u16 * data)325 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
326 					 u16 *data)
327 {
328 	enum i40e_status_code ret_code = I40E_SUCCESS;
329 
330 	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
331 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
332 
333 	if (ret_code)
334 		return ret_code;
335 	ret_code = __i40e_read_nvm_word(hw, offset, data);
336 
337 	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
338 		i40e_release_nvm(hw);
339 	return ret_code;
340 }
341 
/**
 * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
 * @hw: Pointer to the HW structure
 * @module_ptr: Pointer to module in words with respect to NVM beginning
 * @module_offset: Offset in words from module start
 * @data_offset: Offset in words from reading data area start
 * @words_data_size: Words to read from NVM
 * @data_ptr: Pointer to memory location where resulting buffer will be stored
 *
 * Resolves a two-level pointer chain in the Shadow RAM (the module pointer,
 * then a module-specific pointer stored at the module) and reads the
 * resulting data area into @data_ptr. Only SR-mapped modules are supported.
 **/
enum i40e_status_code
i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
			  u16 data_offset, u16 words_data_size, u16 *data_ptr)
{
	enum i40e_status_code status;
	u16 specific_ptr = 0;
	u16 ptr_value = 0;
	u16 offset = 0;

	/* module_ptr == 0 means the module starts at the SR beginning
	 * (ptr_value stays 0 and the reads below are relative to word 0)
	 */
	if (module_ptr != 0) {
		status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
		if (status != I40E_SUCCESS) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm word failed.Error code: %d.\n",
				   status);
			return I40E_ERR_NVM;
		}
	}
#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
#define I40E_NVM_INVALID_VAL 0xFFFF

	/* Pointer not initialized */
	if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
	    ptr_value == I40E_NVM_INVALID_VAL) {
		i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
		return I40E_ERR_BAD_PTR;
	}

	/* Check whether the module is in SR mapped area or outside */
	if (ptr_value & I40E_PTR_TYPE) {
		/* Pointer points outside of the Shared RAM mapped area */
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");

		return I40E_ERR_PARAM;
	} else {
		/* Read from the Shadow RAM */

		/* The module's word at @module_offset holds a further,
		 * module-specific pointer into the data area
		 */
		status = i40e_read_nvm_word(hw, ptr_value + module_offset,
					    &specific_ptr);
		if (status != I40E_SUCCESS) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm word failed.Error code: %d.\n",
				   status);
			return I40E_ERR_NVM;
		}

		/* Final SR word offset of the requested data */
		offset = ptr_value + module_offset + specific_ptr +
			data_offset;

		status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
					      data_ptr);
		if (status != I40E_SUCCESS) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm buffer failed.Error code: %d.\n",
				   status);
		}
	}

	return status;
}
412 
413 /**
414  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
415  * @hw: pointer to the HW structure
416  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
417  * @words: (in) number of words to read; (out) number of words actually read
418  * @data: words read from the Shadow RAM
419  *
420  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
421  * method. The buffer read is preceded by the NVM ownership take
422  * and followed by the release.
423  **/
i40e_read_nvm_buffer_srctl(struct i40e_hw * hw,u16 offset,u16 * words,u16 * data)424 STATIC enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
425 							u16 *words, u16 *data)
426 {
427 	enum i40e_status_code ret_code = I40E_SUCCESS;
428 	u16 index, word;
429 
430 	DEBUGFUNC("i40e_read_nvm_buffer_srctl");
431 
432 	/* Loop through the selected region */
433 	for (word = 0; word < *words; word++) {
434 		index = offset + word;
435 		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
436 		if (ret_code != I40E_SUCCESS)
437 			break;
438 	}
439 
440 	/* Update the number of words read from the Shadow RAM */
441 	*words = word;
442 
443 	return ret_code;
444 }
445 
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using i40e_read_nvm_aq(),
 * splitting the request so that no single AQ read exceeds one 4KB sector
 * or crosses a sector boundary. On success the buffer is converted to
 * host byte order.
 **/
STATIC enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
						     u16 *words, u16 *data)
{
	enum i40e_status_code ret_code;
	u16 read_size = *words;
	bool last_cmd = false;
	u16 words_read = 0;	/* words transferred so far */
	u16 i = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_aq");

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* Unaligned start: only read up to the sector end */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			/* Sector-aligned: a full sector or whatever remains */
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code != I40E_SUCCESS)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* NVM words are little-endian; convert the whole buffer to CPU order */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	/* Report the number of words actually transferred */
	*words = words_read;
	return ret_code;
}
504 
505 /**
506  * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
507  * @hw: pointer to the HW structure
508  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
509  * @words: (in) number of words to read; (out) number of words actually read
510  * @data: words read from the Shadow RAM
511  *
512  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
513  * method.
514  **/
__i40e_read_nvm_buffer(struct i40e_hw * hw,u16 offset,u16 * words,u16 * data)515 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
516 					     u16 offset,
517 					     u16 *words, u16 *data)
518 {
519 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
520 		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
521 
522 	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
523 }
524 
525 /**
526  * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
527  * @hw: pointer to the HW structure
528  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
529  * @words: (in) number of words to read; (out) number of words actually read
530  * @data: words read from the Shadow RAM
531  *
532  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
533  * method. The buffer read is preceded by the NVM ownership take
534  * and followed by the release.
535  **/
i40e_read_nvm_buffer(struct i40e_hw * hw,u16 offset,u16 * words,u16 * data)536 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
537 					   u16 *words, u16 *data)
538 {
539 	enum i40e_status_code ret_code = I40E_SUCCESS;
540 
541 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
542 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
543 		if (!ret_code) {
544 			ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
545 							 data);
546 			i40e_release_nvm(hw);
547 		}
548 	} else {
549 		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
550 	}
551 
552 	return ret_code;
553 }
554 
/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 * Returns I40E_ERR_NVM without issuing the command if the request exceeds
 * the SR size, exceeds one 4KB sector, or would cross a sector boundary.
 **/
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
					u32 offset, u16 words, void *data,
					bool last_command)
{
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_write_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	/* Capture the AQ write-back descriptor for the nvmupd state machine */
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
	else
		/* Offsets/lengths on the AQ are expressed in bytes */
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,  /*bytes*/
					      2 * words,   /*bytes*/
					      data, last_command, 0,
					      &cmd_details);

	return ret_code;
}
601 
/**
 * __i40e_write_nvm_word - Writes Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to write
 * @data: word to write to the Shadow RAM; converted to little-endian in place
 *
 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
 * NVM ownership have to be acquired and released (on ARQ completion event
 * reception) by caller. To commit SR to NVM update checksum function
 * should be called.
 **/
enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
					    void *data)
{
	DEBUGFUNC("i40e_write_nvm_word");

	/* The AQ transfers raw bytes and the SR is little-endian, so swap
	 * the caller's word in place before handing it down.
	 */
	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));

	/* Value 0x00 below means that we treat SR as a flat mem */
	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
}
623 
624 /**
625  * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
626  * @hw: pointer to the HW structure
627  * @module_pointer: module pointer location in words from the NVM beginning
628  * @offset: offset of the Shadow RAM buffer to write
629  * @words: number of words to write
630  * @data: words to write to the Shadow RAM
631  *
632  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
633  * NVM ownership must be acquired before calling this function and released
634  * on ARQ completion event reception by caller. To commit SR to NVM update
635  * checksum function should be called.
636  **/
__i40e_write_nvm_buffer(struct i40e_hw * hw,u8 module_pointer,u32 offset,u16 words,void * data)637 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
638 					      u8 module_pointer, u32 offset,
639 					      u16 words, void *data)
640 {
641 	__le16 *le_word_ptr = (__le16 *)data;
642 	u16 *word_ptr = (u16 *)data;
643 	u32 i = 0;
644 
645 	DEBUGFUNC("i40e_write_nvm_buffer");
646 
647 	for (i = 0; i < words; i++)
648 		le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
649 
650 	/* Here we will only write one buffer as the size of the modules
651 	 * mirrored in the Shadow RAM is always less than 4K.
652 	 */
653 	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
654 				 data, false);
655 }
656 
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB). The SR is read one 4KB sector at a time into a
 * scratch buffer, and the checksum word itself is excluded from the sum.
 **/
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	DEBUGFUNC("i40e_calc_nvm_checksum");

	/* Scratch buffer holding one SR sector at a time */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page (refill the scratch buffer at each sector start) */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code != I40E_SUCCESS) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
		     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
		     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		/* Accumulate this word (indexed within the current sector) */
		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Stored checksum = base constant minus the sum of all covered words */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
740 
741 /**
742  * i40e_update_nvm_checksum - Updates the NVM checksum
743  * @hw: pointer to hardware structure
744  *
745  * NVM ownership must be acquired before calling this function and released
746  * on ARQ completion event reception by caller.
747  * This function will commit SR to NVM.
748  **/
i40e_update_nvm_checksum(struct i40e_hw * hw)749 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
750 {
751 	enum i40e_status_code ret_code = I40E_SUCCESS;
752 	u16 checksum;
753 	__le16 le_sum;
754 
755 	DEBUGFUNC("i40e_update_nvm_checksum");
756 
757 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
758 	le_sum = CPU_TO_LE16(checksum);
759 	if (ret_code == I40E_SUCCESS)
760 		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
761 					     1, &le_sum, true);
762 
763 	return ret_code;
764 }
765 
/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need checksum, the value can be NULL.
 **/
enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
						 u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	DEBUGFUNC("i40e_validate_nvm_checksum");

	/* We must acquire the NVM lock in order to correctly synchronize the
	 * NVM accesses across multiple PFs. Without doing so it is possible
	 * for one of the PFs to read invalid data potentially indicating that
	 * the checksum is invalid.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	/* NOTE(review): return value intentionally ignored here — checksum_sr
	 * stays 0 on a failed read, which in practice fails the comparison
	 * below; confirm this best-effort behavior is intended.
	 */
	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
	i40e_release_nvm(hw);
	if (ret_code)
		return ret_code;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

	return ret_code;
}
809 
/* Forward declarations for the NVM update (nvmupd) state machine below.
 * These helpers implement the read/write/erase/exec-AQ sub-commands that
 * i40e_nvmupd_command() dispatches based on the current update state.
 */
STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
						   struct i40e_nvm_access *cmd,
						   int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
						   struct i40e_nvm_access *cmd,
						   u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
						  struct i40e_nvm_access *cmd,
						  u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
i40e_nvmupd_get_module(u32 val)840 STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
841 {
842 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
843 }
i40e_nvmupd_get_transaction(u32 val)844 STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
845 {
846 	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
847 }
848 
i40e_nvmupd_get_preservation_flags(u32 val)849 STATIC INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
850 {
851 	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
852 		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
853 }
854 
/* Human-readable names for the NVM update commands, indexed directly by
 * enum i40e_nvmupd_cmd for debug logging.
 * NOTE(review): entry order must stay in sync with the enum declaration
 * in the header — verify when adding commands.
 */
STATIC const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
	"I40E_NVMUPD_GET_FEATURES",
};
875 
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current.
 * STATUS and FEATURES requests are answered directly from @hw state;
 * everything else is routed to the per-state handler while holding the
 * ARQ spinlock.
 **/
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	enum i40e_status_code status;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_command");

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	/* an INVALID command only gets logged here; it still reaches the
	 * state machine below, where each state's default case rejects it
	 */
	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		/* byte 0: current state; bytes 2-3 (when there is room):
		 * the AQ opcode the state machine is waiting on
		 */
		bytes[0] = hw->nvmupd_state;

		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return I40E_SUCCESS;
	}

	/*
	 * A supported features request returns immediately
	 * rather than going into state machine
	 */
	if (upd_cmd == I40E_NVMUPD_FEATURES) {
		if (cmd->data_size < hw->nvmupd_features.size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		/*
		 * If buffer is bigger than i40e_nvmupd_features structure,
		 * make sure the trailing bytes are set to 0x0.
		 */
		if (cmd->data_size > hw->nvmupd_features.size)
			i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
				    cmd->data_size - hw->nvmupd_features.size,
				    I40E_NONDMA_MEM);

		i40e_memcpy(bytes, &hw->nvmupd_features,
			    hw->nvmupd_features.size, I40E_NONDMA_MEM);

		return I40E_SUCCESS;
	}

	/* Clear status even it is not read and log */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_clear_wait_state(hw);
			status = I40E_SUCCESS;
			break;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return status;
}
1017 
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 *
 * Commands that begin a multi-step sequence (READ_SNT, WRITE_SNT, ...)
 * keep NVM ownership and move the state machine forward; single-shot
 * commands (READ_SA, and failed acquisitions) release it before return.
 **/
STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_init");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* stand-alone read: acquire, read, release immediately */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* start of a read sequence: keep ownership on success */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* erase: release happens later, when the AQ completion
		 * for the erase opcode arrives
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* stand-alone write: wait for the update completion, then
		 * release ownership from the completion path
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* start of a write sequence: keep ownership and wait for
		 * the update completion before accepting the next chunk
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* stand-alone checksum update */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				/* fall back to -EIO when the AQ gave no
				 * specific error to translate
				 */
				*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
1158 
1159 /**
1160  * i40e_nvmupd_state_reading - Handle NVM update state Reading
1161  * @hw: pointer to hardware structure
1162  * @cmd: pointer to nvm update command buffer
1163  * @bytes: pointer to the data buffer
1164  * @perrno: pointer to return error code
1165  *
1166  * NVM ownership is already held.  Process legitimate commands and set any
1167  * change in state; reject all other commands.
1168  **/
i40e_nvmupd_state_reading(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1169 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1170 						    struct i40e_nvm_access *cmd,
1171 						    u8 *bytes, int *perrno)
1172 {
1173 	enum i40e_status_code status = I40E_SUCCESS;
1174 	enum i40e_nvmupd_cmd upd_cmd;
1175 
1176 	DEBUGFUNC("i40e_nvmupd_state_reading");
1177 
1178 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1179 
1180 	switch (upd_cmd) {
1181 	case I40E_NVMUPD_READ_SA:
1182 	case I40E_NVMUPD_READ_CON:
1183 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1184 		break;
1185 
1186 	case I40E_NVMUPD_READ_LCB:
1187 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1188 		i40e_release_nvm(hw);
1189 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1190 		break;
1191 
1192 	default:
1193 		i40e_debug(hw, I40E_DEBUG_NVM,
1194 			   "NVMUPD: bad cmd %s in reading state.\n",
1195 			   i40e_nvm_update_state_str[upd_cmd]);
1196 		status = I40E_NOT_SUPPORTED;
1197 		*perrno = -ESRCH;
1198 		break;
1199 	}
1200 	return status;
1201 }
1202 
/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held.  Process legitimate commands and set any
 * change in state; reject all other commands.
 *
 * On an EBUSY AQ error after the write semaphore has expired, the
 * semaphore is reacquired and the command retried exactly once.
 **/
STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	DEBUGFUNC("i40e_nvmupd_state_writing");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* mid-sequence chunk: on success wait for the update
		 * completion before accepting the next chunk
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* last chunk: release ownership from the completion path */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			/* fall back to -EIO when the AQ gave no specific
			 * error to translate
			 */
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore.  If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		enum i40e_status_code old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %" PRIu64 "), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				/* reacquire failed: restore the original
				 * failure so the caller sees the root cause
				 */
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}
1324 
1325 /**
1326  * i40e_nvmupd_clear_wait_state - clear wait state on hw
1327  * @hw: pointer to the hardware structure
1328  **/
i40e_nvmupd_clear_wait_state(struct i40e_hw * hw)1329 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1330 {
1331 	i40e_debug(hw, I40E_DEBUG_NVM,
1332 		   "NVMUPD: clearing wait on opcode 0x%04x\n",
1333 		   hw->nvm_wait_opcode);
1334 
1335 	if (hw->nvm_release_on_done) {
1336 		i40e_release_nvm(hw);
1337 		hw->nvm_release_on_done = false;
1338 	}
1339 	hw->nvm_wait_opcode = 0;
1340 
1341 	if (hw->aq.arq_last_status) {
1342 		hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1343 		return;
1344 	}
1345 
1346 	switch (hw->nvmupd_state) {
1347 	case I40E_NVMUPD_STATE_INIT_WAIT:
1348 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1349 		break;
1350 
1351 	case I40E_NVMUPD_STATE_WRITE_WAIT:
1352 		hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1353 		break;
1354 
1355 	default:
1356 		break;
1357 	}
1358 }
1359 
1360 /**
1361  * i40e_nvmupd_check_wait_event - handle NVM update operation events
1362  * @hw: pointer to the hardware structure
1363  * @opcode: the event that just happened
1364  * @desc: AdminQ descriptor
1365  **/
i40e_nvmupd_check_wait_event(struct i40e_hw * hw,u16 opcode,struct i40e_aq_desc * desc)1366 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1367 				  struct i40e_aq_desc *desc)
1368 {
1369 	u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1370 
1371 	if (opcode == hw->nvm_wait_opcode) {
1372 		i40e_memcpy(&hw->nvm_aq_event_desc, desc,
1373 			    aq_desc_len, I40E_NONDMA_TO_NONDMA);
1374 		i40e_nvmupd_clear_wait_state(hw);
1375 	}
1376 }
1377 
1378 /**
1379  * i40e_nvmupd_validate_command - Validate given command
1380  * @hw: pointer to hardware structure
1381  * @cmd: pointer to nvm update command buffer
1382  * @perrno: pointer to return error code
1383  *
1384  * Return one of the valid command types or I40E_NVMUPD_INVALID
1385  **/
i40e_nvmupd_validate_command(struct i40e_hw * hw,struct i40e_nvm_access * cmd,int * perrno)1386 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1387 						    struct i40e_nvm_access *cmd,
1388 						    int *perrno)
1389 {
1390 	enum i40e_nvmupd_cmd upd_cmd;
1391 	u8 module, transaction;
1392 
1393 	DEBUGFUNC("i40e_nvmupd_validate_command\n");
1394 
1395 	/* anything that doesn't match a recognized case is an error */
1396 	upd_cmd = I40E_NVMUPD_INVALID;
1397 
1398 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1399 	module = i40e_nvmupd_get_module(cmd->config);
1400 
1401 	/* limits on data size */
1402 	if ((cmd->data_size < 1) ||
1403 	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1404 		i40e_debug(hw, I40E_DEBUG_NVM,
1405 			   "i40e_nvmupd_validate_command data_size %d\n",
1406 			   cmd->data_size);
1407 		*perrno = -EFAULT;
1408 		return I40E_NVMUPD_INVALID;
1409 	}
1410 
1411 	switch (cmd->command) {
1412 	case I40E_NVM_READ:
1413 		switch (transaction) {
1414 		case I40E_NVM_CON:
1415 			upd_cmd = I40E_NVMUPD_READ_CON;
1416 			break;
1417 		case I40E_NVM_SNT:
1418 			upd_cmd = I40E_NVMUPD_READ_SNT;
1419 			break;
1420 		case I40E_NVM_LCB:
1421 			upd_cmd = I40E_NVMUPD_READ_LCB;
1422 			break;
1423 		case I40E_NVM_SA:
1424 			upd_cmd = I40E_NVMUPD_READ_SA;
1425 			break;
1426 		case I40E_NVM_EXEC:
1427 			switch (module) {
1428 			case I40E_NVM_EXEC_GET_AQ_RESULT:
1429 				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1430 				break;
1431 			case I40E_NVM_EXEC_FEATURES:
1432 				upd_cmd = I40E_NVMUPD_FEATURES;
1433 				break;
1434 			case I40E_NVM_EXEC_STATUS:
1435 				upd_cmd = I40E_NVMUPD_STATUS;
1436 				break;
1437 			default:
1438 				*perrno = -EFAULT;
1439 				return I40E_NVMUPD_INVALID;
1440 			}
1441 			break;
1442 		case I40E_NVM_AQE:
1443 			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1444 			break;
1445 		}
1446 		break;
1447 
1448 	case I40E_NVM_WRITE:
1449 		switch (transaction) {
1450 		case I40E_NVM_CON:
1451 			upd_cmd = I40E_NVMUPD_WRITE_CON;
1452 			break;
1453 		case I40E_NVM_SNT:
1454 			upd_cmd = I40E_NVMUPD_WRITE_SNT;
1455 			break;
1456 		case I40E_NVM_LCB:
1457 			upd_cmd = I40E_NVMUPD_WRITE_LCB;
1458 			break;
1459 		case I40E_NVM_SA:
1460 			upd_cmd = I40E_NVMUPD_WRITE_SA;
1461 			break;
1462 		case I40E_NVM_ERA:
1463 			upd_cmd = I40E_NVMUPD_WRITE_ERA;
1464 			break;
1465 		case I40E_NVM_CSUM:
1466 			upd_cmd = I40E_NVMUPD_CSUM_CON;
1467 			break;
1468 		case (I40E_NVM_CSUM|I40E_NVM_SA):
1469 			upd_cmd = I40E_NVMUPD_CSUM_SA;
1470 			break;
1471 		case (I40E_NVM_CSUM|I40E_NVM_LCB):
1472 			upd_cmd = I40E_NVMUPD_CSUM_LCB;
1473 			break;
1474 		case I40E_NVM_EXEC:
1475 			if (module == 0)
1476 				upd_cmd = I40E_NVMUPD_EXEC_AQ;
1477 			break;
1478 		}
1479 		break;
1480 	}
1481 
1482 	return upd_cmd;
1483 }
1484 
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer.  The first
 * sizeof(struct i40e_aq_desc) bytes of @bytes are the AQ descriptor to
 * send; any remaining bytes are the command's data payload.  A nonzero
 * cmd->offset is treated as the opcode of a follow-up event to wait for
 * (0xffff means "no-op").
 **/
STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	enum i40e_status_code status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	/* 0xffff is the "stop waiting" sentinel; nothing to execute */
	if (cmd->offset == 0xffff)
		return I40E_SUCCESS;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready.  The buffer must be
	 * large enough for both the payload we send and whatever length the
	 * descriptor declares.
	 */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
	if (buff_size) {
		if (!hw->nvm_buff.va) {
			/* lazily allocated once and reused across calls */
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
				I40E_NONDMA_TO_NONDMA);
		}
	}

	/* a follow-up event is expected; clear out the stale one */
	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
1569 
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer.  The result is
 * viewed as the write-back descriptor (hw->nvm_wb_desc) immediately
 * followed by its data buffer (hw->nvm_buff); cmd->offset/data_size
 * select a window into that combined region.
 **/
STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range; silently trim rather than fail */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* window starts inside the descriptor: copy the descriptor
		 * portion first, then fall through to the data buffer
		 */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* window starts inside the data buffer */
		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
	}

	return I40E_SUCCESS;
}
1638 
1639 /**
1640  * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1641  * @hw: pointer to hardware structure
1642  * @cmd: pointer to nvm update command buffer
1643  * @bytes: pointer to the data buffer
1644  * @perrno: pointer to return error code
1645  *
1646  * cmd structure contains identifiers and data buffer
1647  **/
i40e_nvmupd_get_aq_event(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1648 STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1649 						    struct i40e_nvm_access *cmd,
1650 						    u8 *bytes, int *perrno)
1651 {
1652 	u32 aq_total_len;
1653 	u32 aq_desc_len;
1654 
1655 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1656 
1657 	aq_desc_len = sizeof(struct i40e_aq_desc);
1658 	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
1659 
1660 	/* check copylength range */
1661 	if (cmd->data_size > aq_total_len) {
1662 		i40e_debug(hw, I40E_DEBUG_NVM,
1663 			   "%s: copy length %d too big, trimming to %d\n",
1664 			   __func__, cmd->data_size, aq_total_len);
1665 		cmd->data_size = aq_total_len;
1666 	}
1667 
1668 	i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
1669 		    I40E_NONDMA_TO_NONDMA);
1670 
1671 	return I40E_SUCCESS;
1672 }
1673 
1674 /**
1675  * i40e_nvmupd_nvm_read - Read NVM
1676  * @hw: pointer to hardware structure
1677  * @cmd: pointer to nvm update command buffer
1678  * @bytes: pointer to the data buffer
1679  * @perrno: pointer to return error code
1680  *
1681  * cmd structure contains identifiers and data buffer
1682  **/
i40e_nvmupd_nvm_read(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1683 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1684 						  struct i40e_nvm_access *cmd,
1685 						  u8 *bytes, int *perrno)
1686 {
1687 	struct i40e_asq_cmd_details cmd_details;
1688 	enum i40e_status_code status;
1689 	u8 module, transaction;
1690 	bool last;
1691 
1692 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1693 	module = i40e_nvmupd_get_module(cmd->config);
1694 	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1695 
1696 	memset(&cmd_details, 0, sizeof(cmd_details));
1697 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1698 
1699 	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1700 				  bytes, last, &cmd_details);
1701 	if (status) {
1702 		i40e_debug(hw, I40E_DEBUG_NVM,
1703 			   "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1704 			   module, cmd->offset, cmd->data_size);
1705 		i40e_debug(hw, I40E_DEBUG_NVM,
1706 			   "i40e_nvmupd_nvm_read status %d aq %d\n",
1707 			   status, hw->aq.asq_last_status);
1708 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1709 	}
1710 
1711 	return status;
1712 }
1713 
1714 /**
1715  * i40e_nvmupd_nvm_erase - Erase an NVM module
1716  * @hw: pointer to hardware structure
1717  * @cmd: pointer to nvm update command buffer
1718  * @perrno: pointer to return error code
1719  *
1720  * module, offset, data_size and data are in cmd structure
1721  **/
i40e_nvmupd_nvm_erase(struct i40e_hw * hw,struct i40e_nvm_access * cmd,int * perrno)1722 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1723 						   struct i40e_nvm_access *cmd,
1724 						   int *perrno)
1725 {
1726 	enum i40e_status_code status = I40E_SUCCESS;
1727 	struct i40e_asq_cmd_details cmd_details;
1728 	u8 module, transaction;
1729 	bool last;
1730 
1731 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1732 	module = i40e_nvmupd_get_module(cmd->config);
1733 	last = (transaction & I40E_NVM_LCB);
1734 
1735 	memset(&cmd_details, 0, sizeof(cmd_details));
1736 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1737 
1738 	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1739 				   last, &cmd_details);
1740 	if (status) {
1741 		i40e_debug(hw, I40E_DEBUG_NVM,
1742 			   "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1743 			   module, cmd->offset, cmd->data_size);
1744 		i40e_debug(hw, I40E_DEBUG_NVM,
1745 			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
1746 			   status, hw->aq.asq_last_status);
1747 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1748 	}
1749 
1750 	return status;
1751 }
1752 
1753 /**
1754  * i40e_nvmupd_nvm_write - Write NVM
1755  * @hw: pointer to hardware structure
1756  * @cmd: pointer to nvm update command buffer
1757  * @bytes: pointer to the data buffer
1758  * @perrno: pointer to return error code
1759  *
1760  * module, offset, data_size and data are in cmd structure
1761  **/
i40e_nvmupd_nvm_write(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1762 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1763 						   struct i40e_nvm_access *cmd,
1764 						   u8 *bytes, int *perrno)
1765 {
1766 	enum i40e_status_code status = I40E_SUCCESS;
1767 	struct i40e_asq_cmd_details cmd_details;
1768 	u8 module, transaction;
1769 	u8 preservation_flags;
1770 	bool last;
1771 
1772 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1773 	module = i40e_nvmupd_get_module(cmd->config);
1774 	last = (transaction & I40E_NVM_LCB);
1775 	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1776 
1777 	memset(&cmd_details, 0, sizeof(cmd_details));
1778 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1779 
1780 	status = i40e_aq_update_nvm(hw, module, cmd->offset,
1781 				    (u16)cmd->data_size, bytes, last,
1782 				    preservation_flags, &cmd_details);
1783 	if (status) {
1784 		i40e_debug(hw, I40E_DEBUG_NVM,
1785 			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1786 			   module, cmd->offset, cmd->data_size);
1787 		i40e_debug(hw, I40E_DEBUG_NVM,
1788 			   "i40e_nvmupd_nvm_write status %d aq %d\n",
1789 			   status, hw->aq.asq_last_status);
1790 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1791 	}
1792 
1793 	return status;
1794 }
1795