1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
3 */
4
5 #include <inttypes.h>
6
7 #include "i40e_prototype.h"
8
9 /**
10 * i40e_init_nvm - Initialize NVM function pointers
11 * @hw: pointer to the HW structure
12 *
13 * Setup the function pointers and the NVM info structure. Should be called
14 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
15 * Please notice that the NVM term is used here (& in all methods covered
16 * in this file) as an equivalent of the FLASH part mapped into the SR.
17 * We are accessing FLASH always through the Shadow RAM.
18 **/
i40e_init_nvm(struct i40e_hw * hw)19 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
20 {
21 struct i40e_nvm_info *nvm = &hw->nvm;
22 enum i40e_status_code ret_code = I40E_SUCCESS;
23 u32 fla, gens;
24 u8 sr_size;
25
26 DEBUGFUNC("i40e_init_nvm");
27
28 /* The SR size is stored regardless of the nvm programming mode
29 * as the blank mode may be used in the factory line.
30 */
31 gens = rd32(hw, I40E_GLNVM_GENS);
32 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
33 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
34 /* Switching to words (sr_size contains power of 2KB) */
35 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
36
37 /* Check if we are in the normal or blank NVM programming mode */
38 fla = rd32(hw, I40E_GLNVM_FLA);
39 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
40 /* Max NVM timeout */
41 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
42 nvm->blank_nvm_mode = false;
43 } else { /* Blank programming mode */
44 nvm->blank_nvm_mode = true;
45 ret_code = I40E_ERR_NVM_BLANK_MODE;
46 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
47 }
48
49 return ret_code;
50 }
51
/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * Requests NVM ownership via the request-resource Admin Command. If the
 * resource is currently held elsewhere, polls every 10 ms until the
 * current owner's reservation expires or the maximum NVM timeout is
 * reached. On success, hw->nvm.hw_semaphore_timeout records (in GTIME
 * units) when our own reservation expires.
 **/
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
				       enum i40e_aq_resource_access_type access)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 gtime, timeout;
	u64 time_left = 0;

	DEBUGFUNC("i40e_acquire_nvm");

	/* No HW semaphore exists in blank programming mode */
	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout; time_left is the remaining reservation in ms */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%" PRIu64 " ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	/* time_left != 0 means another function holds the resource;
	 * wait for its reservation to run out and retry.
	 */
	if (ret_code && time_left) {
		/* Poll until the current NVM owner timeouts */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			i40e_msec_delay(10);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							I40E_NVM_RESOURCE_ID,
							access, 0, &time_left,
							NULL);
			if (ret_code == I40E_SUCCESS) {
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code != I40E_SUCCESS) {
			/* Give up: clear the stored reservation and log how
			 * long the caller should wait before trying again.
			 */
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %" PRIu64 " ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_i40e_acquire_nvm_exit:
	return ret_code;
}
112
113 /**
114 * i40e_release_nvm - Generic request for releasing the NVM ownership
115 * @hw: pointer to the HW structure
116 *
117 * This function will release NVM resource via the proper Admin Command.
118 **/
i40e_release_nvm(struct i40e_hw * hw)119 void i40e_release_nvm(struct i40e_hw *hw)
120 {
121 enum i40e_status_code ret_code = I40E_SUCCESS;
122 u32 total_delay = 0;
123
124 DEBUGFUNC("i40e_release_nvm");
125
126 if (hw->nvm.blank_nvm_mode)
127 return;
128
129 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
130
131 /* there are some rare cases when trying to release the resource
132 * results in an admin Q timeout, so handle them correctly
133 */
134 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
135 (total_delay < hw->aq.asq_cmd_timeout)) {
136 i40e_msec_delay(1);
137 ret_code = i40e_aq_release_resource(hw,
138 I40E_NVM_RESOURCE_ID, 0, NULL);
139 total_delay++;
140 }
141 }
142
143 /**
144 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
145 * @hw: pointer to the HW structure
146 *
147 * Polls the SRCTL Shadow RAM register done bit.
148 **/
i40e_poll_sr_srctl_done_bit(struct i40e_hw * hw)149 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
150 {
151 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
152 u32 srctl, wait_cnt;
153
154 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
155
156 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
157 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
158 srctl = rd32(hw, I40E_GLNVM_SRCTL);
159 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
160 ret_code = I40E_SUCCESS;
161 break;
162 }
163 i40e_usec_delay(5);
164 }
165 if (ret_code == I40E_ERR_TIMEOUT)
166 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
167 return ret_code;
168 }
169
170 /**
171 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
172 * @hw: pointer to the HW structure
173 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
174 * @data: word read from the Shadow RAM
175 *
176 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
177 **/
i40e_read_nvm_word_srctl(struct i40e_hw * hw,u16 offset,u16 * data)178 STATIC enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw,
179 u16 offset,
180 u16 *data)
181 {
182 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
183 u32 sr_reg;
184
185 DEBUGFUNC("i40e_read_nvm_word_srctl");
186
187 if (offset >= hw->nvm.sr_size) {
188 i40e_debug(hw, I40E_DEBUG_NVM,
189 "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
190 offset, hw->nvm.sr_size);
191 ret_code = I40E_ERR_PARAM;
192 goto read_nvm_exit;
193 }
194
195 /* Poll the done bit first */
196 ret_code = i40e_poll_sr_srctl_done_bit(hw);
197 if (ret_code == I40E_SUCCESS) {
198 /* Write the address and start reading */
199 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
200 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
201 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
202
203 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
204 ret_code = i40e_poll_sr_srctl_done_bit(hw);
205 if (ret_code == I40E_SUCCESS) {
206 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
207 *data = (u16)((sr_reg &
208 I40E_GLNVM_SRDATA_RDDATA_MASK)
209 >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
210 }
211 }
212 if (ret_code != I40E_SUCCESS)
213 i40e_debug(hw, I40E_DEBUG_NVM,
214 "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
215 offset);
216
217 read_nvm_exit:
218 return ret_code;
219 }
220
221 /**
222 * i40e_read_nvm_aq - Read Shadow RAM.
223 * @hw: pointer to the HW structure.
224 * @module_pointer: module pointer location in words from the NVM beginning
225 * @offset: offset in words from module start
226 * @words: number of words to write
227 * @data: buffer with words to write to the Shadow RAM
228 * @last_command: tells the AdminQ that this is the last command
229 *
230 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
231 **/
i40e_read_nvm_aq(struct i40e_hw * hw,u8 module_pointer,u32 offset,u16 words,void * data,bool last_command)232 STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
233 u8 module_pointer, u32 offset,
234 u16 words, void *data,
235 bool last_command)
236 {
237 enum i40e_status_code ret_code = I40E_ERR_NVM;
238 struct i40e_asq_cmd_details cmd_details;
239
240 DEBUGFUNC("i40e_read_nvm_aq");
241
242 memset(&cmd_details, 0, sizeof(cmd_details));
243 cmd_details.wb_desc = &hw->nvm_wb_desc;
244
245 /* Here we are checking the SR limit only for the flat memory model.
246 * We cannot do it for the module-based model, as we did not acquire
247 * the NVM resource yet (we cannot get the module pointer value).
248 * Firmware will check the module-based model.
249 */
250 if ((offset + words) > hw->nvm.sr_size)
251 i40e_debug(hw, I40E_DEBUG_NVM,
252 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
253 (offset + words), hw->nvm.sr_size);
254 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
255 /* We can write only up to 4KB (one sector), in one AQ write */
256 i40e_debug(hw, I40E_DEBUG_NVM,
257 "NVM write fail error: tried to write %d words, limit is %d.\n",
258 words, I40E_SR_SECTOR_SIZE_IN_WORDS);
259 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
260 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
261 /* A single write cannot spread over two sectors */
262 i40e_debug(hw, I40E_DEBUG_NVM,
263 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
264 offset, words);
265 else
266 ret_code = i40e_aq_read_nvm(hw, module_pointer,
267 2 * offset, /*bytes*/
268 2 * words, /*bytes*/
269 data, last_command, &cmd_details);
270
271 return ret_code;
272 }
273
274 /**
275 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
276 * @hw: pointer to the HW structure
277 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
278 * @data: word read from the Shadow RAM
279 *
280 * Reads one 16 bit word from the Shadow RAM using the AdminQ
281 **/
i40e_read_nvm_word_aq(struct i40e_hw * hw,u16 offset,u16 * data)282 STATIC enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
283 u16 *data)
284 {
285 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
286
287 DEBUGFUNC("i40e_read_nvm_word_aq");
288
289 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
290 *data = LE16_TO_CPU(*(__le16 *)data);
291
292 return ret_code;
293 }
294
295 /**
296 * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
297 * @hw: pointer to the HW structure
298 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
299 * @data: word read from the Shadow RAM
300 *
301 * Reads one 16 bit word from the Shadow RAM.
302 *
303 * Do not use this function except in cases where the nvm lock is already
304 * taken via i40e_acquire_nvm().
305 **/
__i40e_read_nvm_word(struct i40e_hw * hw,u16 offset,u16 * data)306 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
307 u16 offset,
308 u16 *data)
309 {
310
311 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
312 return i40e_read_nvm_word_aq(hw, offset, data);
313
314 return i40e_read_nvm_word_srctl(hw, offset, data);
315 }
316
317 /**
318 * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
319 * @hw: pointer to the HW structure
320 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
321 * @data: word read from the Shadow RAM
322 *
323 * Reads one 16 bit word from the Shadow RAM.
324 **/
i40e_read_nvm_word(struct i40e_hw * hw,u16 offset,u16 * data)325 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
326 u16 *data)
327 {
328 enum i40e_status_code ret_code = I40E_SUCCESS;
329
330 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
331 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
332
333 if (ret_code)
334 return ret_code;
335 ret_code = __i40e_read_nvm_word(hw, offset, data);
336
337 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
338 i40e_release_nvm(hw);
339 return ret_code;
340 }
341
/**
 * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
 * @hw: Pointer to the HW structure
 * @module_ptr: Pointer to module in words with respect to NVM beginning
 * @module_offset: Offset in words from module start
 * @data_offset: Offset in words from reading data area start
 * @words_data_size: Words to read from NVM
 * @data_ptr: Pointer to memory location where resulting buffer will be stored
 *
 * Resolves a two-level pointer chain in the Shadow RAM (the module pointer,
 * then a specific pointer stored inside the module) and reads
 * @words_data_size words from the resolved data area into @data_ptr.
 **/
enum i40e_status_code
i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
			  u16 data_offset, u16 words_data_size, u16 *data_ptr)
{
	enum i40e_status_code status;
	u16 specific_ptr = 0;
	u16 ptr_value = 0;
	u16 offset = 0;

	/* Look up the module location; module_ptr == 0 leaves ptr_value
	 * at 0 (module area starting at the beginning of the SR).
	 */
	if (module_ptr != 0) {
		status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
		if (status != I40E_SUCCESS) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm word failed.Error code: %d.\n",
				   status);
			return I40E_ERR_NVM;
		}
	}
#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
#define I40E_NVM_INVALID_VAL 0xFFFF

	/* Pointer not initialized */
	if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
	    ptr_value == I40E_NVM_INVALID_VAL) {
		i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
		return I40E_ERR_BAD_PTR;
	}

	/* Check whether the module is in SR mapped area or outside */
	if (ptr_value & I40E_PTR_TYPE) {
		/* Pointer points outside of the Shared RAM mapped area */
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");

		return I40E_ERR_PARAM;
	} else {
		/* Read from the Shadow RAM */

		/* Fetch the specific pointer stored at the requested
		 * offset inside the module.
		 */
		status = i40e_read_nvm_word(hw, ptr_value + module_offset,
					    &specific_ptr);
		if (status != I40E_SUCCESS) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm word failed.Error code: %d.\n",
				   status);
			return I40E_ERR_NVM;
		}

		/* Final word address of the data area to read */
		offset = ptr_value + module_offset + specific_ptr +
			data_offset;

		status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
					      data_ptr);
		if (status != I40E_SUCCESS) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm buffer failed.Error code: %d.\n",
				   status);
		}
	}

	return status;
}
412
413 /**
414 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
415 * @hw: pointer to the HW structure
416 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
417 * @words: (in) number of words to read; (out) number of words actually read
418 * @data: words read from the Shadow RAM
419 *
420 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
421 * method. The buffer read is preceded by the NVM ownership take
422 * and followed by the release.
423 **/
i40e_read_nvm_buffer_srctl(struct i40e_hw * hw,u16 offset,u16 * words,u16 * data)424 STATIC enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
425 u16 *words, u16 *data)
426 {
427 enum i40e_status_code ret_code = I40E_SUCCESS;
428 u16 index, word;
429
430 DEBUGFUNC("i40e_read_nvm_buffer_srctl");
431
432 /* Loop through the selected region */
433 for (word = 0; word < *words; word++) {
434 index = offset + word;
435 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
436 if (ret_code != I40E_SUCCESS)
437 break;
438 }
439
440 /* Update the number of words read from the Shadow RAM */
441 *words = word;
442
443 return ret_code;
444 }
445
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method, splitting the request so no single AQ read crosses a 4KB sector
 * boundary. NVM ownership must be held by the caller (see
 * i40e_read_nvm_buffer()).
 **/
STATIC enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
						     u16 *words, u16 *data)
{
	enum i40e_status_code ret_code;
	u16 read_size = *words;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_aq");

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* unaligned start: read only up to the sector end */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			/* sector aligned: read up to one full sector */
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code != I40E_SUCCESS)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* AQ delivers little-endian data; convert the buffer in place */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	/* report the number of words actually read */
	*words = words_read;
	return ret_code;
}
504
505 /**
506 * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
507 * @hw: pointer to the HW structure
508 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
509 * @words: (in) number of words to read; (out) number of words actually read
510 * @data: words read from the Shadow RAM
511 *
512 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
513 * method.
514 **/
__i40e_read_nvm_buffer(struct i40e_hw * hw,u16 offset,u16 * words,u16 * data)515 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
516 u16 offset,
517 u16 *words, u16 *data)
518 {
519 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
520 return i40e_read_nvm_buffer_aq(hw, offset, words, data);
521
522 return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
523 }
524
525 /**
526 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
527 * @hw: pointer to the HW structure
528 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
529 * @words: (in) number of words to read; (out) number of words actually read
530 * @data: words read from the Shadow RAM
531 *
532 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
533 * method. The buffer read is preceded by the NVM ownership take
534 * and followed by the release.
535 **/
i40e_read_nvm_buffer(struct i40e_hw * hw,u16 offset,u16 * words,u16 * data)536 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
537 u16 *words, u16 *data)
538 {
539 enum i40e_status_code ret_code = I40E_SUCCESS;
540
541 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
542 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
543 if (!ret_code) {
544 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
545 data);
546 i40e_release_nvm(hw);
547 }
548 } else {
549 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
550 }
551
552 return ret_code;
553 }
554
555 /**
556 * i40e_write_nvm_aq - Writes Shadow RAM.
557 * @hw: pointer to the HW structure.
558 * @module_pointer: module pointer location in words from the NVM beginning
559 * @offset: offset in words from module start
560 * @words: number of words to write
561 * @data: buffer with words to write to the Shadow RAM
562 * @last_command: tells the AdminQ that this is the last command
563 *
564 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
565 **/
i40e_write_nvm_aq(struct i40e_hw * hw,u8 module_pointer,u32 offset,u16 words,void * data,bool last_command)566 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
567 u32 offset, u16 words, void *data,
568 bool last_command)
569 {
570 enum i40e_status_code ret_code = I40E_ERR_NVM;
571 struct i40e_asq_cmd_details cmd_details;
572
573 DEBUGFUNC("i40e_write_nvm_aq");
574
575 memset(&cmd_details, 0, sizeof(cmd_details));
576 cmd_details.wb_desc = &hw->nvm_wb_desc;
577
578 /* Here we are checking the SR limit only for the flat memory model.
579 * We cannot do it for the module-based model, as we did not acquire
580 * the NVM resource yet (we cannot get the module pointer value).
581 * Firmware will check the module-based model.
582 */
583 if ((offset + words) > hw->nvm.sr_size)
584 DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
585 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
586 /* We can write only up to 4KB (one sector), in one AQ write */
587 DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
588 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
589 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
590 /* A single write cannot spread over two sectors */
591 DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
592 else
593 ret_code = i40e_aq_update_nvm(hw, module_pointer,
594 2 * offset, /*bytes*/
595 2 * words, /*bytes*/
596 data, last_command, 0,
597 &cmd_details);
598
599 return ret_code;
600 }
601
602 /**
603 * __i40e_write_nvm_word - Writes Shadow RAM word
604 * @hw: pointer to the HW structure
605 * @offset: offset of the Shadow RAM word to write
606 * @data: word to write to the Shadow RAM
607 *
608 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
609 * NVM ownership have to be acquired and released (on ARQ completion event
610 * reception) by caller. To commit SR to NVM update checksum function
611 * should be called.
612 **/
__i40e_write_nvm_word(struct i40e_hw * hw,u32 offset,void * data)613 enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
614 void *data)
615 {
616 DEBUGFUNC("i40e_write_nvm_word");
617
618 *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
619
620 /* Value 0x00 below means that we treat SR as a flat mem */
621 return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
622 }
623
624 /**
625 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
626 * @hw: pointer to the HW structure
627 * @module_pointer: module pointer location in words from the NVM beginning
628 * @offset: offset of the Shadow RAM buffer to write
629 * @words: number of words to write
630 * @data: words to write to the Shadow RAM
631 *
632 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
633 * NVM ownership must be acquired before calling this function and released
634 * on ARQ completion event reception by caller. To commit SR to NVM update
635 * checksum function should be called.
636 **/
__i40e_write_nvm_buffer(struct i40e_hw * hw,u8 module_pointer,u32 offset,u16 words,void * data)637 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
638 u8 module_pointer, u32 offset,
639 u16 words, void *data)
640 {
641 __le16 *le_word_ptr = (__le16 *)data;
642 u16 *word_ptr = (u16 *)data;
643 u32 i = 0;
644
645 DEBUGFUNC("i40e_write_nvm_buffer");
646
647 for (i = 0; i < words; i++)
648 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
649
650 /* Here we will only write one buffer as the size of the modules
651 * mirrored in the Shadow RAM is always less than 4K.
652 */
653 return i40e_write_nvm_aq(hw, module_pointer, offset, words,
654 data, false);
655 }
656
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB). The checksum word itself is also excluded from
 * the sum. Returns I40E_ERR_NVM_CHECKSUM on any SR read failure.
 **/
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	DEBUGFUNC("i40e_calc_nvm_checksum");

	/* One sector worth of words, reused as a sliding read window */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page (refill the window at each sector boundary) */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code != I40E_SUCCESS) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* The checksum is chosen so all covered words plus the checksum
	 * word sum to I40E_SR_SW_CHECKSUM_BASE.
	 */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
740
741 /**
742 * i40e_update_nvm_checksum - Updates the NVM checksum
743 * @hw: pointer to hardware structure
744 *
745 * NVM ownership must be acquired before calling this function and released
746 * on ARQ completion event reception by caller.
747 * This function will commit SR to NVM.
748 **/
i40e_update_nvm_checksum(struct i40e_hw * hw)749 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
750 {
751 enum i40e_status_code ret_code = I40E_SUCCESS;
752 u16 checksum;
753 __le16 le_sum;
754
755 DEBUGFUNC("i40e_update_nvm_checksum");
756
757 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
758 if (ret_code == I40E_SUCCESS) {
759 le_sum = CPU_TO_LE16(checksum);
760 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
761 1, &le_sum, true);
762 }
763
764 return ret_code;
765 }
766
767 /**
768 * i40e_validate_nvm_checksum - Validate EEPROM checksum
769 * @hw: pointer to hardware structure
770 * @checksum: calculated checksum
771 *
772 * Performs checksum calculation and validates the NVM SW checksum. If the
773 * caller does not need checksum, the value can be NULL.
774 **/
i40e_validate_nvm_checksum(struct i40e_hw * hw,u16 * checksum)775 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
776 u16 *checksum)
777 {
778 enum i40e_status_code ret_code = I40E_SUCCESS;
779 u16 checksum_sr = 0;
780 u16 checksum_local = 0;
781
782 DEBUGFUNC("i40e_validate_nvm_checksum");
783
784 /* We must acquire the NVM lock in order to correctly synchronize the
785 * NVM accesses across multiple PFs. Without doing so it is possible
786 * for one of the PFs to read invalid data potentially indicating that
787 * the checksum is invalid.
788 */
789 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
790 if (ret_code)
791 return ret_code;
792 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
793 __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
794 i40e_release_nvm(hw);
795 if (ret_code)
796 return ret_code;
797
798 /* Verify read checksum from EEPROM is the same as
799 * calculated checksum
800 */
801 if (checksum_local != checksum_sr)
802 ret_code = I40E_ERR_NVM_CHECKSUM;
803
804 /* If the user cares, return the calculated checksum */
805 if (checksum)
806 *checksum = checksum_local;
807
808 return ret_code;
809 }
810
811 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
812 struct i40e_nvm_access *cmd,
813 u8 *bytes, int *perrno);
814 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
815 struct i40e_nvm_access *cmd,
816 u8 *bytes, int *perrno);
817 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
818 struct i40e_nvm_access *cmd,
819 u8 *bytes, int *perrno);
820 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
821 struct i40e_nvm_access *cmd,
822 int *perrno);
823 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
824 struct i40e_nvm_access *cmd,
825 int *perrno);
826 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
827 struct i40e_nvm_access *cmd,
828 u8 *bytes, int *perrno);
829 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
830 struct i40e_nvm_access *cmd,
831 u8 *bytes, int *perrno);
832 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
833 struct i40e_nvm_access *cmd,
834 u8 *bytes, int *perrno);
835 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
836 struct i40e_nvm_access *cmd,
837 u8 *bytes, int *perrno);
838 STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
839 struct i40e_nvm_access *cmd,
840 u8 *bytes, int *perrno);
i40e_nvmupd_get_module(u32 val)841 STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
842 {
843 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
844 }
i40e_nvmupd_get_transaction(u32 val)845 STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
846 {
847 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
848 }
849
i40e_nvmupd_get_preservation_flags(u32 val)850 STATIC INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
851 {
852 return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
853 I40E_NVM_PRESERVATION_FLAGS_SHIFT);
854 }
855
/* Human-readable NVM update command names, indexed by enum i40e_nvmupd_cmd;
 * used for debug logging in i40e_nvmupd_command(). Keep the order in sync
 * with the enum.
 */
STATIC const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
	"I40E_NVMUPD_GET_FEATURES",
};
876
877 /**
878 * i40e_nvmupd_command - Process an NVM update command
879 * @hw: pointer to hardware structure
880 * @cmd: pointer to nvm update command
881 * @bytes: pointer to the data buffer
882 * @perrno: pointer to return error code
883 *
884 * Dispatches command depending on what update state is current
885 **/
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	enum i40e_status_code status;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_command");

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	/* NOTE(review): the format string says "nvm_release_on_hold" but the
	 * argument printed is hw->nvm_release_on_done — cosmetic mismatch in
	 * the debug text only.
	 */
	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
		/* Intentionally no return here: an INVALID command falls
		 * through and is rejected by the per-state handlers' default
		 * cases below.
		 */
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		/* byte 0: current state machine state */
		bytes[0] = hw->nvmupd_state;

		/* bytes 2-3: the AQ opcode currently being waited on, if the
		 * caller's buffer is large enough to hold it
		 */
		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return I40E_SUCCESS;
	}

	/*
	 * A supported features request returns immediately
	 * rather than going into state machine
	 */
	if (upd_cmd == I40E_NVMUPD_FEATURES) {
		if (cmd->data_size < hw->nvmupd_features.size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		/*
		 * If buffer is bigger than i40e_nvmupd_features structure,
		 * make sure the trailing bytes are set to 0x0.
		 */
		if (cmd->data_size > hw->nvmupd_features.size)
			i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
				    cmd->data_size - hw->nvmupd_features.size,
				    I40E_NONDMA_MEM);

		i40e_memcpy(bytes, &hw->nvmupd_features,
			    hw->nvmupd_features.size, I40E_NONDMA_MEM);

		return I40E_SUCCESS;
	}

	/* Clear status even it is not read and log */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_clear_wait_state(hw);
			status = I40E_SUCCESS;
			break;
		}

		/* still waiting on an AQ completion event */
		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return status;
}
1018
1019 /**
1020 * i40e_nvmupd_state_init - Handle NVM update state Init
1021 * @hw: pointer to hardware structure
1022 * @cmd: pointer to nvm update command buffer
1023 * @bytes: pointer to the data buffer
1024 * @perrno: pointer to return error code
1025 *
1026 * Process legitimate commands of the Init state and conditionally set next
1027 * state. Reject all other commands.
1028 **/
STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_init");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* stand-alone read: acquire, read, release in one shot */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* start of a multi-part read: keep the NVM resource held and
		 * move to the READING state on success
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* erase: hold the resource until the AQ completion event;
		 * nvm_release_on_done tells the event handler to release
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* stand-alone write: release happens when the completion
		 * event arrives (nvm_release_on_done)
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* start of a multi-part write: keep the resource held (no
		 * release_on_done) and wait for the completion event before
		 * entering WRITE_WAIT -> WRITING
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* stand-alone checksum update; a failure with no AQ error
		 * code is mapped to -EIO
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
1159
1160 /**
1161 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1162 * @hw: pointer to hardware structure
1163 * @cmd: pointer to nvm update command buffer
1164 * @bytes: pointer to the data buffer
1165 * @perrno: pointer to return error code
1166 *
1167 * NVM ownership is already held. Process legitimate commands and set any
1168 * change in state; reject all other commands.
1169 **/
i40e_nvmupd_state_reading(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1170 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1171 struct i40e_nvm_access *cmd,
1172 u8 *bytes, int *perrno)
1173 {
1174 enum i40e_status_code status = I40E_SUCCESS;
1175 enum i40e_nvmupd_cmd upd_cmd;
1176
1177 DEBUGFUNC("i40e_nvmupd_state_reading");
1178
1179 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1180
1181 switch (upd_cmd) {
1182 case I40E_NVMUPD_READ_SA:
1183 case I40E_NVMUPD_READ_CON:
1184 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1185 break;
1186
1187 case I40E_NVMUPD_READ_LCB:
1188 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1189 i40e_release_nvm(hw);
1190 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1191 break;
1192
1193 default:
1194 i40e_debug(hw, I40E_DEBUG_NVM,
1195 "NVMUPD: bad cmd %s in reading state.\n",
1196 i40e_nvm_update_state_str[upd_cmd]);
1197 status = I40E_NOT_SUPPORTED;
1198 *perrno = -ESRCH;
1199 break;
1200 }
1201 return status;
1202 }
1203
1204 /**
1205 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1206 * @hw: pointer to hardware structure
1207 * @cmd: pointer to nvm update command buffer
1208 * @bytes: pointer to the data buffer
1209 * @perrno: pointer to return error code
1210 *
1211 * NVM ownership is already held. Process legitimate commands and set any
1212 * change in state; reject all other commands
1213 **/
STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						       struct i40e_nvm_access *cmd,
						       u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	DEBUGFUNC("i40e_nvmupd_state_writing");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* continue the multi-part write; wait for the AQ completion
		 * event before accepting the next write
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* last write of the sequence: on success the NVM resource is
		 * released by the completion handler (nvm_release_on_done);
		 * on failure fall straight back to Init
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		enum i40e_status_code old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %" PRIu64 "), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				/* reacquire failed: restore the original
				 * error so the caller sees the real cause
				 */
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}
1325
1326 /**
1327 * i40e_nvmupd_clear_wait_state - clear wait state on hw
1328 * @hw: pointer to the hardware structure
1329 **/
i40e_nvmupd_clear_wait_state(struct i40e_hw * hw)1330 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1331 {
1332 i40e_debug(hw, I40E_DEBUG_NVM,
1333 "NVMUPD: clearing wait on opcode 0x%04x\n",
1334 hw->nvm_wait_opcode);
1335
1336 if (hw->nvm_release_on_done) {
1337 i40e_release_nvm(hw);
1338 hw->nvm_release_on_done = false;
1339 }
1340 hw->nvm_wait_opcode = 0;
1341
1342 if (hw->aq.arq_last_status) {
1343 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1344 return;
1345 }
1346
1347 switch (hw->nvmupd_state) {
1348 case I40E_NVMUPD_STATE_INIT_WAIT:
1349 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1350 break;
1351
1352 case I40E_NVMUPD_STATE_WRITE_WAIT:
1353 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1354 break;
1355
1356 default:
1357 break;
1358 }
1359 }
1360
1361 /**
1362 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1363 * @hw: pointer to the hardware structure
1364 * @opcode: the event that just happened
1365 * @desc: AdminQ descriptor
1366 **/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
				  struct i40e_aq_desc *desc)
{
	/* ignore events we are not waiting for */
	if (opcode != hw->nvm_wait_opcode)
		return;

	/* stash the event descriptor for later retrieval, then unblock
	 * the state machine
	 */
	i40e_memcpy(&hw->nvm_aq_event_desc, desc,
		    sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_NONDMA);
	i40e_nvmupd_clear_wait_state(hw);
}
1378
1379 /**
1380 * i40e_nvmupd_validate_command - Validate given command
1381 * @hw: pointer to hardware structure
1382 * @cmd: pointer to nvm update command buffer
1383 * @perrno: pointer to return error code
1384 *
1385 * Return one of the valid command types or I40E_NVMUPD_INVALID
1386 **/
i40e_nvmupd_validate_command(struct i40e_hw * hw,struct i40e_nvm_access * cmd,int * perrno)1387 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1388 struct i40e_nvm_access *cmd,
1389 int *perrno)
1390 {
1391 enum i40e_nvmupd_cmd upd_cmd;
1392 u8 module, transaction;
1393
1394 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1395
1396 /* anything that doesn't match a recognized case is an error */
1397 upd_cmd = I40E_NVMUPD_INVALID;
1398
1399 transaction = i40e_nvmupd_get_transaction(cmd->config);
1400 module = i40e_nvmupd_get_module(cmd->config);
1401
1402 /* limits on data size */
1403 if ((cmd->data_size < 1) ||
1404 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1405 i40e_debug(hw, I40E_DEBUG_NVM,
1406 "i40e_nvmupd_validate_command data_size %d\n",
1407 cmd->data_size);
1408 *perrno = -EFAULT;
1409 return I40E_NVMUPD_INVALID;
1410 }
1411
1412 switch (cmd->command) {
1413 case I40E_NVM_READ:
1414 switch (transaction) {
1415 case I40E_NVM_CON:
1416 upd_cmd = I40E_NVMUPD_READ_CON;
1417 break;
1418 case I40E_NVM_SNT:
1419 upd_cmd = I40E_NVMUPD_READ_SNT;
1420 break;
1421 case I40E_NVM_LCB:
1422 upd_cmd = I40E_NVMUPD_READ_LCB;
1423 break;
1424 case I40E_NVM_SA:
1425 upd_cmd = I40E_NVMUPD_READ_SA;
1426 break;
1427 case I40E_NVM_EXEC:
1428 switch (module) {
1429 case I40E_NVM_EXEC_GET_AQ_RESULT:
1430 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1431 break;
1432 case I40E_NVM_EXEC_FEATURES:
1433 upd_cmd = I40E_NVMUPD_FEATURES;
1434 break;
1435 case I40E_NVM_EXEC_STATUS:
1436 upd_cmd = I40E_NVMUPD_STATUS;
1437 break;
1438 default:
1439 *perrno = -EFAULT;
1440 return I40E_NVMUPD_INVALID;
1441 }
1442 break;
1443 case I40E_NVM_AQE:
1444 upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1445 break;
1446 }
1447 break;
1448
1449 case I40E_NVM_WRITE:
1450 switch (transaction) {
1451 case I40E_NVM_CON:
1452 upd_cmd = I40E_NVMUPD_WRITE_CON;
1453 break;
1454 case I40E_NVM_SNT:
1455 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1456 break;
1457 case I40E_NVM_LCB:
1458 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1459 break;
1460 case I40E_NVM_SA:
1461 upd_cmd = I40E_NVMUPD_WRITE_SA;
1462 break;
1463 case I40E_NVM_ERA:
1464 upd_cmd = I40E_NVMUPD_WRITE_ERA;
1465 break;
1466 case I40E_NVM_CSUM:
1467 upd_cmd = I40E_NVMUPD_CSUM_CON;
1468 break;
1469 case (I40E_NVM_CSUM|I40E_NVM_SA):
1470 upd_cmd = I40E_NVMUPD_CSUM_SA;
1471 break;
1472 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1473 upd_cmd = I40E_NVMUPD_CSUM_LCB;
1474 break;
1475 case I40E_NVM_EXEC:
1476 if (module == 0)
1477 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1478 break;
1479 }
1480 break;
1481 }
1482
1483 return upd_cmd;
1484 }
1485
1486 /**
1487 * i40e_nvmupd_exec_aq - Run an AQ command
1488 * @hw: pointer to hardware structure
1489 * @cmd: pointer to nvm update command buffer
1490 * @bytes: pointer to the data buffer
1491 * @perrno: pointer to return error code
1492 *
1493 * cmd structure contains identifiers and data buffer
1494 **/
STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	enum i40e_status_code status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	/* offset 0xffff is the same sentinel used by i40e_nvmupd_command to
	 * clear the wait state; treat it as a no-op here
	 */
	if (cmd->offset == 0xffff)
		return I40E_SUCCESS;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor: the caller's buffer carries the raw AQ
	 * descriptor first, followed by any command data
	 */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready.
	 * NOTE(review): buff_size takes the larger of the caller-supplied
	 * data and the descriptor's datalen, but the buffer is allocated
	 * with hw->aq.asq_buf_size — verify upstream that datalen cannot
	 * exceed that allocation.
	 */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
	if (buff_size) {
		if (!hw->nvm_buff.va) {
			/* lazily allocate the scratch buffer; reused across
			 * calls and read back by i40e_nvmupd_get_aq_result
			 */
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
				    I40E_NONDMA_TO_NONDMA);
		}
	}

	/* a nonzero offset means the caller will wait for a followup event;
	 * clear the stored event descriptor before sending
	 */
	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? cmd->offset doubles as the
	 * AQ opcode of the event to wait for
	 */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
1570
1571 /**
1572 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1573 * @hw: pointer to hardware structure
1574 * @cmd: pointer to nvm update command buffer
1575 * @bytes: pointer to the data buffer
1576 * @perrno: pointer to return error code
1577 *
1578 * cmd structure contains identifiers and data buffer
1579 **/
STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						       struct i40e_nvm_access *cmd,
						       u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	/* the result is the writeback descriptor followed by its data */
	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range; silently trim rather than fail */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	/* the requested window may start inside the descriptor, inside the
	 * data buffer, or straddle the boundary between them
	 */
	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* copy the descriptor portion first */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* window starts entirely within the data buffer */
		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		/* copy (the rest of) the window from the data buffer */
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
	}

	return I40E_SUCCESS;
}
1639
1640 /**
1641 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1642 * @hw: pointer to hardware structure
1643 * @cmd: pointer to nvm update command buffer
1644 * @bytes: pointer to the data buffer
1645 * @perrno: pointer to return error code
1646 *
1647 * cmd structure contains identifiers and data buffer
1648 **/
i40e_nvmupd_get_aq_event(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1649 STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1650 struct i40e_nvm_access *cmd,
1651 u8 *bytes, int *perrno)
1652 {
1653 u32 aq_total_len;
1654 u32 aq_desc_len;
1655
1656 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1657
1658 aq_desc_len = sizeof(struct i40e_aq_desc);
1659 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
1660
1661 /* check copylength range */
1662 if (cmd->data_size > aq_total_len) {
1663 i40e_debug(hw, I40E_DEBUG_NVM,
1664 "%s: copy length %d too big, trimming to %d\n",
1665 __func__, cmd->data_size, aq_total_len);
1666 cmd->data_size = aq_total_len;
1667 }
1668
1669 i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
1670 I40E_NONDMA_TO_NONDMA);
1671
1672 return I40E_SUCCESS;
1673 }
1674
1675 /**
1676 * i40e_nvmupd_nvm_read - Read NVM
1677 * @hw: pointer to hardware structure
1678 * @cmd: pointer to nvm update command buffer
1679 * @bytes: pointer to the data buffer
1680 * @perrno: pointer to return error code
1681 *
1682 * cmd structure contains identifiers and data buffer
1683 **/
i40e_nvmupd_nvm_read(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1684 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1685 struct i40e_nvm_access *cmd,
1686 u8 *bytes, int *perrno)
1687 {
1688 struct i40e_asq_cmd_details cmd_details;
1689 enum i40e_status_code status;
1690 u8 module, transaction;
1691 bool last;
1692
1693 transaction = i40e_nvmupd_get_transaction(cmd->config);
1694 module = i40e_nvmupd_get_module(cmd->config);
1695 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1696
1697 memset(&cmd_details, 0, sizeof(cmd_details));
1698 cmd_details.wb_desc = &hw->nvm_wb_desc;
1699
1700 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1701 bytes, last, &cmd_details);
1702 if (status) {
1703 i40e_debug(hw, I40E_DEBUG_NVM,
1704 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1705 module, cmd->offset, cmd->data_size);
1706 i40e_debug(hw, I40E_DEBUG_NVM,
1707 "i40e_nvmupd_nvm_read status %d aq %d\n",
1708 status, hw->aq.asq_last_status);
1709 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1710 }
1711
1712 return status;
1713 }
1714
1715 /**
1716 * i40e_nvmupd_nvm_erase - Erase an NVM module
1717 * @hw: pointer to hardware structure
1718 * @cmd: pointer to nvm update command buffer
1719 * @perrno: pointer to return error code
1720 *
1721 * module, offset, data_size and data are in cmd structure
1722 **/
i40e_nvmupd_nvm_erase(struct i40e_hw * hw,struct i40e_nvm_access * cmd,int * perrno)1723 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1724 struct i40e_nvm_access *cmd,
1725 int *perrno)
1726 {
1727 enum i40e_status_code status = I40E_SUCCESS;
1728 struct i40e_asq_cmd_details cmd_details;
1729 u8 module, transaction;
1730 bool last;
1731
1732 transaction = i40e_nvmupd_get_transaction(cmd->config);
1733 module = i40e_nvmupd_get_module(cmd->config);
1734 last = (transaction & I40E_NVM_LCB);
1735
1736 memset(&cmd_details, 0, sizeof(cmd_details));
1737 cmd_details.wb_desc = &hw->nvm_wb_desc;
1738
1739 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1740 last, &cmd_details);
1741 if (status) {
1742 i40e_debug(hw, I40E_DEBUG_NVM,
1743 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1744 module, cmd->offset, cmd->data_size);
1745 i40e_debug(hw, I40E_DEBUG_NVM,
1746 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1747 status, hw->aq.asq_last_status);
1748 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1749 }
1750
1751 return status;
1752 }
1753
1754 /**
1755 * i40e_nvmupd_nvm_write - Write NVM
1756 * @hw: pointer to hardware structure
1757 * @cmd: pointer to nvm update command buffer
1758 * @bytes: pointer to the data buffer
1759 * @perrno: pointer to return error code
1760 *
1761 * module, offset, data_size and data are in cmd structure
1762 **/
i40e_nvmupd_nvm_write(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1763 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1764 struct i40e_nvm_access *cmd,
1765 u8 *bytes, int *perrno)
1766 {
1767 enum i40e_status_code status = I40E_SUCCESS;
1768 struct i40e_asq_cmd_details cmd_details;
1769 u8 module, transaction;
1770 u8 preservation_flags;
1771 bool last;
1772
1773 transaction = i40e_nvmupd_get_transaction(cmd->config);
1774 module = i40e_nvmupd_get_module(cmd->config);
1775 last = (transaction & I40E_NVM_LCB);
1776 preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1777
1778 memset(&cmd_details, 0, sizeof(cmd_details));
1779 cmd_details.wb_desc = &hw->nvm_wb_desc;
1780
1781 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1782 (u16)cmd->data_size, bytes, last,
1783 preservation_flags, &cmd_details);
1784 if (status) {
1785 i40e_debug(hw, I40E_DEBUG_NVM,
1786 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1787 module, cmd->offset, cmd->data_size);
1788 i40e_debug(hw, I40E_DEBUG_NVM,
1789 "i40e_nvmupd_nvm_write status %d aq %d\n",
1790 status, hw->aq.asq_last_status);
1791 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1792 }
1793
1794 return status;
1795 }
1796