/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (i40e_is_vf(hw)) {
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len = I40E_VF_ATQLEN1;
                hw->aq.asq.bal = I40E_VF_ATQBAL1;
                hw->aq.asq.bah = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len = I40E_VF_ARQLEN1;
                hw->aq.arq.bal = I40E_VF_ARQBAL1;
                hw->aq.arq.bah = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len = I40E_PF_ATQLEN;
                hw->aq.asq.bal = I40E_PF_ATQBAL;
                hw->aq.asq.bah = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len = I40E_PF_ARQLEN;
                hw->aq.arq.bal = I40E_PF_ARQBAL;
                hw->aq.arq.bah = I40E_PF_ARQBAH;
#endif
        }
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         i40e_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}
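
/* Illustrative sizing sketch, assuming a hypothetical 128-entry send queue
 * and the 32-byte admin queue descriptor; the DMA allocation above then
 * works out to:
 *
 *      hw->aq.num_asq_entries = 128;                   // hypothetical depth
 *      ring_bytes = 128 * sizeof(struct i40e_aq_desc); // 128 * 32 = 4096
 *      // the allocator aligns this to I40E_ADMINQ_DESC_ALIGNMENT
 */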

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         i40e_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}
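
/* Sketch of the length-register encoding used above, assuming (per the
 * *_ATQENABLE_MASK names) that the queue depth occupies the register's low
 * bits and the enable flag the masked high bit, e.g. for a hypothetical
 * 128-entry PF send queue:
 *
 *      u32 val = 128 | I40E_PF_ATQLEN_ATQENABLE_MASK;
 *      wr32(hw, hw->aq.asq.len, val);
 */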

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_asq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_config_regs;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);
        return ret_code;

init_config_regs:
        i40e_free_asq_bufs(hw);

init_adminq_exit:
        return ret_code;
}
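
/* Minimal usage sketch for i40e_init_asq(), with hypothetical sizes; the
 * fields below must be populated before the call, per the comment above:
 *
 *      hw->aq.num_asq_entries = 128;   // hypothetical ring depth
 *      hw->aq.asq_buf_size = 4096;     // hypothetical command buffer size
 *      if (i40e_init_asq(hw) != I40E_SUCCESS)
 *              ;                       // allocation or register config failed
 */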

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_arq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

shutdown_asq_out:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

shutdown_arq_out:
        i40e_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}
#ifdef PF_DRIVER

/**
 * i40e_resume_aq - resume AQ processing from 0
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */

/**
 * i40e_set_hw_flags - set HW flags
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_set_hw_flags(struct i40e_hw *hw)
{
        struct i40e_adminq_info *aq = &hw->aq;

        hw->flags = 0;

        switch (hw->mac.type) {
        case I40E_MAC_XL710:
                if (aq->api_maj_ver > 1 ||
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
                        hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
                        hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
                        /* The ability to RX (not drop) 802.1ad frames */
                        hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
                }
                break;
        case I40E_MAC_X722:
                hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
                             I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

                if (aq->api_maj_ver > 1 ||
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
                        hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

                if (aq->api_maj_ver > 1 ||
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
                        hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;

                if (aq->api_maj_ver > 1 ||
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
                        hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;

                /* fall through */
        default:
                break;
        }

        /* Newer versions of firmware require lock when reading the NVM */
        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= 5))
                hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= 8)) {
                hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
                hw->flags |= I40E_HW_FLAG_DROP_MODE;
        }

        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= 9))
                hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 * - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
        struct i40e_adminq_info *aq = &hw->aq;
        enum i40e_status_code ret_code;
        u16 cfg_ptr, oem_hi, oem_lo;
        u16 eetrack_lo, eetrack_hi;
        int retry = 0;

        /* verify input for valid configuration */
        if (aq->num_arq_entries == 0 ||
            aq->num_asq_entries == 0 ||
            aq->arq_buf_size == 0 ||
            aq->asq_buf_size == 0) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }
        i40e_init_spinlock(&aq->asq_spinlock);
        i40e_init_spinlock(&aq->arq_spinlock);

        /* Set up register offsets */
        i40e_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_asq;

        /* VF has no need of firmware */
        if (i40e_is_vf(hw))
                goto init_adminq_exit;

        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &aq->fw_maj_ver,
                                                        &aq->fw_min_ver,
                                                        &aq->fw_build,
                                                        &aq->api_maj_ver,
                                                        &aq->api_min_ver,
                                                        NULL);
                if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                        break;
                retry++;
                i40e_msec_delay(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;

        /*
         * Some features were introduced in different FW API version
         * for different MAC type.
         */
        i40e_set_hw_flags(hw);

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                           &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
        i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
        i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
                           &oem_hi);
        i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

        if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

        ret_code = I40E_SUCCESS;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_arq:
        i40e_shutdown_arq(hw);
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        i40e_destroy_spinlock(&aq->asq_spinlock);
        i40e_destroy_spinlock(&aq->arq_spinlock);

init_adminq_exit:
        return ret_code;
}
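
/* Minimal bring-up sketch with hypothetical sizes: all four hw->aq fields
 * must be populated before i40e_init_adminq(), and a successful init is
 * normally paired with i40e_shutdown_adminq() on teardown:
 *
 *      hw->aq.num_asq_entries = 128;
 *      hw->aq.num_arq_entries = 128;
 *      hw->aq.asq_buf_size = 4096;
 *      hw->aq.arq_buf_size = 4096;
 *      if (i40e_init_adminq(hw) == I40E_SUCCESS) {
 *              // ... issue AQ commands ...
 *              i40e_shutdown_adminq(hw);
 *      }
 */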

/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, true);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

        if (hw->nvm_buff.va)
                i40e_free_virt_mem(hw, &hw->nvm_buff);

        return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
                           "ntc %d head %d.\n", ntc,
                           rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                (I40E_ADMINQ_CALLBACK)details->callback;
                        i40e_memcpy(&desc_cb, desc,
                                    sizeof(struct i40e_aq_desc),
                                    I40E_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
                i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}
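
/* Sketch of the free-descriptor count returned above, assuming the usual
 * I40E_DESC_UNUSED() ring arithmetic in which one slot stays unused so a
 * full ring can be told apart from an empty one:
 *
 *      free = ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
 *
 * where ntu/ntc stand for the ring's next_to_use/next_to_clean indices.
 */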

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                            struct i40e_aq_desc *desc,
                                            void *buff, /* can be NULL */
                                            u16 buff_size,
                                            struct i40e_asq_cmd_details *cmd_details)
{
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16 retval = 0;
        u32 val = 0;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = I40E_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                i40e_memcpy(details,
                            cmd_details,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie. The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
                }
        } else {
                i40e_memset(details, 0,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                i40e_memcpy(dma_buff->va, buff, buff_size,
                            I40E_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                        CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                        CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;
                        i40e_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
                i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                            I40E_DMA_TO_NONDMA);
                if (buff != NULL)
                        i40e_memcpy(buff, dma_buff->va, buff_size,
                                    I40E_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = I40E_SUCCESS;
                else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
                        status = I40E_ERR_NOT_READY;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                i40e_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
#ifdef PF_DRIVER
                if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
#else
                if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
#endif
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}
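
/* Minimal caller sketch for a direct (buffer-less) command, assuming a
 * descriptor built with i40e_fill_default_direct_cmd_desc() below and an
 * opcode such as i40e_aqc_opc_queue_shutdown:
 *
 *      struct i40e_aq_desc desc;
 *
 *      i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *      if (i40e_asq_send_command(hw, &desc, NULL, 0, NULL) != I40E_SUCCESS)
 *              ;       // inspect hw->aq.asq_last_status for the AQ retval
 */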

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
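
/* Sketch of how a caller typically extends these defaults for an indirect
 * command (hedged; the exact flag policy is per-opcode): the BUF/RD flags
 * and, for payloads above I40E_AQ_LARGE_BUF, the LB flag are OR'd in after
 * this helper runs:
 *
 *      i40e_fill_default_direct_cmd_desc(&desc, opcode);
 *      desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
 *      if (buff_size > I40E_AQ_LARGE_BUF)
 *              desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
 */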

/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = I40E_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
        else
                ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
#ifdef PF_DRIVER
        ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
        ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
#endif /* INTEGRATED_VF */
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                    I40E_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                i40e_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, I40E_DMA_TO_NONDMA);

        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                    I40E_DMA_MEM);

        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
        i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode),
                                     &e->desc);
#endif /* PF_DRIVER */
clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        i40e_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}
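
/* Minimal event-drain sketch, assuming caller-provided storage for the
 * message payload (sizes and names hypothetical):
 *
 *      struct i40e_arq_event_info e;
 *      u16 pending = 0;
 *
 *      e.buf_len = 4096;               // hypothetical, matches arq_buf_size
 *      e.msg_buf = my_event_buf;       // hypothetical caller buffer
 *      do {
 *              if (i40e_clean_arq_element(hw, &e, &pending))
 *                      break;          // error or I40E_ERR_ADMIN_QUEUE_NO_WORK
 *              // consume e.desc and e.msg_buf here
 *      } while (pending);
 */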