/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len = I40E_VF_ATQLEN1;
		hw->aq.asq.bal = I40E_VF_ATQBAL1;
		hw->aq.asq.bah = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len = I40E_VF_ARQLEN1;
		hw->aq.arq.bal = I40E_VF_ARQBAL1;
		hw->aq.arq.bah = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len = I40E_PF_ATQLEN;
		hw->aq.asq.bal = I40E_PF_ATQBAL;
		hw->aq.asq.bah = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len = I40E_PF_ARQLEN;
		hw->aq.arq.bal = I40E_PF_ARQBAL;
		hw->aq.arq.bah = I40E_PF_ARQBAH;
#endif
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_config_regs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);
	return ret_code;

init_config_regs:
	i40e_free_asq_bufs(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_config_regs;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);
	return ret_code;

init_config_regs:
	i40e_free_arq_bufs(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}
#ifdef PF_DRIVER

/**
 * i40e_resume_aq - resume AQ processing from 0
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */

/**
 * i40e_set_hw_flags - set HW flags
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_set_hw_flags(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;

	hw->flags = 0;

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
			/* The ability to RX (not drop) 802.1ad frames */
			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
		}
		break;
	case I40E_MAC_X722:
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
			hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;

		/* fall through */
	default:
		break;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 5))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 8)) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
		hw->flags |= I40E_HW_FLAG_DROP_MODE;
	}

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 9))
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *  - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;
	enum i40e_status_code ret_code;
	u16 oem_hi = 0, oem_lo = 0;
	u16 eetrack_hi = 0;
	u16 eetrack_lo = 0;
	u16 cfg_ptr = 0;
	int retry = 0;

	/* verify input for valid configuration */
	if (aq->num_arq_entries == 0 ||
	    aq->num_asq_entries == 0 ||
	    aq->arq_buf_size == 0 ||
	    aq->asq_buf_size == 0) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&aq->asq_spinlock);
	i40e_init_spinlock(&aq->arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&aq->fw_maj_ver,
							&aq->fw_min_ver,
							&aq->fw_build,
							&aq->api_maj_ver,
							&aq->api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/*
	 * Some features were introduced in different FW API version
	 * for different MAC type.
	 */
	i40e_set_hw_flags(hw);

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&aq->asq_spinlock);
	i40e_destroy_spinlock(&aq->arq_spinlock);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}
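
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * sources, kept out of the build): callers are expected to size both queues
 * and their buffers in hw->aq before invoking i40e_init_adminq(), and to tear
 * everything down with i40e_shutdown_adminq(). The queue depth and buffer
 * size below are placeholder example values only.
 */
#if 0
static enum i40e_status_code example_bring_up_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code status;

	hw->aq.num_asq_entries = 32;	/* example send queue depth */
	hw->aq.num_arq_entries = 32;	/* example receive queue depth */
	hw->aq.asq_buf_size = 4096;	/* example indirect buffer size */
	hw->aq.arq_buf_size = 4096;

	status = i40e_init_adminq(hw);
	if (status != I40E_SUCCESS)
		return status;

	/* ... issue AQ commands, poll the ARQ for events ... */

	return i40e_shutdown_adminq(hw);
}
#endif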

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * i40e_asq_send_command_exec - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
STATIC enum i40e_status_code
i40e_asq_send_command_exec(struct i40e_hw *hw,
			   struct i40e_aq_desc *desc,
			   void *buff, /* can be NULL */
			   u16 buff_size,
			   struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	u32 val = 0;

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie. The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
#ifdef PF_DRIVER
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
#else
		if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
#endif
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	return status;
}

/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * Acquires the lock and calls the main send command execution
 * routine.
 **/
enum i40e_status_code
i40e_asq_send_command(struct i40e_hw *hw,
		      struct i40e_aq_desc *desc,
		      void *buff, /* can be NULL */
		      u16 buff_size,
		      struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
	status = i40e_asq_send_command_exec(hw, desc, buff, buff_size,
					    cmd_details);
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 * i40e_asq_send_command_v2 - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 * @aq_status: pointer to Admin Queue status return value
 *
 * Acquires the lock and calls the main send command execution
 * routine. Returns the last Admin Queue status in aq_status
 * to avoid race conditions in access to hw->aq.asq_last_status.
 **/
enum i40e_status_code
i40e_asq_send_command_v2(struct i40e_hw *hw,
			 struct i40e_aq_desc *desc,
			 void *buff, /* can be NULL */
			 u16 buff_size,
			 struct i40e_asq_cmd_details *cmd_details,
			 enum i40e_admin_queue_err *aq_status)
{
	enum i40e_status_code status = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
	status = i40e_asq_send_command_exec(hw, desc, buff, buff_size,
					    cmd_details);
	if (aq_status)
		*aq_status = hw->aq.asq_last_status;
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
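
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * sources, kept out of the build): a typical direct (non-indirect) command is
 * built with i40e_fill_default_direct_cmd_desc() and submitted through
 * i40e_asq_send_command(). The opcode value 0x0001 below is a placeholder;
 * real callers pass an opcode from the admin queue opcode enumeration.
 */
#if 0
static enum i40e_status_code example_send_direct_cmd(struct i40e_hw *hw)
{
	struct i40e_aq_desc desc;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc, 0x0001 /* placeholder opcode */);

	/* direct command: no indirect buffer, so buff is NULL and size is 0 */
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
	if (status != I40E_SUCCESS)
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "example: AQ command failed, aq_err %d\n",
			   hw->aq.asq_last_status);

	return status;
}
#endif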

/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
#ifdef PF_DRIVER
	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
	ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
#endif /* INTEGRATED_VF */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
#endif /* PF_DRIVER */
clean_arq_element_out:
	/* Set pending if needed, unlock and return: count the descriptors
	 * between ntc and ntu, accounting for wrap-around, e.g. with
	 * count = 16, ntc = 14, ntu = 2: pending = 16 + (2 - 14) = 4
	 */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
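
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * sources, kept out of the build): events are typically drained by looping on
 * i40e_clean_arq_element() until it reports I40E_ERR_ADMIN_QUEUE_NO_WORK. The
 * caller-supplied buffer only needs to match the arq_buf_size used at init.
 */
#if 0
static void example_drain_arq(struct i40e_hw *hw, u8 *buf, u16 buf_len)
{
	struct i40e_arq_event_info event;
	enum i40e_status_code status;
	u16 pending;

	event.buf_len = buf_len;	/* e.g. the arq_buf_size used at init */
	event.msg_buf = buf;

	do {
		status = i40e_clean_arq_element(hw, &event, &pending);
		if (status == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		/* dispatch on LE16_TO_CPU(event.desc.opcode) here */
	} while (pending);
}
#endif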