/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <rte_ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)

#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)

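/* Return the number of free descriptor slots in the ring. One slot is kept
 * empty so that a full ring can be distinguished from an empty one.
 */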
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

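/* Check that the head pointer reported by hardware lies within the range of
 * descriptors currently owned by hardware, i.e. between next_to_clean and
 * next_to_use, taking ring wrap-around into account.
 */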
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Specific memory alloc for command function.
 * Reserve a memzone, which is a contiguous portion of physical memory
 * identified by a name.
 * @hw: pointer to the hardware structure
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
		 mz->name, ring->desc_dma_addr);

	return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
		 ((const struct rte_memzone *)ring->zone)->name,
		 ring->desc_dma_addr);
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc)
		hns3_free_dma_mem(hw, ring);
}

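/* Allocate the descriptor memory for one ring (CSQ or CRQ) of the command
 * queue pair and record its type and owning hw structure.
 */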
static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}

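/* Reset the flag field of a completed descriptor so that it can be submitted
 * again, marking it as a read (query) or write command.
 */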
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

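/* Zero a descriptor and fill in its opcode and flags; read (query) commands
 * additionally carry the WR flag.
 */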
void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

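/* Zero all CSQ (TX) and CRQ (RX) base address, depth, head and tail
 * registers of the command queue.
 */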
static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

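/* Program the DMA base address, ring depth and head/tail pointers of a ring
 * into the TX (CSQ) or RX (CRQ) command queue registers.
 */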
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}

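/* Reclaim CSQ descriptors that the firmware has already processed. Returns
 * the number of descriptors cleaned, or -EIO if the head pointer reported by
 * hardware is out of range, in which case commands are disabled and a delayed
 * reset is scheduled on the primary process.
 */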
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
	if (!is_valid_csq_clean_head(csq, head)) {
		hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
			 csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_atomic16_set(&hw->reset.disable_cmd, 1);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

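/* The CSQ is done when the hardware head pointer has caught up with the
 * software next_to_use pointer.
 */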
static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands have several descriptors,
	 * and use the first one to save opcode and return value.
	 */
	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
				  HNS3_OPC_STATS_32_BIT,
				  HNS3_OPC_STATS_MAC,
				  HNS3_OPC_STATS_MAC_ALL,
				  HNS3_OPC_QUERY_32_BIT_REG,
				  HNS3_OPC_QUERY_64_BIT_REG};
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

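/* Map a return code reported by firmware in a descriptor to a negative
 * errno value.
 */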
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	switch (desc_ret) {
	case HNS3_CMD_EXEC_SUCCESS:
		return 0;
	case HNS3_CMD_NO_AUTH:
		return -EPERM;
	case HNS3_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HNS3_CMD_QUEUE_FULL:
		return -EXFULL;
	case HNS3_CMD_NEXT_ERR:
		return -ENOSR;
	case HNS3_CMD_UNEXE_ERR:
		return -ENOTBLK;
	case HNS3_CMD_PARA_ERR:
		return -EINVAL;
	case HNS3_CMD_RESULT_ERR:
		return -ERANGE;
	case HNS3_CMD_TIMEOUT:
		return -ETIME;
	case HNS3_CMD_HILINK_ERR:
		return -ENOLINK;
	case HNS3_CMD_QUEUE_ILLEGAL:
		return -ENXIO;
	case HNS3_CMD_INVALID:
		return -EBADR;
	default:
		return -EREMOTEIO;
	}
}

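/* Copy the descriptors written back by firmware, starting at CSQ index ntc,
 * into the caller's descriptor array, then convert the firmware return code
 * of the command to an errno value.
 */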
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];

		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}

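/* Busy-wait, one microsecond per iteration and up to tx_timeout iterations,
 * until the firmware has consumed the submitted CSQ descriptors. Abort early
 * if commands have been disabled or a reset is pending.
 */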
static int
hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;

		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
			hns3_err(hw,
				 "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}

		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor for describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is resetting
 *   - -EIO if the cmd csq is corrupted (due to reset) or
 *     a reset is pending
 *   - -ENOMEM/-ETIME/... (non-zero) for other errors
 *   - Zero if the operation completed successfully
 *
 * Note: -EBUSY/-EIO are only used in the reset case.
 *
 * Note: this is the main send command for the command queue; it
 * submits the descriptors, cleans the queue, etc.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (rte_atomic16_read(&hw->reset.disable_cmd))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}

	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record the location of the descriptors in the ring for this
	 * submission, which will be used when hardware writes back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is synchronous, wait for the firmware to write back.
	 * When multiple descriptors are sent, the first one is used for the
	 * check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}

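/* Translate the capability bits reported by firmware into the driver's
 * per-device capability flags.
 */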
static void
hns3_parse_capability(struct hns3_hw *hw,
		      struct hns3_query_version_cmd *cmd)
{
	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

	if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
}

static enum hns3_cmd_status
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
	resp = (struct hns3_query_version_cmd *)desc.data;

	/* Send the query command to firmware */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
	hns3_parse_capability(hw, resp);

	return 0;
}

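/* First stage of command queue initialization: allocate the CSQ and CRQ
 * descriptor rings and set up the software state. The hardware registers are
 * programmed later by hns3_cmd_init().
 */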
int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Set up the locks for the command queues */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/*
	 * Clear all command registers,
	 * in case there are residual values from a previous run.
	 */
	hns3_cmd_clear_regs(hw);

	/* Set up the number of entries used by the command queues */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Set up the Tx write back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Set up the queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);

	return ret;
}

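/* Second stage of initialization: reset the ring pointers, program the
 * command queue registers, re-enable command submission and query the
 * firmware version and capabilities.
 */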
int
hns3_cmd_init(struct hns3_hw *hw)
{
	uint32_t version;
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	rte_atomic16_clear(&hw->reset.disable_cmd);

	ret = hns3_cmd_query_firmware_version_and_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	version = hw->fw_version;
	PMD_INIT_LOG(INFO, "The firmware version is %u.%u.%u.%u",
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				    HNS3_FW_VERSION_BYTE3_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				    HNS3_FW_VERSION_BYTE2_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				    HNS3_FW_VERSION_BYTE1_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				    HNS3_FW_VERSION_BYTE0_S));

	return 0;

err_cmd_init:
	rte_atomic16_set(&hw->reset.disable_cmd, 1);
	return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);

	hns3_free_cmd_desc(hw, ring);

	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}

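/* Disable further command submission and clear the command queue registers.
 * The descriptor rings themselves are freed by hns3_cmd_destroy_queue().
 */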
void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	rte_atomic16_set(&hw->reset.disable_cmd, 1);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);
}