/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_pci.h>
#include <rte_io.h>

#include "hns3_common.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

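/*
 * Return the number of free descriptors in the command ring. One slot is
 * always kept unused so that a full ring can be distinguished from an empty
 * one (next_to_use == next_to_clean only when the ring is empty).
 */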
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

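/*
 * Check that the CSQ head reported by hardware lies within the window of
 * in-flight descriptors, i.e. between next_to_clean and next_to_use,
 * taking ring wrap-around into account.
 */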
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Specific memory alloc for command function.
 * Reserve a memzone, which is a contiguous portion of physical memory
 * identified by a name.
 * @hw: pointer to the HW structure
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	static uint64_t hns3_dma_memzone_id;
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
		 __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "cmd ring memzone name: %s", mz->name);

	return 0;
}

static void
hns3_free_dma_mem(struct hns3_cmq_ring *ring)
{
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(__rte_unused struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc)
		hns3_free_dma_mem(ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}

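/*
 * Reinitialize the flags of an already-filled descriptor so it can be
 * resubmitted: restore HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN and set the
 * WR bit for read commands (or clear it for write commands).
 */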
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}

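/*
 * Reclaim CSQ descriptors that the firmware has consumed, based on the head
 * pointer it reports, and return the number of descriptors cleaned. If the
 * head (or the ring base address register) is invalid, the CSQ is considered
 * corrupted: commands are disabled and a delayed reset is scheduled on the
 * primary process.
 */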
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	uint32_t addr;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
	addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG);
	if (!is_valid_csq_clean_head(csq, head) || addr == 0) {
		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
			 csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			__atomic_store_n(&hw->reset.disable_cmd, 1,
					 __ATOMIC_RELAXED);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

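/*
 * The CSQ is drained once the hardware head pointer has caught up with the
 * software next_to_use index.
 */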
static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands span several descriptors and use only the first
	 * descriptor to carry the opcode and return value.
	 */
	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
				  HNS3_OPC_STATS_32_BIT,
				  HNS3_OPC_STATS_MAC,
				  HNS3_OPC_STATS_MAC_ALL,
				  HNS3_OPC_QUERY_32_BIT_REG,
				  HNS3_OPC_QUERY_64_BIT_REG,
				  HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT,
				  HNS3_OPC_QUERY_CLEAR_PF_RAS_INT,
				  HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				  HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT,
				  HNS3_OPC_QUERY_ALL_ERR_INFO,};
	uint32_t i;

	for (i = 0; i < RTE_DIM(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	static const struct {
		uint16_t imp_errcode;
		int linux_errcode;
	} hns3_cmdq_status[] = {
		{HNS3_CMD_EXEC_SUCCESS, 0},
		{HNS3_CMD_NO_AUTH, -EPERM},
		{HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HNS3_CMD_QUEUE_FULL, -EXFULL},
		{HNS3_CMD_NEXT_ERR, -ENOSR},
		{HNS3_CMD_UNEXE_ERR, -ENOTBLK},
		{HNS3_CMD_PARA_ERR, -EINVAL},
		{HNS3_CMD_RESULT_ERR, -ERANGE},
		{HNS3_CMD_TIMEOUT, -ETIME},
		{HNS3_CMD_HILINK_ERR, -ENOLINK},
		{HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HNS3_CMD_INVALID, -EBADR},
		{HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
	};

	uint32_t i;

	for (i = 0; i < RTE_DIM(hns3_cmdq_status); i++)
		if (hns3_cmdq_status[i].imp_errcode == desc_ret)
			return hns3_cmdq_status[i].linux_errcode;

	return -EREMOTEIO;
}

static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];

		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}

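/*
 * Busy-wait (in 1 us steps, up to hw->cmq.tx_timeout iterations) for the
 * firmware to consume all submitted CSQ descriptors. The wait is aborted
 * early when the command queue has been disabled or a reset is pending.
 */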
static int hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;

		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
			hns3_err(hw,
				 "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}

		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor(s) describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is being reset
 *   - -EIO if the command send queue is detected as corrupted (due to reset)
 *     or a reset is pending
 *   - -ENOMEM/-ETIME/... (non-zero) on other errors
 *   - Zero if the operation completed successfully
 *
 * Note: -EBUSY/-EIO are only used in the reset case.
 *
 * Note: this is the main entry point of the command queue; it cleans the
 * queue, fills in the descriptors and notifies the hardware.
 */
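/*
 * Typical usage (illustrative sketch only; the opcode and error handling
 * below are examples, not part of this function's contract):
 *
 *	struct hns3_cmd_desc desc;
 *	int ret;
 *
 *	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
 *	ret = hns3_cmd_send(hw, &desc, 1);
 *	if (ret)
 *		hns3_err(hw, "query firmware version failed, ret = %d", ret);
 */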
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}

	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record the position of the descriptors in the ring for this send,
	 * which will be used when reading back the hardware reply.
	 */
	ntc = hw->cmq.csq.next_to_use;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is synchronous, wait for the firmware to write back.
	 * When multiple descriptors are sent, the first one is used for the
	 * check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}

static const char *
hns3_get_caps_name(uint32_t caps_id)
{
	const struct {
		enum HNS3_CAPS_BITS caps;
		const char *name;
	} dev_caps[] = {
		{ HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" },
		{ HNS3_CAPS_PTP_B, "ptp" },
		{ HNS3_CAPS_TX_PUSH_B, "tx_push" },
		{ HNS3_CAPS_PHY_IMP_B, "phy_imp" },
		{ HNS3_CAPS_TQP_TXRX_INDEP_B, "tqp_txrx_indep" },
		{ HNS3_CAPS_HW_PAD_B, "hw_pad" },
		{ HNS3_CAPS_STASH_B, "stash" },
		{ HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" },
		{ HNS3_CAPS_RAS_IMP_B, "ras_imp" },
		{ HNS3_CAPS_RXD_ADV_LAYOUT_B, "rxd_adv_layout" },
		{ HNS3_CAPS_TM_B, "tm_capability" }
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(dev_caps); i++) {
		if (dev_caps[i].caps == caps_id)
			return dev_caps[i].name;
	}

	return "unknown";
}

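/*
 * Clear the capability bits selected by hns->dev_caps_mask from the firmware
 * response before they are parsed, and log the name of every bit that was
 * masked out.
 */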
static void
hns3_mask_capability(struct hns3_hw *hw,
		     struct hns3_query_version_cmd *cmd)
{
#define MAX_CAPS_BIT	64

	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint64_t caps_org, caps_new, caps_masked;
	uint32_t i;

	if (hns->dev_caps_mask == 0)
		return;

	memcpy(&caps_org, &cmd->caps[0], sizeof(caps_org));
	caps_org = rte_le_to_cpu_64(caps_org);
	caps_new = caps_org ^ (caps_org & hns->dev_caps_mask);
	caps_masked = caps_org ^ caps_new;
	caps_new = rte_cpu_to_le_64(caps_new);
	memcpy(&cmd->caps[0], &caps_new, sizeof(caps_new));

	for (i = 0; i < MAX_CAPS_BIT; i++) {
		if (!(caps_masked & BIT_ULL(i)))
			continue;
		hns3_info(hw, "mask capability: id-%u, name-%s.",
			  i, hns3_get_caps_name(i));
	}
}

static void
hns3_parse_capability(struct hns3_hw *hw,
		      struct hns3_query_version_cmd *cmd)
{
	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B)) {
		/*
		 * PTP depends on a special packet type reported by hardware
		 * only when the RXD advanced layout is enabled, so if the
		 * hardware doesn't support the RXD advanced layout, the driver
		 * should ignore the PTP capability.
		 */
		if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
			hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
		else
			hns3_warn(hw, "ignore PTP capability due to lack of "
				  "rxd advanced layout capability.");
	}
	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
		hns3_set_bit(hw->capability,
			     HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TM_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TM_B, 1);
}

static uint32_t
hns3_build_api_caps(void)
{
	uint32_t api_caps = 0;

	hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);

	return rte_cpu_to_le_32(api_caps);
}

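/*
 * Query the firmware version and capability bits with a single
 * HNS3_OPC_QUERY_FW_VER command, then apply hns->dev_caps_mask and parse the
 * remaining bits into hw->capability.
 */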
static int
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
	resp = (struct hns3_query_version_cmd *)desc.data;
	resp->api_caps = hns3_build_api_caps();

	/* Initialize the cmd function */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
	/*
	 * Mask the capabilities before parsing them, because masking rewrites
	 * the capability data in the response.
	 */
	hns3_mask_capability(hw, resp);
	hns3_parse_capability(hw, resp);

	return 0;
}

int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Setup the lock for command queue */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/*
	 * Clear all command queue registers in case there are
	 * residual values left over.
	 */
	hns3_cmd_clear_regs(hw);

	/* Setup the queue entries for use by the command queue */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);

	return ret;
}

static void
hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];

	if (hw->adapter_state != HNS3_NIC_UNINITIALIZED)
		return;

	if (fw_compact_cmd_result != 0) {
		/*
		 * A non-zero fw_compact_cmd_result means the firmware doesn't
		 * support the link status change interrupt.
		 * The framework already set the RTE_ETH_DEV_INTR_LSC bit
		 * because the driver declared RTE_PCI_DRV_INTR_LSC in
		 * drv_flags, so clear the RTE_ETH_DEV_INTR_LSC capability when
		 * the firmware is detected not to support it.
		 */
		dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
	}
}

static int
hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
{
	if (result != 0 && hns3_dev_get_support(hw, COPPER)) {
		hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
			 result);
		return result;
	}

	hns3_update_dev_lsc_cap(hw, result);

	return 0;
}

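/*
 * Configure the firmware compatibility features: when is_init is true, enable
 * link event reporting and, on copper ports, let the firmware drive the PHY;
 * otherwise clear all compatibility bits (e.g. at uninit time).
 */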
static int
hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
{
	struct hns3_firmware_compat_cmd *req;
	struct hns3_cmd_desc desc;
	uint32_t compat = 0;

#if defined(RTE_HNS3_ONLY_1630_FPGA)
	/*
	 * If the IMP firmware's PHY driver is not enabled via the reserved
	 * register, the driver will use a temporary PHY driver.
	 */
	struct rte_pci_device *pci_dev;
	struct rte_eth_dev *eth_dev;
	uint8_t revision;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	/* Get PCI revision id */
	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
				  HNS3_PCI_REVISION_ID);
	if (ret != HNS3_PCI_REVISION_ID_LEN) {
		PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
			     ret);
		return -EIO;
	}
	if (revision == PCI_REVISION_ID_HIP09_A) {
		struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
		if (hns3_dev_get_support(hw, COPPER) == 0 || pf->is_tmp_phy) {
			PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
			pf->is_tmp_phy = true;
			hns3_set_bit(hw->capability,
				     HNS3_DEV_SUPPORT_COPPER_B, 1);
			return 0;
		}

		PMD_INIT_LOG(ERR, "***use phy driver in imp***");
	}
#endif

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
	req = (struct hns3_firmware_compat_cmd *)desc.data;

	if (is_init) {
		hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
		hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
		if (hns3_dev_get_support(hw, COPPER))
			hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
	}
	req->compat = rte_cpu_to_le_32(compat);

	return hns3_cmd_send(hw, &desc, 1);
}

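/*
 * Bring the command queue into service: reset the ring indices and mailbox
 * state, program the CSQ/CRQ registers, re-enable command sending (unless a
 * new reset is pending), query the firmware version and capabilities, and,
 * on a PF, negotiate the firmware compatibility features.
 */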
int
hns3_cmd_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t version;
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check whether a new reset is pending, because a higher-level reset
	 * may be triggered while a lower-level reset is being processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);

	ret = hns3_cmd_query_firmware_version_and_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	version = hw->fw_version;
	PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu",
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				    HNS3_FW_VERSION_BYTE3_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				    HNS3_FW_VERSION_BYTE2_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				    HNS3_FW_VERSION_BYTE1_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				    HNS3_FW_VERSION_BYTE0_S));

	if (hns->is_vf)
		return 0;

	/*
	 * Ask the firmware to enable some features. A fiber port can still
	 * work without them, but a copper port can't, because the firmware
	 * then fails to take over the PHY.
	 */
	ret = hns3_firmware_compat_config(hw, true);
	if (ret)
		PMD_INIT_LOG(WARNING, "firmware compatible features not "
			     "supported, ret = %d.", ret);

	/*
	 * Perform some corresponding operations based on the firmware
	 * compatibility configuration result.
	 */
	ret = hns3_apply_fw_compat_cmd_result(hw, ret);
	if (ret)
		goto err_cmd_init;

	return 0;

err_cmd_init:
	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);

	hns3_free_cmd_desc(hw, ring);

	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}

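/*
 * Take the command queue out of service: restore the firmware compatibility
 * configuration on a PF, disable further command sending, wait for in-flight
 * commands to finish, and clear the command queue registers.
 */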
void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	if (!hns->is_vf)
		(void)hns3_firmware_compat_config(hw, false);

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);

	/*
	 * A delay is added to ensure that the register cleanup will not run
	 * concurrently with firmware command processing and that all
	 * outstanding commands have been executed.
	 * Concurrency may occur in two scenarios: an asynchronous command or a
	 * command that timed out. If a command could not be executed because
	 * the firmware was busy, it will be processed in the firmware's next
	 * scheduling pass.
	 */
	rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);
}