/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>

#include "hns3_ethdev.h"
#include "hns3_common.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"
#include "hns3_flow.h"

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_SERVICE_QUICK_INTERVAL	10
#define HNS3_INVALID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B	1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200

/* FEC mode order defined in HNS3 hardware */
#define HNS3_HW_FEC_MODE_NOFEC	0
#define HNS3_HW_FEC_MODE_BASER	1
#define HNS3_HW_FEC_MODE_RS	2

enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,
	HNS3_VECTOR0_EVENT_MBX,
	HNS3_VECTOR0_EVENT_ERR,
	HNS3_VECTOR0_EVENT_PTP,
	HNS3_VECTOR0_EVENT_OTHER,
};

static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};

static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
						  uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);

static int hns3_add_mc_mac_addr(struct hns3_hw *hw,
				struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
				   struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);
static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);

static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3_evt_cause
hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
			  uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.imp_cnt++;
		hns3_warn(hw, "IMP reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw, "IMP reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
			     uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.global_cnt++;
		hns3_warn(hw, "Global reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw,
			  "Global reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t vector0_int_stats;
	uint32_t cmdq_src_val;
	uint32_t hw_err_src_reg;
	uint32_t val;
	enum hns3_evt_cause ret;
	bool is_delay;

	/* fetch the events from their corresponding regs */
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

	is_delay = clearval == NULL ? true : false;
	/*
	 * Assumption: if reset and mailbox events are reported together,
	 * only the reset event is processed and the mailbox event is
	 * deferred. Since the RX CMDQ event is not cleared this time, the
	 * hardware will raise another interrupt just for the mailbox.
	 */
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
		ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Global reset */
	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
		ret = hns3_proc_global_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Check for vector0 1588 event source */
	if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
		val = BIT(HNS3_VECTOR0_1588_INT_B);
		ret = HNS3_VECTOR0_EVENT_PTP;
		goto out;
	}

	/* check for vector0 msix event source */
	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
		val = vector0_int_stats | hw_err_src_reg;
		ret = HNS3_VECTOR0_EVENT_ERR;
		goto out;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
		cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		val = cmdq_src_val;
		ret = HNS3_VECTOR0_EVENT_MBX;
		goto out;
	}

	val = vector0_int_stats;
	ret = HNS3_VECTOR0_EVENT_OTHER;

out:
	if (clearval)
		*clearval = val;
	return ret;
}

static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
	if (event_type == HNS3_VECTOR0_EVENT_RST ||
	    event_type == HNS3_VECTOR0_EVENT_PTP)
		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
	uint32_t vector0_int_stats;

	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
			       BIT(HNS3_VECTOR0_1588_INT_B));
}

static void
hns3_handle_mac_tnl(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	uint32_t status;
	int ret;

	/* query and clear mac tnl interrupt */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
		return;
	}

	status = rte_le_to_cpu_32(desc.data[0]);
	if (status) {
		hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
					  false);
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret)
			hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
				 ret);
	}
}

static void
hns3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_evt_cause event_cause;
	uint32_t clearval = 0;
	uint32_t vector0_int;
	uint32_t ras_int;
	uint32_t cmdq_int;

	/* Disable interrupt */
	hns3_pf_disable_irq0(hw);

	event_cause = hns3_check_event_cause(hns, &clearval);
	vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hns3_clear_event_cause(hw, event_cause, clearval);
	/* vector 0 interrupt is shared with reset and mailbox source events. */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
		hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
		hns3_handle_mac_tnl(hw);
		hns3_handle_error(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
		hns3_warn(hw, "received reset interrupt");
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
		hns3_dev_handle_mbx_msg(hw);
	} else {
		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
	}

	/* Enable interrupt if it is not caused by reset */
	hns3_pf_enable_irq0(hw);
}

static int
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
	struct hns3_vlan_filter_pf_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	uint8_t vlan_offset_byte_val;
	struct hns3_cmd_desc desc;
	uint8_t vlan_offset_byte;
	uint8_t vlan_offset_base;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
			   HNS3_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);
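	/*
	 * Worked example (for clarity, not from the original source): for
	 * vlan_id 300, vlan_offset_base = 300 / 160 = 1, vlan_offset_byte =
	 * (300 % 160) / 8 = 17, and vlan_offset_byte_val = 1 << (300 % 8) =
	 * 0x10, i.e. bit 4 of byte 17 in the second 160-entry block.
	 */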

	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_base;
	req->vlan_cfg = on ? 0 : 1;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
			 vlan_id, ret);

	return ret;
}

static void
hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id) {
			if (vlan_entry->hd_tbl_status)
				hns3_set_port_vlan_filter(hns, vlan_id, 0);
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			break;
		}
	}
}

static void
hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
			bool writen_to_tbl)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id)
			return;
	}

	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
	if (vlan_entry == NULL) {
		hns3_err(hw, "Failed to malloc hns3 vlan table");
		return;
	}

	vlan_entry->hd_tbl_status = writen_to_tbl;
	vlan_entry->vlan_id = vlan_id;

	LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}

static int
hns3_restore_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;
	uint16_t vlan_id;
	int ret = 0;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
		return hns3_vlan_pvid_configure(hns,
						hw->port_base_vlan_cfg.pvid, 1);

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			vlan_id = vlan_entry->vlan_id;
			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
			if (ret)
				break;
		}
	}

	return ret;
}

static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
	struct hns3_hw *hw = &hns->hw;
	bool writen_to_tbl = false;
	int ret = 0;

	/*
	 * When the VLAN filter is enabled, hardware regards packets without
	 * a VLAN tag as packets with VLAN 0. So, to receive untagged packets,
	 * VLAN ID 0 must not be removed via rte_eth_dev_vlan_filter.
	 */
	if (on == 0 && vlan_id == 0)
		return 0;

	/*
	 * When port based VLAN is enabled, it is used as the VLAN filter
	 * condition. In this case, the VLAN filter table is not updated when
	 * the user adds a new VLAN or removes an existing one; only the VLAN
	 * list is updated. The VLAN IDs in the list are written to the VLAN
	 * filter table once port based VLAN is disabled.
	 */
	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		writen_to_tbl = true;
	}

	if (ret == 0) {
		if (on)
			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
		else
			hns3_rm_dev_vlan_table(hns, vlan_id);
	}
	return ret;
}

static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
{
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
	    vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
			 ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
			 ret);
	return ret;
}

static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_rx_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
		     vcfg->strip_tag1_discard_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
		     vcfg->strip_tag2_discard_en ? 1 : 0);
	/*
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so only the PF vport needs to be configured.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;
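	/* For the PF (vport 0) this selects byte 0, bit 0 of vf_bitmap. */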

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
	return ret;
}

static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
	struct hns3_rx_vtag_cfg rxvlan_cfg;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		rxvlan_cfg.strip_tag1_en = false;
		rxvlan_cfg.strip_tag2_en = enable;
		rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		rxvlan_cfg.strip_tag1_en = enable;
		rxvlan_cfg.strip_tag2_en = true;
		rxvlan_cfg.strip_tag2_discard_en = true;
	}

	rxvlan_cfg.strip_tag1_discard_en = false;
	rxvlan_cfg.vlan1_vlan_prionly = false;
	rxvlan_cfg.vlan2_vlan_prionly = false;
	rxvlan_cfg.rx_vlan_offload_en = enable;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
	if (ret) {
		hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
			 enable ? "enable" : "disable", ret);
		return ret;
	}

	memcpy(&hns->pf.vtag_config.rx_vcfg, &rxvlan_cfg,
	       sizeof(struct hns3_rx_vtag_cfg));

	return ret;
}

static int
hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
			  uint8_t fe_type, bool filter_en, uint8_t vf_id)
{
	struct hns3_vlan_filter_ctrl_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set vlan filter fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_filter_init(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
					HNS3_FILTER_FE_EGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret) {
		hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

	return ret;
}

static int
hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, enable,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to %s port vlan filter, ret = %d",
			 enable ? "enable" : "disable", ret);

	return ret;
}

static int
hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_rxmode *rxmode;
	unsigned int tmp_mask;
	bool enable;
	int ret = 0;

	rte_spinlock_lock(&hw->lock);
	rxmode = &dev->data->dev_conf.rxmode;
	tmp_mask = (unsigned int)mask;
	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
		/* ignore vlan filter configuration during promiscuous mode */
		if (!dev->data->promiscuous) {
			/* Enable or disable VLAN filter */
			enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
				 true : false;

			ret = hns3_enable_vlan_filter(hns, enable);
			if (ret) {
				rte_spinlock_unlock(&hw->lock);
				hns3_err(hw, "failed to %s rx filter, ret = %d",
					 enable ? "enable" : "disable", ret);
				return ret;
			}
		}
	}

	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
			 true : false;

		ret = hns3_en_hw_strip_rxvtag(hns, enable);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			hns3_err(hw, "failed to %s rx strip, ret = %d",
				 enable ? "enable" : "disable", ret);
			return ret;
		}
	}

	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_tx_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = vcfg->default_tag1;
	req->def_vlan_tag2 = vcfg->default_tag2;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
		     vcfg->accept_tag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
		     vcfg->accept_untag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
		     vcfg->accept_tag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
		     vcfg->accept_untag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
		     vcfg->tag_shift_mode_en ? 1 : 0);

	/*
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so only the PF vport needs to be configured.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
		     uint16_t pvid)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_vtag_cfg txvlan_cfg;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
		txvlan_cfg.accept_tag1 = true;
		txvlan_cfg.insert_tag1_en = false;
		txvlan_cfg.default_tag1 = 0;
	} else {
		txvlan_cfg.accept_tag1 =
			hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
		txvlan_cfg.insert_tag1_en = true;
		txvlan_cfg.default_tag1 = pvid;
	}

	txvlan_cfg.accept_untag1 = true;
	txvlan_cfg.accept_tag2 = true;
	txvlan_cfg.accept_untag2 = true;
	txvlan_cfg.insert_tag2_en = false;
	txvlan_cfg.default_tag2 = 0;
	txvlan_cfg.tag_shift_mode_en = true;

	ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
	if (ret) {
		hns3_err(hw, "pf vlan set pvid failed, pvid = %u, ret = %d",
			 pvid, ret);
		return ret;
	}

	memcpy(&hns->pf.vtag_config.tx_vcfg, &txvlan_cfg,
	       sizeof(struct hns3_tx_vtag_cfg));

	return ret;
}

static void
hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
			vlan_entry->hd_tbl_status = false;
		}
	}

	if (is_del_list) {
		vlan_entry = LIST_FIRST(&pf->vlan_list);
		while (vlan_entry) {
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			vlan_entry = LIST_FIRST(&pf->vlan_list);
		}
	}
}

static void
hns3_add_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (!vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
			vlan_entry->hd_tbl_status = true;
		}
	}
}

static void
hns3_remove_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3_rm_all_vlan_table(hns, true);
	if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
		ret = hns3_set_port_vlan_filter(hns,
						hw->port_base_vlan_cfg.pvid, 0);
		if (ret) {
			hns3_err(hw, "Failed to remove all vlan table, ret =%d",
				 ret);
			return;
		}
	}
}

static int
hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
				uint16_t port_base_vlan_state,
				uint16_t new_pvid)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t old_pvid;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
		old_pvid = hw->port_base_vlan_cfg.pvid;
		if (old_pvid != HNS3_INVALID_PVID) {
			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
			if (ret) {
				hns3_err(hw, "failed to remove old pvid %u, "
					 "ret = %d", old_pvid, ret);
				return ret;
			}
		}

		hns3_rm_all_vlan_table(hns, false);
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
		if (ret) {
			hns3_err(hw, "failed to add new pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}
	} else {
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
		if (ret) {
			hns3_err(hw, "failed to remove pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}

		hns3_add_all_vlan_table(hns);
	}
	return 0;
}

static int
hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
{
	struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
	struct hns3_rx_vtag_cfg rx_vlan_cfg;
	bool rx_strip_en;
	int ret;

	rx_strip_en = old_cfg->rx_vlan_offload_en;
	if (on) {
		rx_vlan_cfg.strip_tag1_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_en = true;
		rx_vlan_cfg.strip_tag2_discard_en = true;
	} else {
		rx_vlan_cfg.strip_tag1_en = false;
		rx_vlan_cfg.strip_tag2_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_discard_en = false;
	}
	rx_vlan_cfg.strip_tag1_discard_en = false;
	rx_vlan_cfg.vlan1_vlan_prionly = false;
	rx_vlan_cfg.vlan2_vlan_prionly = false;
	rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
	if (ret)
		return ret;

	memcpy(&hns->pf.vtag_config.rx_vcfg, &rx_vlan_cfg,
	       sizeof(struct hns3_rx_vtag_cfg));

	return ret;
}

static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;
	int ret, err;

	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);
		return 0;
	}

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);
		return ret;
	}

	ret = hns3_en_pvid_strip(hns, on);
	if (ret) {
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);
		goto pvid_vlan_strip_fail;
	}

	if (pvid == HNS3_INVALID_PVID)
		goto out;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
			 ret);
		goto vlan_filter_set_fail;
	}

out:
	hw->port_base_vlan_cfg.state = port_base_vlan_state;
	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
	return ret;

vlan_filter_set_fail:
	err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
				 HNS3_PORT_BASE_VLAN_ENABLE);
	if (err)
		hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
	err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
				   hw->port_base_vlan_cfg.pvid);
	if (err)
		hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

	return ret;
}

static int
hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool pvid_en_state_change;
	uint16_t pvid_state;
	int ret;

	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
			 RTE_ETHER_MAX_VLAN_ID);
		return -EINVAL;
	}

	/*
	 * If the PVID configuration state changes, the PVID state cached in
	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed.
	 */
	pvid_state = hw->port_base_vlan_cfg.state;
	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
		pvid_en_state_change = false;
	else
		pvid_en_state_change = true;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_pvid_configure(hns, pvid, on);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		return ret;
	/*
	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE do the PVID related
	 * operations in Tx/Rx need to be handled by the PMD.
	 */
	if (pvid_en_state_change &&
	    hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
		hns3_update_all_queues_pvid_proc_en(hw);

	return 0;
}

static int
hns3_default_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * When the VLAN filter is enabled, hardware regards packets without
	 * a VLAN tag as packets with VLAN 0. Therefore, if VLAN 0 is not in
	 * the VLAN table, untagged packets will not be received. So, add
	 * VLAN 0 as the default VLAN.
	 */
	ret = hns3_vlan_filter_configure(hns, 0, 1);
	if (ret)
		hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
	return ret;
}

static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * This function can be called during both initialization and the
	 * reset process. When called during reset, the hardware has just
	 * been reset successfully, and the configuration must be restored
	 * so that it remains unchanged before and after the reset.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
	}

	ret = hns3_vlan_filter_init(hns);
	if (ret) {
		hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
		return ret;
	}

	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret) {
		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
		return ret;
	}

	/*
	 * When in the reinit-dev stage of the reset process, the following
	 * VLAN related configurations may differ from those at
	 * initialization; they are restored to hardware later in
	 * hns3_restore_vlan_table and hns3_restore_vlan_conf.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
		if (ret) {
			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
			return ret;
		}

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret) {
			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
				 ret);
			return ret;
		}
	}

	return hns3_default_vlan_config(hns);
}

static int
hns3_restore_vlan_conf(struct hns3_adapter *hns)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	bool enable;
	int ret;

	if (!hw->data->promiscuous) {
		/* restore vlan filter states */
		offloads = hw->data->dev_conf.rxmode.offloads;
		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
		ret = hns3_enable_vlan_filter(hns, enable);
		if (ret) {
			hns3_err(hw, "failed to restore vlan rx filter conf, "
				 "ret = %d", ret);
			return ret;
		}
	}

	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
	if (ret) {
		hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

	return ret;
}

static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;
	int mask;
	int ret;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%u), "
			  "hw_vlan_reject_untagged(%u)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret) {
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * If no PVID configuration is set in rte_eth_conf, the driver need
	 * not program any PVID related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
			 txmode->pvid, ret);

	return ret;
}

static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
{
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;
	uint16_t tso_mss;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

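	/*
	 * Note: the MIN field mask/shift is reused when packing the max MSS
	 * below; this assumes both fields occupy the same bit range within
	 * their respective 16-bit registers, as the identical packing
	 * suggests.
	 */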
	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
{
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
			     is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

	return 0;
}

static int
hns3_init_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t allocated_size = 0;
	int ret;

	ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
				 true);
	if (ret)
		return ret;

	if (allocated_size < pf->wanted_umv_size)
		PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
			     pf->wanted_umv_size, allocated_size);

	pf->max_umv_size = (!!allocated_size) ? allocated_size :
			   pf->wanted_umv_size;
	pf->used_umv_size = 0;
	return 0;
}

static int
hns3_uninit_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	if (pf->max_umv_size == 0)
		return 0;

	ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
	if (ret)
		return ret;

	pf->max_umv_size = 0;

	return 0;
}

static bool
hns3_is_umv_space_full(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	bool is_full;

	is_full = (pf->used_umv_size >= pf->max_umv_size);

	return is_full;
}

static void
hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (is_free) {
		if (pf->used_umv_size > 0)
			pf->used_umv_size--;
	} else {
		pf->used_umv_size++;
	}
}

static void
hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
		      const uint8_t *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
			    ((uint32_t)mac_addr[2] << 16) |
			    ((uint32_t)mac_addr[1] << 8) |
			    (uint32_t)mac_addr[0];
	uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];
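	/*
	 * Byte-order example (illustrative): for MAC 00:2b:3c:4d:5e:6f,
	 * high_val = 0x4d3c2b00 (bytes 3..0) and low_val = 0x6f5e
	 * (bytes 5..4), matching the little-endian layout of the MAC_VLAN
	 * table entry.
	 */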

	hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
		hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
	new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
}

static int
hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
			     uint8_t resp_code,
			     enum hns3_mac_vlan_tbl_opcode op)
{
	if (cmdq_resp) {
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status, status=%u",
			 cmdq_resp);
		return -EIO;
	}

	if (op == HNS3_MAC_VLAN_ADD) {
		if (resp_code == 0 || resp_code == 1) {
			return 0;
		} else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for uc_overflow");
			return -ENOSPC;
		} else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for mc_overflow");
			return -ENOSPC;
		}

		hns3_err(hw, "add mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_REMOVE) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "remove mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "remove mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_LKUP) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "lookup mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	}

	hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
		 op);

	return -EINVAL;
}

static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int ret;
	int i;

	if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
		for (i = 0; i < desc_num - 1; i++) {
			hns3_cmd_setup_basic_desc(&desc[i],
						  HNS3_OPC_MAC_VLAN_ADD, true);
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
			if (i == 0)
				memcpy(desc[i].data, req,
				       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		}
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
					  true);
	} else {
		hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
	}
	ret = hns3_cmd_send(hw, desc, desc_num);
	if (ret) {
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
			 ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);
}

static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int cfg_status;
	int ret;
	int i;

	if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
		hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc->data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc->retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		for (i = 0; i < desc_num; i++) {
			hns3_cmd_reuse_desc(&desc[i], false);
			if (i == desc_num - 1)
				desc[i].flag &=
					rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
			else
				desc[i].flag |=
					rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		}
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		desc[0].retval = 0;
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}

	if (ret) {
		hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}

	return cfg_status;
}

static int
hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req)
{
	struct hns3_cmd_desc desc;
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc.retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_REMOVE);
}

static int
hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;
	uint8_t vf_id;
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	/*
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so only the PF vport needs to be configured.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Look up the MAC address in the mac_vlan table and add it if the
	 * entry does not exist. Duplicate unicast entries are not allowed
	 * in the mac_vlan table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
				       HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
						    HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
			if (!ret)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

		return -ENOSPC;
	}

	hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

	return ret;
}

static int
hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* mac addr doesn't exist in the mac_vlan table */
		return 0;
	else if (ret == 0)
		hns3_update_umv_space(hw, true);

	return ret;
}

static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret, ret_val;

	rte_spinlock_lock(&hw->lock);
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	ret = hw->ops.del_uc_mac_addr(hw, oaddr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       oaddr);
		hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
			  mac_str, ret);

		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	rte_spinlock_unlock(&hw->lock);

	return 0;

err_pause_addr_cfg:
	ret_val = hw->ops.del_uc_mac_addr(hw, mac_addr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_warn(hw,
			  "Failed to roll back the newly set mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	ret_val = hw->ops.add_uc_mac_addr(hw, oaddr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
		hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
			  mac_str, ret_val);
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static void
hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
{
#define HNS3_VF_NUM_IN_FIRST_DESC 192
	uint8_t word_num;
	uint8_t bit_num;

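	/*
	 * The VF bitmap spans two descriptors: desc[1].data (six 32-bit
	 * words) covers vfid 0..191, and desc[2].data covers the remainder.
	 */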
	if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &=
				rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[1].data[word_num] |=
				rte_cpu_to_le_32(1UL << bit_num);
	} else {
		word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &=
				rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[2].data[word_num] |=
				rte_cpu_to_le_32(1UL << bit_num);
	}
}

1707 static int
hns3_add_mc_mac_addr(struct hns3_hw * hw,struct rte_ether_addr * mac_addr)1708 hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1709 {
1710 struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
1711 struct hns3_mac_vlan_tbl_entry_cmd req;
1712 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1713 uint8_t vf_id;
1714 int ret;
1715
1716 /* Check if mac addr is valid */
1717 if (!rte_is_multicast_ether_addr(mac_addr)) {
1718 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1719 mac_addr);
1720 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
1721 mac_str);
1722 return -EINVAL;
1723 }
1724
1725 memset(&req, 0, sizeof(req));
1726 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1727 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1728 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
1729 HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1730 if (ret) {
1731 /* This mac addr do not exist, add new entry for it */
1732 memset(desc[0].data, 0, sizeof(desc[0].data));
1733 memset(desc[1].data, 0, sizeof(desc[0].data));
1734 memset(desc[2].data, 0, sizeof(desc[0].data));
1735 }
1736
1737 /*
1738 * In current version VF is not supported when PF is driven by DPDK
1739 * driver, just need to configure parameters for PF vport.
1740 */
1741 vf_id = HNS3_PF_FUNC_ID;
1742 hns3_update_desc_vfid(desc, vf_id, false);
1743 ret = hns3_add_mac_vlan_tbl(hw, &req, desc,
1744 HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1745 if (ret) {
1746 if (ret == -ENOSPC)
1747 hns3_err(hw, "mc mac vlan table is full");
1748 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1749 mac_addr);
1750 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
1751 }
1752
1753 return ret;
1754 }

static int
hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_cmd_desc desc[3];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint8_t vf_id;
	int ret;

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
				       HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == 0) {
		/*
		 * This mac addr exists, so remove this handle's VFID from it.
		 * In the current version, VF is not supported when the PF is
		 * driven by the DPDK driver, so only the PF vport needs to
		 * be configured.
		 */
		vf_id = HNS3_PF_FUNC_ID;
		hns3_update_desc_vfid(desc, vf_id, true);

		/* All VFID bits are now zero, so delete this entry */
		ret = hns3_remove_mac_vlan_tbl(hw, &req);
	} else if (ret == -ENOENT) {
		/* This mac addr doesn't exist. */
		return 0;
	}

	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
	}

	return ret;
}

static int
hns3_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct rte_eth_dcb_tx_conf *dcb_tx_conf;
	uint8_t num_tc;
	int max_tc = 0;
	int i;

	if (((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
	    (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
	     tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
			 rx_mq_mode, tx_mq_mode);
		return -EOPNOTSUPP;
	}

	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
	if ((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) supported by the driver.",
				 dcb_rx_conf->nb_tcs, pf->tc_max);
			return -EINVAL;
		}

		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
			hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
				 "nb_tcs(%d) != %d or %d in rx direction.",
				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
			return -EINVAL;
		}

		if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
			hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
				 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}

		for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
			if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
				hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
					 "is not equal to one in tx direction.",
					 i, dcb_rx_conf->dcb_tc[i]);
				return -EINVAL;
			}
			if (dcb_rx_conf->dcb_tc[i] > max_tc)
				max_tc = dcb_rx_conf->dcb_tc[i];
		}

		num_tc = max_tc + 1;
		if (num_tc > dcb_rx_conf->nb_tcs) {
			hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
				 num_tc, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}
	}

	return 0;
}

static int
hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
			   enum hns3_ring_type queue_type, uint16_t queue_id)
{
	struct hns3_cmd_desc desc;
	struct hns3_ctrl_vector_chain_cmd *req =
			(struct hns3_ctrl_vector_chain_cmd *)desc.data;
	enum hns3_opcode_type op;
	uint16_t tqp_type_and_id = 0;
	uint16_t type;
	uint16_t gl;
	int ret;

	op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
	hns3_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
					    HNS3_TQP_INT_ID_L_S);
	req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
					      HNS3_TQP_INT_ID_H_S);

	if (queue_type == HNS3_RING_TYPE_RX)
		gl = HNS3_RING_GL_RX;
	else
		gl = HNS3_RING_GL_TX;

	type = queue_type;

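	/*
	 * Pack the ring type, the queue id and the GL index into one 16-bit
	 * word; the firmware command carries them in a single field.
	 */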
	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
		       type);
	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
		       gl);
	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
	req->int_cause_num = 1;
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.",
			 en ? "Map" : "Unmap", queue_id, vector_id, ret);
		return ret;
	}

	return 0;
}

static int
hns3_setup_dcb(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (!hns3_dev_get_support(hw, DCB)) {
		hns3_err(hw, "this port does not support dcb configurations.");
		return -EOPNOTSUPP;
	}

	if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
		hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
		return -EOPNOTSUPP;
	}

	ret = hns3_dcb_configure(hns);
	if (ret)
		hns3_err(hw, "failed to config dcb: %d", ret);

	return ret;
}

static int
hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
{
	int ret;

	/*
	 * Some hardware doesn't support auto-negotiation, but users may not
	 * have configured link_speeds (default 0), which requests
	 * auto-negotiation. In this case, return success.
	 */
	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
	    hw->mac.support_autoneg == 0)
		return 0;

	if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
		ret = hns3_check_port_speed(hw, link_speeds);
		if (ret)
			return ret;
	}

	return 0;
}

static int
hns3_check_dev_conf(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	int ret;

	ret = hns3_check_mq_mode(dev);
	if (ret)
		return ret;

	return hns3_check_link_speed(hw, conf->link_speeds);
}

static int
hns3_dev_configure(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;
	struct rte_eth_rss_conf rss_conf;
	bool gro_en;
	int ret;

	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);

	/*
	 * Some versions of the hardware network engine do not support
	 * individually enabling/disabling/resetting a Tx or Rx queue. These
	 * devices must enable/disable/reset Tx and Rx queues at the same
	 * time. When the number of Tx queues allocated by the upper
	 * application is not equal to the number of Rx queues, the driver
	 * needs to set up fake Tx or Rx queues to even out the numbers;
	 * otherwise, the network engine cannot work as usual. These fake
	 * queues are invisible to the upper application and cannot be used
	 * by it.
	 */
	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
	if (ret) {
		hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
		hw->cfg_max_queues = 0;
		return ret;
	}

	hw->adapter_state = HNS3_NIC_CONFIGURING;
	ret = hns3_check_dev_conf(dev);
	if (ret)
		goto cfg_err;

	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
		ret = hns3_setup_dcb(dev);
		if (ret)
			goto cfg_err;
	}

	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
		rss_conf = conf->rx_adv_conf.rss_conf;
		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
		if (ret)
			goto cfg_err;
	}

	ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu);
	if (ret != 0)
		goto cfg_err;

	ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
	if (ret)
		goto cfg_err;

	ret = hns3_dev_configure_vlan(dev);
	if (ret)
		goto cfg_err;

	/* config hardware GRO */
	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
	ret = hns3_config_gro(hw, gro_en);
	if (ret)
		goto cfg_err;

	hns3_init_rx_ptype_tble(dev);
	hw->adapter_state = HNS3_NIC_CONFIGURED;

	return 0;

cfg_err:
	hw->cfg_max_queues = 0;
	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
	hw->adapter_state = HNS3_NIC_INITIALIZED;

	return ret;
}
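
/*
 * For reference, a minimal application-side sketch of the call that ends up
 * in hns3_dev_configure() (illustrative only, error handling omitted; the
 * port id and queue counts are placeholders):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
 *	};
 *
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */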

static int
hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
{
	struct hns3_config_max_frm_size_cmd *req;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hns3_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = rte_cpu_to_le_16(new_mps);
	req->min_frm_size = RTE_ETHER_MIN_LEN;

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	int err;
	int ret;

	ret = hns3_set_mac_mtu(hw, mps);
	if (ret) {
		hns3_err(hw, "failed to set mtu, ret = %d", ret);
		return ret;
	}

	ret = hns3_buffer_alloc(hw);
	if (ret) {
		hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
		goto rollback;
	}

	hns->pf.mps = mps;

	return 0;

rollback:
	err = hns3_set_mac_mtu(hw, hns->pf.mps);
	if (err)
		hns3_err(hw, "fail to rollback MTU, err = %d", err);

	return ret;
}

static int
hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (dev->data->dev_started) {
		hns3_err(hw, "Failed to set mtu, port %u must be stopped "
			 "before configuration", dev->data->port_id);
		return -EBUSY;
	}

	rte_spinlock_lock(&hw->lock);
	frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
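
	/*
	 * For example, with a 1500-byte MTU the frame size becomes 1500 plus
	 * HNS3_ETH_OVERHEAD (assumed here to cover the Ethernet header, the
	 * CRC and room for VLAN tags), clamped below to
	 * HNS3_DEFAULT_FRAME_LEN by the RTE_MAX() above.
	 */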

	/*
	 * The maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can
	 * safely be assigned to a uint16_t variable.
	 */
	ret = hns3_config_mtu(hw, (uint16_t)frame_size);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
			 dev->data->port_id, mtu, ret);
		return ret;
	}

	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static uint32_t
hns3_get_copper_port_speed_capa(uint32_t supported_speed)
{
	uint32_t speed_capa = 0;

	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_10M;
	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_100M;
	if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_1G;

	return speed_capa;
}

static uint32_t
hns3_get_fiber_port_speed_capa(uint32_t supported_speed)
{
	uint32_t speed_capa = 0;

	if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_1G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_10G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_25G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_40G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_50G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_100G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_200G;

	return speed_capa;
}

uint32_t
hns3_get_speed_capa(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	uint32_t speed_capa;

	if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
		speed_capa =
			hns3_get_copper_port_speed_capa(mac->supported_speed);
	else
		speed_capa =
			hns3_get_fiber_port_speed_capa(mac->supported_speed);

	if (mac->support_autoneg == 0)
		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;

	return speed_capa;
}

static int
hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	(void)hns3_update_link_status(hw);

	ret = hns3_update_link_info(eth_dev);
	if (ret)
		hw->mac.link_status = RTE_ETH_LINK_DOWN;

	return ret;
}

static void
hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
		      struct rte_eth_link *new_link)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct hns3_mac *mac = &hw->mac;

	switch (mac->link_speed) {
	case RTE_ETH_SPEED_NUM_10M:
	case RTE_ETH_SPEED_NUM_100M:
	case RTE_ETH_SPEED_NUM_1G:
	case RTE_ETH_SPEED_NUM_10G:
	case RTE_ETH_SPEED_NUM_25G:
	case RTE_ETH_SPEED_NUM_40G:
	case RTE_ETH_SPEED_NUM_50G:
	case RTE_ETH_SPEED_NUM_100G:
	case RTE_ETH_SPEED_NUM_200G:
		if (mac->link_status)
			new_link->link_speed = mac->link_speed;
		break;
	default:
		if (mac->link_status)
			new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
		break;
	}

	if (!mac->link_status)
		new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;

	new_link->link_duplex = mac->link_duplex;
	new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
	new_link->link_autoneg = mac->link_autoneg;
}

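/*
 * Report the link status to the ethdev layer. When wait_to_complete is set,
 * poll the hardware up to HNS3_MAX_LINK_CHECK_TIMES, sleeping
 * HNS3_LINK_CHECK_INTERVAL milliseconds between attempts, until the link
 * comes up or the retries are exhausted.
 */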
static int
hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
#define HNS3_LINK_CHECK_INTERVAL 100  /* unit: ms */
#define HNS3_MAX_LINK_CHECK_TIMES 20  /* 2s (20 * 100ms) in total */

	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES;
	struct hns3_mac *mac = &hw->mac;
	struct rte_eth_link new_link;
	int ret;

	/* When the port is stopped, report link down. */
	if (eth_dev->data->dev_started == 0) {
		new_link.link_autoneg = mac->link_autoneg;
		new_link.link_duplex = mac->link_duplex;
		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		new_link.link_status = RTE_ETH_LINK_DOWN;
		goto out;
	}

	do {
		ret = hns3_update_port_link_info(eth_dev);
		if (ret) {
			hns3_err(hw, "failed to get port link info, ret = %d.",
				 ret);
			break;
		}

		if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
			break;

		rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
	} while (retry_cnt--);

	memset(&new_link, 0, sizeof(new_link));
	hns3_setup_linkstatus(eth_dev, &new_link);

out:
	return rte_eth_linkstatus_set(eth_dev, &new_link);
}

static int
hns3_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	/*
	 * The "tx_pkt_burst" function will be restored, but the secondary
	 * process has no mechanism for notifying the primary process.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_err(hw, "secondary process does not support to set link up.");
		return -ENOTSUP;
	}

	/*
	 * If the device isn't started, the Rx/Tx datapath is still disabled
	 * and setting the link up is not allowed. It is probably better to
	 * return success, though, to reduce the impact on the upper layer.
	 */
	if (hw->adapter_state != HNS3_NIC_STARTED) {
		hns3_info(hw, "device isn't started, can't set link up.");
		return 0;
	}

	if (!hw->set_link_down)
		return 0;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_cfg_mac_mode(hw, true);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "failed to set link up, ret = %d", ret);
		return ret;
	}

	hw->set_link_down = false;
	hns3_start_tx_datapath(dev);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	/*
	 * The "tx_pkt_burst" function will be set to a dummy function, but
	 * the secondary process has no mechanism for notifying the primary
	 * process.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_err(hw, "secondary process does not support to set link down.");
		return -ENOTSUP;
	}

	/*
	 * If the device isn't started or this API has already been called,
	 * the link status is already down; return success.
	 */
	if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down)
		return 0;

	rte_spinlock_lock(&hw->lock);
	hns3_stop_tx_datapath(dev);
	ret = hns3_cfg_mac_mode(hw, false);
	if (ret) {
		hns3_start_tx_datapath(dev);
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "failed to set link down, ret = %d", ret);
		return ret;
	}

	hw->set_link_down = true;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (!(status->pf_state & HNS3_PF_STATE_DONE))
		return -EINVAL;

	pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;

	return 0;
}

static int
hns3_query_function_status(struct hns3_hw *hw)
{
#define HNS3_QUERY_MAX_CNT 10
#define HNS3_QUERY_SLEEP_MSECOND 1
	struct hns3_func_status_cmd *req;
	struct hns3_cmd_desc desc;
	int timeout = 0;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hns3_func_status_cmd *)desc.data;

	do {
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			PMD_INIT_LOG(ERR, "query function status failed %d",
				     ret);
			return ret;
		}

		/* Check whether PF reset is done */
		if (req->pf_state)
			break;

		rte_delay_ms(HNS3_QUERY_SLEEP_MSECOND);
	} while (timeout++ < HNS3_QUERY_MAX_CNT);

	return hns3_parse_func_status(hw, req);
}

static int
hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
		/*
		 * The total_tqps_num obtained from the firmware is the
		 * maximum tqp number of this port and is shared between the
		 * PF and its VFs. In most cases the PF does not need that
		 * many tqps. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, which comes
		 * from the config file, is the maximum queue number the user
		 * assigns to the PF of this port, so users can tune the PF's
		 * maximum queue number for their own application scenarios,
		 * which is more flexible. In addition, a lot of memory can
		 * be saved because queue statistics rooms are allocated
		 * according to the actual number of queues required. For
		 * network engines with revision_id greater than 0x30, the
		 * PF's maximum queue number is assigned by the config file.
		 */
		if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
			hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
				 "must be greater than 0.",
				 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
			return -EINVAL;
		}

		hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
				       hw->total_tqps_num);
	} else {
		/*
		 * Due to the limited number of PF interrupts available, the
		 * maximum queue number assigned to the PF on the network
		 * engine with revision_id 0x21 is 64.
		 */
		hw->tqps_num = RTE_MIN(hw->total_tqps_num,
				       HNS3_MAX_TQP_NUM_HIP08_PF);
	}

	return 0;
}

static int
hns3_query_pf_resource(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_pf_res_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
		return ret;
	}

	req = (struct hns3_pf_res_cmd *)desc.data;
	hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) +
			     rte_le_to_cpu_16(req->ext_tqp_num);
	ret = hns3_get_pf_max_tqp_num(hw);
	if (ret)
		return ret;

	pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
	pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);

	if (req->tx_buf_size)
		pf->tx_buf_size =
		    rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
	else
		pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;

	pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		pf->dv_buf_size =
		    rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
	else
		pf->dv_buf_size = HNS3_DEFAULT_DV;

	pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);

	hw->num_msi =
	    hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
			   HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);

	return 0;
}

static void
hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
{
	struct hns3_cfg_param_cmd *req;
	uint64_t mac_addr_tmp_high;
	uint8_t ext_rss_size_max;
	uint64_t mac_addr_tmp;
	uint32_t i;

	req = (struct hns3_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
				     HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);

	cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
				       HNS3_CFG_PHY_ADDR_M,
				       HNS3_CFG_PHY_ADDR_S);
	cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					 HNS3_CFG_MEDIA_TP_M,
					 HNS3_CFG_MEDIA_TP_S);
	/* get mac address */
	mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
	mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
					   HNS3_CFG_MAC_ADDR_H_M,
					   HNS3_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
					    HNS3_CFG_DEFAULT_SPEED_M,
					    HNS3_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
					   HNS3_CFG_RSS_SIZE_M,
					   HNS3_CFG_RSS_SIZE_S);

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hns3_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);

	cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					    HNS3_CFG_SPEED_ABILITY_M,
					    HNS3_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					HNS3_CFG_UMV_TBL_SPACE_M,
					HNS3_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;

	ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]),
					  HNS3_CFG_EXT_RSS_SIZE_M,
					  HNS3_CFG_EXT_RSS_SIZE_S);
	/*
	 * Field ext_rss_size_max obtained from the firmware is a power-of-2
	 * exponent rather than a direct value, which leaves room for future
	 * changes and expansion. If this field is non-zero, the hns3 PF PMD
	 * uses 2^ext_rss_size_max as rss_size_max under one TC. Devices
	 * whose revision id is greater than or equal to
	 * PCI_REVISION_ID_HIP09_A report the maximum number of queues
	 * supported under a TC through this field.
	 */
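	/* For example, ext_rss_size_max = 7 yields rss_size_max = 1U << 7 = 128. */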
	if (ext_rss_size_max)
		cfg->rss_size_max = 1U << ext_rss_size_max;
}

/*
 * hns3_get_board_cfg: query the static parameters from the NCL_config file
 *		       in flash
 * @hw: pointer to struct hns3_hw
 * @hcfg: the config structure to be filled
 */
static int
hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
{
	struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
	struct hns3_cfg_param_cmd *req;
	uint32_t offset;
	uint32_t i;
	int ret;

	for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
		offset = 0;
		req = (struct hns3_cfg_param_cmd *)desc[i].data;
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
					  true);
		hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
			       i * HNS3_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 when sent to hardware */
		hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
			       HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
		req->offset = rte_cpu_to_le_32(offset);
	}

	ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
	if (ret) {
		PMD_INIT_LOG(ERR, "get config failed %d.", ret);
		return ret;
	}

	hns3_parse_cfg(hcfg, desc);

	return 0;
}

static int
hns3_parse_speed(int speed_cmd, uint32_t *speed)
{
	switch (speed_cmd) {
	case HNS3_CFG_SPEED_10M:
		*speed = RTE_ETH_SPEED_NUM_10M;
		break;
	case HNS3_CFG_SPEED_100M:
		*speed = RTE_ETH_SPEED_NUM_100M;
		break;
	case HNS3_CFG_SPEED_1G:
		*speed = RTE_ETH_SPEED_NUM_1G;
		break;
	case HNS3_CFG_SPEED_10G:
		*speed = RTE_ETH_SPEED_NUM_10G;
		break;
	case HNS3_CFG_SPEED_25G:
		*speed = RTE_ETH_SPEED_NUM_25G;
		break;
	case HNS3_CFG_SPEED_40G:
		*speed = RTE_ETH_SPEED_NUM_40G;
		break;
	case HNS3_CFG_SPEED_50G:
		*speed = RTE_ETH_SPEED_NUM_50G;
		break;
	case HNS3_CFG_SPEED_100G:
		*speed = RTE_ETH_SPEED_NUM_100G;
		break;
	case HNS3_CFG_SPEED_200G:
		*speed = RTE_ETH_SPEED_NUM_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void
hns3_set_default_dev_specifications(struct hns3_hw *hw)
{
	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
	hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
}

static void
hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
	struct hns3_dev_specs_0_cmd *req0;

	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;

	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
	hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
}

static int
hns3_check_dev_specifications(struct hns3_hw *hw)
{
	if (hw->rss_ind_tbl_size == 0 ||
	    hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
		hns3_err(hw, "the size of hash lookup table configured (%u)"
			 " exceeds the maximum (%u)", hw->rss_ind_tbl_size,
			 HNS3_RSS_IND_TBL_SIZE_MAX);
		return -EINVAL;
	}

	return 0;
}

static int
hns3_query_dev_specifications(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

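	/*
	 * Every descriptor in the chain except the last one sets the NEXT
	 * flag so the firmware treats them as one multi-BD command.
	 */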
	for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
					  true);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);

	ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hns3_parse_dev_specifications(hw, desc);

	return hns3_check_dev_specifications(hw);
}

static int
hns3_get_capability(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct rte_pci_device *pci_dev;
	struct hns3_pf *pf = &hns->pf;
	struct rte_eth_dev *eth_dev;
	uint16_t device_id;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	device_id = pci_dev->id.device_id;

	if (device_id == HNS3_DEV_ID_25GE_RDMA ||
	    device_id == HNS3_DEV_ID_50GE_RDMA ||
	    device_id == HNS3_DEV_ID_100G_RDMA_MACSEC ||
	    device_id == HNS3_DEV_ID_200G_RDMA)
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);

	ret = hns3_get_pci_revision_id(hw, &hw->revision);
	if (ret)
		return ret;

	ret = hns3_query_mac_stats_reg_num(hw);
	if (ret)
		return ret;

	if (hw->revision < PCI_REVISION_ID_HIP09_A) {
		hns3_set_default_dev_specifications(hw);
		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
		hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
		hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
		hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
		pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
		hw->rss_info.ipv6_sctp_offload_supported = false;
		hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
		pf->support_multi_tc_pause = false;
		return 0;
	}

	ret = hns3_query_dev_specifications(hw);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "failed to query dev specifications, ret = %d",
			     ret);
		return ret;
	}

	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
	hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
	hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
	hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
	hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
	pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
	hw->rss_info.ipv6_sctp_offload_supported = true;
	hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE;
	pf->support_multi_tc_pause = true;

	return 0;
}

static int
hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type)
{
	int ret;

	switch (media_type) {
	case HNS3_MEDIA_TYPE_COPPER:
		if (!hns3_dev_get_support(hw, COPPER)) {
			PMD_INIT_LOG(ERR,
				     "Media type is copper, not supported.");
			ret = -EOPNOTSUPP;
		} else {
			ret = 0;
		}
		break;
	case HNS3_MEDIA_TYPE_FIBER:
		ret = 0;
		break;
	case HNS3_MEDIA_TYPE_BACKPLANE:
		PMD_INIT_LOG(ERR, "Media type is Backplane, not supported.");
		ret = -EOPNOTSUPP;
		break;
	default:
		PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
hns3_get_board_configuration(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cfg cfg;
	int ret;

	ret = hns3_get_board_cfg(hw, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "get board config failed %d", ret);
		return ret;
	}

	ret = hns3_check_media_type(hw, cfg.media_type);
	if (ret)
		return ret;

	hw->mac.media_type = cfg.media_type;
	hw->rss_size_max = cfg.rss_size_max;
	memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
	hw->mac.phy_addr = cfg.phy_addr;
	hw->dcb_info.num_pg = 1;
	hw->dcb_info.hw_pfc_map = 0;

	ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
	if (ret) {
		PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d",
			     cfg.default_speed, ret);
		return ret;
	}

	pf->tc_max = cfg.tc_num;
	if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
		PMD_INIT_LOG(WARNING,
			     "Get TC num(%u) from flash, set TC num to 1",
			     pf->tc_max);
		pf->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hns3_dev_get_support(hw, DCB)) {
		pf->tc_max = 1;
		pf->pfc_max = 0;
	} else {
		pf->pfc_max = pf->tc_max;
	}

	hw->dcb_info.num_tc = 1;
	hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
				     hw->tqps_num / hw->dcb_info.num_tc);
	hns3_set_bit(hw->hw_tc_map, 0, 1);
	pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;

	pf->wanted_umv_size = cfg.umv_space;

	return ret;
}

static int
hns3_get_configuration(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_query_function_status(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
		return ret;
	}

	/* Get device capability */
	ret = hns3_get_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
		return ret;
	}

	/* Get pf resource */
	ret = hns3_query_pf_resource(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
		return ret;
	}

	ret = hns3_get_board_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret);
		return ret;
	}

	ret = hns3_query_dev_fec_info(hw);
	if (ret)
		PMD_INIT_LOG(ERR,
			     "failed to query FEC information, ret = %d", ret);

	return ret;
}

static int
hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
		      uint16_t tqp_vid, bool is_pf)
{
	struct hns3_tqp_map_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);

	req = (struct hns3_tqp_map_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
	req->tqp_vid = rte_cpu_to_le_16(tqp_vid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "TQP map failed %d", ret);

	return ret;
}

static int
hns3_map_tqp(struct hns3_hw *hw)
{
	int ret;
	int i;

	/*
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so assign all total_tqps_num tqps allocated to
	 * this port to the PF.
	 */
	for (i = 0; i < hw->total_tqps_num; i++) {
		ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true);
		if (ret)
			return ret;
	}

	return 0;
}

static int
hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
{
	struct hns3_config_mac_speed_dup_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);

	hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
		break;
	case RTE_ETH_SPEED_NUM_100M:
		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
		break;
	case RTE_ETH_SPEED_NUM_1G:
		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
		break;
	case RTE_ETH_SPEED_NUM_10G:
		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
		break;
	case RTE_ETH_SPEED_NUM_25G:
		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
		break;
	case RTE_ETH_SPEED_NUM_40G:
		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
		break;
	case RTE_ETH_SPEED_NUM_50G:
		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
		break;
	case RTE_ETH_SPEED_NUM_100G:
		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
		break;
	case RTE_ETH_SPEED_NUM_200G:
		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
		break;
	default:
		PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
		return -EINVAL;
	}

	hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);

	return ret;
}

static int
hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_priv_buf *priv;
	uint32_t i, total_size;

	total_size = pf->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		if (hw->hw_tc_map & BIT(i)) {
			if (total_size < pf->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = pf->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static int
hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
/* Tx buffer size is configured in units of 128 bytes */
#define HNS3_BUF_SIZE_UNIT_SHIFT 7
#define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15)
	struct hns3_tx_buff_alloc_cmd *req;
	struct hns3_cmd_desc desc;
	uint32_t buf_size;
	uint32_t i;
	int ret;

	req = (struct hns3_tx_buff_alloc_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
		req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
						       HNS3_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);

	return ret;
}

static int
hns3_get_tc_num(struct hns3_hw *hw)
{
	int cnt = 0;
	uint8_t i;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++)
		if (hw->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static uint32_t
hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_priv_buf *priv;
	uint32_t rx_priv = 0;
	int i;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static uint32_t
hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
{
	uint32_t total_tx_size = 0;
	uint32_t i;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int
hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_priv_buf *priv;
	int cnt = 0;
	uint8_t i;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int
hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
			 struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_priv_buf *priv;
	int cnt = 0;
	uint8_t i;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hw->hw_tc_map & BIT(i) &&
		    !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
			cnt++;
	}

	return cnt;
}

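/*
 * Check whether rx_all (the Rx buffer space left after Tx buffers are
 * carved out) can cover all enabled per-TC private buffers plus the
 * required shared buffer. On success, also fill in the shared buffer size
 * and the per-TC high/low thresholds in buf_alloc.
 */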
static bool
hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
		  uint32_t rx_all)
{
	uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t shared_buf, aligned_mps;
	uint32_t rx_priv;
	uint8_t tc_num;
	uint8_t i;

	tc_num = hns3_get_tc_num(hw);
	aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);

	if (hns3_dev_get_support(hw, DCB))
		shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
				 pf->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
				 + pf->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc),
			     HNS3_BUF_SIZE_UNIT);

	rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hns3_dev_get_support(hw, DCB)) {
		buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
					    - roundup(aligned_mps / HNS3_BUF_DIV_BY,
						      HNS3_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high =
			aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hns3_dev_get_support(hw, DCB)) {
		hi_thrd = shared_buf - pf->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT /
				  BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static bool
hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
		     struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_priv_buf *priv;
	uint32_t aligned_mps;
	uint32_t rx_all;
	uint8_t i;

	rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hw->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		if (hw->dcb_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HNS3_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + pf->dv_buf_size;
	}

	return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}

static bool
hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
			     struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_priv_buf *priv;
	int no_pfc_priv_num;
	uint32_t rx_all;
	uint8_t mask;
	int i;

	rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);

	/* Clear TCs starting from the last one */
	for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];
		mask = BIT((uint8_t)i);
		if (hw->hw_tc_map & mask &&
		    !(hw->dcb_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}

static bool
hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
			   struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_priv_buf *priv;
	uint32_t rx_all;
	int pfc_priv_num;
	uint8_t mask;
	int i;

	rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);

	/* Clear TCs starting from the last one */
	for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];
		mask = BIT((uint8_t)i);
		if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}
		if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}

static bool
hns3_only_alloc_priv_buff(struct hns3_hw *hw,
			  struct hns3_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER 0x3C00
#define COMPENSATE_HALF_MPS_NUM 5
#define PRIV_WL_GAP 0x1800
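	/*
	 * COMPENSATE_BUFFER is 0x3C00 = 15360 bytes = 15 KB, and
	 * COMPENSATE_HALF_MPS_NUM half-MPS units amount to 2.5 * MPS, which
	 * together give the "DV + 2.5 * MPS + 15KB" minimum derived below.
	 */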
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t tc_num = hns3_get_tc_num(hw);
	uint32_t half_mps = pf->mps >> 1;
	struct hns3_priv_buf *priv;
	uint32_t min_rx_priv;
	uint32_t rx_priv;
	uint8_t i;

	rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	/*
	 * The minimum value of the private buffer in the rx direction
	 * (min_rx_priv) is "DV + 2.5 * MPS + 15KB". The driver only
	 * allocates the rx private buffer if rx_priv is not less than
	 * min_rx_priv.
	 */
	min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
		      COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
	rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hw->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - pf->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/*
 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hw: pointer to struct hns3_hw
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int
hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hns3_dev_get_support(hw, DCB)) {
		struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
		struct hns3_pf *pf = &hns->pf;
		uint32_t rx_all = pf->pkt_buf_size;

		rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
		if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/*
	 * Try to allocate a private packet buffer for all TCs without a
	 * shared buffer.
	 */
	if (hns3_only_alloc_priv_buff(hw, buf_alloc))
		return 0;

	/*
	 * Try to allocate a private packet buffer for all TCs with a shared
	 * buffer.
	 */
	if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
		return 0;

	/*
	 * The enabled port number, TC number and no_drop TC number differ
	 * between application scenarios. To obtain better performance, the
	 * software can allocate the buffer sizes and configure the
	 * waterlines by decreasing the private buffer size in the following
	 * order: waterline of valid tc, pfc disabled tc, pfc enabled tc.
	 */
	if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
		return 0;

	if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
		return 0;

	if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int
hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_rx_priv_buff_cmd *req;
	struct hns3_cmd_desc desc;
	uint32_t buf_size;
	int ret;
	int i;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hns3_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
		req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
	}

	buf_size = buf_alloc->s_buf.buf_size;
	req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
					   (1 << HNS3_TC0_PRI_BUF_EN_B));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);

	return ret;
}

static int
hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
#define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
	struct hns3_rx_priv_wl_buf *req;
	struct hns3_priv_buf *priv;
	struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
	int i, j;
	int ret;

	for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
					  false);
		req = (struct hns3_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);

		for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
			uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
							      HNS3_BUF_UNIT_S);
			req->tc_wl[j].high |=
				rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
			req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
							     HNS3_BUF_UNIT_S);
			req->tc_wl[j].low |=
				rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
	if (ret)
		PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
			     ret);
	return ret;
}
3515
3516 static int
hns3_common_thrd_config(struct hns3_hw * hw,struct hns3_pkt_buf_alloc * buf_alloc)3517 hns3_common_thrd_config(struct hns3_hw *hw,
3518 struct hns3_pkt_buf_alloc *buf_alloc)
3519 {
3520 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
3521 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
3522 struct hns3_rx_com_thrd *req;
3523 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
3524 struct hns3_tc_thrd *tc;
3525 int tc_idx;
3526 int i, j;
3527 int ret;
3528
3529 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
3530 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
3531 false);
3532 req = (struct hns3_rx_com_thrd *)&desc[i].data;
3533
3534 /* The first descriptor set the NEXT bit to 1 */
3535 if (i == 0)
3536 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3537 else
3538 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3539
3540 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3541 tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
3542 tc = &s_buf->tc_thrd[tc_idx];
3543
3544 req->com_thrd[j].high =
3545 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
3546 req->com_thrd[j].high |=
3547 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3548 req->com_thrd[j].low =
3549 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
3550 req->com_thrd[j].low |=
3551 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3552 }
3553 }
3554
3555 /* Send 2 descriptors at one time */
3556 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
3557 if (ret)
3558 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);
3559
3560 return ret;
3561 }
3562
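/*
 * Program the high/low waterlines of the shared (common) Rx buffer
 * itself, using a single command descriptor.
 */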
3563 static int
3564 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3565 {
3566 struct hns3_shared_buf *buf = &buf_alloc->s_buf;
3567 struct hns3_rx_com_wl *req;
3568 struct hns3_cmd_desc desc;
3569 int ret;
3570
3571 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
3572
3573 req = (struct hns3_rx_com_wl *)desc.data;
3574 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
3575 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3576
3577 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
3578 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3579
3580 ret = hns3_cmd_send(hw, &desc, 1);
3581 if (ret)
3582 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
3583
3584 return ret;
3585 }
3586
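/*
 * Calculate and commit the whole packet buffer layout: Tx buffer sizes,
 * Rx private buffer sizes and, when DCB is supported, the private
 * waterlines and common thresholds. The common waterline is always
 * configured last. Any failure is logged and returned to the caller.
 */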
3587 int
3588 hns3_buffer_alloc(struct hns3_hw *hw)
3589 {
3590 struct hns3_pkt_buf_alloc pkt_buf;
3591 int ret;
3592
3593 memset(&pkt_buf, 0, sizeof(pkt_buf));
3594 ret = hns3_tx_buffer_calc(hw, &pkt_buf);
3595 if (ret) {
3596 PMD_INIT_LOG(ERR,
3597 "could not calc tx buffer size for all TCs %d",
3598 ret);
3599 return ret;
3600 }
3601
3602 ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
3603 if (ret) {
3604 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
3605 return ret;
3606 }
3607
3608 ret = hns3_rx_buffer_calc(hw, &pkt_buf);
3609 if (ret) {
3610 PMD_INIT_LOG(ERR,
3611 "could not calc rx priv buffer size for all TCs %d",
3612 ret);
3613 return ret;
3614 }
3615
3616 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
3617 if (ret) {
3618 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
3619 return ret;
3620 }
3621
3622 if (hns3_dev_get_support(hw, DCB)) {
3623 ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
3624 if (ret) {
3625 PMD_INIT_LOG(ERR,
3626 "could not configure rx private waterline %d",
3627 ret);
3628 return ret;
3629 }
3630
3631 ret = hns3_common_thrd_config(hw, &pkt_buf);
3632 if (ret) {
3633 PMD_INIT_LOG(ERR,
3634 "could not configure common threshold %d",
3635 ret);
3636 return ret;
3637 }
3638 }
3639
3640 ret = hns3_common_wl_config(hw, &pkt_buf);
3641 if (ret)
3642 PMD_INIT_LOG(ERR, "could not configure common waterline %d",
3643 ret);
3644
3645 return ret;
3646 }
3647
3648 static int
3649 hns3_mac_init(struct hns3_hw *hw)
3650 {
3651 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3652 struct hns3_mac *mac = &hw->mac;
3653 struct hns3_pf *pf = &hns->pf;
3654 int ret;
3655
3656 pf->support_sfp_query = true;
3657 mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3658 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
3659 if (ret) {
3660 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
3661 return ret;
3662 }
3663
3664 mac->link_status = RTE_ETH_LINK_DOWN;
3665
3666 return hns3_config_mtu(hw, pf->mps);
3667 }
3668
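/*
 * Translate the firmware response of a MAC ethertype add command into
 * an errno: success and "already added" both map to 0, while table
 * overflow, key conflict and unknown codes map to -EIO.
 */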
3669 static int
3670 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
3671 {
3672 #define HNS3_ETHERTYPE_SUCCESS_ADD 0
3673 #define HNS3_ETHERTYPE_ALREADY_ADD 1
3674 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2
3675 #define HNS3_ETHERTYPE_KEY_CONFLICT 3
3676 int return_status;
3677
3678 if (cmdq_resp) {
3679 PMD_INIT_LOG(ERR,
3680 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
3681 cmdq_resp);
3682 return -EIO;
3683 }
3684
3685 switch (resp_code) {
3686 case HNS3_ETHERTYPE_SUCCESS_ADD:
3687 case HNS3_ETHERTYPE_ALREADY_ADD:
3688 return_status = 0;
3689 break;
3690 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
3691 PMD_INIT_LOG(ERR,
3692 "add mac ethertype failed for manager table overflow.");
3693 return_status = -EIO;
3694 break;
3695 case HNS3_ETHERTYPE_KEY_CONFLICT:
3696 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
3697 return_status = -EIO;
3698 break;
3699 default:
3700 PMD_INIT_LOG(ERR,
3701 "add mac ethertype failed for undefined, code=%u.",
3702 resp_code);
3703 return_status = -EIO;
3704 break;
3705 }
3706
3707 return return_status;
3708 }
3709
3710 static int
3711 hns3_add_mgr_tbl(struct hns3_hw *hw,
3712 const struct hns3_mac_mgr_tbl_entry_cmd *req)
3713 {
3714 struct hns3_cmd_desc desc;
3715 uint8_t resp_code;
3716 uint16_t retval;
3717 int ret;
3718
3719 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
3720 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
3721
3722 ret = hns3_cmd_send(hw, &desc, 1);
3723 if (ret) {
3724 PMD_INIT_LOG(ERR,
3725 "add mac ethertype failed for cmd_send, ret =%d.",
3726 ret);
3727 return ret;
3728 }
3729
3730 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
3731 retval = rte_le_to_cpu_16(desc.retval);
3732
3733 return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
3734 }
3735
3736 static void
3737 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
3738 int *table_item_num)
3739 {
3740 struct hns3_mac_mgr_tbl_entry_cmd *tbl;
3741
3742 /*
3743 * In the current version, we add one item to the management table:
3744 * 0x0180C200000E -- the LLDP MC address
3745 */
3746 tbl = mgr_table;
3747 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
3748 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
3749 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
3750 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
3751 tbl->i_port_bitmap = 0x1;
3752 *table_item_num = 1;
3753 }
3754
3755 static int
3756 hns3_init_mgr_tbl(struct hns3_hw *hw)
3757 {
3758 #define HNS_MAC_MGR_TBL_MAX_SIZE 16
3759 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
3760 int table_item_num;
3761 int ret;
3762 int i;
3763
3764 memset(mgr_table, 0, sizeof(mgr_table));
3765 hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
3766 for (i = 0; i < table_item_num; i++) {
3767 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
3768 if (ret) {
3769 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
3770 ret);
3771 return ret;
3772 }
3773 }
3774
3775 return 0;
3776 }
3777
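/*
 * Fill a promiscuous-mode parameter block: set the requested unicast,
 * multicast and broadcast enable bits for the given vport (function id).
 */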
3778 static void
3779 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
3780 bool en_mc, bool en_bc, int vport_id)
3781 {
3782 if (!param)
3783 return;
3784
3785 memset(param, 0, sizeof(struct hns3_promisc_param));
3786 if (en_uc)
3787 param->enable = HNS3_PROMISC_EN_UC;
3788 if (en_mc)
3789 param->enable |= HNS3_PROMISC_EN_MC;
3790 if (en_bc)
3791 param->enable |= HNS3_PROMISC_EN_BC;
3792 param->vf_id = vport_id;
3793 }
3794
3795 static int
3796 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
3797 {
3798 struct hns3_promisc_cfg_cmd *req;
3799 struct hns3_cmd_desc desc;
3800 int ret;
3801
3802 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
3803
3804 req = (struct hns3_promisc_cfg_cmd *)desc.data;
3805 req->vf_id = param->vf_id;
3806 req->flag = (param->enable << HNS3_PROMISC_EN_B) |
3807 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
3808
3809 ret = hns3_cmd_send(hw, &desc, 1);
3810 if (ret)
3811 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);
3812
3813 return ret;
3814 }
3815
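/*
 * Configure unicast/multicast promiscuous mode for the PF vport;
 * broadcast is always kept enabled. At the ethdev level this is reached
 * through the generic API, e.g. (a minimal usage sketch from the
 * application's point of view):
 *
 *	ret = rte_eth_promiscuous_enable(port_id);
 *	if (ret != 0)
 *		printf("enable promiscuous failed: %s\n", rte_strerror(-ret));
 */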
3816 static int
3817 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
3818 {
3819 struct hns3_promisc_param param;
3820 bool en_bc_pmc = true;
3821 uint8_t vf_id;
3822
3823 /*
3824 * In the current version, VFs are not supported when the PF is driven by
3825 * the DPDK driver; we only need to configure parameters for the PF vport.
3826 */
3827 vf_id = HNS3_PF_FUNC_ID;
3828
3829 hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
3830 return hns3_cmd_set_promisc_mode(hw, &param);
3831 }
3832
3833 static int
3834 hns3_promisc_init(struct hns3_hw *hw)
3835 {
3836 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3837 struct hns3_pf *pf = &hns->pf;
3838 struct hns3_promisc_param param;
3839 uint16_t func_id;
3840 int ret;
3841
3842 ret = hns3_set_promisc_mode(hw, false, false);
3843 if (ret) {
3844 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret);
3845 return ret;
3846 }
3847
3848 /*
3849 * In the current version, VFs are not supported when the PF is driven
3850 * by the DPDK driver. After the PF has been taken over by DPDK, the
3851 * original VFs become invalid, so residual entries may remain. The
3852 * driver should clear the VFs' promisc mode to avoid unnecessary
3853 * bandwidth usage during init.
3854 */
3855 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) {
3856 hns3_promisc_param_init(&param, false, false, false, func_id);
3857 ret = hns3_cmd_set_promisc_mode(hw, &param);
3858 if (ret) {
3859 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
3860 " ret = %d", func_id, ret);
3861 return ret;
3862 }
3863 }
3864
3865 return 0;
3866 }
3867
3868 static void
3869 hns3_promisc_uninit(struct hns3_hw *hw)
3870 {
3871 struct hns3_promisc_param param;
3872 uint16_t func_id;
3873 int ret;
3874
3875 func_id = HNS3_PF_FUNC_ID;
3876
3877 /*
3878 * In the current version, VFs are not supported when the PF is driven
3879 * by the DPDK driver. The VFs' promisc mode status was cleared during
3880 * init and will not change, so only the PF's promisc mode status needs
3881 * to be cleared during uninit.
3882 */
3883 hns3_promisc_param_init(&param, false, false, false, func_id);
3884 ret = hns3_cmd_set_promisc_mode(hw, &param);
3885 if (ret)
3886 PMD_INIT_LOG(ERR, "failed to clear promisc status during"
3887 " uninit, ret = %d", ret);
3888 }
3889
3890 static int
3891 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
3892 {
3893 bool allmulti = dev->data->all_multicast ? true : false;
3894 struct hns3_adapter *hns = dev->data->dev_private;
3895 struct hns3_hw *hw = &hns->hw;
3896 uint64_t offloads;
3897 int err;
3898 int ret;
3899
3900 rte_spinlock_lock(&hw->lock);
3901 ret = hns3_set_promisc_mode(hw, true, true);
3902 if (ret) {
3903 rte_spinlock_unlock(&hw->lock);
3904 hns3_err(hw, "failed to enable promiscuous mode, ret = %d",
3905 ret);
3906 return ret;
3907 }
3908
3909 /*
3910 * When promiscuous mode is enabled, disable the vlan filter so that
3911 * all incoming packets are accepted.
3912 */
3913 offloads = dev->data->dev_conf.rxmode.offloads;
3914 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
3915 ret = hns3_enable_vlan_filter(hns, false);
3916 if (ret) {
3917 hns3_err(hw, "failed to enable promiscuous mode due to "
3918 "failure to disable vlan filter, ret = %d",
3919 ret);
3920 err = hns3_set_promisc_mode(hw, false, allmulti);
3921 if (err)
3922 hns3_err(hw, "failed to restore promiscuous "
3923 "status after disable vlan filter "
3924 "failed during enabling promiscuous "
3925 "mode, ret = %d", ret);
3926 }
3927 }
3928
3929 rte_spinlock_unlock(&hw->lock);
3930
3931 return ret;
3932 }
3933
3934 static int
3935 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
3936 {
3937 bool allmulti = dev->data->all_multicast ? true : false;
3938 struct hns3_adapter *hns = dev->data->dev_private;
3939 struct hns3_hw *hw = &hns->hw;
3940 uint64_t offloads;
3941 int err;
3942 int ret;
3943
3944 /* If now in all_multicast mode, must remain in all_multicast mode. */
3945 rte_spinlock_lock(&hw->lock);
3946 ret = hns3_set_promisc_mode(hw, false, allmulti);
3947 if (ret) {
3948 rte_spinlock_unlock(&hw->lock);
3949 hns3_err(hw, "failed to disable promiscuous mode, ret = %d",
3950 ret);
3951 return ret;
3952 }
3953 /* When promiscuous mode is disabled, restore the vlan filter status. */
3954 offloads = dev->data->dev_conf.rxmode.offloads;
3955 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
3956 ret = hns3_enable_vlan_filter(hns, true);
3957 if (ret) {
3958 hns3_err(hw, "failed to disable promiscuous mode due to"
3959 " failure to restore vlan filter, ret = %d",
3960 ret);
3961 err = hns3_set_promisc_mode(hw, true, true);
3962 if (err)
3963 hns3_err(hw, "failed to restore promiscuous "
3964 "status after enabling vlan filter "
3965 "failed during disabling promiscuous "
3966 "mode, ret = %d", ret);
3967 }
3968 }
3969 rte_spinlock_unlock(&hw->lock);
3970
3971 return ret;
3972 }
3973
3974 static int
3975 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
3976 {
3977 struct hns3_adapter *hns = dev->data->dev_private;
3978 struct hns3_hw *hw = &hns->hw;
3979 int ret;
3980
3981 if (dev->data->promiscuous)
3982 return 0;
3983
3984 rte_spinlock_lock(&hw->lock);
3985 ret = hns3_set_promisc_mode(hw, false, true);
3986 rte_spinlock_unlock(&hw->lock);
3987 if (ret)
3988 hns3_err(hw, "failed to enable allmulticast mode, ret = %d",
3989 ret);
3990
3991 return ret;
3992 }
3993
3994 static int
3995 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
3996 {
3997 struct hns3_adapter *hns = dev->data->dev_private;
3998 struct hns3_hw *hw = &hns->hw;
3999 int ret;
4000
4001 /* If now in promiscuous mode, must remain in all_multicast mode. */
4002 if (dev->data->promiscuous)
4003 return 0;
4004
4005 rte_spinlock_lock(&hw->lock);
4006 ret = hns3_set_promisc_mode(hw, false, false);
4007 rte_spinlock_unlock(&hw->lock);
4008 if (ret)
4009 hns3_err(hw, "failed to disable allmulticast mode, ret = %d",
4010 ret);
4011
4012 return ret;
4013 }
4014
4015 static int
4016 hns3_dev_promisc_restore(struct hns3_adapter *hns)
4017 {
4018 struct hns3_hw *hw = &hns->hw;
4019 bool allmulti = hw->data->all_multicast ? true : false;
4020 int ret;
4021
4022 if (hw->data->promiscuous) {
4023 ret = hns3_set_promisc_mode(hw, true, true);
4024 if (ret)
4025 hns3_err(hw, "failed to restore promiscuous mode, "
4026 "ret = %d", ret);
4027 return ret;
4028 }
4029
4030 ret = hns3_set_promisc_mode(hw, false, allmulti);
4031 if (ret)
4032 hns3_err(hw, "failed to restore allmulticast mode, ret = %d",
4033 ret);
4034 return ret;
4035 }
4036
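/*
 * Actively query SFP information from firmware. New firmware also
 * reports the supported speeds and auto-negotiation ability; old
 * firmware only reports the current SFP speed, which is reflected in
 * mac_info->query_type.
 */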
4037 static int
4038 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
4039 {
4040 struct hns3_sfp_info_cmd *resp;
4041 struct hns3_cmd_desc desc;
4042 int ret;
4043
4044 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
4045 resp = (struct hns3_sfp_info_cmd *)desc.data;
4046 resp->query_type = HNS3_ACTIVE_QUERY;
4047
4048 ret = hns3_cmd_send(hw, &desc, 1);
4049 if (ret == -EOPNOTSUPP) {
4050 hns3_warn(hw, "firmware does not support get SFP info,"
4051 " ret = %d.", ret);
4052 return ret;
4053 } else if (ret) {
4054 hns3_err(hw, "get sfp info failed, ret = %d.", ret);
4055 return ret;
4056 }
4057
4058 /*
4059 * In some cases, the MAC speed obtained from firmware may be 0; it
4060 * should not be written to mac->link_speed.
4061 */
4062 if (!rte_le_to_cpu_32(resp->sfp_speed))
4063 return 0;
4064
4065 mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed);
4066 /*
4067 * If resp->supported_speed is 0, the firmware is an old version that
4068 * does not report these parameters; do not update them.
4069 */
4070 if (resp->supported_speed) {
4071 mac_info->query_type = HNS3_ACTIVE_QUERY;
4072 mac_info->supported_speed =
4073 rte_le_to_cpu_32(resp->supported_speed);
4074 mac_info->support_autoneg = resp->autoneg_ability;
4075 mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
4076 : RTE_ETH_LINK_AUTONEG;
4077 } else {
4078 mac_info->query_type = HNS3_DEFAULT_QUERY;
4079 }
4080
4081 return 0;
4082 }
4083
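/*
 * Half duplex is only meaningful for 10M/100M links; force full duplex
 * for all other speeds.
 */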
4084 static uint8_t
4085 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
4086 {
4087 if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
4088 duplex = RTE_ETH_LINK_FULL_DUPLEX;
4089
4090 return duplex;
4091 }
4092
4093 static int
4094 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
4095 {
4096 struct hns3_mac *mac = &hw->mac;
4097 int ret;
4098
4099 duplex = hns3_check_speed_dup(duplex, speed);
4100 if (mac->link_speed == speed && mac->link_duplex == duplex)
4101 return 0;
4102
4103 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
4104 if (ret)
4105 return ret;
4106
4107 ret = hns3_port_shaper_update(hw, speed);
4108 if (ret)
4109 return ret;
4110
4111 mac->link_speed = speed;
4112 mac->link_duplex = duplex;
4113
4114 return 0;
4115 }
4116
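/*
 * Refresh link information for fiber ports from the SFP. Firmware that
 * does not support the query is detected once, after which further
 * queries are skipped via pf->support_sfp_query.
 */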
4117 static int
4118 hns3_update_fiber_link_info(struct hns3_hw *hw)
4119 {
4120 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
4121 struct hns3_mac *mac = &hw->mac;
4122 struct hns3_mac mac_info;
4123 int ret;
4124
4125 /* If the firmware does not support querying SFP/qSFP speed, return directly */
4126 if (!pf->support_sfp_query)
4127 return 0;
4128
4129 memset(&mac_info, 0, sizeof(struct hns3_mac));
4130 ret = hns3_get_sfp_info(hw, &mac_info);
4131 if (ret == -EOPNOTSUPP) {
4132 pf->support_sfp_query = false;
4133 return ret;
4134 } else if (ret)
4135 return ret;
4136
4137 /* Do nothing if no SFP */
4138 if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
4139 return 0;
4140
4141 /*
4142 * If query_type is HNS3_ACTIVE_QUERY, there is no need to
4143 * reconfigure the MAC speed. Otherwise, it indicates that the
4144 * current firmware only supports obtaining the SFP speed, and
4145 * the MAC speed needs to be reconfigured.
4146 */
4147 mac->query_type = mac_info.query_type;
4148 if (mac->query_type == HNS3_ACTIVE_QUERY) {
4149 if (mac_info.link_speed != mac->link_speed) {
4150 ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4151 if (ret)
4152 return ret;
4153 }
4154
4155 mac->link_speed = mac_info.link_speed;
4156 mac->supported_speed = mac_info.supported_speed;
4157 mac->support_autoneg = mac_info.support_autoneg;
4158 mac->link_autoneg = mac_info.link_autoneg;
4159
4160 return 0;
4161 }
4162
4163 /* Config full duplex for SFP */
4164 return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
4165 RTE_ETH_LINK_FULL_DUPLEX);
4166 }
4167
4168 static void
4169 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
4170 {
4171 #define HNS3_PHY_SUPPORTED_SPEED_MASK 0x2f
4172
4173 struct hns3_phy_params_bd0_cmd *req;
4174 uint32_t supported;
4175
4176 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
4177 mac->link_speed = rte_le_to_cpu_32(req->speed);
4178 mac->link_duplex = hns3_get_bit(req->duplex,
4179 HNS3_PHY_DUPLEX_CFG_B);
4180 mac->link_autoneg = hns3_get_bit(req->autoneg,
4181 HNS3_PHY_AUTONEG_CFG_B);
4182 mac->advertising = rte_le_to_cpu_32(req->advertising);
4183 mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
4184 supported = rte_le_to_cpu_32(req->supported);
4185 mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK;
4186 mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT);
4187 }
4188
4189 static int
4190 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
4191 {
4192 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
4193 uint16_t i;
4194 int ret;
4195
4196 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
4197 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
4198 true);
4199 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4200 }
4201 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);
4202
4203 ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
4204 if (ret) {
4205 hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
4206 return ret;
4207 }
4208
4209 hns3_parse_copper_phy_params(desc, mac);
4210
4211 return 0;
4212 }
4213
4214 static int
4215 hns3_update_copper_link_info(struct hns3_hw *hw)
4216 {
4217 struct hns3_mac *mac = &hw->mac;
4218 struct hns3_mac mac_info;
4219 int ret;
4220
4221 memset(&mac_info, 0, sizeof(struct hns3_mac));
4222 ret = hns3_get_copper_phy_params(hw, &mac_info);
4223 if (ret)
4224 return ret;
4225
4226 if (mac_info.link_speed != mac->link_speed) {
4227 ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4228 if (ret)
4229 return ret;
4230 }
4231
4232 mac->link_speed = mac_info.link_speed;
4233 mac->link_duplex = mac_info.link_duplex;
4234 mac->link_autoneg = mac_info.link_autoneg;
4235 mac->supported_speed = mac_info.supported_speed;
4236 mac->advertising = mac_info.advertising;
4237 mac->lp_advertising = mac_info.lp_advertising;
4238 mac->support_autoneg = mac_info.support_autoneg;
4239
4240 return 0;
4241 }
4242
4243 static int
4244 hns3_update_link_info(struct rte_eth_dev *eth_dev)
4245 {
4246 struct hns3_adapter *hns = eth_dev->data->dev_private;
4247 struct hns3_hw *hw = &hns->hw;
4248 int ret = 0;
4249
4250 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
4251 ret = hns3_update_copper_link_info(hw);
4252 else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
4253 ret = hns3_update_fiber_link_info(hw);
4254
4255 return ret;
4256 }
4257
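/*
 * Enable or disable the MAC: Tx/Rx paths, padding, FCS insertion and
 * checking, and oversize truncation. CRC stripping follows the KEEP_CRC
 * Rx offload; loopback and 1588 bits are always left disabled here.
 */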
4258 static int
4259 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
4260 {
4261 struct hns3_config_mac_mode_cmd *req;
4262 struct hns3_cmd_desc desc;
4263 uint32_t loop_en = 0;
4264 uint8_t val = 0;
4265 int ret;
4266
4267 req = (struct hns3_config_mac_mode_cmd *)desc.data;
4268
4269 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
4270 if (enable)
4271 val = 1;
4272 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
4273 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
4274 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
4275 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
4276 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
4277 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
4278 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
4279 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
4280 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
4281 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
4282
4283 /*
4284 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
4285 * when receiving frames. Otherwise, CRC will be stripped.
4286 */
4287 if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
4288 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
4289 else
4290 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
4291 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
4292 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
4293 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
4294 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);
4295
4296 ret = hns3_cmd_send(hw, &desc, 1);
4297 if (ret)
4298 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);
4299
4300 return ret;
4301 }
4302
4303 static int
4304 hns3_get_mac_link_status(struct hns3_hw *hw)
4305 {
4306 struct hns3_link_status_cmd *req;
4307 struct hns3_cmd_desc desc;
4308 int link_status;
4309 int ret;
4310
4311 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true);
4312 ret = hns3_cmd_send(hw, &desc, 1);
4313 if (ret) {
4314 hns3_err(hw, "get link status cmd failed %d", ret);
4315 return RTE_ETH_LINK_DOWN;
4316 }
4317
4318 req = (struct hns3_link_status_cmd *)desc.data;
4319 link_status = req->status & HNS3_LINK_STATUS_UP_M;
4320
4321 return !!link_status;
4322 }
4323
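/*
 * Poll the MAC link status and cache it. Return true when the status
 * has changed so that the caller can propagate a link state event.
 */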
4324 static bool
4325 hns3_update_link_status(struct hns3_hw *hw)
4326 {
4327 int state;
4328
4329 state = hns3_get_mac_link_status(hw);
4330 if (state != hw->mac.link_status) {
4331 hw->mac.link_status = state;
4332 hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
4333 return true;
4334 }
4335
4336 return false;
4337 }
4338
4339 void
4340 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query)
4341 {
4342 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
4343 struct rte_eth_link new_link;
4344 int ret;
4345
4346 if (query)
4347 hns3_update_port_link_info(dev);
4348
4349 memset(&new_link, 0, sizeof(new_link));
4350 hns3_setup_linkstatus(dev, &new_link);
4351
4352 ret = rte_eth_linkstatus_set(dev, &new_link);
4353 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
4354 hns3_start_report_lse(dev);
4355 }
4356
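/*
 * Periodic service task: refresh link status and hardware statistics
 * every HNS3_SERVICE_INTERVAL microseconds unless a reset is pending,
 * then re-arm itself through the EAL alarm API.
 */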
4357 static void
4358 hns3_service_handler(void *param)
4359 {
4360 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
4361 struct hns3_adapter *hns = eth_dev->data->dev_private;
4362 struct hns3_hw *hw = &hns->hw;
4363
4364 if (!hns3_is_reset_pending(hns)) {
4365 hns3_update_linkstatus_and_event(hw, true);
4366 hns3_update_hw_stats(hw);
4367 } else {
4368 hns3_warn(hw, "Cancel the query when reset is pending");
4369 }
4370
4371 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
4372 }
4373
4374 static int
4375 hns3_init_hardware(struct hns3_adapter *hns)
4376 {
4377 struct hns3_hw *hw = &hns->hw;
4378 int ret;
4379
4380 /*
4381 * All queue-related HW operations must be performed after the TCAM
4382 * table is configured.
4383 */
4384 ret = hns3_map_tqp(hw);
4385 if (ret) {
4386 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
4387 return ret;
4388 }
4389
4390 ret = hns3_init_umv_space(hw);
4391 if (ret) {
4392 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
4393 return ret;
4394 }
4395
4396 ret = hns3_mac_init(hw);
4397 if (ret) {
4398 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
4399 goto err_mac_init;
4400 }
4401
4402 ret = hns3_init_mgr_tbl(hw);
4403 if (ret) {
4404 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
4405 goto err_mac_init;
4406 }
4407
4408 ret = hns3_promisc_init(hw);
4409 if (ret) {
4410 PMD_INIT_LOG(ERR, "Failed to init promisc: %d",
4411 ret);
4412 goto err_mac_init;
4413 }
4414
4415 ret = hns3_init_vlan_config(hns);
4416 if (ret) {
4417 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
4418 goto err_mac_init;
4419 }
4420
4421 ret = hns3_dcb_init(hw);
4422 if (ret) {
4423 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
4424 goto err_mac_init;
4425 }
4426
4427 ret = hns3_init_fd_config(hns);
4428 if (ret) {
4429 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
4430 goto err_mac_init;
4431 }
4432
4433 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
4434 if (ret) {
4435 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
4436 goto err_mac_init;
4437 }
4438
4439 ret = hns3_config_gro(hw, false);
4440 if (ret) {
4441 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
4442 goto err_mac_init;
4443 }
4444
4445 /*
4446 * During initialization, all hardware mappings between queues and
4447 * interrupt vectors must be cleared, so that errors caused by
4448 * residual configurations, such as unexpected interrupts, can be
4449 * avoided.
4450 */
4451 ret = hns3_init_ring_with_vector(hw);
4452 if (ret) {
4453 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
4454 goto err_mac_init;
4455 }
4456
4457 return 0;
4458
4459 err_mac_init:
4460 hns3_uninit_umv_space(hw);
4461 return ret;
4462 }
4463
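/*
 * Ask firmware to clear residual hardware state left by a previous
 * driver instance. Firmware that does not implement this opcode returns
 * -EOPNOTSUPP, which is tolerated.
 */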
4464 static int
4465 hns3_clear_hw(struct hns3_hw *hw)
4466 {
4467 struct hns3_cmd_desc desc;
4468 int ret;
4469
4470 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false);
4471
4472 ret = hns3_cmd_send(hw, &desc, 1);
4473 if (ret && ret != -EOPNOTSUPP)
4474 return ret;
4475
4476 return 0;
4477 }
4478
4479 static void
4480 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
4481 {
4482 uint32_t val;
4483
4484 /*
4485 * The new firmware supports reporting more hardware error types via
4486 * MSI-X mode. These errors are defined as RAS errors in hardware
4487 * and belong to a different type from the MSI-X errors processed
4488 * by the network driver.
4489 *
4490 * The network driver should enable this error reporting at init time.
4491 */
4492 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
4493 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
4494 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
4495 }
4496
4497 static uint32_t
4498 hns3_set_fiber_default_support_speed(struct hns3_hw *hw)
4499 {
4500 struct hns3_mac *mac = &hw->mac;
4501
4502 switch (mac->link_speed) {
4503 case RTE_ETH_SPEED_NUM_1G:
4504 return HNS3_FIBER_LINK_SPEED_1G_BIT;
4505 case RTE_ETH_SPEED_NUM_10G:
4506 return HNS3_FIBER_LINK_SPEED_10G_BIT;
4507 case RTE_ETH_SPEED_NUM_25G:
4508 return HNS3_FIBER_LINK_SPEED_25G_BIT;
4509 case RTE_ETH_SPEED_NUM_40G:
4510 return HNS3_FIBER_LINK_SPEED_40G_BIT;
4511 case RTE_ETH_SPEED_NUM_50G:
4512 return HNS3_FIBER_LINK_SPEED_50G_BIT;
4513 case RTE_ETH_SPEED_NUM_100G:
4514 return HNS3_FIBER_LINK_SPEED_100G_BIT;
4515 case RTE_ETH_SPEED_NUM_200G:
4516 return HNS3_FIBER_LINK_SPEED_200G_BIT;
4517 default:
4518 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
4519 return 0;
4520 }
4521 }
4522
4523 /*
4524 * The validity of supported_speed for fiber and copper media types is
4525 * guaranteed by the following policy:
4526 * Copper:
4527 * Although the initialization of the phy in the firmware may not be
4528 * completed, the firmware can guarantee that supported_speed is a
4529 * valid value.
4530 * Fiber:
4531 * If the firmware version supports the active query mode of the
4532 * HNS3_OPC_GET_SFP_INFO opcode, supported_speed can be obtained
4533 * through it. If unsupported, the SFP's speed is used as the value
4534 * of supported_speed.
4535 */
4536 static int
4537 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)
4538 {
4539 struct hns3_adapter *hns = eth_dev->data->dev_private;
4540 struct hns3_hw *hw = &hns->hw;
4541 struct hns3_mac *mac = &hw->mac;
4542 int ret;
4543
4544 ret = hns3_update_link_info(eth_dev);
4545 if (ret)
4546 return ret;
4547
4548 if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) {
4549 /*
4550 * Some firmware versions do not support reporting supported_speed
4551 * and only report the effective speed of the SFP. In this case, the
4552 * SFP's speed must be used as the supported_speed.
4553 */
4554 if (mac->supported_speed == 0)
4555 mac->supported_speed =
4556 hns3_set_fiber_default_support_speed(hw);
4557 }
4558
4559 return 0;
4560 }
4561
4562 static void
4563 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns)
4564 {
4565 struct hns3_mac *mac = &hns->hw.mac;
4566
4567 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) {
4568 hns->pf.support_fc_autoneg = true;
4569 return;
4570 }
4571
4572 /*
4573 * Flow control auto-negotiation requires the cooperation of the driver
4574 * and firmware. Currently, the optical port does not support flow
4575 * control auto-negotiation.
4576 */
4577 hns->pf.support_fc_autoneg = false;
4578 }
4579
4580 static int
4581 hns3_init_pf(struct rte_eth_dev *eth_dev)
4582 {
4583 struct rte_device *dev = eth_dev->device;
4584 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4585 struct hns3_adapter *hns = eth_dev->data->dev_private;
4586 struct hns3_hw *hw = &hns->hw;
4587 int ret;
4588
4589 PMD_INIT_FUNC_TRACE();
4590
4591 /* Get hardware io base address from pcie BAR2 IO space */
4592 hw->io_base = pci_dev->mem_resource[2].addr;
4593
4594 /* Firmware command queue initialize */
4595 ret = hns3_cmd_init_queue(hw);
4596 if (ret) {
4597 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
4598 goto err_cmd_init_queue;
4599 }
4600
4601 hns3_clear_all_event_cause(hw);
4602
4603 /* Firmware command initialize */
4604 ret = hns3_cmd_init(hw);
4605 if (ret) {
4606 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
4607 goto err_cmd_init;
4608 }
4609
4610 hns3_tx_push_init(eth_dev);
4611
4612 /*
4613 * To ensure that the hardware environment is clean during
4614 * initialization, the driver actively clears the hardware environment
4615 * during initialization, including PF and corresponding VFs' vlan, mac,
4616 * flow table configurations, etc.
4617 */
4618 ret = hns3_clear_hw(hw);
4619 if (ret) {
4620 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret);
4621 goto err_cmd_init;
4622 }
4623
4624 hns3_config_all_msix_error(hw, true);
4625
4626 ret = rte_intr_callback_register(pci_dev->intr_handle,
4627 hns3_interrupt_handler,
4628 eth_dev);
4629 if (ret) {
4630 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
4631 goto err_intr_callback_register;
4632 }
4633
4634 ret = hns3_ptp_init(hw);
4635 if (ret)
4636 goto err_get_config;
4637
4638 /* Enable interrupt */
4639 rte_intr_enable(pci_dev->intr_handle);
4640 hns3_pf_enable_irq0(hw);
4641
4642 /* Get configuration */
4643 ret = hns3_get_configuration(hw);
4644 if (ret) {
4645 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
4646 goto err_get_config;
4647 }
4648
4649 ret = hns3_stats_init(hw);
4650 if (ret)
4651 goto err_get_config;
4652
4653 ret = hns3_init_hardware(hns);
4654 if (ret) {
4655 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
4656 goto err_init_hw;
4657 }
4658
4659 /* Initialize flow director filter list & hash */
4660 ret = hns3_fdir_filter_init(hns);
4661 if (ret) {
4662 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
4663 goto err_fdir;
4664 }
4665
4666 hns3_rss_set_default_args(hw);
4667
4668 ret = hns3_enable_hw_error_intr(hns, true);
4669 if (ret) {
4670 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
4671 ret);
4672 goto err_enable_intr;
4673 }
4674
4675 ret = hns3_get_port_supported_speed(eth_dev);
4676 if (ret) {
4677 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported "
4678 "by device, ret = %d.", ret);
4679 goto err_supported_speed;
4680 }
4681
4682 hns3_get_fc_autoneg_capability(hns);
4683
4684 hns3_tm_conf_init(eth_dev);
4685
4686 return 0;
4687
4688 err_supported_speed:
4689 (void)hns3_enable_hw_error_intr(hns, false);
4690 err_enable_intr:
4691 hns3_fdir_filter_uninit(hns);
4692 err_fdir:
4693 hns3_uninit_umv_space(hw);
4694 err_init_hw:
4695 hns3_stats_uninit(hw);
4696 err_get_config:
4697 hns3_pf_disable_irq0(hw);
4698 rte_intr_disable(pci_dev->intr_handle);
4699 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
4700 eth_dev);
4701 err_intr_callback_register:
4702 err_cmd_init:
4703 hns3_cmd_uninit(hw);
4704 hns3_cmd_destroy_queue(hw);
4705 err_cmd_init_queue:
4706 hw->io_base = NULL;
4707
4708 return ret;
4709 }
4710
4711 static void
4712 hns3_uninit_pf(struct rte_eth_dev *eth_dev)
4713 {
4714 struct hns3_adapter *hns = eth_dev->data->dev_private;
4715 struct rte_device *dev = eth_dev->device;
4716 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4717 struct hns3_hw *hw = &hns->hw;
4718
4719 PMD_INIT_FUNC_TRACE();
4720
4721 hns3_tm_conf_uninit(eth_dev);
4722 hns3_enable_hw_error_intr(hns, false);
4723 hns3_rss_uninit(hns);
4724 (void)hns3_config_gro(hw, false);
4725 hns3_promisc_uninit(hw);
4726 hns3_flow_uninit(eth_dev);
4727 hns3_fdir_filter_uninit(hns);
4728 hns3_uninit_umv_space(hw);
4729 hns3_stats_uninit(hw);
4730 hns3_config_mac_tnl_int(hw, false);
4731 hns3_pf_disable_irq0(hw);
4732 rte_intr_disable(pci_dev->intr_handle);
4733 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
4734 eth_dev);
4735 hns3_config_all_msix_error(hw, false);
4736 hns3_cmd_uninit(hw);
4737 hns3_cmd_destroy_queue(hw);
4738 hw->io_base = NULL;
4739 }
4740
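/*
 * Map an RTE_ETH_LINK_SPEED_* fixed-speed request to the corresponding
 * PHY speed capability bit. Unknown or combined values map to 0, which
 * later fails the port speed check.
 */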
4741 static uint32_t
4742 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
4743 {
4744 uint32_t speed_bit;
4745
4746 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
4747 case RTE_ETH_LINK_SPEED_10M:
4748 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
4749 break;
4750 case RTE_ETH_LINK_SPEED_10M_HD:
4751 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
4752 break;
4753 case RTE_ETH_LINK_SPEED_100M:
4754 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
4755 break;
4756 case RTE_ETH_LINK_SPEED_100M_HD:
4757 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
4758 break;
4759 case RTE_ETH_LINK_SPEED_1G:
4760 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
4761 break;
4762 default:
4763 speed_bit = 0;
4764 break;
4765 }
4766
4767 return speed_bit;
4768 }
4769
4770 static uint32_t
4771 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
4772 {
4773 uint32_t speed_bit;
4774
4775 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
4776 case RTE_ETH_LINK_SPEED_1G:
4777 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
4778 break;
4779 case RTE_ETH_LINK_SPEED_10G:
4780 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
4781 break;
4782 case RTE_ETH_LINK_SPEED_25G:
4783 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
4784 break;
4785 case RTE_ETH_LINK_SPEED_40G:
4786 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
4787 break;
4788 case RTE_ETH_LINK_SPEED_50G:
4789 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
4790 break;
4791 case RTE_ETH_LINK_SPEED_100G:
4792 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
4793 break;
4794 case RTE_ETH_LINK_SPEED_200G:
4795 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
4796 break;
4797 default:
4798 speed_bit = 0;
4799 break;
4800 }
4801
4802 return speed_bit;
4803 }
4804
4805 static int
4806 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
4807 {
4808 struct hns3_mac *mac = &hw->mac;
4809 uint32_t supported_speed = mac->supported_speed;
4810 uint32_t speed_bit = 0;
4811
4812 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
4813 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds);
4814 else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER)
4815 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds);
4816
4817 if (!(speed_bit & supported_speed)) {
4818 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.",
4819 link_speeds);
4820 return -EINVAL;
4821 }
4822
4823 return 0;
4824 }
4825
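/*
 * Select the highest speed present in the link_speeds bitmap as the
 * fixed link speed to configure.
 */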
4826 static uint32_t
4827 hns3_get_link_speed(uint32_t link_speeds)
4828 {
4829 uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
4830
4831 if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
4832 link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
4833 speed = RTE_ETH_SPEED_NUM_10M;
4834 if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
4835 link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
4836 speed = RTE_ETH_SPEED_NUM_100M;
4837 if (link_speeds & RTE_ETH_LINK_SPEED_1G)
4838 speed = RTE_ETH_SPEED_NUM_1G;
4839 if (link_speeds & RTE_ETH_LINK_SPEED_10G)
4840 speed = RTE_ETH_SPEED_NUM_10G;
4841 if (link_speeds & RTE_ETH_LINK_SPEED_25G)
4842 speed = RTE_ETH_SPEED_NUM_25G;
4843 if (link_speeds & RTE_ETH_LINK_SPEED_40G)
4844 speed = RTE_ETH_SPEED_NUM_40G;
4845 if (link_speeds & RTE_ETH_LINK_SPEED_50G)
4846 speed = RTE_ETH_SPEED_NUM_50G;
4847 if (link_speeds & RTE_ETH_LINK_SPEED_100G)
4848 speed = RTE_ETH_SPEED_NUM_100G;
4849 if (link_speeds & RTE_ETH_LINK_SPEED_200G)
4850 speed = RTE_ETH_SPEED_NUM_200G;
4851
4852 return speed;
4853 }
4854
4855 static uint8_t
4856 hns3_get_link_duplex(uint32_t link_speeds)
4857 {
4858 if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
4859 (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
4860 return RTE_ETH_LINK_HALF_DUPLEX;
4861 else
4862 return RTE_ETH_LINK_FULL_DUPLEX;
4863 }
4864
4865 static int
4866 hns3_set_copper_port_link_speed(struct hns3_hw *hw,
4867 struct hns3_set_link_speed_cfg *cfg)
4868 {
4869 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
4870 struct hns3_phy_params_bd0_cmd *req;
4871 uint16_t i;
4872
4873 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
4874 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
4875 false);
4876 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4877 }
4878 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false);
4879 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
4880 req->autoneg = cfg->autoneg;
4881
4882 /*
4883 * The full speed capability is used to negotiate when
4884 * auto-negotiation is enabled.
4885 */
4886 if (cfg->autoneg) {
4887 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT |
4888 HNS3_PHY_LINK_SPEED_10M_HD_BIT |
4889 HNS3_PHY_LINK_SPEED_100M_BIT |
4890 HNS3_PHY_LINK_SPEED_100M_HD_BIT |
4891 HNS3_PHY_LINK_SPEED_1000M_BIT;
4892 } else {
4893 req->speed = cfg->speed;
4894 req->duplex = cfg->duplex;
4895 }
4896
4897 return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
4898 }
4899
4900 static int
4901 hns3_set_autoneg(struct hns3_hw *hw, bool enable)
4902 {
4903 struct hns3_config_auto_neg_cmd *req;
4904 struct hns3_cmd_desc desc;
4905 uint32_t flag = 0;
4906 int ret;
4907
4908 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false);
4909
4910 req = (struct hns3_config_auto_neg_cmd *)desc.data;
4911 if (enable)
4912 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1);
4913 req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag);
4914
4915 ret = hns3_cmd_send(hw, &desc, 1);
4916 if (ret)
4917 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret);
4918
4919 return ret;
4920 }
4921
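/*
 * Apply link speed settings on a fiber port: configure auto-negotiation
 * when the hardware supports it, otherwise fall back to a fixed
 * speed/duplex configuration (with only a warning if auto-negotiation
 * was requested).
 */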
4922 static int
4923 hns3_set_fiber_port_link_speed(struct hns3_hw *hw,
4924 struct hns3_set_link_speed_cfg *cfg)
4925 {
4926 int ret;
4927
4928 if (hw->mac.support_autoneg) {
4929 ret = hns3_set_autoneg(hw, cfg->autoneg);
4930 if (ret) {
4931 hns3_err(hw, "failed to configure auto-negotiation.");
4932 return ret;
4933 }
4934
4935 /*
4936 * To enable auto-negotiation, we only need to turn on the
4937 * auto-negotiation switch; the firmware then sets all speed
4938 * capabilities.
4939 */
4940 if (cfg->autoneg)
4941 return 0;
4942 }
4943
4944 /*
4945 * Some hardware doesn't support auto-negotiation, but users may
4946 * leave link_speeds unconfigured (default 0), which implies
4947 * auto-negotiation. In this case, a warning message needs to be
4948 * printed instead of reporting an error.
4949 */
4950 if (cfg->autoneg) {
4951 hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
4952 return 0;
4953 }
4954
4955 return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
4956 }
4957
4958 static int
4959 hns3_set_port_link_speed(struct hns3_hw *hw,
4960 struct hns3_set_link_speed_cfg *cfg)
4961 {
4962 int ret;
4963
4964 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) {
4965 #if defined(RTE_HNS3_ONLY_1630_FPGA)
4966 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
4967 if (pf->is_tmp_phy)
4968 return 0;
4969 #endif
4970
4971 ret = hns3_set_copper_port_link_speed(hw, cfg);
4972 if (ret) {
4973 hns3_err(hw, "failed to set copper port link speed, "
4974 "ret = %d.", ret);
4975 return ret;
4976 }
4977 } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) {
4978 ret = hns3_set_fiber_port_link_speed(hw, cfg);
4979 if (ret) {
4980 hns3_err(hw, "failed to set fiber port link speed, "
4981 "ret = %d.", ret);
4982 return ret;
4983 }
4984 }
4985
4986 return 0;
4987 }
4988
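/*
 * Derive the link speed configuration from dev_conf.link_speeds:
 * RTE_ETH_LINK_SPEED_AUTONEG (0) selects auto-negotiation, anything
 * else selects a fixed speed/duplex. A minimal application-side sketch
 * (values are illustrative):
 *
 *	struct rte_eth_conf conf = { 0 };
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
 *	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 */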
4989 static int
4990 hns3_apply_link_speed(struct hns3_hw *hw)
4991 {
4992 struct rte_eth_conf *conf = &hw->data->dev_conf;
4993 struct hns3_set_link_speed_cfg cfg;
4994
4995 memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
4996 cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
4997 RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
4998 if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
4999 cfg.speed = hns3_get_link_speed(conf->link_speeds);
5000 cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
5001 }
5002
5003 return hns3_set_port_link_speed(hw, &cfg);
5004 }
5005
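/*
 * Bring the datapath up: update the queue mapping and TM configuration,
 * initialize the queues (optionally resetting them), enable the MAC
 * unless a link-down was requested, and apply the configured link
 * speed. On failure the MAC is disabled and all TQPs are reset.
 */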
5006 static int
5007 hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
5008 {
5009 struct hns3_hw *hw = &hns->hw;
5010 bool link_en;
5011 int ret;
5012
5013 ret = hns3_update_queue_map_configure(hns);
5014 if (ret) {
5015 hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
5016 ret);
5017 return ret;
5018 }
5019
5020 /* Note: hns3_tm_conf_update must be called after configuring DCB. */
5021 ret = hns3_tm_conf_update(hw);
5022 if (ret) {
5023 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
5024 return ret;
5025 }
5026
5027 hns3_enable_rxd_adv_layout(hw);
5028
5029 ret = hns3_init_queues(hns, reset_queue);
5030 if (ret) {
5031 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
5032 return ret;
5033 }
5034
5035 link_en = hw->set_link_down ? false : true;
5036 ret = hns3_cfg_mac_mode(hw, link_en);
5037 if (ret) {
5038 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
5039 goto err_config_mac_mode;
5040 }
5041
5042 ret = hns3_apply_link_speed(hw);
5043 if (ret)
5044 goto err_set_link_speed;
5045
5046 return 0;
5047
5048 err_set_link_speed:
5049 (void)hns3_cfg_mac_mode(hw, false);
5050
5051 err_config_mac_mode:
5052 hns3_dev_release_mbufs(hns);
5053 /*
5054 * This is the exception-handling path. hns3_reset_all_tqps prints
5055 * its own error message if it fails, so there is no need to check
5056 * its return value here; keep ret as the error code that caused
5057 * the exception.
5058 */
5059 (void)hns3_reset_all_tqps(hns);
5060 return ret;
5061 }
5062
5063 static void
5064 hns3_restore_filter(struct rte_eth_dev *dev)
5065 {
5066 hns3_restore_rss_filter(dev);
5067 }
5068
5069 static int
5070 hns3_dev_start(struct rte_eth_dev *dev)
5071 {
5072 struct hns3_adapter *hns = dev->data->dev_private;
5073 struct hns3_hw *hw = &hns->hw;
5074 bool old_state = hw->set_link_down;
5075 int ret;
5076
5077 PMD_INIT_FUNC_TRACE();
5078 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
5079 return -EBUSY;
5080
5081 rte_spinlock_lock(&hw->lock);
5082 hw->adapter_state = HNS3_NIC_STARTING;
5083
5084 /*
5085 * If the dev_set_link_down() API has been called, the "set_link_down"
5086 * flag can be cleared by dev_start() API. In addition, the flag should
5087 * also be cleared before calling hns3_do_start() so that MAC can be
5088 * enabled in dev_start stage.
5089 */
5090 hw->set_link_down = false;
5091 ret = hns3_do_start(hns, true);
5092 if (ret)
5093 goto do_start_fail;
5094
5095 ret = hns3_map_rx_interrupt(dev);
5096 if (ret)
5097 goto map_rx_inter_err;
5098
5099 /*
5100 * Three registers are used to control the status of a TQP (which
5101 * contains a pair of Tx and Rx queues) in the new version network
5102 * engine. One controls the enabling of the Tx queue, another controls
5103 * the enabling of the Rx queue, and the last is the master switch
5104 * controlling the enabling of the whole TQP. The Tx register and the
5105 * TQP register must be enabled at the same time to enable a Tx queue;
5106 * the same applies to the Rx queue. For the older network engine, this
5107 * function only refreshes the enabled flag and is used to update the
5108 * queue status in the DPDK framework.
5109 */
5110 ret = hns3_start_all_txqs(dev);
5111 if (ret)
5112 goto map_rx_inter_err;
5113
5114 ret = hns3_start_all_rxqs(dev);
5115 if (ret)
5116 goto start_all_rxqs_fail;
5117
5118 hw->adapter_state = HNS3_NIC_STARTED;
5119 rte_spinlock_unlock(&hw->lock);
5120
5121 hns3_rx_scattered_calc(dev);
5122 hns3_set_rxtx_function(dev);
5123 hns3_mp_req_start_rxtx(dev);
5124
5125 hns3_restore_filter(dev);
5126
5127 /* Enable interrupt of all rx queues before enabling queues */
5128 hns3_dev_all_rx_queue_intr_enable(hw, true);
5129
5130 /*
5131 * After finished the initialization, enable tqps to receive/transmit
5132 * packets and refresh all queue status.
5133 */
5134 hns3_start_tqps(hw);
5135
5136 hns3_tm_dev_start_proc(hw);
5137
5138 if (dev->data->dev_conf.intr_conf.lsc != 0)
5139 hns3_dev_link_update(dev, 0);
5140 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
5141
5142 hns3_info(hw, "hns3 dev start successful!");
5143
5144 return 0;
5145
5146 start_all_rxqs_fail:
5147 hns3_stop_all_txqs(dev);
5148 map_rx_inter_err:
5149 (void)hns3_do_stop(hns);
5150 do_start_fail:
5151 hw->set_link_down = old_state;
5152 hw->adapter_state = HNS3_NIC_CONFIGURED;
5153 rte_spinlock_unlock(&hw->lock);
5154
5155 return ret;
5156 }
5157
5158 static int
5159 hns3_do_stop(struct hns3_adapter *hns)
5160 {
5161 struct hns3_hw *hw = &hns->hw;
5162 int ret;
5163
5164 /*
5165 * The "hns3_do_stop" function is also called by .stop_service to
5166 * prepare for reset. During a global or IMP reset, the command to
5167 * stop the Tx/Rx queues cannot be sent, and the mbufs in the Tx/Rx
5168 * queues may still be accessed during the reset process. Therefore,
5169 * the mbufs cannot be released during reset and must be released
5170 * after the reset completes.
5171 */
5172 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
5173 hns3_dev_release_mbufs(hns);
5174
5175 ret = hns3_cfg_mac_mode(hw, false);
5176 if (ret)
5177 return ret;
5178 hw->mac.link_status = RTE_ETH_LINK_DOWN;
5179
5180 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
5181 hns3_configure_all_mac_addr(hns, true);
5182 ret = hns3_reset_all_tqps(hns);
5183 if (ret) {
5184 hns3_err(hw, "failed to reset all queues ret = %d.",
5185 ret);
5186 return ret;
5187 }
5188 }
5189
5190 return 0;
5191 }
5192
5193 static int
5194 hns3_dev_stop(struct rte_eth_dev *dev)
5195 {
5196 struct hns3_adapter *hns = dev->data->dev_private;
5197 struct hns3_hw *hw = &hns->hw;
5198
5199 PMD_INIT_FUNC_TRACE();
5200 dev->data->dev_started = 0;
5201
5202 hw->adapter_state = HNS3_NIC_STOPPING;
5203 hns3_set_rxtx_function(dev);
5204 rte_wmb();
5205 /* Disable datapath on secondary process. */
5206 hns3_mp_req_stop_rxtx(dev);
5207 /* Prevent crashes when queues are still in use. */
5208 rte_delay_ms(hw->cfg_max_queues);
5209
5210 rte_spinlock_lock(&hw->lock);
5211 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
5212 hns3_tm_dev_stop_proc(hw);
5213 hns3_config_mac_tnl_int(hw, false);
5214 hns3_stop_tqps(hw);
5215 hns3_do_stop(hns);
5216 hns3_unmap_rx_interrupt(dev);
5217 hw->adapter_state = HNS3_NIC_CONFIGURED;
5218 }
5219 hns3_rx_scattered_reset(dev);
5220 rte_eal_alarm_cancel(hns3_service_handler, dev);
5221 hns3_stop_report_lse(dev);
5222 rte_spinlock_unlock(&hw->lock);
5223
5224 return 0;
5225 }
5226
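/*
 * Release all resources on close. A secondary process only tears down
 * its multi-process channel; the primary process additionally stops the
 * port, aborts any pending reset and uninitializes the PF.
 */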
5227 static int
5228 hns3_dev_close(struct rte_eth_dev *eth_dev)
5229 {
5230 struct hns3_adapter *hns = eth_dev->data->dev_private;
5231 struct hns3_hw *hw = &hns->hw;
5232 int ret = 0;
5233
5234 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
5235 hns3_mp_uninit(eth_dev);
5236 return 0;
5237 }
5238
5239 if (hw->adapter_state == HNS3_NIC_STARTED)
5240 ret = hns3_dev_stop(eth_dev);
5241
5242 hw->adapter_state = HNS3_NIC_CLOSING;
5243 hns3_reset_abort(hns);
5244 hw->adapter_state = HNS3_NIC_CLOSED;
5245
5246 hns3_configure_all_mc_mac_addr(hns, true);
5247 hns3_remove_all_vlan_table(hns);
5248 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
5249 hns3_uninit_pf(eth_dev);
5250 hns3_free_all_queues(eth_dev);
5251 rte_free(hw->reset.wait_data);
5252 hns3_mp_uninit(eth_dev);
5253 hns3_warn(hw, "Close port %u finished", hw->data->port_id);
5254
5255 return ret;
5256 }
5257
5258 static void
5259 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause,
5260 bool *tx_pause)
5261 {
5262 struct hns3_mac *mac = &hw->mac;
5263 uint32_t advertising = mac->advertising;
5264 uint32_t lp_advertising = mac->lp_advertising;
5265 *rx_pause = false;
5266 *tx_pause = false;
5267
5268 if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) {
5269 *rx_pause = true;
5270 *tx_pause = true;
5271 } else if (advertising & lp_advertising &
5272 HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) {
5273 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5274 *rx_pause = true;
5275 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5276 *tx_pause = true;
5277 }
5278 }
5279
5280 static enum hns3_fc_mode
5281 hns3_get_autoneg_fc_mode(struct hns3_hw *hw)
5282 {
5283 enum hns3_fc_mode current_mode;
5284 bool rx_pause = false;
5285 bool tx_pause = false;
5286
5287 switch (hw->mac.media_type) {
5288 case HNS3_MEDIA_TYPE_COPPER:
5289 hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause);
5290 break;
5291
5292 /*
5293 * Flow control auto-negotiation is not supported for fiber and
5294 * backplane media types.
5295 */
5296 case HNS3_MEDIA_TYPE_FIBER:
5297 case HNS3_MEDIA_TYPE_BACKPLANE:
5298 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled.");
5299 current_mode = hw->requested_fc_mode;
5300 goto out;
5301 default:
5302 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).",
5303 hw->mac.media_type);
5304 current_mode = HNS3_FC_NONE;
5305 goto out;
5306 }
5307
5308 if (rx_pause && tx_pause)
5309 current_mode = HNS3_FC_FULL;
5310 else if (rx_pause)
5311 current_mode = HNS3_FC_RX_PAUSE;
5312 else if (tx_pause)
5313 current_mode = HNS3_FC_TX_PAUSE;
5314 else
5315 current_mode = HNS3_FC_NONE;
5316
5317 out:
5318 return current_mode;
5319 }
5320
5321 static enum hns3_fc_mode
5322 hns3_get_current_fc_mode(struct rte_eth_dev *dev)
5323 {
5324 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5325 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5326 struct hns3_mac *mac = &hw->mac;
5327
5328 /*
5329 * When the flow control mode is queried, the device may not have
5330 * completed auto-negotiation yet; wait for link establishment.
5331 */
5332 (void)hns3_dev_link_update(dev, 1);
5333
5334 /*
5335 * If the link auto-negotiation of the nic is disabled, or the flow
5336 * control auto-negotiation is not supported, the forced flow control
5337 * mode is used.
5338 */
5339 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg)
5340 return hw->requested_fc_mode;
5341
5342 return hns3_get_autoneg_fc_mode(hw);
5343 }
5344
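/*
 * Report the current flow control configuration to the application,
 * mapping the internal hns3_fc_mode to the rte_eth_fc_conf mode.
 */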
5345 int
5346 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
5347 {
5348 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5349 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5350 enum hns3_fc_mode current_mode;
5351
5352 current_mode = hns3_get_current_fc_mode(dev);
5353 switch (current_mode) {
5354 case HNS3_FC_FULL:
5355 fc_conf->mode = RTE_ETH_FC_FULL;
5356 break;
5357 case HNS3_FC_TX_PAUSE:
5358 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
5359 break;
5360 case HNS3_FC_RX_PAUSE:
5361 fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
5362 break;
5363 case HNS3_FC_NONE:
5364 default:
5365 fc_conf->mode = RTE_ETH_FC_NONE;
5366 break;
5367 }
5368
5369 fc_conf->pause_time = pf->pause_time;
5370 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0;
5371
5372 return 0;
5373 }
5374
5375 static int
hns3_check_fc_autoneg_valid(struct hns3_hw * hw,uint8_t autoneg)5376 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg)
5377 {
5378 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
5379
5380 if (!pf->support_fc_autoneg) {
5381 if (autoneg != 0) {
5382 hns3_err(hw, "unsupported fc auto-negotiation setting.");
5383 return -EOPNOTSUPP;
5384 }
5385
5386 /*
5387 * Flow control auto-negotiation of the NIC is not supported,
5388 * but other auto-negotiation features may be supported.
5389 */
5390 if (autoneg != hw->mac.link_autoneg) {
5391 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!");
5392 return -EOPNOTSUPP;
5393 }
5394
5395 return 0;
5396 }
5397
5398 /*
5399 * If flow control auto-negotiation of the NIC is supported, all
5400 * auto-negotiation features are supported.
5401 */
5402 if (autoneg != hw->mac.link_autoneg) {
5403 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!");
5404 return -EOPNOTSUPP;
5405 }
5406
5407 return 0;
5408 }
5409
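/*
 * .flow_ctrl_set ops: configure MAC pause. Watermarks, XON and MAC control
 * frame forwarding are not configurable here, MAC pause is mutually
 * exclusive with PFC, and multi-TC MAC pause needs explicit hardware
 * support.
 */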
static int
hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	if (fc_conf->high_water || fc_conf->low_water ||
	    fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
		hns3_err(hw, "Unsupported flow control settings specified, "
			 "high_water(%u), low_water(%u), send_xon(%u) and "
			 "mac_ctrl_frame_fwd(%u) must be set to '0'",
			 fc_conf->high_water, fc_conf->low_water,
			 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
		return -EINVAL;
	}

	ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg);
	if (ret)
		return ret;

	if (!fc_conf->pause_time) {
		hns3_err(hw, "Invalid pause time %u setting.",
			 fc_conf->pause_time);
		return -EINVAL;
	}

	if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
	      hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
		hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
			 "current_fc_status = %d", hw->current_fc_status);
		return -EOPNOTSUPP;
	}

	if (hw->num_tc > 1 && !pf->support_multi_tc_pause) {
		hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported.");
		return -EOPNOTSUPP;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3_fc_enable(dev, fc_conf);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

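/*
 * .priority_flow_ctrl_set ops: enable PFC for a user priority. Requires DCB
 * capability and is rejected while plain MAC pause is active.
 */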
static int
hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
			    struct rte_eth_pfc_conf *pfc_conf)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	if (!hns3_dev_get_support(hw, DCB)) {
		hns3_err(hw, "This port does not support dcb configurations.");
		return -EOPNOTSUPP;
	}

	if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
	    pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
		hns3_err(hw, "Unsupported flow control settings specified, "
			 "high_water(%u), low_water(%u), send_xon(%u) and "
			 "mac_ctrl_frame_fwd(%u) must be set to '0'",
			 pfc_conf->fc.high_water, pfc_conf->fc.low_water,
			 pfc_conf->fc.send_xon,
			 pfc_conf->fc.mac_ctrl_frame_fwd);
		return -EINVAL;
	}
	if (pfc_conf->fc.autoneg) {
		hns3_err(hw, "Unsupported fc auto-negotiation setting.");
		return -EINVAL;
	}
	if (pfc_conf->fc.pause_time == 0) {
		hns3_err(hw, "Invalid pause time %u setting.",
			 pfc_conf->fc.pause_time);
		return -EINVAL;
	}

	if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
	      hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
		hns3_err(hw, "MAC pause is enabled. Cannot set PFC. "
			 "current_fc_status = %d", hw->current_fc_status);
		return -EOPNOTSUPP;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3_dcb_pfc_enable(dev, pfc_conf);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

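/*
 * .get_dcb_info ops: report the number of TCs, the priority-to-TC mapping,
 * per-TC bandwidth weights and the Rx/Tx queue ranges assigned to each TC.
 */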
static int
hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	int i;

	rte_spinlock_lock(&hw->lock);
	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = pf->local_max_tc;
	else
		dcb_info->nb_tcs = 1;

	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];
	for (i = 0; i < dcb_info->nb_tcs; i++)
		dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];

	for (i = 0; i < hw->num_tc; i++) {
		dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
		dcb_info->tc_queue.tc_txq[0][i].base =
						hw->tc_queue[i].tqp_offset;
		dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
		dcb_info->tc_queue.tc_txq[0][i].nb_queue =
						hw->tc_queue[i].tqp_count;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

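/*
 * Re-initialize the device after a reset: bring the command queue back up,
 * re-init the hardware blocks, reset all TQPs and re-enable hardware error
 * interrupts.
 */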
static int
hns3_reinit_dev(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_cmd_init(hw);
	if (ret) {
		hns3_err(hw, "Failed to init cmd: %d", ret);
		return ret;
	}

	ret = hns3_init_hardware(hns);
	if (ret) {
		hns3_err(hw, "Failed to init hardware: %d", ret);
		return ret;
	}

	ret = hns3_reset_all_tqps(hns);
	if (ret) {
		hns3_err(hw, "Failed to reset all queues: %d", ret);
		return ret;
	}

	ret = hns3_enable_hw_error_intr(hns, true);
	if (ret) {
		hns3_err(hw, "fail to enable hw error interrupts: %d", ret);
		return ret;
	}
	hns3_info(hw, "Reset done, driver initialization finished.");

	return 0;
}

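/*
 * Poll the reset status register that corresponds to the current reset
 * level; the reset is done once hardware has cleared the busy bit.
 */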
static bool
is_pf_reset_done(struct hns3_hw *hw)
{
	uint32_t val, reg, reg_bit;

	switch (hw->reset.level) {
	case HNS3_IMP_RESET:
		reg = HNS3_GLOBAL_RESET_REG;
		reg_bit = HNS3_IMP_RESET_BIT;
		break;
	case HNS3_GLOBAL_RESET:
		reg = HNS3_GLOBAL_RESET_REG;
		reg_bit = HNS3_GLOBAL_RESET_BIT;
		break;
	case HNS3_FUNC_RESET:
		reg = HNS3_FUN_RST_ING;
		reg_bit = HNS3_FUN_RST_ING_B;
		break;
	case HNS3_FLR_RESET:
	default:
		hns3_err(hw, "Wait for unsupported reset level: %d",
			 hw->reset.level);
		return true;
	}
	val = hns3_read_dev(hw, reg);
	if (hns3_get_bit(val, reg_bit))
		return false;
	else
		return true;
}

bool
hns3_is_reset_pending(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset;

	hns3_check_event_cause(hns, NULL);
	reset = hns3_get_reset_level(hns, &hw->reset.pending);
	if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
	    hw->reset.level < reset) {
		hns3_warn(hw, "High level reset %d is pending", reset);
		return true;
	}
	reset = hns3_get_reset_level(hns, &hw->reset.request);
	if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
	    hw->reset.level < reset) {
		hns3_warn(hw, "High level reset %d is requested", reset);
		return true;
	}
	return false;
}

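/*
 * Reset ops .wait_hardware_ready: non-blocking check of the hardware reset
 * status. On the first call it arms an alarm that polls is_pf_reset_done()
 * periodically; -EAGAIN is returned while the wait is still in progress.
 */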
static int
hns3_wait_hardware_ready(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_wait_data *wait_data = hw->reset.wait_data;
	struct timeval tv;

	if (wait_data->result == HNS3_WAIT_SUCCESS)
		return 0;
	else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
		hns3_clock_gettime(&tv);
		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		return -ETIME;
	} else if (wait_data->result == HNS3_WAIT_REQUEST)
		return -EAGAIN;

	wait_data->hns = hns;
	wait_data->check_completion = is_pf_reset_done;
	wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
			    HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms();
	wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
	wait_data->count = HNS3_RESET_WAIT_CNT;
	wait_data->result = HNS3_WAIT_REQUEST;
	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
	return -EAGAIN;
}

static int
hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
{
	struct hns3_cmd_desc desc;
	struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
	hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	return hns3_cmd_send(hw, &desc, 1);
}

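/*
 * Trigger an IMP (management firmware) reset: send the raw opcode 0xFFFE
 * with the magic word 0xeedd, which firmware recognizes as the reset
 * trigger.
 */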
static int
hns3_imp_reset_cmd(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
	desc.data[0] = 0xeedd;

	return hns3_cmd_send(hw, &desc, 1);
}

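/*
 * Issue the reset requested via MSI-X error interrupts. IMP and global
 * resets are triggered directly by command/register writes here, while a
 * function reset is only recorded as pending and handled by the reset
 * service.
 */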
static void
hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
{
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv;
	uint32_t val;

	hns3_clock_gettime(&tv);
	if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
	    hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
		hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		return;
	}

	switch (reset_level) {
	case HNS3_IMP_RESET:
		hns3_imp_reset_cmd(hw);
		hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		break;
	case HNS3_GLOBAL_RESET:
		val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
		hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
		hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
		hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		break;
	case HNS3_FUNC_RESET:
		hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		/* schedule again to check later */
		hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
		hns3_schedule_reset(hns);
		break;
	default:
		hns3_warn(hw, "Unsupported reset level: %d", reset_level);
		return;
	}
	hns3_atomic_clear_bit(reset_level, &hw->reset.request);
}

static enum hns3_reset_level
hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level = HNS3_NONE_RESET;

	/* Return the highest priority reset level amongst all */
	if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
		reset_level = HNS3_IMP_RESET;
	else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
		reset_level = HNS3_GLOBAL_RESET;
	else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
		reset_level = HNS3_FUNC_RESET;
	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
		reset_level = HNS3_FLR_RESET;

	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
		return HNS3_NONE_RESET;

	return reset_level;
}

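/*
 * Record IMP error causes before an IMP reset: log the RD poison and CMDQ
 * error bits and clear them in the vector0 interrupt enable register.
 */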
static void
hns3_record_imp_error(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t reg_val;

	reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
		hns3_warn(hw, "Detected IMP RD poison!");
		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
	}

	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
		hns3_warn(hw, "Detected IMP CMDQ error!");
		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
	}
}

static int
hns3_prepare_reset(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t reg_val;
	int ret;

	switch (hw->reset.level) {
	case HNS3_FUNC_RESET:
		ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
		if (ret)
			return ret;

		/*
		 * After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hns3_cmd_init is called.
		 */
		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
		hw->reset.stats.request_cnt++;
		break;
	case HNS3_IMP_RESET:
		hns3_record_imp_error(hns);
		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
		break;
	default:
		break;
	}
	return 0;
}

static int
hns3_set_rst_done(struct hns3_hw *hw)
{
	struct hns3_pf_rst_done_cmd *req;
	struct hns3_cmd_desc desc;

	req = (struct hns3_pf_rst_done_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
	return hns3_cmd_send(hw, &desc, 1);
}

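/*
 * Reset ops .stop_service: bring the port link down, stop the periodic
 * service task, quiesce Rx/Tx in all processes and, for resets where the
 * command queue is still usable, remove multicast MAC entries that hardware
 * will not clean up by itself.
 */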
static int
hns3_stop_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	hw->mac.link_status = RTE_ETH_LINK_DOWN;
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
		hns3_update_linkstatus_and_event(hw, false);
	}

	hns3_set_rxtx_function(eth_dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(eth_dev);
	rte_delay_ms(hw->cfg_max_queues);

	rte_spinlock_lock(&hw->lock);
	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
	    hw->adapter_state == HNS3_NIC_STOPPING) {
		hns3_enable_all_queues(hw, false);
		hns3_do_stop(hns);
		hw->reset.mbuf_deferred_free = true;
	} else
		hw->reset.mbuf_deferred_free = false;

	/*
	 * It is cumbersome for hardware to pick-and-choose entries for
	 * deletion from table space. Hence, for function reset, software
	 * intervention is required to delete the entries.
	 */
	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
		hns3_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

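/*
 * Reset ops .start_service: tell firmware the driver side of an IMP or
 * global reset is done, restore the Rx/Tx burst functions and, if the port
 * was started, re-arm the service task and re-enable the queues.
 */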
static int
hns3_start_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	if (hw->reset.level == HNS3_IMP_RESET ||
	    hw->reset.level == HNS3_GLOBAL_RESET)
		hns3_set_rst_done(hw);
	eth_dev = &rte_eth_devices[hw->data->port_id];
	hns3_set_rxtx_function(eth_dev);
	hns3_mp_req_start_rxtx(eth_dev);
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		/*
		 * The parent function of this API already holds the
		 * hns3_hw.lock. hns3_service_handler may report link status
		 * change events; in a bonding application that may invoke
		 * driver ops which acquire the hns3_hw.lock again, thus
		 * leading to deadlock. We defer the call to
		 * hns3_service_handler to avoid the deadlock.
		 */
		rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
				  hns3_service_handler, eth_dev);

		/* Enable interrupt of all rx queues before enabling queues */
		hns3_dev_all_rx_queue_intr_enable(hw, true);
		/*
		 * The enable state of each rxq and txq needs to be recovered
		 * after reset, so restore it before enabling all TQPs.
		 */
		hns3_restore_tqp_enable_state(hw);
		/*
		 * When initialization is finished, enable queues to receive
		 * and transmit packets.
		 */
		hns3_enable_all_queues(hw, true);
	}

	return 0;
}

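/*
 * Reset ops .restore_conf: replay the saved configuration (MAC addresses,
 * promiscuous mode, VLAN, flow director, PTP, Rx interrupts, GRO and FEC)
 * after hardware has been re-initialized.
 */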
static int
hns3_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_configure_all_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3_dev_promisc_restore(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_table(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_conf(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_all_fdir_filter(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_ptp(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_rx_interrupt(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_gro_conf(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_fec(hw);
	if (ret)
		goto err_promisc;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
		ret = hns3_do_start(hns, false);
		if (ret)
			goto err_promisc;
		hns3_info(hw, "hns3 dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

err_promisc:
	hns3_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3_configure_all_mac_addr(hns, true);
	return ret;
}

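/*
 * Reset ops .reset_service: the delayed task that drives the reset state
 * machine. It recovers a possibly lost interrupt, processes any pending
 * reset (rescheduling itself while hardware is still busy) and then issues
 * newly requested resets.
 */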
static void
hns3_reset_service(void *param)
{
	struct hns3_adapter *hns = (struct hns3_adapter *)param;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level;
	struct timeval tv_delta;
	struct timeval tv_start;
	struct timeval tv;
	uint64_t msec;
	int ret;

	/*
	 * If the interrupt was not triggered within the delay time, it may
	 * have been lost. It is necessary to handle the interrupt here to
	 * recover from the error.
	 */
	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
	    SCHEDULE_DEFERRED) {
		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
				 __ATOMIC_RELAXED);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
		if (reset_level == HNS3_NONE_RESET) {
			hns3_err(hw, "No reset level is set, try IMP reset");
			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
		}
	}
	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);

	/*
	 * Check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is, we need to wait
	 * for hardware to complete the reset.
	 * a. If we are able to figure out in reasonable time that hardware
	 *    has fully reset, then we can proceed with driver and client
	 *    reset.
	 * b. Else, we can come back later to check this status and
	 *    reschedule now.
	 */
	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
	if (reset_level != HNS3_NONE_RESET) {
		hns3_clock_gettime(&tv_start);
		ret = hns3_reset_process(hns, reset_level);
		hns3_clock_gettime(&tv);
		timersub(&tv, &tv_start, &tv_delta);
		msec = hns3_clock_calctime_ms(&tv_delta);
		if (msec > HNS3_RESET_PROCESS_MS)
			hns3_err(hw, "%d handle long time delta %" PRIu64
				 " ms time=%ld.%.6ld",
				 hw->reset.level, msec,
				 tv.tv_sec, tv.tv_usec);
		if (ret == -EAGAIN)
			return;
	}

	/* Check if we got any *new* reset requests to be honored */
	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
	if (reset_level != HNS3_NONE_RESET)
		hns3_msix_process(hns, reset_level);
}

static unsigned int
hns3_get_speed_capa_num(uint16_t device_id)
{
	unsigned int num;

	switch (device_id) {
	case HNS3_DEV_ID_25GE:
	case HNS3_DEV_ID_25GE_RDMA:
		num = 2;
		break;
	case HNS3_DEV_ID_100G_RDMA_MACSEC:
	case HNS3_DEV_ID_200G_RDMA:
		num = 1;
		break;
	default:
		num = 0;
		break;
	}

	return num;
}

static int
hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
			uint16_t device_id)
{
	switch (device_id) {
	case HNS3_DEV_ID_25GE:
		/* fallthrough */
	case HNS3_DEV_ID_25GE_RDMA:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;

		/* In HNS3 devices, the 25G NIC is compatible with the 10G rate */
		speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
		speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
		break;
	case HNS3_DEV_ID_100G_RDMA_MACSEC:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
		break;
	case HNS3_DEV_ID_200G_RDMA:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

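/*
 * .fec_get_capability ops: report per-speed FEC capabilities. When the
 * caller's array is absent or too small, only the required entry count is
 * returned.
 */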
static int
hns3_fec_get_capability(struct rte_eth_dev *dev,
			struct rte_eth_fec_capa *speed_fec_capa,
			unsigned int num)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t device_id = pci_dev->id.device_id;
	unsigned int capa_num;
	int ret;

	capa_num = hns3_get_speed_capa_num(device_id);
	if (capa_num == 0) {
		hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
			 device_id);
		return -ENOTSUP;
	}

	if (speed_fec_capa == NULL || num < capa_num)
		return capa_num;

	ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
	if (ret)
		return -ENOTSUP;

	return capa_num;
}

static int
get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * Reading the HNS3_OPC_CONFIG_FEC_MODE command is not supported on
	 * devices with a link speed below 10 Gbps.
	 */
	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
		*state = 0;
		return 0;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
	req = (struct hns3_config_fec_cmd *)desc.data;
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "get current fec auto state failed, ret = %d",
			 ret);
		return ret;
	}

	*state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
	return 0;
}

static int
hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
{
	struct hns3_sfp_info_cmd *resp;
	uint32_t tmp_fec_capa;
	uint8_t auto_state;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * If the link is down and AUTO is enabled, AUTO is returned;
	 * otherwise, the configured FEC mode is returned. If the link is up,
	 * the current FEC mode is returned.
	 */
	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
		ret = get_current_fec_auto_state(hw, &auto_state);
		if (ret)
			return ret;

		if (auto_state == 0x1) {
			*fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
			return 0;
		}
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
	resp = (struct hns3_sfp_info_cmd *)desc.data;
	resp->query_type = HNS3_ACTIVE_QUERY;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		hns3_err(hw, "IMP does not support querying FEC, ret = %d", ret);
		return ret;
	} else if (ret) {
		hns3_err(hw, "get FEC failed, ret = %d", ret);
		return ret;
	}

	/*
	 * The FEC mode order defined in hns3 hardware is inconsistent with
	 * the one defined in the ethdev library. So the sequence needs to be
	 * converted.
	 */
	switch (resp->active_fec) {
	case HNS3_HW_FEC_MODE_NOFEC:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	case HNS3_HW_FEC_MODE_BASER:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
		break;
	case HNS3_HW_FEC_MODE_RS:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		break;
	default:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	}

	*fec_capa = tmp_fec_capa;
	return 0;
}

static int
hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hns3_fec_get_internal(hw, fec_capa);
}

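/*
 * Program one FEC mode into hardware. The RTE capability mask is mapped to
 * the firmware's OFF/BASER/RS encoding; AUTO is expressed via a separate
 * auto-enable bit.
 */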
static int
hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);

	req = (struct hns3_config_fec_cmd *)desc.data;
	switch (mode) {
	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
		break;
	default:
		return 0;
	}
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set fec mode failed, ret = %d", ret);

	return ret;
}

static uint32_t
get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
{
	struct hns3_mac *mac = &hw->mac;
	uint32_t cur_capa;

	switch (mac->link_speed) {
	case RTE_ETH_SPEED_NUM_10G:
		cur_capa = fec_capa[1].capa;
		break;
	case RTE_ETH_SPEED_NUM_25G:
	case RTE_ETH_SPEED_NUM_100G:
	case RTE_ETH_SPEED_NUM_200G:
		cur_capa = fec_capa[0].capa;
		break;
	default:
		cur_capa = 0;
		break;
	}

	return cur_capa;
}

static bool
is_fec_mode_one_bit_set(uint32_t mode)
{
	int cnt = 0;
	uint8_t i;

	/* Count set bits across all bits of 'mode', not just its bytes. */
	for (i = 0; i < sizeof(mode) * 8; i++)
		if (mode >> i & 0x1)
			cnt++;

	return cnt == 1;
}

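/*
 * .fec_set ops: validate that exactly one FEC mode bit is requested and
 * that it is within the capability of the current link speed, then program
 * it and cache it so it can be restored after reset.
 */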
static int
hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
{
#define FEC_CAPA_NUM 2
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct hns3_pf *pf = &hns->pf;
	struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
	uint32_t cur_capa;
	uint32_t num = FEC_CAPA_NUM;
	int ret;

	ret = hns3_fec_get_capability(dev, fec_capa, num);
	if (ret < 0)
		return ret;

	/* HNS3 PMD only supports modes with a single bit set, e.g. 0x1, 0x4 */
	if (!is_fec_mode_one_bit_set(mode)) {
		hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
			 "FEC mode should have only one bit set", mode);
		return -EINVAL;
	}

	/*
	 * Check whether the configured mode is within the FEC capability.
	 * If not, the configured mode is not supported.
	 */
	cur_capa = get_current_speed_fec_cap(hw, fec_capa);
	if (!(cur_capa & mode)) {
		hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
		return -EINVAL;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_fec_hw(hw, mode);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	pf->fec_mode = mode;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_restore_fec(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t mode = pf->fec_mode;
	int ret;

	ret = hns3_set_fec_hw(hw, mode);
	if (ret)
		hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
			 mode, ret);

	return ret;
}

static int
hns3_query_dev_fec_info(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
	int ret;

	ret = hns3_fec_get_internal(hw, &pf->fec_mode);
	if (ret)
		hns3_err(hw, "query device FEC info failed, ret = %d", ret);

	return ret;
}

static bool
hns3_optical_module_existed(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	bool existed;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw,
			 "fail to get optical module exist state, ret = %d.",
			 ret);
		return false;
	}
	existed = !!desc.data[0];

	return existed;
}

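/*
 * Read a chunk of SFP module EEPROM through firmware. The transfer uses a
 * chain of command descriptors: BD0 carries the offset/length header plus
 * the first bytes of data, while the remaining BDs carry data only. Returns
 * the number of bytes actually read.
 */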
static int
hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
			    uint32_t len, uint8_t *data)
{
#define HNS3_SFP_INFO_CMD_NUM 6
#define HNS3_SFP_INFO_MAX_LEN \
	(HNS3_SFP_INFO_BD0_LEN + \
	(HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
	struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
	struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
	uint16_t read_len;
	uint16_t copy_len;
	int ret;
	int i;

	for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
					  true);
		if (i < HNS3_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}

	sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
	read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);

	ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
	if (ret) {
		hns3_err(hw, "fail to get module EEPROM info, ret = %d.",
			 ret);
		return ret;
	}

	/* The data format in BD0 is different from the other BDs. */
	copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return (int)read_len;
}

static int
hns3_get_module_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *info)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	uint32_t offset = info->offset;
	uint32_t len = info->length;
	uint8_t *data = info->data;
	uint32_t read_len = 0;

	if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER)
		return -ENOTSUP;

	if (!hns3_optical_module_existed(hw)) {
		hns3_err(hw, "fail to read module EEPROM: no module is connected.");
		return -EIO;
	}

	while (read_len < len) {
		int ret;
		ret = hns3_get_module_eeprom_data(hw, offset + read_len,
						  len - read_len,
						  data + read_len);
		if (ret < 0)
			return -EIO;
		read_len += ret;
	}

	return 0;
}

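/*
 * .get_module_info ops: identify the plugged module from the SFF-8024
 * identifier byte at the start of its EEPROM and report the matching EEPROM
 * layout (SFF-8472, SFF-8436 or SFF-8636) and length.
 */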
static int
hns3_get_module_info(struct rte_eth_dev *dev,
		     struct rte_eth_dev_module_info *modinfo)
{
#define HNS3_SFF8024_ID_SFP		0x03
#define HNS3_SFF8024_ID_QSFP_8438	0x0c
#define HNS3_SFF8024_ID_QSFP_8436_8636	0x0d
#define HNS3_SFF8024_ID_QSFP28_8636	0x11
#define HNS3_SFF_8636_V1_3		0x03
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct rte_dev_eeprom_info info;
	struct hns3_sfp_type sfp_type;
	int ret;

	memset(&sfp_type, 0, sizeof(sfp_type));
	memset(&info, 0, sizeof(info));
	info.data = (uint8_t *)&sfp_type;
	info.length = sizeof(sfp_type);
	ret = hns3_get_module_eeprom(dev, &info);
	if (ret)
		return ret;

	switch (sfp_type.type) {
	case HNS3_SFF8024_ID_SFP:
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
		break;
	case HNS3_SFF8024_ID_QSFP_8438:
		modinfo->type = RTE_ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
		break;
	case HNS3_SFF8024_ID_QSFP_8436_8636:
		if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
			modinfo->type = RTE_ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
		} else {
			modinfo->type = RTE_ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
		}
		break;
	case HNS3_SFF8024_ID_QSFP28_8636:
		modinfo->type = RTE_ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		hns3_err(hw, "unknown module, type = %u, extra_type = %u.",
			 sfp_type.type, sfp_type.ext_type);
		return -EINVAL;
	}

	return 0;
}

static const struct eth_dev_ops hns3_eth_dev_ops = {
	.dev_configure          = hns3_dev_configure,
	.dev_start              = hns3_dev_start,
	.dev_stop               = hns3_dev_stop,
	.dev_close              = hns3_dev_close,
	.promiscuous_enable     = hns3_dev_promiscuous_enable,
	.promiscuous_disable    = hns3_dev_promiscuous_disable,
	.allmulticast_enable    = hns3_dev_allmulticast_enable,
	.allmulticast_disable   = hns3_dev_allmulticast_disable,
	.mtu_set                = hns3_dev_mtu_set,
	.stats_get              = hns3_stats_get,
	.stats_reset            = hns3_stats_reset,
	.xstats_get             = hns3_dev_xstats_get,
	.xstats_get_names       = hns3_dev_xstats_get_names,
	.xstats_reset           = hns3_dev_xstats_reset,
	.xstats_get_by_id       = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get          = hns3_dev_infos_get,
	.fw_version_get         = hns3_fw_version_get,
	.rx_queue_setup         = hns3_rx_queue_setup,
	.tx_queue_setup         = hns3_tx_queue_setup,
	.rx_queue_release       = hns3_dev_rx_queue_release,
	.tx_queue_release       = hns3_dev_tx_queue_release,
	.rx_queue_start         = hns3_dev_rx_queue_start,
	.rx_queue_stop          = hns3_dev_rx_queue_stop,
	.tx_queue_start         = hns3_dev_tx_queue_start,
	.tx_queue_stop          = hns3_dev_tx_queue_stop,
	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
	.rxq_info_get           = hns3_rxq_info_get,
	.txq_info_get           = hns3_txq_info_get,
	.rx_burst_mode_get      = hns3_rx_burst_mode_get,
	.tx_burst_mode_get      = hns3_tx_burst_mode_get,
	.flow_ctrl_get          = hns3_flow_ctrl_get,
	.flow_ctrl_set          = hns3_flow_ctrl_set,
	.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
	.mac_addr_add           = hns3_add_mac_addr,
	.mac_addr_remove        = hns3_remove_mac_addr,
	.mac_addr_set           = hns3_set_default_mac_addr,
	.set_mc_addr_list       = hns3_set_mc_mac_addr_list,
	.link_update            = hns3_dev_link_update,
	.dev_set_link_up        = hns3_dev_set_link_up,
	.dev_set_link_down      = hns3_dev_set_link_down,
	.rss_hash_update        = hns3_dev_rss_hash_update,
	.rss_hash_conf_get      = hns3_dev_rss_hash_conf_get,
	.reta_update            = hns3_dev_rss_reta_update,
	.reta_query             = hns3_dev_rss_reta_query,
	.flow_ops_get           = hns3_dev_flow_ops_get,
	.vlan_filter_set        = hns3_vlan_filter_set,
	.vlan_tpid_set          = hns3_vlan_tpid_set,
	.vlan_offload_set       = hns3_vlan_offload_set,
	.vlan_pvid_set          = hns3_vlan_pvid_set,
	.get_reg                = hns3_get_regs,
	.get_module_info        = hns3_get_module_info,
	.get_module_eeprom      = hns3_get_module_eeprom,
	.get_dcb_info           = hns3_get_dcb_info,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
	.fec_get_capability     = hns3_fec_get_capability,
	.fec_get                = hns3_fec_get,
	.fec_set                = hns3_fec_set,
	.tm_ops_get             = hns3_tm_ops_get,
	.tx_done_cleanup        = hns3_tx_done_cleanup,
	.timesync_enable            = hns3_timesync_enable,
	.timesync_disable           = hns3_timesync_disable,
	.timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
	.timesync_adjust_time       = hns3_timesync_adjust_time,
	.timesync_read_time         = hns3_timesync_read_time,
	.timesync_write_time        = hns3_timesync_write_time,
	.eth_dev_priv_dump      = hns3_eth_dev_priv_dump,
};

static const struct hns3_reset_ops hns3_reset_ops = {
	.reset_service       = hns3_reset_service,
	.stop_service        = hns3_stop_service,
	.prepare_reset       = hns3_prepare_reset,
	.wait_hardware_ready = hns3_wait_hardware_ready,
	.reinit_dev          = hns3_reinit_dev,
	.restore_conf        = hns3_restore_conf,
	.start_service       = hns3_start_service,
};

static void
hns3_init_hw_ops(struct hns3_hw *hw)
{
	hw->ops.add_mc_mac_addr = hns3_add_mc_mac_addr;
	hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr;
	hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr;
	hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr;
	hw->ops.bind_ring_with_vector = hns3_bind_ring_with_vector;
}

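/*
 * Main ethdev init entry for the PF driver: set up multi-process support,
 * reset handling and hardware ops, initialize the PF and its MAC addresses,
 * and arm any reset that was detected during probe.
 */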
static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	hns3_flow_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3_eth_dev_ops;
	eth_dev->rx_queue_count = hns3_rx_queue_count;
	ret = hns3_mp_init(eth_dev);
	if (ret)
		goto err_mp_init;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_tx_push_init(eth_dev);
		return 0;
	}

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = false;
	hw->data = eth_dev->data;
	hns3_parse_devargs(eth_dev);

	/*
	 * Set the default maximum packet size according to the default MTU
	 * value in the DPDK framework.
	 */
	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3_reset_ops;

	hns3_init_hw_ops(hw);
	ret = hns3_init_pf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
		goto err_init_pf;
	}

	ret = hns3_init_mac_addrs(eth_dev);
	if (ret != 0)
		goto err_init_mac_addrs;

	hw->adapter_state = HNS3_NIC_INITIALIZED;

	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
	    SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait for the ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}

	hns3_info(hw, "hns3 dev initialization successful!");
	return 0;

err_init_mac_addrs:
	hns3_uninit_pf(eth_dev);

err_init_pf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	hns3_mp_uninit(eth_dev);

err_mp_init:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->tx_descriptor_status = NULL;
	return ret;
}

static int
hns3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_mp_uninit(eth_dev);
		return 0;
	}

	if (hw->adapter_state < HNS3_NIC_CLOSING)
		hns3_dev_close(eth_dev);

	hw->adapter_state = HNS3_NIC_REMOVED;
	return 0;
}

static int
eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3_dev_init);
}

static int
eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
}

static const struct rte_pci_id pci_id_hns3_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
	{ .vendor_id = 0, }, /* sentinel */
};

static struct rte_pci_driver rte_hns3_pmd = {
	.id_table = pci_id_hns3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_hns3_pci_probe,
	.remove = eth_hns3_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_hns3,
		HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
		HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
		HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
		HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16> ");
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE);