/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 HiSilicon Limited
 */

#include <rte_kvargs.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_pci.h>

#include "hns3_common.h"
#include "hns3_logs.h"
#include "hns3_regs.h"
#include "hns3_rxtx.h"

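/*
 * The firmware version register packs four byte-sized fields that are
 * rendered below as "BYTE3.BYTE2.BYTE1.BYTE0". For example (illustrative
 * value only), hw->fw_version == 0x01020304 is formatted as "1.2.3.4".
 */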
int
hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
                    size_t fw_size)
{
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        uint32_t version = hw->fw_version;
        int ret;

        ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
                       hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
                                      HNS3_FW_VERSION_BYTE3_S),
                       hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
                                      HNS3_FW_VERSION_BYTE2_S),
                       hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
                                      HNS3_FW_VERSION_BYTE1_S),
                       hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
                                      HNS3_FW_VERSION_BYTE0_S));
        if (ret < 0)
                return -EINVAL;

        ret += 1; /* add the size of '\0' */
        if (fw_size < (size_t)ret)
                return ret;
        else
                return 0;
}

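/*
 * Fill the ethdev capability report for this port. Applications reach it
 * through rte_eth_dev_info_get(); a minimal usage sketch (port id assumed):
 *
 *	struct rte_eth_dev_info info;
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		printf("max rx queues: %u\n", info.max_rx_queues);
 */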
int
hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        uint16_t queue_num = hw->tqps_num;

        /*
         * In interrupt mode, 'max_rx_queues' is set based on the number of
         * MSI-X interrupt resources of the hardware.
         */
        if (hw->data->dev_conf.intr_conf.rxq == 1)
                queue_num = hw->intr_tqps_num;

        info->max_rx_queues = queue_num;
        info->max_tx_queues = hw->tqps_num;
        info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
        info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
        info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
        info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
        info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                                 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
                                 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                                 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
                                 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                                 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
                                 RTE_ETH_RX_OFFLOAD_SCATTER |
                                 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
                                 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
                                 RTE_ETH_RX_OFFLOAD_RSS_HASH |
                                 RTE_ETH_RX_OFFLOAD_TCP_LRO);
        info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                                 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
                                 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
                                 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
                                 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
                                 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
                                 RTE_ETH_TX_OFFLOAD_TCP_TSO |
                                 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
                                 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
                                 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
                                 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
                                 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);

        if (!hw->port_base_vlan_cfg.state)
                info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;

        if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
                info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;

        if (hns3_dev_get_support(hw, INDEP_TXRX))
                info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
        info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

        if (hns3_dev_get_support(hw, PTP))
                info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;

        info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = HNS3_MAX_RING_DESC,
                .nb_min = HNS3_MIN_RING_DESC,
                .nb_align = HNS3_ALIGN_RING_DESC,
        };

        info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = HNS3_MAX_RING_DESC,
                .nb_min = HNS3_MIN_RING_DESC,
                .nb_align = HNS3_ALIGN_RING_DESC,
                .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
                .nb_mtu_seg_max = hw->max_non_tso_bd_num,
        };

        info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
                /*
                 * On the hns3 network engine, incoming packets are always
                 * dropped by hardware when there are no available Rx buffer
                 * descriptors.
                 */
                .rx_drop_en = 1,
                .offloads = 0,
        };
        info->default_txconf = (struct rte_eth_txconf) {
                .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
                .offloads = 0,
        };

        info->reta_size = hw->rss_ind_tbl_size;
        info->hash_key_size = HNS3_RSS_KEY_SIZE;
        info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;

        info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
        info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
        info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
        info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
        info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
        info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

        /* The following fields differ between PF and VF. */
        if (!hns->is_vf) {
                info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
                info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
                info->speed_capa = hns3_get_speed_capa(hw);
        } else {
                info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
        }

        return 0;
}

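/*
 * kvargs callback for the "rx_func_hint"/"tx_func_hint" devargs keys.
 * Accepted values are "vec", "sve", "simple" and "common"; any other value
 * leaves the hint at HNS3_IO_FUNC_HINT_NONE, i.e. the driver keeps its
 * automatic burst-function selection.
 */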
static int
hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args)
{
        uint32_t hint = HNS3_IO_FUNC_HINT_NONE;

        RTE_SET_USED(key);

        if (strcmp(value, "vec") == 0)
                hint = HNS3_IO_FUNC_HINT_VEC;
        else if (strcmp(value, "sve") == 0)
                hint = HNS3_IO_FUNC_HINT_SVE;
        else if (strcmp(value, "simple") == 0)
                hint = HNS3_IO_FUNC_HINT_SIMPLE;
        else if (strcmp(value, "common") == 0)
                hint = HNS3_IO_FUNC_HINT_COMMON;

        /* If the hint is valid then update the output parameter */
        if (hint != HNS3_IO_FUNC_HINT_NONE)
                *(uint32_t *)extra_args = hint;

        return 0;
}

static const char *
hns3_get_io_hint_func_name(uint32_t hint)
{
        switch (hint) {
        case HNS3_IO_FUNC_HINT_VEC:
                return "vec";
        case HNS3_IO_FUNC_HINT_SVE:
                return "sve";
        case HNS3_IO_FUNC_HINT_SIMPLE:
                return "simple";
        case HNS3_IO_FUNC_HINT_COMMON:
                return "common";
        default:
                return "none";
        }
}

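/*
 * kvargs callback for the "dev_caps_mask" devargs key. The value is parsed
 * as hexadecimal, e.g. (hypothetical value) "dev_caps_mask=0x4" to mask off
 * one firmware-reported capability bit.
 */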
static int
hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args)
{
        uint64_t val;

        RTE_SET_USED(key);

        val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL);
        *(uint64_t *)extra_args = val;

        return 0;
}

static int
hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args)
{
        uint32_t val;

        RTE_SET_USED(key);

        val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL);

        /*
         * 500 ms is an empirical value for mailbox communication. If the
         * time limit is set below this empirical value, mailbox
         * communication may fail.
         */
        if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX)
                *(uint16_t *)extra_args = val;

        return 0;
}

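/*
 * Parse all hns3 runtime configuration keys from the device arguments. A
 * combined example on the EAL command line (illustrative PCI address):
 *
 *	-a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common,mbx_time_limit_ms=600
 */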
void
hns3_parse_devargs(struct rte_eth_dev *dev)
{
        uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
        struct hns3_adapter *hns = dev->data->dev_private;
        uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
        uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
        struct hns3_hw *hw = &hns->hw;
        uint64_t dev_caps_mask = 0;
        struct rte_kvargs *kvlist;

        /* Set default values for the runtime config parameters. */
        hns->rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
        hns->tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
        hns->dev_caps_mask = 0;
        hns->mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;

        if (dev->device->devargs == NULL)
                return;

        kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL);
        if (!kvlist)
                return;

        (void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT,
                                 &hns3_parse_io_hint_func, &rx_func_hint);
        (void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT,
                                 &hns3_parse_io_hint_func, &tx_func_hint);
        (void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK,
                                 &hns3_parse_dev_caps_mask, &dev_caps_mask);
        (void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS,
                                 &hns3_parse_mbx_time_limit, &mbx_time_limit_ms);

        rte_kvargs_free(kvlist);

        if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE)
                hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT,
                          hns3_get_io_hint_func_name(rx_func_hint));
        hns->rx_func_hint = rx_func_hint;
        if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE)
                hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT,
                          hns3_get_io_hint_func_name(tx_func_hint));
        hns->tx_func_hint = tx_func_hint;

        if (dev_caps_mask != 0)
                hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".",
                          HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask);
        hns->dev_caps_mask = dev_caps_mask;

        if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS)
                hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS,
                          mbx_time_limit_ms);
        hns->mbx_time_limit_ms = mbx_time_limit_ms;
}

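/*
 * Monotonic time helper used for driver-internal timeouts.
 * CLOCK_MONOTONIC_RAW is preferred when available because it is not subject
 * to NTP rate adjustment.
 */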
void
hns3_clock_gettime(struct timeval *tv)
{
#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE CLOCK_MONOTONIC
#endif
#define NSEC_TO_USEC_DIV 1000

        struct timespec spec;
        (void)clock_gettime(CLOCK_TYPE, &spec);

        tv->tv_sec = spec.tv_sec;
        tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV;
}

uint64_t
hns3_clock_calctime_ms(struct timeval *tv)
{
        return (uint64_t)tv->tv_sec * MSEC_PER_SEC +
               tv->tv_usec / USEC_PER_MSEC;
}

uint64_t
hns3_clock_gettime_ms(void)
{
        struct timeval tv;

        hns3_clock_gettime(&tv);
        return hns3_clock_calctime_ms(&tv);
}

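/*
 * Format a MAC address for logging with the middle bytes masked so the full
 * address is not leaked, e.g. 00:11:22:33:44:55 is rendered as
 * "00:**:**:**:44:55".
 */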
void hns3_ether_format_addr(char *buf, uint16_t size,
                            const struct rte_ether_addr *ether_addr)
{
        (void)snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
                       ether_addr->addr_bytes[0],
                       ether_addr->addr_bytes[4],
                       ether_addr->addr_bytes[5]);
}

static int
hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
                           struct rte_ether_addr *mc_addr_set,
                           uint32_t nb_mc_addr)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        struct rte_ether_addr *addr;
        uint16_t mac_addrs_capa;
        uint32_t i;
        uint32_t j;

        if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
                hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
                         "invalid. valid range: 0~%d",
                         nb_mc_addr, HNS3_MC_MACADDR_NUM);
                return -EINVAL;
        }

        /* Check if the input MAC addresses are valid */
        for (i = 0; i < nb_mc_addr; i++) {
                addr = &mc_addr_set[i];
                if (!rte_is_multicast_ether_addr(addr)) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                               addr);
                        hns3_err(hw,
                                 "failed to set mc mac addr, addr(%s) invalid.",
                                 mac_str);
                        return -EINVAL;
                }

                /* Check for duplicate addresses within mc_addr_set */
                for (j = i + 1; j < nb_mc_addr; j++) {
                        if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
                                hns3_ether_format_addr(mac_str,
                                                       RTE_ETHER_ADDR_FMT_SIZE,
                                                       addr);
                                hns3_err(hw, "failed to set mc mac addr, "
                                         "addrs invalid. two same addrs(%s).",
                                         mac_str);
                                return -EINVAL;
                        }
                }

                /*
                 * Check for duplicate addresses between mac_addrs and
                 * mc_addr_set
                 */
                mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
                                              HNS3_UC_MACADDR_NUM;
                for (j = 0; j < mac_addrs_capa; j++) {
                        if (rte_is_same_ether_addr(addr,
                                                   &hw->data->mac_addrs[j])) {
                                hns3_ether_format_addr(mac_str,
                                                       RTE_ETHER_ADDR_FMT_SIZE,
                                                       addr);
                                hns3_err(hw, "failed to set mc mac addr, "
                                         "addrs invalid. addr(%s) has already "
                                         "been configured by the mac_addr add "
                                         "API",
                                         mac_str);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}

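/*
 * Replace the whole multicast address list: under hw->lock, every currently
 * configured MC address is removed, then the new set is added entry by
 * entry.
 */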
int
hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
                          struct rte_ether_addr *mc_addr_set,
                          uint32_t nb_mc_addr)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_ether_addr *addr;
        int cur_addr_num;
        int set_addr_num;
        int num;
        int ret;
        int i;

        /* Check if the input parameters are valid */
        ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
        if (ret)
                return ret;

        rte_spinlock_lock(&hw->lock);
        cur_addr_num = hw->mc_addrs_num;
        for (i = 0; i < cur_addr_num; i++) {
                num = cur_addr_num - i - 1;
                addr = &hw->mc_addrs[num];
                ret = hw->ops.del_mc_mac_addr(hw, addr);
                if (ret) {
                        rte_spinlock_unlock(&hw->lock);
                        return ret;
                }

                hw->mc_addrs_num--;
        }

        set_addr_num = (int)nb_mc_addr;
        for (i = 0; i < set_addr_num; i++) {
                addr = &mc_addr_set[i];
                ret = hw->ops.add_mc_mac_addr(hw, addr);
                if (ret) {
                        rte_spinlock_unlock(&hw->lock);
                        return ret;
                }

                rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
                hw->mc_addrs_num++;
        }
        rte_spinlock_unlock(&hw->lock);

        return 0;
}

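/*
 * Remove (del == true) or re-add (del == false) every configured MC
 * address, e.g. when tearing down or restoring hardware state around a
 * reset.
 */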
int
hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        struct hns3_hw *hw = &hns->hw;
        struct rte_ether_addr *addr;
        int ret = 0;
        int i;

        for (i = 0; i < hw->mc_addrs_num; i++) {
                addr = &hw->mc_addrs[i];
                if (!rte_is_multicast_ether_addr(addr))
                        continue;
                if (del)
                        ret = hw->ops.del_mc_mac_addr(hw, addr);
                else
                        ret = hw->ops.add_mc_mac_addr(hw, addr);
                if (ret) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                               addr);
                        hns3_dbg(hw, "failed to %s mc mac addr: %s ret = %d",
                                 del ? "remove" : "restore", mac_str, ret);
                }
        }
        return ret;
}

int
hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        struct hns3_hw *hw = &hns->hw;
        struct hns3_hw_ops *ops = &hw->ops;
        struct rte_ether_addr *addr;
        uint16_t mac_addrs_capa;
        int ret = 0;
        int i;

        mac_addrs_capa =
                hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM;
        for (i = 0; i < mac_addrs_capa; i++) {
                addr = &hw->data->mac_addrs[i];
                if (rte_is_zero_ether_addr(addr))
                        continue;
                if (rte_is_multicast_ether_addr(addr))
                        ret = del ? ops->del_mc_mac_addr(hw, addr) :
                                    ops->add_mc_mac_addr(hw, addr);
                else
                        ret = del ? ops->del_uc_mac_addr(hw, addr) :
                                    ops->add_uc_mac_addr(hw, addr);

                if (ret) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                               addr);
                        hns3_err(hw, "failed to %s mac addr(%s) index:%d ret = %d.",
                                 del ? "remove" : "restore", mac_str, i, ret);
                }
        }

        return ret;
}

static bool
hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        struct rte_ether_addr *addr;
        int i;

        for (i = 0; i < hw->mc_addrs_num; i++) {
                addr = &hw->mc_addrs[i];
                /* Check if there are duplicate addresses in mc_addrs[] */
                if (rte_is_same_ether_addr(addr, mc_addr)) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                               addr);
                        hns3_err(hw, "failed to add mc mac addr, the same "
                                 "addr(%s) has already been added by the "
                                 "set_mc_mac_addr_list API", mac_str);
                        return true;
                }
        }

        return false;
}

int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                  __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        rte_spinlock_lock(&hw->lock);

        /*
         * In the hns3 network engine, UC and MC MAC addresses are added to
         * firmware with different commands, so we must determine whether the
         * input address is UC or MC to issue the right command. Note that
         * rte_eth_dev_set_mc_addr_list is the recommended API for setting MC
         * MAC addresses, because adding MC addresses through
         * rte_eth_dev_mac_addr_add may occupy entries intended for UC MAC
         * addresses.
         */
        if (rte_is_multicast_ether_addr(mac_addr)) {
                if (hns3_find_duplicate_mc_addr(hw, mac_addr)) {
                        rte_spinlock_unlock(&hw->lock);
                        return -EINVAL;
                }
                ret = hw->ops.add_mc_mac_addr(hw, mac_addr);
        } else {
                ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
        }
        rte_spinlock_unlock(&hw->lock);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                       mac_addr);
                hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
                         ret);
        }

        return ret;
}

void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /* the index will be checked by the upper-level rte interface */
        struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        rte_spinlock_lock(&hw->lock);

        if (rte_is_multicast_ether_addr(mac_addr))
                ret = hw->ops.del_mc_mac_addr(hw, mac_addr);
        else
                ret = hw->ops.del_uc_mac_addr(hw, mac_addr);
        rte_spinlock_unlock(&hw->lock);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                       mac_addr);
                hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
                         ret);
        }
}

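/*
 * Allocate dev->data->mac_addrs sized for this function's UC address
 * capability and seed entry 0 with the default MAC address.
 */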
int
hns3_init_mac_addrs(struct rte_eth_dev *dev)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        const char *memory_name = hns->is_vf ? "hns3vf-mac" : "hns3-mac";
        uint16_t mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
                                               HNS3_UC_MACADDR_NUM;
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        struct rte_ether_addr *eth_addr;

        /* Allocate memory for storing MAC addresses */
        dev->data->mac_addrs = rte_zmalloc(memory_name,
                                sizeof(struct rte_ether_addr) * mac_addrs_capa,
                                0);
        if (dev->data->mac_addrs == NULL) {
                hns3_err(hw, "failed to allocate %zx bytes needed to store MAC addresses",
                         sizeof(struct rte_ether_addr) * mac_addrs_capa);
                return -ENOMEM;
        }

        eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
        if (!hns->is_vf) {
                if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
                        rte_eth_random_addr(hw->mac.mac_addr);
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                (struct rte_ether_addr *)hw->mac.mac_addr);
                        hns3_warn(hw, "default mac_addr from firmware is an invalid "
                                  "unicast address, using random MAC address %s",
                                  mac_str);
                }
        } else {
                /*
                 * The hns3 PF kernel ethdev driver supports setting a VF MAC
                 * address on the host with the "ip link set ..." command. To
                 * avoid incorrect behaviour, for example the hns3 VF PMD
                 * failing to receive and send packets after the user
                 * configures the MAC address with "ip link set ...", the VF
                 * PMD keeps the same MAC address strategy as the hns3 kernel
                 * ethdev driver during initialization: if the user has
                 * configured a MAC address for the VF device with the ip
                 * command, the VF PMD starts with it; otherwise it starts
                 * with a random MAC address.
                 */
                if (rte_is_zero_ether_addr(eth_addr))
                        rte_eth_random_addr(hw->mac.mac_addr);
        }

        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
                            &dev->data->mac_addrs[0]);

        return 0;
}

int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
        uint16_t vec;
        int ret;
        int i;

        /*
         * In the hns3 network engine, vector 0 is always the misc interrupt
         * of the function and vectors 1~N can be used for its queues. Tx and
         * Rx queues with the same index share one interrupt vector. During
         * initialization, all hardware mappings between queues and interrupt
         * vectors must be cleared, so that errors caused by residual
         * configurations, such as unexpected Tx interrupts, are avoided.
         */
        vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
        if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
                vec = vec - 1; /* the last interrupt is reserved */
        hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
        for (i = 0; i < hw->intr_tqps_num; i++) {
                /*
                 * Set gap limiter/rate limiter/quantity limiter algorithm
                 * configuration for interrupt coalescing of the queue's
                 * interrupt.
                 */
                hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
                                       HNS3_TQP_INTR_GL_DEFAULT);
                hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
                                       HNS3_TQP_INTR_GL_DEFAULT);
                hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
                /*
                 * QL (quantity limiter) is not used currently, just set 0 to
                 * disable it.
                 */
                hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

                ret = hw->ops.bind_ring_with_vector(hw, vec, false,
                                                    HNS3_RING_TYPE_TX, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "fail to unbind TX ring(%d) with "
                                     "vector: %u, ret=%d", i, vec, ret);
                        return ret;
                }

                ret = hw->ops.bind_ring_with_vector(hw, vec, false,
                                                    HNS3_RING_TYPE_RX, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "fail to unbind RX ring(%d) with "
                                     "vector: %u, ret=%d", i, vec, ret);
                        return ret;
                }
        }

        return 0;
}

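/*
 * Bind Rx queues to MSI-X vectors for Rx interrupt mode. A sketch of how an
 * application enables this path (hypothetical port/queue ids):
 *
 *	struct rte_eth_conf conf = { .intr_conf = { .rxq = 1 } };
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	...
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 */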
int
hns3_map_rx_interrupt(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
        uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
        uint32_t intr_vector;
        uint16_t q_id;
        int ret;

        /*
         * hns3 needs a separate interrupt as the event interrupt, which
         * cannot be shared with the task queue pairs, so the kernel driver
         * must support multiple interrupt vectors.
         */
        if (dev->data->dev_conf.intr_conf.rxq == 0 ||
            !rte_intr_cap_multiple(intr_handle))
                return 0;

        rte_intr_disable(intr_handle);
        intr_vector = hw->used_rx_queues;
        /* create an event fd for each intr vector when MSI-X is used */
        if (rte_intr_efd_enable(intr_handle, intr_vector))
                return -EINVAL;

        /* Allocate the vector list */
        if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
                                    hw->used_rx_queues)) {
                hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
                         hw->used_rx_queues);
                ret = -ENOMEM;
                goto alloc_intr_vec_error;
        }

        if (rte_intr_allow_others(intr_handle)) {
                vec = RTE_INTR_VEC_RXTX_OFFSET;
                base = RTE_INTR_VEC_RXTX_OFFSET;
        }

        for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
                ret = hw->ops.bind_ring_with_vector(hw, vec, true,
                                                    HNS3_RING_TYPE_RX, q_id);
                if (ret)
                        goto bind_vector_error;

                ret = rte_intr_vec_list_index_set(intr_handle, q_id, vec);
                if (ret)
                        goto bind_vector_error;
                /*
                 * If there are not enough efds (e.g. not enough interrupts),
                 * the remaining queues are bound to the last interrupt.
                 */
                if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
                        vec++;
        }
        rte_intr_enable(intr_handle);
        return 0;

bind_vector_error:
        rte_intr_vec_list_free(intr_handle);
alloc_intr_vec_error:
        rte_intr_efd_disable(intr_handle);
        return ret;
}

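/*
 * Inverse of hns3_map_rx_interrupt(): unbind every Rx queue from its vector
 * and release the event fds and the vector list.
 */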
void
hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
        uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
        uint16_t q_id;

        if (dev->data->dev_conf.intr_conf.rxq == 0)
                return;

        /* unmap the ring with vector */
        if (rte_intr_allow_others(intr_handle)) {
                vec = RTE_INTR_VEC_RXTX_OFFSET;
                base = RTE_INTR_VEC_RXTX_OFFSET;
        }
        if (rte_intr_dp_is_en(intr_handle)) {
                for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
                        (void)hw->ops.bind_ring_with_vector(hw, vec, false,
                                                        HNS3_RING_TYPE_RX,
                                                        q_id);
                        if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
                                vec++;
                }
        }
        /* Clean the datapath event and queue/vector mapping */
        rte_intr_efd_disable(intr_handle);
        rte_intr_vec_list_free(intr_handle);
}

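/*
 * Re-establish the queue/vector bindings after a reset, using the mapping
 * previously saved in the interrupt handle's vector list.
 */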
int
hns3_restore_rx_interrupt(struct hns3_hw *hw)
{
        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint16_t q_id;
        int ret;

        if (dev->data->dev_conf.intr_conf.rxq == 0)
                return 0;

        if (rte_intr_dp_is_en(intr_handle)) {
                for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
                        ret = hw->ops.bind_ring_with_vector(hw,
                                        rte_intr_vec_list_index_get(intr_handle,
                                                                    q_id),
                                        true, HNS3_RING_TYPE_RX, q_id);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

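/*
 * Read the one-byte PCI revision ID from config space; the driver uses it
 * to distinguish hardware generations (e.g. HIP08 vs. HIP09).
 */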
int
hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id)
{
        struct rte_pci_device *pci_dev;
        struct rte_eth_dev *eth_dev;
        uint8_t revision;
        int ret;

        eth_dev = &rte_eth_devices[hw->data->port_id];
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
                                  HNS3_PCI_REVISION_ID);
        if (ret != HNS3_PCI_REVISION_ID_LEN) {
                hns3_err(hw, "failed to read pci revision id, ret = %d", ret);
                return -EIO;
        }

        *revision_id = revision;

        return 0;
}