1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4 
5 #include <linux/pci_regs.h>
6 #include <rte_alarm.h>
7 #include <ethdev_pci.h>
8 #include <rte_io.h>
9 #include <rte_vfio.h>
10 
11 #include "hns3_ethdev.h"
12 #include "hns3_common.h"
13 #include "hns3_logs.h"
14 #include "hns3_rxtx.h"
15 #include "hns3_regs.h"
16 #include "hns3_intr.h"
17 #include "hns3_dcb.h"
18 #include "hns3_mp.h"
19 #include "hns3_flow.h"
20 
21 #define HNS3VF_KEEP_ALIVE_INTERVAL	2000000 /* us */
22 #define HNS3VF_SERVICE_INTERVAL		1000000 /* us */
23 
24 #define HNS3VF_RESET_WAIT_MS	20
25 #define HNS3VF_RESET_WAIT_CNT	2000
26 
27 /* Reset related Registers */
28 #define HNS3_GLOBAL_RESET_BIT		0
29 #define HNS3_CORE_RESET_BIT		1
30 #define HNS3_IMP_RESET_BIT		2
31 #define HNS3_FUN_RST_ING_B		0
32 
33 enum hns3vf_evt_cause {
34 	HNS3VF_VECTOR0_EVENT_RST,
35 	HNS3VF_VECTOR0_EVENT_MBX,
36 	HNS3VF_VECTOR0_EVENT_OTHER,
37 };
38 
39 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
40 						    uint64_t *levels);
41 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
42 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
43 
44 static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
45 				  struct rte_ether_addr *mac_addr);
46 static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
47 				     struct rte_ether_addr *mac_addr);
48 static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
49 				   __rte_unused int wait_to_complete);
50 
51 /* set PCI bus mastering */
52 static int
53 hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
54 {
55 	uint16_t reg;
56 	int ret;
57 
58 	ret = rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
59 	if (ret < 0) {
60 		PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
61 			     PCI_COMMAND);
62 		return ret;
63 	}
64 
65 	if (op)
66 		/* set the master bit */
67 		reg |= PCI_COMMAND_MASTER;
68 	else
69 		reg &= ~(PCI_COMMAND_MASTER);
70 
71 	return rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
72 }
73 
74 /**
75  * hns3vf_find_pci_capability - look up a capability in the PCI capability list
76  * @cap: the capability ID to search for
77  *
78  * Return the config-space offset of the given capability, or 0 if not found.
79  */
80 static int
81 hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
82 {
83 #define MAX_PCIE_CAPABILITY 48
84 	uint16_t status;
85 	uint8_t pos;
86 	uint8_t id;
87 	int ttl;
88 	int ret;
89 
90 	ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
91 	if (ret < 0) {
92 		PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS);
93 		return 0;
94 	}
95 
96 	if (!(status & PCI_STATUS_CAP_LIST))
97 		return 0;
98 
99 	ttl = MAX_PCIE_CAPABILITY;
100 	ret = rte_pci_read_config(device, &pos, sizeof(pos),
101 				  PCI_CAPABILITY_LIST);
102 	if (ret < 0) {
103 		PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
104 			     PCI_CAPABILITY_LIST);
105 		return 0;
106 	}
107 
108 	while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
109 		ret = rte_pci_read_config(device, &id, sizeof(id),
110 					  (pos + PCI_CAP_LIST_ID));
111 		if (ret < 0) {
112 			PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
113 				     (pos + PCI_CAP_LIST_ID));
114 			break;
115 		}
116 
117 		if (id == 0xFF)
118 			break;
119 
120 		if (id == cap)
121 			return (int)pos;
122 
123 		ret = rte_pci_read_config(device, &pos, sizeof(pos),
124 					  (pos + PCI_CAP_LIST_NEXT));
125 		if (ret < 0) {
126 			PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
127 				     (pos + PCI_CAP_LIST_NEXT));
128 			break;
129 		}
130 	}
131 	return 0;
132 }
133 
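/*
 * Enable or disable MSI-X for the device by toggling PCI_MSIX_FLAGS_ENABLE
 * in the message control word of the MSI-X capability located above.
 */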
134 static int
135 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
136 {
137 	uint16_t control;
138 	int pos;
139 	int ret;
140 
141 	pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
142 	if (pos) {
143 		ret = rte_pci_read_config(device, &control, sizeof(control),
144 				    (pos + PCI_MSIX_FLAGS));
145 		if (ret < 0) {
146 			PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
147 				     (pos + PCI_MSIX_FLAGS));
148 			return -ENXIO;
149 		}
150 
151 		if (op)
152 			control |= PCI_MSIX_FLAGS_ENABLE;
153 		else
154 			control &= ~PCI_MSIX_FLAGS_ENABLE;
155 		ret = rte_pci_write_config(device, &control, sizeof(control),
156 					  (pos + PCI_MSIX_FLAGS));
157 		if (ret < 0) {
158 			PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x",
159 				    (pos + PCI_MSIX_FLAGS));
160 			return -ENXIO;
161 		}
162 
163 		return 0;
164 	}
165 
166 	return -ENXIO;
167 }
168 
169 static int
170 hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
171 {
172 	/* mac address was checked by upper level interface */
173 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
174 	int ret;
175 
176 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
177 				HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
178 				RTE_ETHER_ADDR_LEN, false, NULL, 0);
179 	if (ret) {
180 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
181 				      mac_addr);
182 		hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
183 			 mac_str, ret);
184 	}
185 	return ret;
186 }
187 
188 static int
189 hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
190 {
191 	/* mac address was checked by upper level interface */
192 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
193 	int ret;
194 
195 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
196 				HNS3_MBX_MAC_VLAN_UC_REMOVE,
197 				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
198 				false, NULL, 0);
199 	if (ret) {
200 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
201 				      mac_addr);
202 		hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
203 			 mac_str, ret);
204 	}
205 	return ret;
206 }
207 
208 static int
209 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
210 			    struct rte_ether_addr *mac_addr)
211 {
212 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
213 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
214 	struct rte_ether_addr *old_addr;
215 	uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
216 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
217 	int ret;
218 
219 	/*
220 	 * The mac_addr parameter has already been validated as a proper
221 	 * address by the ethdev layer of the DPDK framework.
222 	 */
223 	old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
224 	rte_spinlock_lock(&hw->lock);
225 	memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
226 	memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
227 	       RTE_ETHER_ADDR_LEN);
228 
229 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
230 				HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
231 				HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
232 	if (ret) {
233 		/*
234 		 * The hns3 VF PMD depends on the hns3 PF kernel ethdev
235 		 * driver. When the user has configured a MAC address for a VF
236 		 * device with the "ip link set ..." command on the PF device,
237 		 * the hns3 PF kernel ethdev driver does not allow the VF
238 		 * driver to reconfigure a different default MAC address, and
239 		 * returns -EPERM to the VF driver through the mailbox.
240 		 */
241 		if (ret == -EPERM) {
242 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
243 					      old_addr);
244 			hns3_warn(hw, "Has permanent mac addr(%s) for vf",
245 				  mac_str);
246 		} else {
247 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
248 					      mac_addr);
249 			hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
250 				 mac_str, ret);
251 		}
252 	}
253 
254 	rte_ether_addr_copy(mac_addr,
255 			    (struct rte_ether_addr *)hw->mac.mac_addr);
256 	rte_spinlock_unlock(&hw->lock);
257 
258 	return ret;
259 }
260 
261 static int
262 hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
263 		       struct rte_ether_addr *mac_addr)
264 {
265 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
266 	int ret;
267 
268 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
269 				HNS3_MBX_MAC_VLAN_MC_ADD,
270 				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
271 				NULL, 0);
272 	if (ret) {
273 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
274 				      mac_addr);
275 		hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
276 			 mac_str, ret);
277 	}
278 
279 	return ret;
280 }
281 
282 static int
283 hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
284 			  struct rte_ether_addr *mac_addr)
285 {
286 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
287 	int ret;
288 
289 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
290 				HNS3_MBX_MAC_VLAN_MC_REMOVE,
291 				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
292 				NULL, 0);
293 	if (ret) {
294 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
295 				      mac_addr);
296 		hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
297 			 mac_str, ret);
298 	}
299 
300 	return ret;
301 }
302 
303 static int
304 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
305 			bool en_uc_pmc, bool en_mc_pmc)
306 {
307 	struct hns3_mbx_vf_to_pf_cmd *req;
308 	struct hns3_cmd_desc desc;
309 	int ret;
310 
311 	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
312 
313 	/*
314 	 * The hns3 VF PMD depends on the hns3 PF kernel ethdev driver, which
315 	 * leads to the following behavior of promiscuous/allmulticast mode in
316 	 * the hns3 VF PMD:
317 	 * 1. The promiscuous/allmulticast mode can be configured successfully
318 	 *    only on a trusted VF device. On a non-trusted VF device,
319 	 *    configuring promiscuous/allmulticast mode will fail.
320 	 *    The hns3 VF device can be configured as a trusted device by the
321 	 *    hns3 PF kernel ethdev driver on the host with the following
322 	 *    command: "ip link set <eth num> vf <vf id> trust on"
323 	 * 2. After promiscuous mode is configured successfully, the hns3 VF PMD
324 	 *    receives both ingress and outgoing traffic: all ingress packets,
325 	 *    plus the packets sent from the PF and the other VFs on the same
326 	 *    physical port.
327 	 * 3. Note: due to hardware constraints, the VLAN filter is enabled by
328 	 *    default and cannot be turned off on a VF device, so the VLAN
329 	 *    filter is still effective even in promiscuous mode. If upper
330 	 *    applications do not call the rte_eth_dev_vlan_filter API to add
331 	 *    VLANs on the VF device, the hns3 VF PMD cannot receive VLAN-tagged
332 	 *    packets in promiscuous mode.
333 	 */
334 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
335 	req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
336 	req->msg[1] = en_bc_pmc ? 1 : 0;
337 	req->msg[2] = en_uc_pmc ? 1 : 0;
338 	req->msg[3] = en_mc_pmc ? 1 : 0;
339 	req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
340 
341 	ret = hns3_cmd_send(hw, &desc, 1);
342 	if (ret)
343 		hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
344 
345 	return ret;
346 }
347 
348 static int
349 hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
350 {
351 	struct hns3_adapter *hns = dev->data->dev_private;
352 	struct hns3_hw *hw = &hns->hw;
353 	int ret;
354 
355 	ret = hns3vf_set_promisc_mode(hw, true, true, true);
356 	if (ret)
357 		hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
358 			ret);
359 	return ret;
360 }
361 
362 static int
363 hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
364 {
365 	bool allmulti = dev->data->all_multicast ? true : false;
366 	struct hns3_adapter *hns = dev->data->dev_private;
367 	struct hns3_hw *hw = &hns->hw;
368 	int ret;
369 
370 	ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
371 	if (ret)
372 		hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
373 			ret);
374 	return ret;
375 }
376 
377 static int
378 hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
379 {
380 	struct hns3_adapter *hns = dev->data->dev_private;
381 	struct hns3_hw *hw = &hns->hw;
382 	int ret;
383 
384 	if (dev->data->promiscuous)
385 		return 0;
386 
387 	ret = hns3vf_set_promisc_mode(hw, true, false, true);
388 	if (ret)
389 		hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
390 			ret);
391 	return ret;
392 }
393 
394 static int
395 hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
396 {
397 	struct hns3_adapter *hns = dev->data->dev_private;
398 	struct hns3_hw *hw = &hns->hw;
399 	int ret;
400 
401 	if (dev->data->promiscuous)
402 		return 0;
403 
404 	ret = hns3vf_set_promisc_mode(hw, true, false, false);
405 	if (ret)
406 		hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
407 			ret);
408 	return ret;
409 }
410 
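/*
 * Restore the promiscuous/allmulticast configuration recorded in the ethdev
 * data after a reset by re-issuing the corresponding request to the PF.
 */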
411 static int
412 hns3vf_restore_promisc(struct hns3_adapter *hns)
413 {
414 	struct hns3_hw *hw = &hns->hw;
415 	bool allmulti = hw->data->all_multicast ? true : false;
416 
417 	if (hw->data->promiscuous)
418 		return hns3vf_set_promisc_mode(hw, true, true, true);
419 
420 	return hns3vf_set_promisc_mode(hw, true, false, allmulti);
421 }
422 
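/*
 * Map or unmap a TQP ring (Tx or Rx) to/from a MSI-X vector by sending a
 * HNS3_MBX_MAP_RING_TO_VECTOR or HNS3_MBX_UNMAP_RING_TO_VECTOR mailbox
 * message to the PF.
 */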
423 static int
424 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id,
425 			     bool mmap, enum hns3_ring_type queue_type,
426 			     uint16_t queue_id)
427 {
428 	struct hns3_vf_bind_vector_msg bind_msg;
429 	const char *op_str;
430 	uint16_t code;
431 	int ret;
432 
433 	memset(&bind_msg, 0, sizeof(bind_msg));
434 	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
435 		HNS3_MBX_UNMAP_RING_TO_VECTOR;
436 	bind_msg.vector_id = (uint8_t)vector_id;
437 
438 	if (queue_type == HNS3_RING_TYPE_RX)
439 		bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
440 	else
441 		bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
442 
443 	bind_msg.param[0].ring_type = queue_type;
444 	bind_msg.ring_num = 1;
445 	bind_msg.param[0].tqp_index = queue_id;
446 	op_str = mmap ? "Map" : "Unmap";
447 	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
448 				sizeof(bind_msg), false, NULL, 0);
449 	if (ret)
450 		hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
451 			 op_str, queue_id, bind_msg.vector_id, ret);
452 
453 	return ret;
454 }
455 
456 static int
457 hns3vf_dev_configure(struct rte_eth_dev *dev)
458 {
459 	struct hns3_adapter *hns = dev->data->dev_private;
460 	struct hns3_hw *hw = &hns->hw;
461 	struct rte_eth_conf *conf = &dev->data->dev_conf;
462 	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
463 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
464 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
465 	struct rte_eth_rss_conf rss_conf;
466 	bool gro_en;
467 	int ret;
468 
469 	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
470 
471 	/*
472 	 * Some versions of the hardware network engine do not support
473 	 * enabling/disabling/resetting the Tx or Rx queues individually. These
474 	 * devices must enable/disable/reset Tx and Rx queues at the same time.
475 	 * When the number of Tx queues allocated by upper applications is not
476 	 * equal to the number of Rx queues, the driver needs to set up fake Tx
477 	 * or Rx queues to even out the Tx/Rx queue numbers; otherwise, the
478 	 * network engine cannot work as usual. These fake queues are invisible
479 	 * to, and cannot be used by, upper applications.
480 	 */
481 	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
482 	if (ret) {
483 		hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
484 		hw->cfg_max_queues = 0;
485 		return ret;
486 	}
487 
488 	hw->adapter_state = HNS3_NIC_CONFIGURING;
489 	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
490 		hns3_err(hw, "setting link speed/duplex not supported");
491 		ret = -EINVAL;
492 		goto cfg_err;
493 	}
494 
495 	/* When RSS is not configured, packets are redirected to queue 0 */
496 	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
497 		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
498 		hw->rss_dis_flag = false;
499 		rss_conf = conf->rx_adv_conf.rss_conf;
500 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
501 		if (ret)
502 			goto cfg_err;
503 	}
504 
505 	ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
506 	if (ret != 0)
507 		goto cfg_err;
508 
509 	ret = hns3vf_dev_configure_vlan(dev);
510 	if (ret)
511 		goto cfg_err;
512 
513 	/* config hardware GRO */
514 	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
515 	ret = hns3_config_gro(hw, gro_en);
516 	if (ret)
517 		goto cfg_err;
518 
519 	hns3_init_rx_ptype_tble(dev);
520 
521 	hw->adapter_state = HNS3_NIC_CONFIGURED;
522 	return 0;
523 
524 cfg_err:
525 	hw->cfg_max_queues = 0;
526 	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
527 	hw->adapter_state = HNS3_NIC_INITIALIZED;
528 
529 	return ret;
530 }
531 
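/*
 * Ask the PF, via the HNS3_MBX_SET_MTU mailbox message, to program the
 * hardware MTU on behalf of this VF.
 */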
532 static int
533 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
534 {
535 	int ret;
536 
537 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
538 				sizeof(mtu), true, NULL, 0);
539 	if (ret)
540 		hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
541 
542 	return ret;
543 }
544 
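/*
 * Usage sketch: applications normally reach this callback through the generic
 * ethdev API, e.g. rte_eth_dev_set_mtu(port_id, mtu), assuming this function
 * is registered as the .mtu_set dev op elsewhere in this driver.
 */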
545 static int
546 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
547 {
548 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
549 	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
550 	int ret;
551 
552 	/*
553 	 * The hns3 PF/VF devices on the same port share the hardware MTU
554 	 * configuration. Currently, the hns3 VF PMD sends a mailbox message to
555 	 * ask the hns3 PF kernel ethdev driver to program the hardware MTU, so
556 	 * there is no need to stop the port for the hns3 VF device. The MTU
557 	 * value issued by the hns3 VF PMD must be less than or equal to the
558 	 * PF's MTU.
559 	 */
560 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
561 		hns3_err(hw, "Failed to set mtu during resetting");
562 		return -EIO;
563 	}
564 
565 	/*
566 	 * When Rx of scattered packets is off, the hns3 PMD may use the vector
567 	 * Rx process function or the simple Rx functions.
568 	 * If the MTU is increased so that the maximum length of received
569 	 * packets exceeds the length of a single Rx packet buffer, the hardware
570 	 * network engine needs multiple BDs and buffers to store such packets.
571 	 * This causes problems when the vector Rx process function or the
572 	 * simple Rx function is still used to receive packets. So, when Rx of
573 	 * scattered packets is off and the device is started, it is not
574 	 * permitted to increase the MTU such that the maximum Rx packet length
575 	 * would exceed the Rx buffer length.
576 	 */
577 	if (dev->data->dev_started && !dev->data->scattered_rx &&
578 	    frame_size > hw->rx_buf_len) {
579 		hns3_err(hw, "failed to set mtu because the device is "
580 			"not in scattered rx mode");
581 		return -EOPNOTSUPP;
582 	}
583 
584 	rte_spinlock_lock(&hw->lock);
585 	ret = hns3vf_config_mtu(hw, mtu);
586 	if (ret) {
587 		rte_spinlock_unlock(&hw->lock);
588 		return ret;
589 	}
590 	rte_spinlock_unlock(&hw->lock);
591 
592 	return 0;
593 }
594 
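/* Clear the given vector0 event sources by writing the CMDQ source register. */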
595 static void
596 hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
597 {
598 	hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
599 }
600 
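/* Mask the misc (vector0) interrupt; hns3vf_enable_irq0() below unmasks it. */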
601 static void
602 hns3vf_disable_irq0(struct hns3_hw *hw)
603 {
604 	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
605 }
606 
607 static void
608 hns3vf_enable_irq0(struct hns3_hw *hw)
609 {
610 	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
611 }
612 
613 static enum hns3vf_evt_cause
614 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
615 {
616 	struct hns3_hw *hw = &hns->hw;
617 	enum hns3vf_evt_cause ret;
618 	uint32_t cmdq_stat_reg;
619 	uint32_t rst_ing_reg;
620 	uint32_t val;
621 
622 	/* Fetch the events from their corresponding regs */
623 	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
624 	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
625 		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
626 		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
627 		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
628 		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
629 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
630 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
631 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
632 		if (clearval) {
633 			hw->reset.stats.global_cnt++;
634 			hns3_warn(hw, "Global reset detected, clear reset status");
635 		} else {
636 			hns3_schedule_delayed_reset(hns);
637 			hns3_warn(hw, "Global reset detected, don't clear reset status");
638 		}
639 
640 		ret = HNS3VF_VECTOR0_EVENT_RST;
641 		goto out;
642 	}
643 
644 	/* Check for vector0 mailbox(=CMDQ RX) event source */
645 	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
646 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
647 		ret = HNS3VF_VECTOR0_EVENT_MBX;
648 		goto out;
649 	}
650 
651 	val = 0;
652 	ret = HNS3VF_VECTOR0_EVENT_OTHER;
653 out:
654 	if (clearval)
655 		*clearval = val;
656 	return ret;
657 }
658 
659 static void
660 hns3vf_interrupt_handler(void *param)
661 {
662 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
663 	struct hns3_adapter *hns = dev->data->dev_private;
664 	struct hns3_hw *hw = &hns->hw;
665 	enum hns3vf_evt_cause event_cause;
666 	uint32_t clearval;
667 
668 	/* Disable interrupt */
669 	hns3vf_disable_irq0(hw);
670 
671 	/* Read out interrupt causes */
672 	event_cause = hns3vf_check_event_cause(hns, &clearval);
673 	/* Clear interrupt causes */
674 	hns3vf_clear_event_cause(hw, clearval);
675 
676 	switch (event_cause) {
677 	case HNS3VF_VECTOR0_EVENT_RST:
678 		hns3_schedule_reset(hns);
679 		break;
680 	case HNS3VF_VECTOR0_EVENT_MBX:
681 		hns3_dev_handle_mbx_msg(hw);
682 		break;
683 	default:
684 		break;
685 	}
686 
687 	/* Enable interrupt */
688 	hns3vf_enable_irq0(hw);
689 }
690 
691 static void
692 hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
693 {
694 	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
695 	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
696 	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
697 	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
698 }
699 
700 static void
701 hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
702 {
703 	struct hns3_dev_specs_0_cmd *req0;
704 
705 	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
706 
707 	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
708 	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
709 	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
710 	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
711 }
712 
713 static int
714 hns3vf_check_dev_specifications(struct hns3_hw *hw)
715 {
716 	if (hw->rss_ind_tbl_size == 0 ||
717 	    hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
718 		hns3_warn(hw, "the size of hash lookup table configured (%u)"
719 			      " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
720 			      HNS3_RSS_IND_TBL_SIZE_MAX);
721 		return -EINVAL;
722 	}
723 
724 	return 0;
725 }
726 
727 static int
728 hns3vf_query_dev_specifications(struct hns3_hw *hw)
729 {
730 	struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
731 	int ret;
732 	int i;
733 
734 	for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
735 		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
736 					  true);
737 		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
738 	}
739 	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
740 
741 	ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
742 	if (ret)
743 		return ret;
744 
745 	hns3vf_parse_dev_specifications(hw, desc);
746 
747 	return hns3vf_check_dev_specifications(hw);
748 }
749 
750 void
751 hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
752 {
753 	uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
754 				   HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
755 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
756 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
757 
758 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
759 		__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
760 					  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
761 }
762 
763 static void
764 hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
765 {
766 #define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS	500
767 
768 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
769 	int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
770 	uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
771 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
772 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
773 
774 	__atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
775 			 __ATOMIC_RELEASE);
776 
777 	(void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
778 				NULL, 0);
779 
780 	while (remain_ms > 0) {
781 		rte_delay_ms(HNS3_POLL_RESPONE_MS);
782 		if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
783 			HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
784 			break;
785 		remain_ms--;
786 	}
787 
788 	/*
789 	 * On exit from the loop above, pf_push_lsc_cap is in one of three
790 	 * states: unknown (the PF did not ack), not_supported, or supported.
791 	 * Treat the 'unknown' state as 'not_supported' here.
792 	 */
793 	__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
794 				  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
795 
796 	if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
797 		HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
798 		hns3_info(hw, "detect PF support push link status change!");
799 	} else {
800 		/*
801 		 * The framework already set the RTE_ETH_DEV_INTR_LSC bit
802 		 * because the driver declared RTE_PCI_DRV_INTR_LSC in
803 		 * drv_flags, so clear the RTE_ETH_DEV_INTR_LSC capability here.
804 		 */
805 		dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
806 	}
807 }
808 
809 static int
810 hns3vf_get_capability(struct hns3_hw *hw)
811 {
812 	int ret;
813 
814 	ret = hns3_get_pci_revision_id(hw, &hw->revision);
815 	if (ret)
816 		return ret;
817 
818 	if (hw->revision < PCI_REVISION_ID_HIP09_A) {
819 		hns3vf_set_default_dev_specifications(hw);
820 		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
821 		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
822 		hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
823 		hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
824 		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
825 		hw->rss_info.ipv6_sctp_offload_supported = false;
826 		hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
827 		return 0;
828 	}
829 
830 	ret = hns3vf_query_dev_specifications(hw);
831 	if (ret) {
832 		PMD_INIT_LOG(ERR,
833 			     "failed to query dev specifications, ret = %d",
834 			     ret);
835 		return ret;
836 	}
837 
838 	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
839 	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
840 	hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
841 	hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
842 	hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
843 	hw->rss_info.ipv6_sctp_offload_supported = true;
844 	hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
845 
846 	return 0;
847 }
848 
849 static int
850 hns3vf_check_tqp_info(struct hns3_hw *hw)
851 {
852 	if (hw->tqps_num == 0) {
853 		PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
854 		return -EINVAL;
855 	}
856 
857 	if (hw->rss_size_max == 0) {
858 		PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
859 		return -EINVAL;
860 	}
861 
862 	hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);
863 
864 	return 0;
865 }
866 
867 static int
868 hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
869 {
870 	uint8_t resp_msg;
871 	int ret;
872 
873 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
874 				HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0,
875 				true, &resp_msg, sizeof(resp_msg));
876 	if (ret) {
877 		if (ret == -ETIME) {
878 			/*
879 			 * Failing to get the current port-based VLAN state
880 			 * from the PF driver does not affect the VF driver's
881 			 * basic function. Because the VF driver relies on the
882 			 * hns3 PF kernel ethdev driver, no failure is returned
883 			 * when the return value is -ETIME, to avoid
884 			 * compatibility issues with older versions of the PF
885 			 * driver. This return value covers the following
886 			 * scenarios:
887 			 * 1) the firmware didn't return the result in time
888 			 * 2) the result returned by the firmware timed out
889 			 * 3) an older version of the kernel-side PF driver
890 			 *    does not support this mailbox message.
891 			 * For scenarios 1 and 2, it is most likely that a
892 			 * hardware error or a hardware reset has occurred;
893 			 * such errors will be caught by other functions.
894 			 */
895 			PMD_INIT_LOG(WARNING,
896 				"failed to get PVID state for timeout, maybe "
897 				"kernel side PF driver doesn't support this "
898 				"mailbox message, or firmware didn't respond.");
899 			resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
900 		} else {
901 			PMD_INIT_LOG(ERR, "failed to get port based VLAN state,"
902 				" ret = %d", ret);
903 			return ret;
904 		}
905 	}
906 	hw->port_base_vlan_cfg.state = resp_msg ?
907 		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
908 	return 0;
909 }
910 
911 static int
912 hns3vf_get_queue_info(struct hns3_hw *hw)
913 {
914 #define HNS3VF_TQPS_RSS_INFO_LEN	6
915 	uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
916 	int ret;
917 
918 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
919 				resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
920 	if (ret) {
921 		PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
922 		return ret;
923 	}
924 
925 	memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
926 	memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
927 
928 	return hns3vf_check_tqp_info(hw);
929 }
930 
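/* Translate the capability bits reported by the PF into driver capability flags. */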
931 static void
932 hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps)
933 {
934 	if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B))
935 		hns3_set_bit(hw->capability,
936 				HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1);
937 }
938 
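/* Count the number of traffic classes enabled in the hardware TC bitmap. */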
939 static int
940 hns3vf_get_num_tc(struct hns3_hw *hw)
941 {
942 	uint8_t num_tc = 0;
943 	uint32_t i;
944 
945 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
946 		if (hw->hw_tc_map & BIT(i))
947 			num_tc++;
948 	}
949 	return num_tc;
950 }
951 
952 static int
953 hns3vf_get_basic_info(struct hns3_hw *hw)
954 {
955 	uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
956 	struct hns3_basic_info *basic_info;
957 	int ret;
958 
959 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0,
960 				true, resp_msg, sizeof(resp_msg));
961 	if (ret) {
962 		hns3_err(hw, "failed to get basic info from PF, ret = %d.",
963 				ret);
964 		return ret;
965 	}
966 
967 	basic_info = (struct hns3_basic_info *)resp_msg;
968 	hw->hw_tc_map = basic_info->hw_tc_map;
969 	hw->num_tc = hns3vf_get_num_tc(hw);
970 	hw->pf_vf_if_version = basic_info->pf_vf_if_version;
971 	hns3vf_update_caps(hw, basic_info->caps);
972 
973 	return 0;
974 }
975 
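/* Fetch the MAC address that the host (PF) has assigned to this VF. */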
976 static int
977 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
978 {
979 	uint8_t host_mac[RTE_ETHER_ADDR_LEN];
980 	int ret;
981 
982 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
983 				true, host_mac, RTE_ETHER_ADDR_LEN);
984 	if (ret) {
985 		hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
986 		return ret;
987 	}
988 
989 	memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
990 
991 	return 0;
992 }
993 
994 static int
995 hns3vf_get_configuration(struct hns3_hw *hw)
996 {
997 	int ret;
998 
999 	hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
1000 	hw->rss_dis_flag = false;
1001 
1002 	/* Get device capability */
1003 	ret = hns3vf_get_capability(hw);
1004 	if (ret) {
1005 		PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
1006 		return ret;
1007 	}
1008 
1009 	hns3vf_get_push_lsc_cap(hw);
1010 
1011 	/* Get basic info from PF */
1012 	ret = hns3vf_get_basic_info(hw);
1013 	if (ret)
1014 		return ret;
1015 
1016 	/* Get queue configuration from PF */
1017 	ret = hns3vf_get_queue_info(hw);
1018 	if (ret)
1019 		return ret;
1020 
1021 	/* Get user defined VF MAC addr from PF */
1022 	ret = hns3vf_get_host_mac_addr(hw);
1023 	if (ret)
1024 		return ret;
1025 
1026 	return hns3vf_get_port_base_vlan_filter_state(hw);
1027 }
1028 
1029 static void
1030 hns3vf_request_link_info(struct hns3_hw *hw)
1031 {
1032 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1033 	bool send_req;
1034 	int ret;
1035 
1036 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1037 		return;
1038 
1039 	send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
1040 		   vf->req_link_info_cnt > 0;
1041 	if (!send_req)
1042 		return;
1043 
1044 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
1045 				NULL, 0);
1046 	if (ret) {
1047 		hns3_err(hw, "failed to fetch link status, ret = %d", ret);
1048 		return;
1049 	}
1050 
1051 	if (vf->req_link_info_cnt > 0)
1052 		vf->req_link_info_cnt--;
1053 }
1054 
1055 void
1056 hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
1057 			  uint32_t link_speed, uint8_t link_duplex)
1058 {
1059 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1060 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1061 	struct hns3_mac *mac = &hw->mac;
1062 	int ret;
1063 
1064 	/*
1065 	 * The PF kernel driver may push link status while the VF driver is
1066 	 * resetting. The driver stops the polling job in this case and starts
1067 	 * it again after the reset is done.
1068 	 * When the polling job is started, the driver gets the initial link
1069 	 * status by sending a request to the PF kernel driver, and then updates
1070 	 * it by processing the PF kernel driver's link status mailbox messages.
1071 	 */
1072 	if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
1073 		return;
1074 
1075 	if (hw->adapter_state != HNS3_NIC_STARTED)
1076 		return;
1077 
1078 	mac->link_status = link_status;
1079 	mac->link_speed = link_speed;
1080 	mac->link_duplex = link_duplex;
1081 	ret = hns3vf_dev_link_update(dev, 0);
1082 	if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
1083 		hns3_start_report_lse(dev);
1084 }
1085 
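/*
 * Add or remove a VLAN filter entry through the PF. The mailbox payload is:
 * byte 0 - kill flag (1 to remove, 0 to add), bytes 1-2 - VLAN ID,
 * bytes 3-4 - VLAN protocol (0x8100).
 */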
1086 static int
1087 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
1088 {
1089 #define HNS3VF_VLAN_MBX_MSG_LEN 5
1090 	struct hns3_hw *hw = &hns->hw;
1091 	uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
1092 	uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
1093 	uint8_t is_kill = on ? 0 : 1;
1094 
1095 	msg_data[0] = is_kill;
1096 	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
1097 	memcpy(&msg_data[3], &proto, sizeof(proto));
1098 
1099 	return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
1100 				 msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
1101 				 0);
1102 }
1103 
1104 static int
1105 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1106 {
1107 	struct hns3_adapter *hns = dev->data->dev_private;
1108 	struct hns3_hw *hw = &hns->hw;
1109 	int ret;
1110 
1111 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1112 		hns3_err(hw,
1113 			 "vf set vlan id failed during resetting, vlan_id =%u",
1114 			 vlan_id);
1115 		return -EIO;
1116 	}
1117 	rte_spinlock_lock(&hw->lock);
1118 	ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1119 	rte_spinlock_unlock(&hw->lock);
1120 	if (ret)
1121 		hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
1122 			 vlan_id, ret);
1123 
1124 	return ret;
1125 }
1126 
1127 static int
1128 hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
1129 {
1130 	uint8_t msg_data;
1131 	int ret;
1132 
1133 	if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
1134 		return 0;
1135 
1136 	msg_data = enable ? 1 : 0;
1137 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
1138 			HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data,
1139 			sizeof(msg_data), true, NULL, 0);
1140 	if (ret)
1141 		hns3_err(hw, "%s vlan filter failed, ret = %d.",
1142 				enable ? "enable" : "disable", ret);
1143 
1144 	return ret;
1145 }
1146 
1147 static int
1148 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1149 {
1150 	uint8_t msg_data;
1151 	int ret;
1152 
1153 	msg_data = enable ? 1 : 0;
1154 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
1155 				&msg_data, sizeof(msg_data), false, NULL, 0);
1156 	if (ret)
1157 		hns3_err(hw, "vf %s strip failed, ret = %d.",
1158 				enable ? "enable" : "disable", ret);
1159 
1160 	return ret;
1161 }
1162 
1163 static int
1164 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1165 {
1166 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1167 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1168 	unsigned int tmp_mask;
1169 	int ret = 0;
1170 
1171 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1172 		hns3_err(hw, "vf set vlan offload failed during resetting, "
1173 			     "mask = 0x%x", mask);
1174 		return -EIO;
1175 	}
1176 
1177 	tmp_mask = (unsigned int)mask;
1178 
1179 	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
1180 		rte_spinlock_lock(&hw->lock);
1181 		/* Enable or disable VLAN filter */
1182 		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1183 			ret = hns3vf_en_vlan_filter(hw, true);
1184 		else
1185 			ret = hns3vf_en_vlan_filter(hw, false);
1186 		rte_spinlock_unlock(&hw->lock);
1187 		if (ret)
1188 			return ret;
1189 	}
1190 
1191 	/* Vlan stripping setting */
1192 	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
1193 		rte_spinlock_lock(&hw->lock);
1194 		/* Enable or disable VLAN stripping */
1195 		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1196 			ret = hns3vf_en_hw_strip_rxvtag(hw, true);
1197 		else
1198 			ret = hns3vf_en_hw_strip_rxvtag(hw, false);
1199 		rte_spinlock_unlock(&hw->lock);
1200 	}
1201 
1202 	return ret;
1203 }
1204 
1205 static int
1206 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
1207 {
1208 	struct rte_vlan_filter_conf *vfc;
1209 	struct hns3_hw *hw = &hns->hw;
1210 	uint16_t vlan_id;
1211 	uint64_t vbit;
1212 	uint64_t ids;
1213 	int ret = 0;
1214 	uint32_t i;
1215 
1216 	vfc = &hw->data->vlan_filter_conf;
1217 	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1218 		if (vfc->ids[i] == 0)
1219 			continue;
1220 		ids = vfc->ids[i];
1221 		while (ids) {
1222 			/*
1223 			 * Each entry of vfc->ids is 64 bits wide; each bit
1224 			 * corresponds to one VLAN ID.
1225 			 */
1226 			vlan_id = 64 * i;
1227 			/* count trailing zeroes */
1228 			vbit = ~ids & (ids - 1);
1229 			/* clear least significant bit set */
1230 			ids ^= (ids ^ (ids - 1)) ^ vbit;
1231 			for (; vbit;) {
1232 				vbit >>= 1;
1233 				vlan_id++;
1234 			}
1235 			ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1236 			if (ret) {
1237 				hns3_err(hw,
1238 					 "VF handle vlan table failed, ret =%d, on = %d",
1239 					 ret, on);
1240 				return ret;
1241 			}
1242 		}
1243 	}
1244 
1245 	return ret;
1246 }
1247 
1248 static int
1249 hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
1250 {
1251 	return hns3vf_handle_all_vlan_table(hns, 0);
1252 }
1253 
1254 static int
1255 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1256 {
1257 	struct hns3_hw *hw = &hns->hw;
1258 	struct rte_eth_conf *dev_conf;
1259 	bool en;
1260 	int ret;
1261 
1262 	dev_conf = &hw->data->dev_conf;
1263 	en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
1264 								   : false;
1265 	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1266 	if (ret)
1267 		hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1268 			 ret);
1269 	return ret;
1270 }
1271 
1272 static int
1273 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1274 {
1275 	struct hns3_adapter *hns = dev->data->dev_private;
1276 	struct rte_eth_dev_data *data = dev->data;
1277 	struct hns3_hw *hw = &hns->hw;
1278 	int ret;
1279 
1280 	if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1281 	    data->dev_conf.txmode.hw_vlan_reject_untagged ||
1282 	    data->dev_conf.txmode.hw_vlan_insert_pvid) {
1283 		hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1284 			      "or hw_vlan_insert_pvid is not supported!");
1285 	}
1286 
1287 	/* Apply vlan offload setting */
1288 	ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
1289 					RTE_ETH_VLAN_FILTER_MASK);
1290 	if (ret)
1291 		hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
1292 
1293 	return ret;
1294 }
1295 
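/* Notify the PF of this VF's alive state via the HNS3_MBX_SET_ALIVE message. */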
1296 static int
1297 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1298 {
1299 	uint8_t msg_data;
1300 
1301 	msg_data = alive ? 1 : 0;
1302 	return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1303 				 sizeof(msg_data), false, NULL, 0);
1304 }
1305 
1306 static void
1307 hns3vf_keep_alive_handler(void *param)
1308 {
1309 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1310 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1311 	struct hns3_hw *hw = &hns->hw;
1312 	int ret;
1313 
1314 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1315 				false, NULL, 0);
1316 	if (ret)
1317 		hns3_err(hw, "VF failed to send keep alive cmd, ret = %d",
1318 			 ret);
1319 
1320 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1321 			  eth_dev);
1322 }
1323 
1324 static void
1325 hns3vf_service_handler(void *param)
1326 {
1327 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1328 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1329 	struct hns3_hw *hw = &hns->hw;
1330 
1331 	/*
1332 	 * The link status query and reset processing are executed in the
1333 	 * interrupt thread. When an IMP reset occurs, the IMP will not respond
1334 	 * and the query operation will time out after 30ms. With multiple
1335 	 * PFs/VFs, each query timeout can prevent the IMP reset interrupt from
1336 	 * being handled within 100ms.
1337 	 * Before querying the link status, check whether there is a reset
1338 	 * pending, and if so, abandon the query.
1339 	 */
1340 	if (!hns3vf_is_reset_pending(hns))
1341 		hns3vf_request_link_info(hw);
1342 	else
1343 		hns3_warn(hw, "Cancel the query when reset is pending");
1344 
1345 	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1346 			  eth_dev);
1347 }
1348 
1349 static void
1350 hns3vf_start_poll_job(struct rte_eth_dev *dev)
1351 {
1352 #define HNS3_REQUEST_LINK_INFO_REMAINS_CNT	3
1353 
1354 	struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1355 
1356 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
1357 		vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
1358 
1359 	__atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
1360 
1361 	hns3vf_service_handler(dev);
1362 }
1363 
1364 static void
1365 hns3vf_stop_poll_job(struct rte_eth_dev *dev)
1366 {
1367 	struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1368 
1369 	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
1370 
1371 	__atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
1372 }
1373 
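/*
 * Query the firmware for the number of MSI-X vectors allocated to this VF and
 * record it; at least HNS3_MIN_VECTOR_NUM vectors are required.
 */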
1374 static int
1375 hns3_query_vf_resource(struct hns3_hw *hw)
1376 {
1377 	struct hns3_vf_res_cmd *req;
1378 	struct hns3_cmd_desc desc;
1379 	uint16_t num_msi;
1380 	int ret;
1381 
1382 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1383 	ret = hns3_cmd_send(hw, &desc, 1);
1384 	if (ret) {
1385 		hns3_err(hw, "query vf resource failed, ret = %d", ret);
1386 		return ret;
1387 	}
1388 
1389 	req = (struct hns3_vf_res_cmd *)desc.data;
1390 	num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1391 				 HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
1392 	if (num_msi < HNS3_MIN_VECTOR_NUM) {
1393 		hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
1394 			 num_msi, HNS3_MIN_VECTOR_NUM);
1395 		return -EINVAL;
1396 	}
1397 
1398 	hw->num_msi = num_msi;
1399 
1400 	return 0;
1401 }
1402 
1403 static int
1404 hns3vf_init_hardware(struct hns3_adapter *hns)
1405 {
1406 	struct hns3_hw *hw = &hns->hw;
1407 	uint16_t mtu = hw->data->mtu;
1408 	int ret;
1409 
1410 	ret = hns3vf_set_promisc_mode(hw, true, false, false);
1411 	if (ret)
1412 		return ret;
1413 
1414 	ret = hns3vf_config_mtu(hw, mtu);
1415 	if (ret)
1416 		goto err_init_hardware;
1417 
1418 	ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1419 	if (ret) {
1420 		PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1421 		goto err_init_hardware;
1422 	}
1423 
1424 	ret = hns3_config_gro(hw, false);
1425 	if (ret) {
1426 		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1427 		goto err_init_hardware;
1428 	}
1429 
1430 	/*
1431 	 * During initialization, all hardware mappings between queues and
1432 	 * interrupt vectors need to be cleared, so that errors caused by
1433 	 * residual configurations, such as unexpected interrupts, can be
1434 	 * avoided.
1435 	 */
1436 	ret = hns3_init_ring_with_vector(hw);
1437 	if (ret) {
1438 		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
1439 		goto err_init_hardware;
1440 	}
1441 
1442 	return 0;
1443 
1444 err_init_hardware:
1445 	(void)hns3vf_set_promisc_mode(hw, false, false, false);
1446 	return ret;
1447 }
1448 
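/* Ask the PF to clear this VF's vport list (HNS3_MBX_VPORT_LIST_CLEAR). */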
1449 static int
1450 hns3vf_clear_vport_list(struct hns3_hw *hw)
1451 {
1452 	return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
1453 				 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
1454 				 NULL, 0);
1455 }
1456 
1457 static int
1458 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1459 {
1460 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1461 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1462 	struct hns3_hw *hw = &hns->hw;
1463 	int ret;
1464 
1465 	PMD_INIT_FUNC_TRACE();
1466 
1467 	/* Get hardware io base address from pcie BAR2 IO space */
1468 	hw->io_base = pci_dev->mem_resource[2].addr;
1469 
1470 	/* Firmware command queue initialize */
1471 	ret = hns3_cmd_init_queue(hw);
1472 	if (ret) {
1473 		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1474 		goto err_cmd_init_queue;
1475 	}
1476 
1477 	/* Firmware command initialize */
1478 	ret = hns3_cmd_init(hw);
1479 	if (ret) {
1480 		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1481 		goto err_cmd_init;
1482 	}
1483 
1484 	hns3_tx_push_init(eth_dev);
1485 
1486 	/* Get VF resource */
1487 	ret = hns3_query_vf_resource(hw);
1488 	if (ret)
1489 		goto err_cmd_init;
1490 
1491 	rte_spinlock_init(&hw->mbx_resp.lock);
1492 
1493 	hns3vf_clear_event_cause(hw, 0);
1494 
1495 	ret = rte_intr_callback_register(pci_dev->intr_handle,
1496 					 hns3vf_interrupt_handler, eth_dev);
1497 	if (ret) {
1498 		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1499 		goto err_intr_callback_register;
1500 	}
1501 
1502 	/* Enable interrupt */
1503 	rte_intr_enable(pci_dev->intr_handle);
1504 	hns3vf_enable_irq0(hw);
1505 
1506 	/* Get configuration from PF */
1507 	ret = hns3vf_get_configuration(hw);
1508 	if (ret) {
1509 		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1510 		goto err_get_config;
1511 	}
1512 
1513 	ret = hns3_tqp_stats_init(hw);
1514 	if (ret)
1515 		goto err_get_config;
1516 
1517 	/* Hardware statistics of imissed registers cleared. */
1518 	ret = hns3_update_imissed_stats(hw, true);
1519 	if (ret) {
1520 		hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
1521 		goto err_set_tc_queue;
1522 	}
1523 
1524 	ret = hns3_queue_to_tc_mapping(hw, hw->tqps_num, hw->tqps_num);
1525 	if (ret) {
1526 		PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
1527 		goto err_set_tc_queue;
1528 	}
1529 
1530 	ret = hns3vf_clear_vport_list(hw);
1531 	if (ret) {
1532 		PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
1533 		goto err_set_tc_queue;
1534 	}
1535 
1536 	ret = hns3vf_init_hardware(hns);
1537 	if (ret)
1538 		goto err_set_tc_queue;
1539 
1540 	hns3_rss_set_default_args(hw);
1541 
1542 	ret = hns3vf_set_alive(hw, true);
1543 	if (ret) {
1544 		PMD_INIT_LOG(ERR, "Failed to send alive status to PF: %d", ret);
1545 		goto err_set_tc_queue;
1546 	}
1547 
1548 	return 0;
1549 
1550 err_set_tc_queue:
1551 	hns3_tqp_stats_uninit(hw);
1552 
1553 err_get_config:
1554 	hns3vf_disable_irq0(hw);
1555 	rte_intr_disable(pci_dev->intr_handle);
1556 	hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1557 			     eth_dev);
1558 err_intr_callback_register:
1559 err_cmd_init:
1560 	hns3_cmd_uninit(hw);
1561 	hns3_cmd_destroy_queue(hw);
1562 err_cmd_init_queue:
1563 	hw->io_base = NULL;
1564 
1565 	return ret;
1566 }
1567 
1568 static void
1569 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
1570 {
1571 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1572 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1573 	struct hns3_hw *hw = &hns->hw;
1574 
1575 	PMD_INIT_FUNC_TRACE();
1576 
1577 	hns3_rss_uninit(hns);
1578 	(void)hns3_config_gro(hw, false);
1579 	(void)hns3vf_set_alive(hw, false);
1580 	(void)hns3vf_set_promisc_mode(hw, false, false, false);
1581 	hns3_flow_uninit(eth_dev);
1582 	hns3_tqp_stats_uninit(hw);
1583 	hns3vf_disable_irq0(hw);
1584 	rte_intr_disable(pci_dev->intr_handle);
1585 	hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1586 			     eth_dev);
1587 	hns3_cmd_uninit(hw);
1588 	hns3_cmd_destroy_queue(hw);
1589 	hw->io_base = NULL;
1590 }
1591 
1592 static int
1593 hns3vf_do_stop(struct hns3_adapter *hns)
1594 {
1595 	struct hns3_hw *hw = &hns->hw;
1596 	int ret;
1597 
1598 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
1599 
1600 	/*
1601 	 * The "hns3vf_do_stop" function will also be called by .stop_service to
1602 	 * prepare for a reset. At the time of a global or IMP reset, the
1603 	 * command to stop the Tx/Rx queues cannot be sent. The mbufs in the
1604 	 * Tx/Rx queues may still be accessed during the reset process, so they
1605 	 * cannot be released during the reset and must be released after the
1606 	 * reset is completed.
1607 	 */
1608 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
1609 		hns3_dev_release_mbufs(hns);
1610 
1611 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
1612 		hns3_configure_all_mac_addr(hns, true);
1613 		ret = hns3_reset_all_tqps(hns);
1614 		if (ret) {
1615 			hns3_err(hw, "failed to reset all queues ret = %d",
1616 				 ret);
1617 			return ret;
1618 		}
1619 	}
1620 	return 0;
1621 }
1622 
1623 static int
1624 hns3vf_dev_stop(struct rte_eth_dev *dev)
1625 {
1626 	struct hns3_adapter *hns = dev->data->dev_private;
1627 	struct hns3_hw *hw = &hns->hw;
1628 
1629 	PMD_INIT_FUNC_TRACE();
1630 	dev->data->dev_started = 0;
1631 
1632 	hw->adapter_state = HNS3_NIC_STOPPING;
1633 	hns3_set_rxtx_function(dev);
1634 	rte_wmb();
1635 	/* Disable datapath on secondary process. */
1636 	hns3_mp_req_stop_rxtx(dev);
1637 	/* Prevent crashes when queues are still in use. */
1638 	rte_delay_ms(hw->cfg_max_queues);
1639 
1640 	rte_spinlock_lock(&hw->lock);
1641 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1642 		hns3_stop_tqps(hw);
1643 		hns3vf_do_stop(hns);
1644 		hns3_unmap_rx_interrupt(dev);
1645 		hw->adapter_state = HNS3_NIC_CONFIGURED;
1646 	}
1647 	hns3_rx_scattered_reset(dev);
1648 	hns3vf_stop_poll_job(dev);
1649 	hns3_stop_report_lse(dev);
1650 	rte_spinlock_unlock(&hw->lock);
1651 
1652 	return 0;
1653 }
1654 
1655 static int
1656 hns3vf_dev_close(struct rte_eth_dev *eth_dev)
1657 {
1658 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1659 	struct hns3_hw *hw = &hns->hw;
1660 	int ret = 0;
1661 
1662 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1663 		hns3_mp_uninit(eth_dev);
1664 		return 0;
1665 	}
1666 
1667 	if (hw->adapter_state == HNS3_NIC_STARTED)
1668 		ret = hns3vf_dev_stop(eth_dev);
1669 
1670 	hw->adapter_state = HNS3_NIC_CLOSING;
1671 	hns3_reset_abort(hns);
1672 	hw->adapter_state = HNS3_NIC_CLOSED;
1673 	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1674 	hns3_configure_all_mc_mac_addr(hns, true);
1675 	hns3vf_remove_all_vlan_table(hns);
1676 	hns3vf_uninit_vf(eth_dev);
1677 	hns3_free_all_queues(eth_dev);
1678 	rte_free(hw->reset.wait_data);
1679 	hns3_mp_uninit(eth_dev);
1680 	hns3_warn(hw, "Close port %u finished", hw->data->port_id);
1681 
1682 	return ret;
1683 }
1684 
1685 static int
1686 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1687 		       __rte_unused int wait_to_complete)
1688 {
1689 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1690 	struct hns3_hw *hw = &hns->hw;
1691 	struct hns3_mac *mac = &hw->mac;
1692 	struct rte_eth_link new_link;
1693 
1694 	memset(&new_link, 0, sizeof(new_link));
1695 	switch (mac->link_speed) {
1696 	case RTE_ETH_SPEED_NUM_10M:
1697 	case RTE_ETH_SPEED_NUM_100M:
1698 	case RTE_ETH_SPEED_NUM_1G:
1699 	case RTE_ETH_SPEED_NUM_10G:
1700 	case RTE_ETH_SPEED_NUM_25G:
1701 	case RTE_ETH_SPEED_NUM_40G:
1702 	case RTE_ETH_SPEED_NUM_50G:
1703 	case RTE_ETH_SPEED_NUM_100G:
1704 	case RTE_ETH_SPEED_NUM_200G:
1705 		if (mac->link_status)
1706 			new_link.link_speed = mac->link_speed;
1707 		break;
1708 	default:
1709 		if (mac->link_status)
1710 			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
1711 		break;
1712 	}
1713 
1714 	if (!mac->link_status)
1715 		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1716 
1717 	new_link.link_duplex = mac->link_duplex;
1718 	new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
1719 	new_link.link_autoneg =
1720 	    !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
1721 
1722 	return rte_eth_linkstatus_set(eth_dev, &new_link);
1723 }
1724 
1725 static int
1726 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1727 {
1728 	struct hns3_hw *hw = &hns->hw;
1729 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
1730 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
1731 	int ret;
1732 
1733 	ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
1734 	if (ret)
1735 		return ret;
1736 
1737 	hns3_enable_rxd_adv_layout(hw);
1738 
1739 	ret = hns3_init_queues(hns, reset_queue);
1740 	if (ret)
1741 		hns3_err(hw, "failed to init queues, ret = %d.", ret);
1742 
1743 	return ret;
1744 }
1745 
1746 static void
1747 hns3vf_restore_filter(struct rte_eth_dev *dev)
1748 {
1749 	hns3_restore_rss_filter(dev);
1750 }
1751 
1752 static int
1753 hns3vf_dev_start(struct rte_eth_dev *dev)
1754 {
1755 	struct hns3_adapter *hns = dev->data->dev_private;
1756 	struct hns3_hw *hw = &hns->hw;
1757 	int ret;
1758 
1759 	PMD_INIT_FUNC_TRACE();
1760 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1761 		return -EBUSY;
1762 
1763 	rte_spinlock_lock(&hw->lock);
1764 	hw->adapter_state = HNS3_NIC_STARTING;
1765 	ret = hns3vf_do_start(hns, true);
1766 	if (ret) {
1767 		hw->adapter_state = HNS3_NIC_CONFIGURED;
1768 		rte_spinlock_unlock(&hw->lock);
1769 		return ret;
1770 	}
1771 	ret = hns3_map_rx_interrupt(dev);
1772 	if (ret)
1773 		goto map_rx_inter_err;
1774 
1775 	/*
1776 	 * In the new version of the network engine, three registers control the
1777 	 * status of a TQP (a pair of one Tx queue and one Rx queue). One
1778 	 * register controls the enabling of the Tx queue, another controls the
1779 	 * enabling of the Rx queue, and the last is the master switch that
1780 	 * enables the TQP. Both the Tx register and the TQP register must be
1781 	 * enabled at the same time to enable a Tx queue. The same applies to
1782 	 * the Rx queue. For the older network engine, this function only
1783 	 * refreshes the enabled flag, which is used to update the queue status
1784 	 * in the DPDK framework.
1785 	 */
1786 	ret = hns3_start_all_txqs(dev);
1787 	if (ret)
1788 		goto map_rx_inter_err;
1789 
1790 	ret = hns3_start_all_rxqs(dev);
1791 	if (ret)
1792 		goto start_all_rxqs_fail;
1793 
1794 	hw->adapter_state = HNS3_NIC_STARTED;
1795 	rte_spinlock_unlock(&hw->lock);
1796 
1797 	hns3_rx_scattered_calc(dev);
1798 	hns3_set_rxtx_function(dev);
1799 	hns3_mp_req_start_rxtx(dev);
1800 
1801 	hns3vf_restore_filter(dev);
1802 
1803 	/* Enable interrupt of all rx queues before enabling queues */
1804 	hns3_dev_all_rx_queue_intr_enable(hw, true);
1805 	hns3_start_tqps(hw);
1806 
1807 	if (dev->data->dev_conf.intr_conf.lsc != 0)
1808 		hns3vf_dev_link_update(dev, 0);
1809 	hns3vf_start_poll_job(dev);
1810 
1811 	return ret;
1812 
1813 start_all_rxqs_fail:
1814 	hns3_stop_all_txqs(dev);
1815 map_rx_inter_err:
1816 	(void)hns3vf_do_stop(hns);
1817 	hw->adapter_state = HNS3_NIC_CONFIGURED;
1818 	rte_spinlock_unlock(&hw->lock);
1819 
1820 	return ret;
1821 }
1822 
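/*
 * Check the hardware reset status registers: for a VF reset, poll
 * HNS3_VF_RST_ING; for other reset levels, poll the function reset bits in
 * HNS3_FUN_RST_ING. The reset is done once the relevant bits are cleared.
 */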
1823 static bool
1824 is_vf_reset_done(struct hns3_hw *hw)
1825 {
1826 #define HNS3_FUN_RST_ING_BITS \
1827 	(BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1828 	 BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1829 	 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1830 	 BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1831 
1832 	uint32_t val;
1833 
1834 	if (hw->reset.level == HNS3_VF_RESET) {
1835 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1836 		if (val & HNS3_VF_RST_ING_BIT)
1837 			return false;
1838 	} else {
1839 		val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1840 		if (val & HNS3_FUN_RST_ING_BITS)
1841 			return false;
1842 	}
1843 	return true;
1844 }
1845 
1846 bool
1847 hns3vf_is_reset_pending(struct hns3_adapter *hns)
1848 {
1849 	struct hns3_hw *hw = &hns->hw;
1850 	enum hns3_reset_level reset;
1851 
1852 	/*
1853 	 * According to the PCIe protocol, an FLR on a PF device resets the PF
1854 	 * state as well as the SR-IOV extended capability, including VF
1855 	 * Enable, which means that the VFs no longer exist.
1856 	 *
1857 	 * HNS3_VF_FULL_RESET means the PF device is in FLR reset. When the PF
1858 	 * device is in the FLR stage, the register state of the VF device is
1859 	 * not reliable, so register state detection cannot be carried out. In
1860 	 * this case, we just ignore the register state and return false to
1861 	 * indicate that there is no other reset state for the driver to handle.
1862 	 */
1863 	if (hw->reset.level == HNS3_VF_FULL_RESET)
1864 		return false;
1865 
1866 	/* Check the registers to confirm whether a reset is pending */
1867 	hns3vf_check_event_cause(hns, NULL);
1868 	reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
1869 	if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET &&
1870 	    hw->reset.level < reset) {
1871 		hns3_warn(hw, "High level reset %d is pending", reset);
1872 		return true;
1873 	}
1874 	return false;
1875 }
1876 
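/*
 * Driven repeatedly by the reset framework: the first call arms a periodic
 * alarm (hns3_wait_callback) with is_vf_reset_done() as the completion check;
 * later calls translate the wait_data result into 0 (ready), -EAGAIN (still
 * waiting) or -ETIME (timed out). On success, an extra one-shot delay of
 * HNS3_WAIT_PF_RESET_READY_TIME seconds may be inserted to give the PF time
 * to finish its own reset processing.
 */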
1877 static int
1878 hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
1879 {
1880 #define HNS3_WAIT_PF_RESET_READY_TIME 5
1881 	struct hns3_hw *hw = &hns->hw;
1882 	struct hns3_wait_data *wait_data = hw->reset.wait_data;
1883 	struct timeval tv;
1884 
1885 	if (wait_data->result == HNS3_WAIT_SUCCESS) {
1886 		/*
1887 		 * Even after the VF reset is ready, the PF may not have finished
1888 		 * its own reset processing. Mailbox messages from the VF may fail
1889 		 * during the PF reset, so it is better to add an extra delay.
1890 		 */
1891 		if (hw->reset.level == HNS3_VF_FUNC_RESET ||
1892 		    hw->reset.level == HNS3_FLR_RESET)
1893 			return 0;
1894 		/* Reset retry process, no need to add extra delay. */
1895 		if (hw->reset.attempts)
1896 			return 0;
1897 		if (wait_data->check_completion == NULL)
1898 			return 0;
1899 
1900 		wait_data->check_completion = NULL;
1901 		wait_data->interval = HNS3_WAIT_PF_RESET_READY_TIME *
1902 			MSEC_PER_SEC * USEC_PER_MSEC;
1903 		wait_data->count = 1;
1904 		wait_data->result = HNS3_WAIT_REQUEST;
1905 		rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
1906 				  wait_data);
1907 		hns3_warn(hw, "hardware is ready, delay %d sec for PF reset to complete",
1908 				HNS3_WAIT_PF_RESET_READY_TIME);
1909 		return -EAGAIN;
1910 	} else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
1911 		hns3_clock_gettime(&tv);
1912 		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
1913 			  tv.tv_sec, tv.tv_usec);
1914 		return -ETIME;
1915 	} else if (wait_data->result == HNS3_WAIT_REQUEST)
1916 		return -EAGAIN;
1917 
1918 	wait_data->hns = hns;
1919 	wait_data->check_completion = is_vf_reset_done;
1920 	wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
1921 				HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms();
1922 	wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
1923 	wait_data->count = HNS3VF_RESET_WAIT_CNT;
1924 	wait_data->result = HNS3_WAIT_REQUEST;
1925 	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
1926 	return -EAGAIN;
1927 }
1928 
1929 static int
1930 hns3vf_prepare_reset(struct hns3_adapter *hns)
1931 {
1932 	struct hns3_hw *hw = &hns->hw;
1933 	int ret;
1934 
1935 	if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1936 		ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
1937 					0, true, NULL, 0);
1938 		if (ret)
1939 			return ret;
1940 	}
1941 	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
1942 
1943 	return 0;
1944 }
1945 
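/*
 * Stop-service callback used before a reset: report the link as down, stop
 * the polling job, disable the datapath in all processes, stop the queues
 * and, while the command queue is still usable, remove the multicast MAC
 * entries that the hardware cannot selectively delete on its own.
 */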
1946 static int
1947 hns3vf_stop_service(struct hns3_adapter *hns)
1948 {
1949 	struct hns3_hw *hw = &hns->hw;
1950 	struct rte_eth_dev *eth_dev;
1951 
1952 	eth_dev = &rte_eth_devices[hw->data->port_id];
1953 	if (hw->adapter_state == HNS3_NIC_STARTED) {
1954 		/*
1955 		 * Make sure the link status is updated before hns3vf_stop_poll_job
1956 		 * is called, since the update relies on the polling job existing.
1957 		 */
1958 		hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
1959 					  hw->mac.link_duplex);
1960 		hns3vf_stop_poll_job(eth_dev);
1961 	}
1962 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
1963 
1964 	hns3_set_rxtx_function(eth_dev);
1965 	rte_wmb();
1966 	/* Disable the datapath on secondary processes. */
1967 	hns3_mp_req_stop_rxtx(eth_dev);
1968 	rte_delay_ms(hw->cfg_max_queues);
1969 
1970 	rte_spinlock_lock(&hw->lock);
1971 	if (hw->adapter_state == HNS3_NIC_STARTED ||
1972 	    hw->adapter_state == HNS3_NIC_STOPPING) {
1973 		hns3_enable_all_queues(hw, false);
1974 		hns3vf_do_stop(hns);
1975 		hw->reset.mbuf_deferred_free = true;
1976 	} else
1977 		hw->reset.mbuf_deferred_free = false;
1978 
1979 	/*
1980 	 * It is cumbersome for hardware to pick-and-choose entries for
1981 	 * deletion from the table space. Hence, for a function reset, software
1982 	 * intervention is required to delete the entries.
1983 	 */
1984 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
1985 		hns3_configure_all_mc_mac_addr(hns, true);
1986 	rte_spinlock_unlock(&hw->lock);
1987 
1988 	return 0;
1989 }
1990 
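/*
 * Start-service callback used after a reset: restore the Rx/Tx burst
 * functions in all processes and, if the port was started, restart the poll
 * job, re-enable the Rx queue interrupts, restore the per-queue enable state
 * and finally enable all queues again.
 */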
1991 static int
1992 hns3vf_start_service(struct hns3_adapter *hns)
1993 {
1994 	struct hns3_hw *hw = &hns->hw;
1995 	struct rte_eth_dev *eth_dev;
1996 
1997 	eth_dev = &rte_eth_devices[hw->data->port_id];
1998 	hns3_set_rxtx_function(eth_dev);
1999 	hns3_mp_req_start_rxtx(eth_dev);
2000 	if (hw->adapter_state == HNS3_NIC_STARTED) {
2001 		hns3vf_start_poll_job(eth_dev);
2002 
2003 		/* Enable interrupt of all rx queues before enabling queues */
2004 		hns3_dev_all_rx_queue_intr_enable(hw, true);
2005 		/*
2006 		 * The enable state of each Rx and Tx queue needs to be recovered
2007 		 * after the reset, so restore it before enabling all TQPs.
2008 		 */
2009 		hns3_restore_tqp_enable_state(hw);
2010 		/*
2011 		 * Once initialization has finished, enable the queues to
2012 		 * receive and transmit packets.
2013 		 */
2014 		hns3_enable_all_queues(hw, true);
2015 	}
2016 
2017 	return 0;
2018 }
2019 
2020 static int
2021 hns3vf_check_default_mac_change(struct hns3_hw *hw)
2022 {
2023 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
2024 	struct rte_ether_addr *hw_mac;
2025 	int ret;
2026 
2027 	/*
2028 	 * The hns3 PF ethdev driver in the kernel supports setting a VF MAC
2029 	 * address on the host with the "ip link set ..." command. If the PF
2030 	 * kernel driver sets the MAC address of a VF device after that VF has
2031 	 * been initialized, the PF driver notifies the VF driver to reset the
2032 	 * VF device so that the new MAC address takes effect immediately. The
2033 	 * hns3 VF PMD should therefore check whether the MAC address has been
2034 	 * changed by the PF kernel ethdev driver; if so, the VF driver should
2035 	 * program the hardware with the new MAC address in the hardware
2036 	 * configuration recovery stage of the reset process.
2037 	 */
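	/*
	 * For reference, the host-side command typically looks like the
	 * following (interface name, VF index and MAC are placeholders):
	 *   ip link set <pf_netdev> vf <vf_id> mac <new_mac>
	 */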
2038 	ret = hns3vf_get_host_mac_addr(hw);
2039 	if (ret)
2040 		return ret;
2041 
2042 	hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
2043 	ret = rte_is_zero_ether_addr(hw_mac);
2044 	if (ret) {
2045 		rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
2046 	} else {
2047 		ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
2048 		if (!ret) {
2049 			rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
2050 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
2051 					      &hw->data->mac_addrs[0]);
2052 			hns3_warn(hw, "Default MAC address has been changed to:"
2053 				  " %s by the host PF kernel ethdev driver",
2054 				  mac_str);
2055 		}
2056 	}
2057 
2058 	return 0;
2059 }
2060 
2061 static int
2062 hns3vf_restore_conf(struct hns3_adapter *hns)
2063 {
2064 	struct hns3_hw *hw = &hns->hw;
2065 	int ret;
2066 
2067 	ret = hns3vf_check_default_mac_change(hw);
2068 	if (ret)
2069 		return ret;
2070 
2071 	ret = hns3_configure_all_mac_addr(hns, false);
2072 	if (ret)
2073 		return ret;
2074 
2075 	ret = hns3_configure_all_mc_mac_addr(hns, false);
2076 	if (ret)
2077 		goto err_mc_mac;
2078 
2079 	ret = hns3vf_restore_promisc(hns);
2080 	if (ret)
2081 		goto err_vlan_table;
2082 
2083 	ret = hns3vf_restore_vlan_conf(hns);
2084 	if (ret)
2085 		goto err_vlan_table;
2086 
2087 	ret = hns3vf_get_port_base_vlan_filter_state(hw);
2088 	if (ret)
2089 		goto err_vlan_table;
2090 
2091 	ret = hns3_restore_rx_interrupt(hw);
2092 	if (ret)
2093 		goto err_vlan_table;
2094 
2095 	ret = hns3_restore_gro_conf(hw);
2096 	if (ret)
2097 		goto err_vlan_table;
2098 
2099 	if (hw->adapter_state == HNS3_NIC_STARTED) {
2100 		ret = hns3vf_do_start(hns, false);
2101 		if (ret)
2102 			goto err_vlan_table;
2103 		hns3_info(hw, "hns3vf dev restart successful!");
2104 	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
2105 		hw->adapter_state = HNS3_NIC_CONFIGURED;
2106 
2107 	ret = hns3vf_set_alive(hw, true);
2108 	if (ret) {
2109 		hns3_err(hw, "failed to send VF alive message to PF: %d", ret);
2110 		goto err_vlan_table;
2111 	}
2112 
2113 	return 0;
2114 
2115 err_vlan_table:
2116 	hns3_configure_all_mc_mac_addr(hns, true);
2117 err_mc_mac:
2118 	hns3_configure_all_mac_addr(hns, true);
2119 	return ret;
2120 }
2121 
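/*
 * Pick the highest priority reset level from the pending bitmap. If a reset
 * is already being handled (hw->reset.level is set) and the selected level is
 * lower than the one in progress, HNS3_NONE_RESET is returned so that the
 * in-progress reset is not downgraded.
 */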
2122 static enum hns3_reset_level
2123 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
2124 {
2125 	enum hns3_reset_level reset_level;
2126 
2127 	/* return the highest priority reset level amongst all */
2128 	if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
2129 		reset_level = HNS3_VF_RESET;
2130 	else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
2131 		reset_level = HNS3_VF_FULL_RESET;
2132 	else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
2133 		reset_level = HNS3_VF_PF_FUNC_RESET;
2134 	else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
2135 		reset_level = HNS3_VF_FUNC_RESET;
2136 	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
2137 		reset_level = HNS3_FLR_RESET;
2138 	else
2139 		reset_level = HNS3_NONE_RESET;
2140 
2141 	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
2142 		return HNS3_NONE_RESET;
2143 
2144 	return reset_level;
2145 }
2146 
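/*
 * Reset service task, registered as .reset_service in hns3vf_reset_ops below.
 * If the scheduling state is still SCHEDULE_DEFERRED, the interrupt handler
 * is replayed here first; any pending reset level is then handed to
 * hns3_reset_process(), and the elapsed time is logged when it exceeds
 * HNS3_RESET_PROCESS_MS.
 */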
2147 static void
2148 hns3vf_reset_service(void *param)
2149 {
2150 	struct hns3_adapter *hns = (struct hns3_adapter *)param;
2151 	struct hns3_hw *hw = &hns->hw;
2152 	enum hns3_reset_level reset_level;
2153 	struct timeval tv_delta;
2154 	struct timeval tv_start;
2155 	struct timeval tv;
2156 	uint64_t msec;
2157 
2158 	/*
2159 	 * If the interrupt was not triggered within the delay time, it may
2160 	 * have been lost. It is necessary to handle the interrupt here to
2161 	 * recover from the error.
2162 	 */
2163 	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2164 			    SCHEDULE_DEFERRED) {
2165 		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
2166 				 __ATOMIC_RELAXED);
2167 		hns3_err(hw, "Handling interrupts in delayed tasks");
2168 		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
2169 		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2170 		if (reset_level == HNS3_NONE_RESET) {
2171 			hns3_err(hw, "No reset level is set, try global reset");
2172 			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
2173 		}
2174 	}
2175 	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
2176 
2177 	/*
2178 	 * The hardware reset has been notified; we now have to poll and check
2179 	 * whether the hardware has actually completed the reset sequence.
2180 	 */
2181 	reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2182 	if (reset_level != HNS3_NONE_RESET) {
2183 		hns3_clock_gettime(&tv_start);
2184 		hns3_reset_process(hns, reset_level);
2185 		hns3_clock_gettime(&tv);
2186 		timersub(&tv, &tv_start, &tv_delta);
2187 		msec = hns3_clock_calctime_ms(&tv_delta);
2188 		if (msec > HNS3_RESET_PROCESS_MS)
2189 			hns3_err(hw, "%d handle long time delta %" PRIu64
2190 				 " ms time=%ld.%.6ld",
2191 				 hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
2192 	}
2193 }
2194 
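/*
 * Re-initialize the device after a reset: for a full VF reset restore PCI bus
 * mastering and MSI-X, then re-initialize the firmware command queue, reset
 * all TQPs and bring the hardware back up.
 */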
2195 static int
2196 hns3vf_reinit_dev(struct hns3_adapter *hns)
2197 {
2198 	struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
2199 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2200 	struct hns3_hw *hw = &hns->hw;
2201 	int ret;
2202 
2203 	if (hw->reset.level == HNS3_VF_FULL_RESET) {
2204 		rte_intr_disable(pci_dev->intr_handle);
2205 		ret = hns3vf_set_bus_master(pci_dev, true);
2206 		if (ret < 0) {
2207 			hns3_err(hw, "failed to set PCI bus mastering, ret = %d", ret);
2208 			return ret;
2209 		}
2210 	}
2211 
2212 	/* Re-initialize the firmware command queue. */
2213 	ret = hns3_cmd_init(hw);
2214 	if (ret) {
2215 		hns3_err(hw, "Failed to init cmd: %d", ret);
2216 		return ret;
2217 	}
2218 
2219 	if (hw->reset.level == HNS3_VF_FULL_RESET) {
2220 		/*
2221 		 * UIO enables MSI-X by writing the PCIe configuration space
2222 		 * directly, while vfio-pci enables MSI-X in rte_intr_enable().
2223 		 */
2224 		if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO ||
2225 		    pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) {
2226 			if (hns3vf_enable_msix(pci_dev, true))
2227 				hns3_err(hw, "Failed to enable msix");
2228 		}
2229 
2230 		rte_intr_enable(pci_dev->intr_handle);
2231 	}
2232 
2233 	ret = hns3_reset_all_tqps(hns);
2234 	if (ret) {
2235 		hns3_err(hw, "Failed to reset all queues: %d", ret);
2236 		return ret;
2237 	}
2238 
2239 	ret = hns3vf_init_hardware(hns);
2240 	if (ret) {
2241 		hns3_err(hw, "Failed to init hardware: %d", ret);
2242 		return ret;
2243 	}
2244 
2245 	return 0;
2246 }
2247 
2248 static const struct eth_dev_ops hns3vf_eth_dev_ops = {
2249 	.dev_configure      = hns3vf_dev_configure,
2250 	.dev_start          = hns3vf_dev_start,
2251 	.dev_stop           = hns3vf_dev_stop,
2252 	.dev_close          = hns3vf_dev_close,
2253 	.mtu_set            = hns3vf_dev_mtu_set,
2254 	.promiscuous_enable = hns3vf_dev_promiscuous_enable,
2255 	.promiscuous_disable = hns3vf_dev_promiscuous_disable,
2256 	.allmulticast_enable = hns3vf_dev_allmulticast_enable,
2257 	.allmulticast_disable = hns3vf_dev_allmulticast_disable,
2258 	.stats_get          = hns3_stats_get,
2259 	.stats_reset        = hns3_stats_reset,
2260 	.xstats_get         = hns3_dev_xstats_get,
2261 	.xstats_get_names   = hns3_dev_xstats_get_names,
2262 	.xstats_reset       = hns3_dev_xstats_reset,
2263 	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
2264 	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
2265 	.dev_infos_get      = hns3_dev_infos_get,
2266 	.fw_version_get     = hns3_fw_version_get,
2267 	.rx_queue_setup     = hns3_rx_queue_setup,
2268 	.tx_queue_setup     = hns3_tx_queue_setup,
2269 	.rx_queue_release   = hns3_dev_rx_queue_release,
2270 	.tx_queue_release   = hns3_dev_tx_queue_release,
2271 	.rx_queue_start     = hns3_dev_rx_queue_start,
2272 	.rx_queue_stop      = hns3_dev_rx_queue_stop,
2273 	.tx_queue_start     = hns3_dev_tx_queue_start,
2274 	.tx_queue_stop      = hns3_dev_tx_queue_stop,
2275 	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
2276 	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
2277 	.rxq_info_get       = hns3_rxq_info_get,
2278 	.txq_info_get       = hns3_txq_info_get,
2279 	.rx_burst_mode_get  = hns3_rx_burst_mode_get,
2280 	.tx_burst_mode_get  = hns3_tx_burst_mode_get,
2281 	.mac_addr_add       = hns3_add_mac_addr,
2282 	.mac_addr_remove    = hns3_remove_mac_addr,
2283 	.mac_addr_set       = hns3vf_set_default_mac_addr,
2284 	.set_mc_addr_list   = hns3_set_mc_mac_addr_list,
2285 	.link_update        = hns3vf_dev_link_update,
2286 	.rss_hash_update    = hns3_dev_rss_hash_update,
2287 	.rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
2288 	.reta_update        = hns3_dev_rss_reta_update,
2289 	.reta_query         = hns3_dev_rss_reta_query,
2290 	.flow_ops_get       = hns3_dev_flow_ops_get,
2291 	.vlan_filter_set    = hns3vf_vlan_filter_set,
2292 	.vlan_offload_set   = hns3vf_vlan_offload_set,
2293 	.get_reg            = hns3_get_regs,
2294 	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
2295 	.tx_done_cleanup    = hns3_tx_done_cleanup,
2296 	.eth_dev_priv_dump  = hns3_eth_dev_priv_dump,
2297 };
2298 
2299 static const struct hns3_reset_ops hns3vf_reset_ops = {
2300 	.reset_service       = hns3vf_reset_service,
2301 	.stop_service        = hns3vf_stop_service,
2302 	.prepare_reset       = hns3vf_prepare_reset,
2303 	.wait_hardware_ready = hns3vf_wait_hardware_ready,
2304 	.reinit_dev          = hns3vf_reinit_dev,
2305 	.restore_conf        = hns3vf_restore_conf,
2306 	.start_service       = hns3vf_start_service,
2307 };
2308 
2309 static void
2310 hns3vf_init_hw_ops(struct hns3_hw *hw)
2311 {
2312 	hw->ops.add_mc_mac_addr = hns3vf_add_mc_mac_addr;
2313 	hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr;
2314 	hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr;
2315 	hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr;
2316 	hw->ops.bind_ring_with_vector = hns3vf_bind_ring_with_vector;
2317 }
2318 
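/*
 * ethdev init callback. Secondary processes only set up the Rx/Tx burst
 * functions and the multi-process channel; the primary process additionally
 * parses devargs, initializes the reset framework, the VF hardware and the
 * MAC addresses, and arms the keep-alive alarm towards the PF.
 */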
2319 static int
2320 hns3vf_dev_init(struct rte_eth_dev *eth_dev)
2321 {
2322 	struct hns3_adapter *hns = eth_dev->data->dev_private;
2323 	struct hns3_hw *hw = &hns->hw;
2324 	int ret;
2325 
2326 	PMD_INIT_FUNC_TRACE();
2327 
2328 	hns3_flow_init(eth_dev);
2329 
2330 	hns3_set_rxtx_function(eth_dev);
2331 	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
2332 	eth_dev->rx_queue_count = hns3_rx_queue_count;
2333 	ret = hns3_mp_init(eth_dev);
2334 	if (ret)
2335 		goto err_mp_init;
2336 
2337 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2338 		hns3_tx_push_init(eth_dev);
2339 		return 0;
2340 	}
2341 
2342 	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
2343 	hns->is_vf = true;
2344 	hw->data = eth_dev->data;
2345 	hns3_parse_devargs(eth_dev);
2346 
2347 	ret = hns3_reset_init(hw);
2348 	if (ret)
2349 		goto err_init_reset;
2350 	hw->reset.ops = &hns3vf_reset_ops;
2351 
2352 	hns3vf_init_hw_ops(hw);
2353 	ret = hns3vf_init_vf(eth_dev);
2354 	if (ret) {
2355 		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
2356 		goto err_init_vf;
2357 	}
2358 
2359 	ret = hns3_init_mac_addrs(eth_dev);
2360 	if (ret != 0)
2361 		goto err_init_mac_addrs;
2362 
2363 	hw->adapter_state = HNS3_NIC_INITIALIZED;
2364 
2365 	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2366 			    SCHEDULE_PENDING) {
2367 		hns3_err(hw, "Reschedule reset service after dev_init");
2368 		hns3_schedule_reset(hns);
2369 	} else {
2370 		/* IMP will wait for the ready flag before performing the reset. */
2371 		hns3_notify_reset_ready(hw, false);
2372 	}
2373 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
2374 			  eth_dev);
2375 	return 0;
2376 
2377 err_init_mac_addrs:
2378 	hns3vf_uninit_vf(eth_dev);
2379 
2380 err_init_vf:
2381 	rte_free(hw->reset.wait_data);
2382 
2383 err_init_reset:
2384 	hns3_mp_uninit(eth_dev);
2385 
2386 err_mp_init:
2387 	eth_dev->dev_ops = NULL;
2388 	eth_dev->rx_pkt_burst = NULL;
2389 	eth_dev->rx_descriptor_status = NULL;
2390 	eth_dev->tx_pkt_burst = NULL;
2391 	eth_dev->tx_pkt_prepare = NULL;
2392 	eth_dev->tx_descriptor_status = NULL;
2393 
2394 	return ret;
2395 }
2396 
2397 static int
2398 hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
2399 {
2400 	struct hns3_adapter *hns = eth_dev->data->dev_private;
2401 	struct hns3_hw *hw = &hns->hw;
2402 
2403 	PMD_INIT_FUNC_TRACE();
2404 
2405 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2406 		hns3_mp_uninit(eth_dev);
2407 		return 0;
2408 	}
2409 
2410 	if (hw->adapter_state < HNS3_NIC_CLOSING)
2411 		hns3vf_dev_close(eth_dev);
2412 
2413 	hw->adapter_state = HNS3_NIC_REMOVED;
2414 	return 0;
2415 }
2416 
2417 static int
2418 eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2419 		     struct rte_pci_device *pci_dev)
2420 {
2421 	return rte_eth_dev_pci_generic_probe(pci_dev,
2422 					     sizeof(struct hns3_adapter),
2423 					     hns3vf_dev_init);
2424 }
2425 
2426 static int
2427 eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
2428 {
2429 	return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
2430 }
2431 
2432 static const struct rte_pci_id pci_id_hns3vf_map[] = {
2433 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
2434 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
2435 	{ .vendor_id = 0, }, /* sentinel */
2436 };
2437 
2438 static struct rte_pci_driver rte_hns3vf_pmd = {
2439 	.id_table = pci_id_hns3vf_map,
2440 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2441 	.probe = eth_hns3vf_pci_probe,
2442 	.remove = eth_hns3vf_pci_remove,
2443 };
2444 
2445 RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
2446 RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
2447 RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
2448 RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
2449 		HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
2450 		HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
2451 		HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
2452 		HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16_t> ");
2453
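/*
 * Example of passing these devargs on the EAL command line, assuming the
 * devarg key strings match the macro names above and using a placeholder PCI
 * address:
 *   dpdk-testpmd -a 0000:bd:01.0,rx_func_hint=vec,mbx_time_limit_ms=600 -- -i
 */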