xref: /dpdk/drivers/net/hns3/hns3_ethdev_vf.c (revision ec169149)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4 
5 #include <linux/pci_regs.h>
6 #include <rte_alarm.h>
7 #include <ethdev_pci.h>
8 #include <rte_io.h>
9 #include <rte_vfio.h>
10 
11 #include "hns3_ethdev.h"
12 #include "hns3_common.h"
13 #include "hns3_logs.h"
14 #include "hns3_rxtx.h"
15 #include "hns3_regs.h"
16 #include "hns3_intr.h"
17 #include "hns3_dcb.h"
18 #include "hns3_mp.h"
19 #include "hns3_flow.h"
20 
21 #define HNS3VF_KEEP_ALIVE_INTERVAL	2000000 /* us */
22 #define HNS3VF_SERVICE_INTERVAL		1000000 /* us */
23 
24 #define HNS3VF_RESET_WAIT_MS	20
25 #define HNS3VF_RESET_WAIT_CNT	2000
26 
27 /* Reset related Registers */
28 #define HNS3_GLOBAL_RESET_BIT		0
29 #define HNS3_CORE_RESET_BIT		1
30 #define HNS3_IMP_RESET_BIT		2
31 #define HNS3_FUN_RST_ING_B		0
32 
33 enum hns3vf_evt_cause {
34 	HNS3VF_VECTOR0_EVENT_RST,
35 	HNS3VF_VECTOR0_EVENT_MBX,
36 	HNS3VF_VECTOR0_EVENT_OTHER,
37 };
38 
39 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
40 						    uint64_t *levels);
41 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
42 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
43 
44 static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
45 				  struct rte_ether_addr *mac_addr);
46 static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
47 				     struct rte_ether_addr *mac_addr);
48 static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
49 				   __rte_unused int wait_to_complete);
50 
51 /* set PCI bus mastering */
52 static int
53 hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
54 {
55 	uint16_t reg;
56 	int ret;
57 
58 	ret = rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
59 	if (ret < 0) {
60 		PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
61 			     PCI_COMMAND);
62 		return ret;
63 	}
64 
65 	if (op)
66 		/* set the master bit */
67 		reg |= PCI_COMMAND_MASTER;
68 	else
69 		reg &= ~(PCI_COMMAND_MASTER);
70 
71 	return rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
72 }
73 
74 /**
75  * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
76  * @cap: the capability
77  *
78  * Return the address of the given capability within the PCI capability list.
79  */
80 static int
81 hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
82 {
83 #define MAX_PCIE_CAPABILITY 48
84 	uint16_t status;
85 	uint8_t pos;
86 	uint8_t id;
87 	int ttl;
88 	int ret;
89 
90 	ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
91 	if (ret < 0) {
92 		PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS);
93 		return 0;
94 	}
95 
96 	if (!(status & PCI_STATUS_CAP_LIST))
97 		return 0;
98 
99 	ttl = MAX_PCIE_CAPABILITY;
100 	ret = rte_pci_read_config(device, &pos, sizeof(pos),
101 				  PCI_CAPABILITY_LIST);
102 	if (ret < 0) {
103 		PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
104 			     PCI_CAPABILITY_LIST);
105 		return 0;
106 	}
107 
108 	while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
109 		ret = rte_pci_read_config(device, &id, sizeof(id),
110 					  (pos + PCI_CAP_LIST_ID));
111 		if (ret < 0) {
112 			PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
113 				     (pos + PCI_CAP_LIST_ID));
114 			break;
115 		}
116 
117 		if (id == 0xFF)
118 			break;
119 
120 		if (id == cap)
121 			return (int)pos;
122 
123 		ret = rte_pci_read_config(device, &pos, sizeof(pos),
124 					  (pos + PCI_CAP_LIST_NEXT));
125 		if (ret < 0) {
126 			PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
127 				     (pos + PCI_CAP_LIST_NEXT));
128 			break;
129 		}
130 	}
131 	return 0;
132 }
133 
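/* Enable or disable the device's MSI-X capability through PCI config space. */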
134 static int
135 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
136 {
137 	uint16_t control;
138 	int pos;
139 	int ret;
140 
141 	pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
142 	if (pos) {
143 		ret = rte_pci_read_config(device, &control, sizeof(control),
144 				    (pos + PCI_MSIX_FLAGS));
145 		if (ret < 0) {
146 			PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
147 				     (pos + PCI_MSIX_FLAGS));
148 			return -ENXIO;
149 		}
150 
151 		if (op)
152 			control |= PCI_MSIX_FLAGS_ENABLE;
153 		else
154 			control &= ~PCI_MSIX_FLAGS_ENABLE;
155 		ret = rte_pci_write_config(device, &control, sizeof(control),
156 					  (pos + PCI_MSIX_FLAGS));
157 		if (ret < 0) {
158 			PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x",
159 				    (pos + PCI_MSIX_FLAGS));
160 			return -ENXIO;
161 		}
162 
163 		return 0;
164 	}
165 
166 	return -ENXIO;
167 }
168 
169 static int
170 hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
171 {
172 	/* mac address was checked by upper level interface */
173 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
174 	int ret;
175 
176 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
177 				HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
178 				RTE_ETHER_ADDR_LEN, false, NULL, 0);
179 	if (ret) {
180 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
181 				      mac_addr);
182 		hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
183 			 mac_str, ret);
184 	}
185 	return ret;
186 }
187 
188 static int
189 hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
190 {
191 	/* mac address was checked by upper level interface */
192 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
193 	int ret;
194 
195 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
196 				HNS3_MBX_MAC_VLAN_UC_REMOVE,
197 				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
198 				false, NULL, 0);
199 	if (ret) {
200 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
201 				      mac_addr);
202 		hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
203 			 mac_str, ret);
204 	}
205 	return ret;
206 }
207 
208 static int
209 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
210 			    struct rte_ether_addr *mac_addr)
211 {
212 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
213 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
214 	struct rte_ether_addr *old_addr;
215 	uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
216 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
217 	int ret;
218 
219 	/*
220 	 * The input parameter mac_addr has already been validated as a legal
221 	 * address by the ethdev layer of the DPDK framework.
222 	 */
223 	old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
224 	rte_spinlock_lock(&hw->lock);
225 	memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
226 	memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
227 	       RTE_ETHER_ADDR_LEN);
228 
229 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
230 				HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
231 				HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
232 	if (ret) {
233 		/*
234 		 * The hns3 VF PMD depends on the hns3 PF kernel ethdev
235 		 * driver. When the user has configured a MAC address for a VF
236 		 * device with the "ip link set ..." command on the PF device,
237 		 * the hns3 PF kernel ethdev driver does not allow the VF
238 		 * driver to reconfigure a different default MAC address, and
239 		 * returns -EPERM to the VF driver through the mailbox.
240 		 */
241 		if (ret == -EPERM) {
242 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
243 					      old_addr);
244 			hns3_warn(hw, "Has permanent mac addr(%s) for vf",
245 				  mac_str);
246 		} else {
247 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
248 					      mac_addr);
249 			hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
250 				 mac_str, ret);
251 		}
252 	}
253 
254 	rte_ether_addr_copy(mac_addr,
255 			    (struct rte_ether_addr *)hw->mac.mac_addr);
256 	rte_spinlock_unlock(&hw->lock);
257 
258 	return ret;
259 }
260 
261 static int
262 hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
263 		       struct rte_ether_addr *mac_addr)
264 {
265 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
266 	int ret;
267 
268 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
269 				HNS3_MBX_MAC_VLAN_MC_ADD,
270 				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
271 				NULL, 0);
272 	if (ret) {
273 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
274 				      mac_addr);
275 		hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
276 			 mac_str, ret);
277 	}
278 
279 	return ret;
280 }
281 
282 static int
283 hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
284 			  struct rte_ether_addr *mac_addr)
285 {
286 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
287 	int ret;
288 
289 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
290 				HNS3_MBX_MAC_VLAN_MC_REMOVE,
291 				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
292 				NULL, 0);
293 	if (ret) {
294 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
295 				      mac_addr);
296 		hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
297 			 mac_str, ret);
298 	}
299 
300 	return ret;
301 }
302 
303 static int
304 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
305 			bool en_uc_pmc, bool en_mc_pmc)
306 {
307 	struct hns3_mbx_vf_to_pf_cmd *req;
308 	struct hns3_cmd_desc desc;
309 	int ret;
310 
311 	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
312 
313 	/*
314 	 * The hns3 VF PMD depends on the hns3 PF kernel ethdev driver,
315 	 * so the promiscuous/allmulticast mode of the hns3 VF PMD behaves as
316 	 * described below:
317 	 * 1. The promiscuous/allmulticast mode can be configured successfully
318 	 *    only on a trusted VF device; configuring it on a non-trusted VF
319 	 *    device will fail.
320 	 *    A hns3 VF device can be configured as a trusted device by the hns3
321 	 *    PF kernel ethdev driver on the host with the following command:
322 	 *      "ip link set <eth num> vf <vf id> trust on"
323 	 * 2. After promiscuous mode is configured successfully, the hns3 VF PMD
324 	 *    can receive all traffic on the physical port. This includes
325 	 *    all the ingress packets, and all the packets sent from the PF and
326 	 *    the other VFs on the same physical port.
327 	 * 3. Note: because of hardware constraints, the vlan filter is enabled
328 	 *    by default and cannot be turned off on a VF device, so the vlan
329 	 *    filter is still effective even in promiscuous mode. If upper
330 	 *    applications don't call the rte_eth_dev_vlan_filter API to add
331 	 *    a vlan on the VF device, the hns3 VF PMD cannot receive
332 	 *    the packets with a vlan tag in promiscuous mode.
333 	 */
334 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
335 	req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
336 	req->msg[1] = en_bc_pmc ? 1 : 0;
337 	req->msg[2] = en_uc_pmc ? 1 : 0;
338 	req->msg[3] = en_mc_pmc ? 1 : 0;
339 	req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
340 
341 	ret = hns3_cmd_send(hw, &desc, 1);
342 	if (ret)
343 		hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
344 
345 	return ret;
346 }
347 
348 static int
349 hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
350 {
351 	struct hns3_adapter *hns = dev->data->dev_private;
352 	struct hns3_hw *hw = &hns->hw;
353 	int ret;
354 
355 	ret = hns3vf_set_promisc_mode(hw, true, true, true);
356 	if (ret)
357 		hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
358 			ret);
359 	return ret;
360 }
361 
362 static int
363 hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
364 {
365 	bool allmulti = dev->data->all_multicast ? true : false;
366 	struct hns3_adapter *hns = dev->data->dev_private;
367 	struct hns3_hw *hw = &hns->hw;
368 	int ret;
369 
370 	ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
371 	if (ret)
372 		hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
373 			ret);
374 	return ret;
375 }
376 
377 static int
378 hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
379 {
380 	struct hns3_adapter *hns = dev->data->dev_private;
381 	struct hns3_hw *hw = &hns->hw;
382 	int ret;
383 
384 	if (dev->data->promiscuous)
385 		return 0;
386 
387 	ret = hns3vf_set_promisc_mode(hw, true, false, true);
388 	if (ret)
389 		hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
390 			ret);
391 	return ret;
392 }
393 
394 static int
395 hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
396 {
397 	struct hns3_adapter *hns = dev->data->dev_private;
398 	struct hns3_hw *hw = &hns->hw;
399 	int ret;
400 
401 	if (dev->data->promiscuous)
402 		return 0;
403 
404 	ret = hns3vf_set_promisc_mode(hw, true, false, false);
405 	if (ret)
406 		hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
407 			ret);
408 	return ret;
409 }
410 
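/* Re-apply the promiscuous/allmulticast configuration recorded in dev data. */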
411 static int
412 hns3vf_restore_promisc(struct hns3_adapter *hns)
413 {
414 	struct hns3_hw *hw = &hns->hw;
415 	bool allmulti = hw->data->all_multicast ? true : false;
416 
417 	if (hw->data->promiscuous)
418 		return hns3vf_set_promisc_mode(hw, true, true, true);
419 
420 	return hns3vf_set_promisc_mode(hw, true, false, allmulti);
421 }
422 
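/*
 * Map or unmap a TQP ring (Rx or Tx) to/from the given interrupt vector by
 * sending a mailbox message to the PF.
 */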
423 static int
424 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id,
425 			     bool mmap, enum hns3_ring_type queue_type,
426 			     uint16_t queue_id)
427 {
428 	struct hns3_vf_bind_vector_msg bind_msg;
429 	const char *op_str;
430 	uint16_t code;
431 	int ret;
432 
433 	memset(&bind_msg, 0, sizeof(bind_msg));
434 	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
435 		HNS3_MBX_UNMAP_RING_TO_VECTOR;
436 	bind_msg.vector_id = (uint8_t)vector_id;
437 
438 	if (queue_type == HNS3_RING_TYPE_RX)
439 		bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
440 	else
441 		bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
442 
443 	bind_msg.param[0].ring_type = queue_type;
444 	bind_msg.ring_num = 1;
445 	bind_msg.param[0].tqp_index = queue_id;
446 	op_str = mmap ? "Map" : "Unmap";
447 	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
448 				sizeof(bind_msg), false, NULL, 0);
449 	if (ret)
450 		hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
451 			 op_str, queue_id, bind_msg.vector_id, ret);
452 
453 	return ret;
454 }
455 
456 static int
457 hns3vf_dev_configure(struct rte_eth_dev *dev)
458 {
459 	struct hns3_adapter *hns = dev->data->dev_private;
460 	struct hns3_hw *hw = &hns->hw;
461 	struct rte_eth_conf *conf = &dev->data->dev_conf;
462 	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
463 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
464 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
465 	struct rte_eth_rss_conf rss_conf;
466 	bool gro_en;
467 	int ret;
468 
469 	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
470 
471 	/*
472 	 * Some versions of the hardware network engine do not support
473 	 * individually enabling/disabling/resetting the Tx or Rx queue. These
474 	 * devices must enable/disable/reset the Tx and Rx queues at the same
475 	 * time. When the numbers of Tx queues allocated by upper applications
476 	 * are not equal to the numbers of Rx queues, the driver needs to set
477 	 * up fake Tx or Rx queues to adjust the numbers; otherwise, the network
478 	 * engine cannot work as usual. These fake queues are imperceptible and
479 	 * cannot be used by upper applications.
480 	 */
481 	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
482 	if (ret) {
483 		hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
484 		hw->cfg_max_queues = 0;
485 		return ret;
486 	}
487 
488 	hw->adapter_state = HNS3_NIC_CONFIGURING;
489 	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
490 		hns3_err(hw, "setting link speed/duplex not supported");
491 		ret = -EINVAL;
492 		goto cfg_err;
493 	}
494 
495 	/* When RSS is enabled, apply the user's RSS hash configuration */
496 	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
497 		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
498 		rss_conf = conf->rx_adv_conf.rss_conf;
499 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
500 		if (ret)
501 			goto cfg_err;
502 	}
503 
504 	ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
505 	if (ret != 0)
506 		goto cfg_err;
507 
508 	ret = hns3vf_dev_configure_vlan(dev);
509 	if (ret)
510 		goto cfg_err;
511 
512 	/* config hardware GRO */
513 	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
514 	ret = hns3_config_gro(hw, gro_en);
515 	if (ret)
516 		goto cfg_err;
517 
518 	hns3_init_rx_ptype_tble(dev);
519 
520 	hw->adapter_state = HNS3_NIC_CONFIGURED;
521 	return 0;
522 
523 cfg_err:
524 	hw->cfg_max_queues = 0;
525 	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
526 	hw->adapter_state = HNS3_NIC_INITIALIZED;
527 
528 	return ret;
529 }
530 
531 static int
532 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
533 {
534 	int ret;
535 
536 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
537 				sizeof(mtu), true, NULL, 0);
538 	if (ret)
539 		hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
540 
541 	return ret;
542 }
543 
544 static int
545 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
546 {
547 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
548 	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
549 	int ret;
550 
551 	/*
552 	 * The hns3 PF/VF devices on the same port share the hardware MTU
553 	 * configuration. Currently, the hns3 VF PMD sends a mailbox message to
554 	 * ask the hns3 PF kernel ethdev driver to finish the hardware MTU
555 	 * configuration, so there is no need to stop the port for the hns3 VF
556 	 * device. The MTU value issued by the hns3 VF PMD must be less than
557 	 * or equal to the PF's MTU.
558 	 */
559 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
560 		hns3_err(hw, "Failed to set mtu during resetting");
561 		return -EIO;
562 	}
563 
564 	/*
565 	 * When Rx of scattered packets is off, the hns3 PMD may use the vector
566 	 * Rx process function or the simple Rx functions. If the MTU is
567 	 * increased and the maximum length of received packets becomes greater
568 	 * than the length of a single Rx buffer, the hardware network engine
569 	 * needs to use multiple BDs and buffers to store these packets. This
570 	 * causes problems when the vector Rx function or the simple Rx function
571 	 * is still used to receive packets. So, when Rx of scattered packets is
572 	 * off and the device is started, it is not permitted to increase the
573 	 * MTU such that the maximum length of Rx packets would be greater than
574 	 * the Rx buffer length.
575 	 */
576 	if (dev->data->dev_started && !dev->data->scattered_rx &&
577 	    frame_size > hw->rx_buf_len) {
578 		hns3_err(hw, "failed to set mtu because the frame size exceeds "
579 			"the Rx buffer length while scattered rx is off");
580 		return -EOPNOTSUPP;
581 	}
582 
583 	rte_spinlock_lock(&hw->lock);
584 	ret = hns3vf_config_mtu(hw, mtu);
585 	if (ret) {
586 		rte_spinlock_unlock(&hw->lock);
587 		return ret;
588 	}
589 	rte_spinlock_unlock(&hw->lock);
590 
591 	return 0;
592 }
593 
594 static void
595 hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
596 {
597 	hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
598 }
599 
600 static void
601 hns3vf_disable_irq0(struct hns3_hw *hw)
602 {
603 	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
604 }
605 
606 static void
607 hns3vf_enable_irq0(struct hns3_hw *hw)
608 {
609 	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
610 }
611 
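/*
 * Decode the vector0 interrupt cause: a VF reset event, a mailbox (CMDQ RX)
 * event, or other. The register bits to clear are returned through clearval.
 */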
612 static enum hns3vf_evt_cause
613 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
614 {
615 	struct hns3_hw *hw = &hns->hw;
616 	enum hns3vf_evt_cause ret;
617 	uint32_t cmdq_stat_reg;
618 	uint32_t rst_ing_reg;
619 	uint32_t val;
620 
621 	/* Fetch the events from their corresponding regs */
622 	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
623 	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
624 		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
625 		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
626 		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
627 		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
628 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
629 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
630 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
631 		if (clearval) {
632 			hw->reset.stats.global_cnt++;
633 			hns3_warn(hw, "Global reset detected, clear reset status");
634 		} else {
635 			hns3_schedule_delayed_reset(hns);
636 			hns3_warn(hw, "Global reset detected, don't clear reset status");
637 		}
638 
639 		ret = HNS3VF_VECTOR0_EVENT_RST;
640 		goto out;
641 	}
642 
643 	/* Check for vector0 mailbox(=CMDQ RX) event source */
644 	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
645 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
646 		ret = HNS3VF_VECTOR0_EVENT_MBX;
647 		goto out;
648 	}
649 
650 	val = 0;
651 	ret = HNS3VF_VECTOR0_EVENT_OTHER;
652 out:
653 	if (clearval)
654 		*clearval = val;
655 	return ret;
656 }
657 
658 static void
659 hns3vf_interrupt_handler(void *param)
660 {
661 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
662 	struct hns3_adapter *hns = dev->data->dev_private;
663 	struct hns3_hw *hw = &hns->hw;
664 	enum hns3vf_evt_cause event_cause;
665 	uint32_t clearval;
666 
667 	/* Disable interrupt */
668 	hns3vf_disable_irq0(hw);
669 
670 	/* Read out interrupt causes */
671 	event_cause = hns3vf_check_event_cause(hns, &clearval);
672 	/* Clear interrupt causes */
673 	hns3vf_clear_event_cause(hw, clearval);
674 
675 	switch (event_cause) {
676 	case HNS3VF_VECTOR0_EVENT_RST:
677 		hns3_schedule_reset(hns);
678 		break;
679 	case HNS3VF_VECTOR0_EVENT_MBX:
680 		hns3_dev_handle_mbx_msg(hw);
681 		break;
682 	default:
683 		break;
684 	}
685 
686 	/* Enable interrupt */
687 	hns3vf_enable_irq0(hw);
688 }
689 
690 static void
691 hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
692 {
693 	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
694 	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
695 	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
696 	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
697 }
698 
699 static void
700 hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
701 {
702 	struct hns3_dev_specs_0_cmd *req0;
703 
704 	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
705 
706 	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
707 	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
708 	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
709 	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
710 }
711 
712 static int
713 hns3vf_check_dev_specifications(struct hns3_hw *hw)
714 {
715 	if (hw->rss_ind_tbl_size == 0 ||
716 	    hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
717 		hns3_warn(hw, "the size of the hash lookup table configured (%u)"
718 			      " is 0 or exceeds the maximum (%u)", hw->rss_ind_tbl_size,
719 			      HNS3_RSS_IND_TBL_SIZE_MAX);
720 		return -EINVAL;
721 	}
722 
723 	return 0;
724 }
725 
726 static int
727 hns3vf_query_dev_specifications(struct hns3_hw *hw)
728 {
729 	struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
730 	int ret;
731 	int i;
732 
733 	for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
734 		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
735 					  true);
736 		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
737 	}
738 	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
739 
740 	ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
741 	if (ret)
742 		return ret;
743 
744 	hns3vf_parse_dev_specifications(hw, desc);
745 
746 	return hns3vf_check_dev_specifications(hw);
747 }
748 
749 void
750 hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
751 {
752 	uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
753 				   HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
754 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
755 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
756 
757 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
758 		__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
759 					  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
760 }
761 
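/*
 * Probe whether the PF driver supports pushing link status changes: request
 * the link status and wait a bounded time for the PF to report this
 * capability. If the PF does not answer in time, treat the capability as not
 * supported and clear the RTE_ETH_DEV_INTR_LSC flag.
 */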
762 static void
763 hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
764 {
765 #define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS	500
766 
767 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
768 	int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
769 	uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
770 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
771 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
772 
773 	__atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
774 			 __ATOMIC_RELEASE);
775 
776 	(void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
777 				NULL, 0);
778 
779 	while (remain_ms > 0) {
780 		rte_delay_ms(HNS3_POLL_RESPONE_MS);
781 		if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
782 			HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
783 			break;
784 		remain_ms--;
785 	}
786 
787 	/*
788 	 * On exit from the loop above, pf_push_lsc_cap can be in one of three
789 	 * states: unknown (the PF has not acked), not_supported, or supported.
790 	 * Configure it as 'not_supported' when it is still in the 'unknown' state.
791 	 */
792 	__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
793 				  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
794 
795 	if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
796 		HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
797 		hns3_info(hw, "detect PF support push link status change!");
798 	} else {
799 		/*
800 		 * The framework already set the RTE_ETH_DEV_INTR_LSC bit because
801 		 * the driver declared RTE_PCI_DRV_INTR_LSC in drv_flags, so
802 		 * clear the RTE_ETH_DEV_INTR_LSC capability here.
803 		 */
804 		dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
805 	}
806 }
807 
808 static int
809 hns3vf_get_capability(struct hns3_hw *hw)
810 {
811 	int ret;
812 
813 	ret = hns3_get_pci_revision_id(hw, &hw->revision);
814 	if (ret)
815 		return ret;
816 
817 	if (hw->revision < PCI_REVISION_ID_HIP09_A) {
818 		hns3vf_set_default_dev_specifications(hw);
819 		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
820 		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
821 		hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
822 		hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
823 		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
824 		hw->rss_info.ipv6_sctp_offload_supported = false;
825 		hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
826 		return 0;
827 	}
828 
829 	ret = hns3vf_query_dev_specifications(hw);
830 	if (ret) {
831 		PMD_INIT_LOG(ERR,
832 			     "failed to query dev specifications, ret = %d",
833 			     ret);
834 		return ret;
835 	}
836 
837 	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
838 	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
839 	hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
840 	hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
841 	hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
842 	hw->rss_info.ipv6_sctp_offload_supported = true;
843 	hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
844 
845 	return 0;
846 }
847 
848 static int
849 hns3vf_check_tqp_info(struct hns3_hw *hw)
850 {
851 	if (hw->tqps_num == 0) {
852 		PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
853 		return -EINVAL;
854 	}
855 
856 	if (hw->rss_size_max == 0) {
857 		PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
858 		return -EINVAL;
859 	}
860 
861 	hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);
862 
863 	return 0;
864 }
865 
866 static int
867 hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
868 {
869 	uint8_t resp_msg;
870 	int ret;
871 
872 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
873 				HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0,
874 				true, &resp_msg, sizeof(resp_msg));
875 	if (ret) {
876 		if (ret == -ETIME) {
877 			/*
878 			 * Failing to get the current port based VLAN state
879 			 * from the PF driver does not affect the VF driver's
880 			 * basic functionality. Because the VF driver relies on
881 			 * the hns3 PF kernel ethdev driver, and to avoid
882 			 * compatibility issues with older PF drivers, no
883 			 * failure is returned when the return value is -ETIME.
884 			 * This return value covers the following scenarios:
885 			 * 1) the firmware did not return the result in time
886 			 * 2) the result returned by the firmware timed out
887 			 * 3) an older version of the kernel side PF driver
888 			 *    does not support this mailbox message.
889 			 * For scenarios 1 and 2, it is most likely that a
890 			 * hardware error or a hardware reset has occurred.
891 			 * In those cases, the errors will be caught by other
892 			 * functions.
893 			 */
894 			PMD_INIT_LOG(WARNING,
895 				"failed to get PVID state for timeout, maybe "
896 				"kernel side PF driver doesn't support this "
897 				"mailbox message, or firmware didn't respond.");
898 			resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
899 		} else {
900 			PMD_INIT_LOG(ERR, "failed to get port based VLAN state,"
901 				" ret = %d", ret);
902 			return ret;
903 		}
904 	}
905 	hw->port_base_vlan_cfg.state = resp_msg ?
906 		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
907 	return 0;
908 }
909 
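/* Query the TQP number and the maximum RSS size from the PF via mailbox. */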
910 static int
911 hns3vf_get_queue_info(struct hns3_hw *hw)
912 {
913 #define HNS3VF_TQPS_RSS_INFO_LEN	6
914 	uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
915 	int ret;
916 
917 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
918 				resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
919 	if (ret) {
920 		PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
921 		return ret;
922 	}
923 
924 	memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
925 	memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
926 
927 	return hns3vf_check_tqp_info(hw);
928 }
929 
930 static void
931 hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps)
932 {
933 	if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B))
934 		hns3_set_bit(hw->capability,
935 				HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1);
936 }
937 
938 static int
939 hns3vf_get_num_tc(struct hns3_hw *hw)
940 {
941 	uint8_t num_tc = 0;
942 	uint32_t i;
943 
944 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
945 		if (hw->hw_tc_map & BIT(i))
946 			num_tc++;
947 	}
948 	return num_tc;
949 }
950 
951 static int
952 hns3vf_get_basic_info(struct hns3_hw *hw)
953 {
954 	uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
955 	struct hns3_basic_info *basic_info;
956 	int ret;
957 
958 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0,
959 				true, resp_msg, sizeof(resp_msg));
960 	if (ret) {
961 		hns3_err(hw, "failed to get basic info from PF, ret = %d.",
962 				ret);
963 		return ret;
964 	}
965 
966 	basic_info = (struct hns3_basic_info *)resp_msg;
967 	hw->hw_tc_map = basic_info->hw_tc_map;
968 	hw->num_tc = hns3vf_get_num_tc(hw);
969 	hw->pf_vf_if_version = basic_info->pf_vf_if_version;
970 	hns3vf_update_caps(hw, basic_info->caps);
971 
972 	return 0;
973 }
974 
975 static int
976 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
977 {
978 	uint8_t host_mac[RTE_ETHER_ADDR_LEN];
979 	int ret;
980 
981 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
982 				true, host_mac, RTE_ETHER_ADDR_LEN);
983 	if (ret) {
984 		hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
985 		return ret;
986 	}
987 
988 	memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
989 
990 	return 0;
991 }
992 
993 static int
994 hns3vf_get_configuration(struct hns3_hw *hw)
995 {
996 	int ret;
997 
998 	hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
999 
1000 	/* Get device capability */
1001 	ret = hns3vf_get_capability(hw);
1002 	if (ret) {
1003 		PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
1004 		return ret;
1005 	}
1006 
1007 	hns3vf_get_push_lsc_cap(hw);
1008 
1009 	/* Get basic info from PF */
1010 	ret = hns3vf_get_basic_info(hw);
1011 	if (ret)
1012 		return ret;
1013 
1014 	/* Get queue configuration from PF */
1015 	ret = hns3vf_get_queue_info(hw);
1016 	if (ret)
1017 		return ret;
1018 
1019 	/* Get user defined VF MAC addr from PF */
1020 	ret = hns3vf_get_host_mac_addr(hw);
1021 	if (ret)
1022 		return ret;
1023 
1024 	return hns3vf_get_port_base_vlan_filter_state(hw);
1025 }
1026 
1027 static void
1028 hns3vf_request_link_info(struct hns3_hw *hw)
1029 {
1030 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1031 	bool send_req;
1032 	int ret;
1033 
1034 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1035 		return;
1036 
1037 	send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
1038 		   vf->req_link_info_cnt > 0;
1039 	if (!send_req)
1040 		return;
1041 
1042 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
1043 				NULL, 0);
1044 	if (ret) {
1045 		hns3_err(hw, "failed to fetch link status, ret = %d", ret);
1046 		return;
1047 	}
1048 
1049 	if (vf->req_link_info_cnt > 0)
1050 		vf->req_link_info_cnt--;
1051 }
1052 
1053 void
1054 hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
1055 			  uint32_t link_speed, uint8_t link_duplex)
1056 {
1057 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1058 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1059 	struct hns3_mac *mac = &hw->mac;
1060 	int ret;
1061 
1062 	/*
1063 	 * The PF kernel driver may push the link status while the VF driver is
1064 	 * resetting; the driver stops the polling job in this case and starts
1065 	 * it again after the reset is done.
1066 	 * When the polling job is started, the driver gets the initial link
1067 	 * status by sending a request to the PF kernel driver, and then updates
1068 	 * the link status by processing the PF's link status mailbox messages.
1069 	 */
1070 	if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
1071 		return;
1072 
1073 	if (hw->adapter_state != HNS3_NIC_STARTED)
1074 		return;
1075 
1076 	mac->link_status = link_status;
1077 	mac->link_speed = link_speed;
1078 	mac->link_duplex = link_duplex;
1079 	ret = hns3vf_dev_link_update(dev, 0);
1080 	if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
1081 		hns3_start_report_lse(dev);
1082 }
1083 
1084 static int
1085 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
1086 {
1087 #define HNS3VF_VLAN_MBX_MSG_LEN 5
1088 	struct hns3_hw *hw = &hns->hw;
1089 	uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
1090 	uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
1091 	uint8_t is_kill = on ? 0 : 1;
1092 
1093 	msg_data[0] = is_kill;
1094 	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
1095 	memcpy(&msg_data[3], &proto, sizeof(proto));
1096 
1097 	return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
1098 				 msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
1099 				 0);
1100 }
1101 
1102 static int
1103 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1104 {
1105 	struct hns3_adapter *hns = dev->data->dev_private;
1106 	struct hns3_hw *hw = &hns->hw;
1107 	int ret;
1108 
1109 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1110 		hns3_err(hw,
1111 			 "vf set vlan id failed during resetting, vlan_id =%u",
1112 			 vlan_id);
1113 		return -EIO;
1114 	}
1115 	rte_spinlock_lock(&hw->lock);
1116 	ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1117 	rte_spinlock_unlock(&hw->lock);
1118 	if (ret)
1119 		hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
1120 			 vlan_id, ret);
1121 
1122 	return ret;
1123 }
1124 
1125 static int
1126 hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
1127 {
1128 	uint8_t msg_data;
1129 	int ret;
1130 
1131 	if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
1132 		return 0;
1133 
1134 	msg_data = enable ? 1 : 0;
1135 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
1136 			HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data,
1137 			sizeof(msg_data), true, NULL, 0);
1138 	if (ret)
1139 		hns3_err(hw, "%s vlan filter failed, ret = %d.",
1140 				enable ? "enable" : "disable", ret);
1141 
1142 	return ret;
1143 }
1144 
1145 static int
1146 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1147 {
1148 	uint8_t msg_data;
1149 	int ret;
1150 
1151 	msg_data = enable ? 1 : 0;
1152 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
1153 				&msg_data, sizeof(msg_data), false, NULL, 0);
1154 	if (ret)
1155 		hns3_err(hw, "vf %s strip failed, ret = %d.",
1156 				enable ? "enable" : "disable", ret);
1157 
1158 	return ret;
1159 }
1160 
1161 static int
1162 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1163 {
1164 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1165 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1166 	unsigned int tmp_mask;
1167 	int ret = 0;
1168 
1169 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1170 		hns3_err(hw, "vf set vlan offload failed during resetting, "
1171 			     "mask = 0x%x", mask);
1172 		return -EIO;
1173 	}
1174 
1175 	tmp_mask = (unsigned int)mask;
1176 
1177 	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
1178 		rte_spinlock_lock(&hw->lock);
1179 		/* Enable or disable VLAN filter */
1180 		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1181 			ret = hns3vf_en_vlan_filter(hw, true);
1182 		else
1183 			ret = hns3vf_en_vlan_filter(hw, false);
1184 		rte_spinlock_unlock(&hw->lock);
1185 		if (ret)
1186 			return ret;
1187 	}
1188 
1189 	/* Vlan stripping setting */
1190 	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
1191 		rte_spinlock_lock(&hw->lock);
1192 		/* Enable or disable VLAN stripping */
1193 		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1194 			ret = hns3vf_en_hw_strip_rxvtag(hw, true);
1195 		else
1196 			ret = hns3vf_en_hw_strip_rxvtag(hw, false);
1197 		rte_spinlock_unlock(&hw->lock);
1198 	}
1199 
1200 	return ret;
1201 }
1202 
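/*
 * Walk the VLAN filter bitmap kept in dev data and add (on != 0) or remove
 * (on == 0) every configured VLAN ID through the PF mailbox.
 */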
1203 static int
1204 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
1205 {
1206 	struct rte_vlan_filter_conf *vfc;
1207 	struct hns3_hw *hw = &hns->hw;
1208 	uint16_t vlan_id;
1209 	uint64_t vbit;
1210 	uint64_t ids;
1211 	int ret = 0;
1212 	uint32_t i;
1213 
1214 	vfc = &hw->data->vlan_filter_conf;
1215 	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1216 		if (vfc->ids[i] == 0)
1217 			continue;
1218 		ids = vfc->ids[i];
1219 		while (ids) {
1220 			/*
1221 			 * 64 is the number of bits in ids; one bit corresponds
1222 			 * to one vlan id
1223 			 */
1224 			vlan_id = 64 * i;
1225 			/* count trailing zeroes */
1226 			vbit = ~ids & (ids - 1);
1227 			/* clear least significant bit set */
1228 			ids ^= (ids ^ (ids - 1)) ^ vbit;
1229 			for (; vbit;) {
1230 				vbit >>= 1;
1231 				vlan_id++;
1232 			}
1233 			ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1234 			if (ret) {
1235 				hns3_err(hw,
1236 					 "VF handle vlan table failed, ret =%d, on = %d",
1237 					 ret, on);
1238 				return ret;
1239 			}
1240 		}
1241 	}
1242 
1243 	return ret;
1244 }
1245 
1246 static int
1247 hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
1248 {
1249 	return hns3vf_handle_all_vlan_table(hns, 0);
1250 }
1251 
1252 static int
1253 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1254 {
1255 	struct hns3_hw *hw = &hns->hw;
1256 	struct rte_eth_conf *dev_conf;
1257 	bool en;
1258 	int ret;
1259 
1260 	dev_conf = &hw->data->dev_conf;
1261 	en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
1262 								   : false;
1263 	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1264 	if (ret)
1265 		hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1266 			 ret);
1267 	return ret;
1268 }
1269 
1270 static int
1271 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1272 {
1273 	struct hns3_adapter *hns = dev->data->dev_private;
1274 	struct rte_eth_dev_data *data = dev->data;
1275 	struct hns3_hw *hw = &hns->hw;
1276 	int ret;
1277 
1278 	if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1279 	    data->dev_conf.txmode.hw_vlan_reject_untagged ||
1280 	    data->dev_conf.txmode.hw_vlan_insert_pvid) {
1281 		hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1282 			      "or hw_vlan_insert_pvid is not supported!");
1283 	}
1284 
1285 	/* Apply vlan offload setting */
1286 	ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
1287 					RTE_ETH_VLAN_FILTER_MASK);
1288 	if (ret)
1289 		hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
1290 
1291 	return ret;
1292 }
1293 
1294 static int
1295 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1296 {
1297 	uint8_t msg_data;
1298 
1299 	msg_data = alive ? 1 : 0;
1300 	return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1301 				 sizeof(msg_data), false, NULL, 0);
1302 }
1303 
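/* Periodic alarm callback that sends a keep-alive mailbox message to the PF. */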
1304 static void
1305 hns3vf_keep_alive_handler(void *param)
1306 {
1307 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1308 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1309 	struct hns3_hw *hw = &hns->hw;
1310 	int ret;
1311 
1312 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1313 				false, NULL, 0);
1314 	if (ret)
1315 		hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
1316 			 ret);
1317 
1318 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1319 			  eth_dev);
1320 }
1321 
1322 static void
1323 hns3vf_service_handler(void *param)
1324 {
1325 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1326 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1327 	struct hns3_hw *hw = &hns->hw;
1328 
1329 	/*
1330 	 * The query link status and reset processing are executed in the
1331 	 * The link status query and the reset processing are executed in the
1332 	 * interrupt thread. When an IMP reset occurs, the IMP will not respond,
1333 	 * and the query operation will time out after 30ms. In the case of
1334 	 * multiple PF/VFs, each query failure timeout causes the IMP reset
1335 	 * interrupt to fail to be handled within 100ms.
1336 	 * pending, and if so, abandon the query.
1337 	 */
1338 	if (!hns3vf_is_reset_pending(hns)) {
1339 		hns3vf_request_link_info(hw);
1340 		hns3_update_hw_stats(hw);
1341 	} else {
1342 		hns3_warn(hw, "Cancel the query when reset is pending");
1343 	}
1344 
1345 	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1346 			  eth_dev);
1347 }
1348 
1349 static void
1350 hns3vf_start_poll_job(struct rte_eth_dev *dev)
1351 {
1352 #define HNS3_REQUEST_LINK_INFO_REMAINS_CNT	3
1353 
1354 	struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1355 
1356 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
1357 		vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
1358 
1359 	__atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
1360 
1361 	hns3vf_service_handler(dev);
1362 }
1363 
1364 static void
1365 hns3vf_stop_poll_job(struct rte_eth_dev *dev)
1366 {
1367 	struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1368 
1369 	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
1370 
1371 	__atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
1372 }
1373 
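/* Query the number of MSI-X vectors assigned to this VF from the firmware. */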
1374 static int
1375 hns3_query_vf_resource(struct hns3_hw *hw)
1376 {
1377 	struct hns3_vf_res_cmd *req;
1378 	struct hns3_cmd_desc desc;
1379 	uint16_t num_msi;
1380 	int ret;
1381 
1382 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1383 	ret = hns3_cmd_send(hw, &desc, 1);
1384 	if (ret) {
1385 		hns3_err(hw, "query vf resource failed, ret = %d", ret);
1386 		return ret;
1387 	}
1388 
1389 	req = (struct hns3_vf_res_cmd *)desc.data;
1390 	num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1391 				 HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
1392 	if (num_msi < HNS3_MIN_VECTOR_NUM) {
1393 		hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
1394 			 num_msi, HNS3_MIN_VECTOR_NUM);
1395 		return -EINVAL;
1396 	}
1397 
1398 	hw->num_msi = num_msi;
1399 
1400 	return 0;
1401 }
1402 
1403 static int
1404 hns3vf_init_hardware(struct hns3_adapter *hns)
1405 {
1406 	struct hns3_hw *hw = &hns->hw;
1407 	uint16_t mtu = hw->data->mtu;
1408 	int ret;
1409 
1410 	ret = hns3vf_set_promisc_mode(hw, true, false, false);
1411 	if (ret)
1412 		return ret;
1413 
1414 	ret = hns3vf_config_mtu(hw, mtu);
1415 	if (ret)
1416 		goto err_init_hardware;
1417 
1418 	ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1419 	if (ret) {
1420 		PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1421 		goto err_init_hardware;
1422 	}
1423 
1424 	ret = hns3_config_gro(hw, false);
1425 	if (ret) {
1426 		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1427 		goto err_init_hardware;
1428 	}
1429 
1430 	/*
1431 	 * During initialization, all hardware mappings between queues and
1432 	 * interrupt vectors need to be cleared, so that errors caused by the
1433 	 * residual configurations, such as unexpected interrupts, can be
1434 	 * avoided.
1435 	 */
1436 	ret = hns3_init_ring_with_vector(hw);
1437 	if (ret) {
1438 		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
1439 		goto err_init_hardware;
1440 	}
1441 
1442 	return 0;
1443 
1444 err_init_hardware:
1445 	(void)hns3vf_set_promisc_mode(hw, false, false, false);
1446 	return ret;
1447 }
1448 
1449 static int
1450 hns3vf_clear_vport_list(struct hns3_hw *hw)
1451 {
1452 	return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
1453 				 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
1454 				 NULL, 0);
1455 }
1456 
1457 static int
1458 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1459 {
1460 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1461 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1462 	struct hns3_hw *hw = &hns->hw;
1463 	int ret;
1464 
1465 	PMD_INIT_FUNC_TRACE();
1466 
1467 	/* Get hardware io base address from pcie BAR2 IO space */
1468 	hw->io_base = pci_dev->mem_resource[2].addr;
1469 
1470 	/* Firmware command queue initialize */
1471 	ret = hns3_cmd_init_queue(hw);
1472 	if (ret) {
1473 		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1474 		goto err_cmd_init_queue;
1475 	}
1476 
1477 	/* Firmware command initialize */
1478 	ret = hns3_cmd_init(hw);
1479 	if (ret) {
1480 		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1481 		goto err_cmd_init;
1482 	}
1483 
1484 	hns3_tx_push_init(eth_dev);
1485 
1486 	/* Get VF resource */
1487 	ret = hns3_query_vf_resource(hw);
1488 	if (ret)
1489 		goto err_cmd_init;
1490 
1491 	rte_spinlock_init(&hw->mbx_resp.lock);
1492 
1493 	hns3vf_clear_event_cause(hw, 0);
1494 
1495 	ret = rte_intr_callback_register(pci_dev->intr_handle,
1496 					 hns3vf_interrupt_handler, eth_dev);
1497 	if (ret) {
1498 		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1499 		goto err_intr_callback_register;
1500 	}
1501 
1502 	/* Enable interrupt */
1503 	rte_intr_enable(pci_dev->intr_handle);
1504 	hns3vf_enable_irq0(hw);
1505 
1506 	/* Get configuration from PF */
1507 	ret = hns3vf_get_configuration(hw);
1508 	if (ret) {
1509 		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1510 		goto err_get_config;
1511 	}
1512 
1513 	ret = hns3_stats_init(hw);
1514 	if (ret)
1515 		goto err_get_config;
1516 
1517 	ret = hns3_queue_to_tc_mapping(hw, hw->tqps_num, hw->tqps_num);
1518 	if (ret) {
1519 		PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
1520 		goto err_set_tc_queue;
1521 	}
1522 
1523 	ret = hns3vf_clear_vport_list(hw);
1524 	if (ret) {
1525 		PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
1526 		goto err_set_tc_queue;
1527 	}
1528 
1529 	ret = hns3vf_init_hardware(hns);
1530 	if (ret)
1531 		goto err_set_tc_queue;
1532 
1533 	hns3_rss_set_default_args(hw);
1534 
1535 	ret = hns3vf_set_alive(hw, true);
1536 	if (ret) {
1537 		PMD_INIT_LOG(ERR, "Failed to send alive message to PF: %d", ret);
1538 		goto err_set_tc_queue;
1539 	}
1540 
1541 	return 0;
1542 
1543 err_set_tc_queue:
1544 	hns3_stats_uninit(hw);
1545 
1546 err_get_config:
1547 	hns3vf_disable_irq0(hw);
1548 	rte_intr_disable(pci_dev->intr_handle);
1549 	hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1550 			     eth_dev);
1551 err_intr_callback_register:
1552 err_cmd_init:
1553 	hns3_cmd_uninit(hw);
1554 	hns3_cmd_destroy_queue(hw);
1555 err_cmd_init_queue:
1556 	hw->io_base = NULL;
1557 
1558 	return ret;
1559 }
1560 
1561 static void
1562 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
1563 {
1564 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1565 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1566 	struct hns3_hw *hw = &hns->hw;
1567 
1568 	PMD_INIT_FUNC_TRACE();
1569 
1570 	hns3_rss_uninit(hns);
1571 	(void)hns3_config_gro(hw, false);
1572 	(void)hns3vf_set_alive(hw, false);
1573 	(void)hns3vf_set_promisc_mode(hw, false, false, false);
1574 	hns3_flow_uninit(eth_dev);
1575 	hns3_stats_uninit(hw);
1576 	hns3vf_disable_irq0(hw);
1577 	rte_intr_disable(pci_dev->intr_handle);
1578 	hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1579 			     eth_dev);
1580 	hns3_cmd_uninit(hw);
1581 	hns3_cmd_destroy_queue(hw);
1582 	hw->io_base = NULL;
1583 }
1584 
1585 static int
1586 hns3vf_do_stop(struct hns3_adapter *hns)
1587 {
1588 	struct hns3_hw *hw = &hns->hw;
1589 	int ret;
1590 
1591 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
1592 
1593 	/*
1594 	 * The "hns3vf_do_stop" function will also be called by .stop_service to
1595 	 * prepare reset. At the time of global or IMP reset, the command cannot
1596 	 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be
1597 	 * accessed during the reset process. So the mbuf can not be released
1598 	 * during reset and is required to be released after the reset is
1599 	 * completed.
1600 	 */
1601 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
1602 		hns3_dev_release_mbufs(hns);
1603 
1604 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
1605 		hns3_configure_all_mac_addr(hns, true);
1606 		ret = hns3_reset_all_tqps(hns);
1607 		if (ret) {
1608 			hns3_err(hw, "failed to reset all queues ret = %d",
1609 				 ret);
1610 			return ret;
1611 		}
1612 	}
1613 	return 0;
1614 }
1615 
1616 static int
1617 hns3vf_dev_stop(struct rte_eth_dev *dev)
1618 {
1619 	struct hns3_adapter *hns = dev->data->dev_private;
1620 	struct hns3_hw *hw = &hns->hw;
1621 
1622 	PMD_INIT_FUNC_TRACE();
1623 	dev->data->dev_started = 0;
1624 
1625 	hw->adapter_state = HNS3_NIC_STOPPING;
1626 	hns3_set_rxtx_function(dev);
1627 	rte_wmb();
1628 	/* Disable datapath on secondary process. */
1629 	hns3_mp_req_stop_rxtx(dev);
1630 	/* Prevent crashes when queues are still in use. */
1631 	rte_delay_ms(hw->cfg_max_queues);
1632 
1633 	rte_spinlock_lock(&hw->lock);
1634 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1635 		hns3_stop_tqps(hw);
1636 		hns3vf_do_stop(hns);
1637 		hns3_unmap_rx_interrupt(dev);
1638 		hw->adapter_state = HNS3_NIC_CONFIGURED;
1639 	}
1640 	hns3_rx_scattered_reset(dev);
1641 	hns3vf_stop_poll_job(dev);
1642 	hns3_stop_report_lse(dev);
1643 	rte_spinlock_unlock(&hw->lock);
1644 
1645 	return 0;
1646 }
1647 
1648 static int
1649 hns3vf_dev_close(struct rte_eth_dev *eth_dev)
1650 {
1651 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1652 	struct hns3_hw *hw = &hns->hw;
1653 	int ret = 0;
1654 
1655 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1656 		hns3_mp_uninit(eth_dev);
1657 		return 0;
1658 	}
1659 
1660 	if (hw->adapter_state == HNS3_NIC_STARTED)
1661 		ret = hns3vf_dev_stop(eth_dev);
1662 
1663 	hw->adapter_state = HNS3_NIC_CLOSING;
1664 	hns3_reset_abort(hns);
1665 	hw->adapter_state = HNS3_NIC_CLOSED;
1666 	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1667 	hns3_configure_all_mc_mac_addr(hns, true);
1668 	hns3vf_remove_all_vlan_table(hns);
1669 	hns3vf_uninit_vf(eth_dev);
1670 	hns3_free_all_queues(eth_dev);
1671 	rte_free(hw->reset.wait_data);
1672 	hns3_mp_uninit(eth_dev);
1673 	hns3_warn(hw, "Close port %u finished", hw->data->port_id);
1674 
1675 	return ret;
1676 }
1677 
1678 static int
1679 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1680 		       __rte_unused int wait_to_complete)
1681 {
1682 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1683 	struct hns3_hw *hw = &hns->hw;
1684 	struct hns3_mac *mac = &hw->mac;
1685 	struct rte_eth_link new_link;
1686 
1687 	memset(&new_link, 0, sizeof(new_link));
1688 	switch (mac->link_speed) {
1689 	case RTE_ETH_SPEED_NUM_10M:
1690 	case RTE_ETH_SPEED_NUM_100M:
1691 	case RTE_ETH_SPEED_NUM_1G:
1692 	case RTE_ETH_SPEED_NUM_10G:
1693 	case RTE_ETH_SPEED_NUM_25G:
1694 	case RTE_ETH_SPEED_NUM_40G:
1695 	case RTE_ETH_SPEED_NUM_50G:
1696 	case RTE_ETH_SPEED_NUM_100G:
1697 	case RTE_ETH_SPEED_NUM_200G:
1698 		if (mac->link_status)
1699 			new_link.link_speed = mac->link_speed;
1700 		break;
1701 	default:
1702 		if (mac->link_status)
1703 			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
1704 		break;
1705 	}
1706 
1707 	if (!mac->link_status)
1708 		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1709 
1710 	new_link.link_duplex = mac->link_duplex;
1711 	new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
1712 	new_link.link_autoneg =
1713 	    !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
1714 
1715 	return rte_eth_linkstatus_set(eth_dev, &new_link);
1716 }
1717 
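/*
 * Map queues to TCs, enable the RXD advanced layout and initialize all Rx/Tx
 * queues (optionally resetting them) before the device is started.
 */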
1718 static int
1719 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1720 {
1721 	struct hns3_hw *hw = &hns->hw;
1722 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
1723 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
1724 	int ret;
1725 
1726 	ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
1727 	if (ret)
1728 		return ret;
1729 
1730 	hns3_enable_rxd_adv_layout(hw);
1731 
1732 	ret = hns3_init_queues(hns, reset_queue);
1733 	if (ret)
1734 		hns3_err(hw, "failed to init queues, ret = %d.", ret);
1735 
1736 	return ret;
1737 }
1738 
1739 static void
hns3vf_restore_filter(struct rte_eth_dev * dev)1740 hns3vf_restore_filter(struct rte_eth_dev *dev)
1741 {
1742 	hns3_restore_rss_filter(dev);
1743 }
1744 
1745 static int
hns3vf_dev_start(struct rte_eth_dev * dev)1746 hns3vf_dev_start(struct rte_eth_dev *dev)
1747 {
1748 	struct hns3_adapter *hns = dev->data->dev_private;
1749 	struct hns3_hw *hw = &hns->hw;
1750 	int ret;
1751 
1752 	PMD_INIT_FUNC_TRACE();
1753 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1754 		return -EBUSY;
1755 
1756 	rte_spinlock_lock(&hw->lock);
1757 	hw->adapter_state = HNS3_NIC_STARTING;
1758 	ret = hns3vf_do_start(hns, true);
1759 	if (ret) {
1760 		hw->adapter_state = HNS3_NIC_CONFIGURED;
1761 		rte_spinlock_unlock(&hw->lock);
1762 		return ret;
1763 	}
1764 	ret = hns3_map_rx_interrupt(dev);
1765 	if (ret)
1766 		goto map_rx_inter_err;
1767 
1768 	/*
1769 	 * There are three registers used to control the status of a TQP
1770 	 * (a pair of Tx queue and Rx queue) in the new version network
1771 	 * engine. One is used to control the enabling of the Tx queue, another
1772 	 * is used to control the enabling of the Rx queue, and the last is the
1773 	 * master switch used to control the enabling of the whole TQP. The Tx
1774 	 * register and the TQP register must both be enabled to enable a Tx
1775 	 * queue. The same applies to the Rx queue. For the older network
1776 	 * engine, this function only refreshes the enabled flag, which is used
1777 	 * to update the status of the queue in the dpdk framework.
1778 	 */
1779 	ret = hns3_start_all_txqs(dev);
1780 	if (ret)
1781 		goto map_rx_inter_err;
1782 
1783 	ret = hns3_start_all_rxqs(dev);
1784 	if (ret)
1785 		goto start_all_rxqs_fail;
1786 
1787 	hw->adapter_state = HNS3_NIC_STARTED;
1788 	rte_spinlock_unlock(&hw->lock);
1789 
1790 	hns3_rx_scattered_calc(dev);
1791 	hns3_set_rxtx_function(dev);
1792 	hns3_mp_req_start_rxtx(dev);
1793 
1794 	hns3vf_restore_filter(dev);
1795 
1796 	/* Enable interrupt of all rx queues before enabling queues */
1797 	hns3_dev_all_rx_queue_intr_enable(hw, true);
1798 	hns3_start_tqps(hw);
1799 
1800 	if (dev->data->dev_conf.intr_conf.lsc != 0)
1801 		hns3vf_dev_link_update(dev, 0);
1802 	hns3vf_start_poll_job(dev);
1803 
1804 	return ret;
1805 
1806 start_all_rxqs_fail:
1807 	hns3_stop_all_txqs(dev);
1808 map_rx_inter_err:
1809 	(void)hns3vf_do_stop(hns);
1810 	hw->adapter_state = HNS3_NIC_CONFIGURED;
1811 	rte_spinlock_unlock(&hw->lock);
1812 
1813 	return ret;
1814 }
1815 
1816 static bool
is_vf_reset_done(struct hns3_hw * hw)1817 is_vf_reset_done(struct hns3_hw *hw)
1818 {
1819 #define HNS3_FUN_RST_ING_BITS \
1820 	(BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1821 	 BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1822 	 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1823 	 BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1824 
1825 	uint32_t val;
1826 
1827 	if (hw->reset.level == HNS3_VF_RESET) {
1828 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1829 		if (val & HNS3_VF_RST_ING_BIT)
1830 			return false;
1831 	} else {
1832 		val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1833 		if (val & HNS3_FUN_RST_ING_BITS)
1834 			return false;
1835 	}
1836 	return true;
1837 }
1838 
1839 bool
hns3vf_is_reset_pending(struct hns3_adapter * hns)1840 hns3vf_is_reset_pending(struct hns3_adapter *hns)
1841 {
1842 	struct hns3_hw *hw = &hns->hw;
1843 	enum hns3_reset_level reset;
1844 
1845 	/*
1846 	 * According to the PCIe protocol, an FLR on a PF device resets the PF
1847 	 * state as well as the SR-IOV extended capability, including VF Enable,
1848 	 * which means that the VFs no longer exist.
1849 	 *
1850 	 * HNS3_VF_FULL_RESET means the PF device is in FLR reset. While the PF
1851 	 * device is in the FLR stage, the register state of the VF device is not
1852 	 * reliable, so register state detection cannot be carried out. In this
1853 	 * case, just ignore the register states and return false to indicate
1854 	 * that there are no other reset states to be processed by the driver.
1855 	 */
1856 	if (hw->reset.level == HNS3_VF_FULL_RESET)
1857 		return false;
1858 
1859 	/* Check the registers to confirm whether there is reset pending */
1860 	hns3vf_check_event_cause(hns, NULL);
1861 	reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
1862 	if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET &&
1863 	    hw->reset.level < reset) {
1864 		hns3_warn(hw, "High level reset %d is pending", reset);
1865 		return true;
1866 	}
1867 	return false;
1868 }
1869 
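/*
 * Poll asynchronously (via rte_eal_alarm_set callbacks) until the reset
 * registers report completion: returns 0 when the wait has completed,
 * -EAGAIN while still waiting (including the extra delay added for the PF
 * to finish its own reset) and -ETIME if the hardware never became ready.
 */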
1870 static int
hns3vf_wait_hardware_ready(struct hns3_adapter * hns)1871 hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
1872 {
1873 #define HNS3_WAIT_PF_RESET_READY_TIME 5
1874 	struct hns3_hw *hw = &hns->hw;
1875 	struct hns3_wait_data *wait_data = hw->reset.wait_data;
1876 	struct timeval tv;
1877 
1878 	if (wait_data->result == HNS3_WAIT_SUCCESS) {
1879 		/*
1880 		 * After the VF reset is ready, the PF may not have completed its
1881 		 * reset processing. A mailbox message sent from the VF to the PF
1882 		 * may fail while the PF is resetting, so add an extra delay.
1883 		 */
1884 		if (hw->reset.level == HNS3_VF_FUNC_RESET ||
1885 		    hw->reset.level == HNS3_FLR_RESET)
1886 			return 0;
1887 		/* Reset retry process, no need to add extra delay. */
1888 		if (hw->reset.attempts)
1889 			return 0;
1890 		if (wait_data->check_completion == NULL)
1891 			return 0;
1892 
1893 		wait_data->check_completion = NULL;
1894 		wait_data->interval = HNS3_WAIT_PF_RESET_READY_TIME *
1895 			MSEC_PER_SEC * USEC_PER_MSEC;
1896 		wait_data->count = 1;
1897 		wait_data->result = HNS3_WAIT_REQUEST;
1898 		rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
1899 				  wait_data);
1900 		hns3_warn(hw, "hardware is ready, delay %d sec for PF reset to complete",
1901 				HNS3_WAIT_PF_RESET_READY_TIME);
1902 		return -EAGAIN;
1903 	} else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
1904 		hns3_clock_gettime(&tv);
1905 		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
1906 			  tv.tv_sec, tv.tv_usec);
1907 		return -ETIME;
1908 	} else if (wait_data->result == HNS3_WAIT_REQUEST)
1909 		return -EAGAIN;
1910 
1911 	wait_data->hns = hns;
1912 	wait_data->check_completion = is_vf_reset_done;
1913 	wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
1914 				HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms();
1915 	wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
1916 	wait_data->count = HNS3VF_RESET_WAIT_CNT;
1917 	wait_data->result = HNS3_WAIT_REQUEST;
1918 	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
1919 	return -EAGAIN;
1920 }
1921 
1922 static int
hns3vf_prepare_reset(struct hns3_adapter * hns)1923 hns3vf_prepare_reset(struct hns3_adapter *hns)
1924 {
1925 	struct hns3_hw *hw = &hns->hw;
1926 	int ret;
1927 
1928 	if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1929 		ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
1930 					0, true, NULL, 0);
1931 		if (ret)
1932 			return ret;
1933 	}
1934 	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
1935 
1936 	return 0;
1937 }
1938 
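/*
 * Reset-ops callback: bring the data path down before a reset. The link is
 * reported down, the poll job is stopped, the burst functions are switched
 * to dummies and, while firmware commands are still usable, the multicast
 * MAC table is cleared so that restore_conf can replay it afterwards.
 */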
1939 static int
hns3vf_stop_service(struct hns3_adapter * hns)1940 hns3vf_stop_service(struct hns3_adapter *hns)
1941 {
1942 	struct hns3_hw *hw = &hns->hw;
1943 	struct rte_eth_dev *eth_dev;
1944 
1945 	eth_dev = &rte_eth_devices[hw->data->port_id];
1946 	if (hw->adapter_state == HNS3_NIC_STARTED) {
1947 		/*
1948 		 * Make sure to update the link status before hns3vf_stop_poll_job,
1949 		 * because updating the link status depends on the polling job.
1950 		 */
1951 		hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
1952 					  hw->mac.link_duplex);
1953 		hns3vf_stop_poll_job(eth_dev);
1954 	}
1955 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
1956 
1957 	hns3_set_rxtx_function(eth_dev);
1958 	rte_wmb();
1959 	/* Disable datapath on secondary process. */
1960 	hns3_mp_req_stop_rxtx(eth_dev);
1961 	rte_delay_ms(hw->cfg_max_queues);
1962 
1963 	rte_spinlock_lock(&hw->lock);
1964 	if (hw->adapter_state == HNS3_NIC_STARTED ||
1965 	    hw->adapter_state == HNS3_NIC_STOPPING) {
1966 		hns3_enable_all_queues(hw, false);
1967 		hns3vf_do_stop(hns);
1968 		hw->reset.mbuf_deferred_free = true;
1969 	} else
1970 		hw->reset.mbuf_deferred_free = false;
1971 
1972 	/*
1973 	 * It is cumbersome for hardware to pick-and-choose entries for deletion
1974 	 * from table space. Hence, for a function reset, software intervention is
1975 	 * required to delete the entries.
1976 	 */
1977 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
1978 		hns3_configure_all_mc_mac_addr(hns, true);
1979 	rte_spinlock_unlock(&hw->lock);
1980 
1981 	return 0;
1982 }
1983 
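/*
 * Reset-ops callback: bring the data path back up after a reset by
 * reinstalling the burst functions, restarting the poll job and re-enabling
 * the Rx interrupts and TQPs when the port was started before the reset.
 */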
1984 static int
hns3vf_start_service(struct hns3_adapter * hns)1985 hns3vf_start_service(struct hns3_adapter *hns)
1986 {
1987 	struct hns3_hw *hw = &hns->hw;
1988 	struct rte_eth_dev *eth_dev;
1989 
1990 	eth_dev = &rte_eth_devices[hw->data->port_id];
1991 	hns3_set_rxtx_function(eth_dev);
1992 	hns3_mp_req_start_rxtx(eth_dev);
1993 	if (hw->adapter_state == HNS3_NIC_STARTED) {
1994 		hns3vf_start_poll_job(eth_dev);
1995 
1996 		/* Enable interrupt of all rx queues before enabling queues */
1997 		hns3_dev_all_rx_queue_intr_enable(hw, true);
1998 		/*
1999 		 * The enable state of each rxq and txq will be recovered after
2000 		 * reset, so we need to restore it before enabling all tqps.
2001 		 */
2002 		hns3_restore_tqp_enable_state(hw);
2003 		/*
2004 		 * When the initialization is finished, enable the queues to
2005 		 * receive and transmit packets.
2006 		 */
2007 		hns3_enable_all_queues(hw, true);
2008 	}
2009 
2010 	return 0;
2011 }
2012 
2013 static int
hns3vf_check_default_mac_change(struct hns3_hw * hw)2014 hns3vf_check_default_mac_change(struct hns3_hw *hw)
2015 {
2016 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
2017 	struct rte_ether_addr *hw_mac;
2018 	int ret;
2019 
2020 	/*
2021 	 * The kernel hns3 PF ethdev driver supports setting a VF MAC address
2022 	 * on the host via the "ip link set ..." command. If the hns3 PF kernel
2023 	 * ethdev driver sets the MAC address for a VF device after the
2024 	 * initialization of that VF device, the PF driver notifies the VF
2025 	 * driver to reset the VF device so that the new MAC address takes
2026 	 * effect immediately. The hns3 VF PMD should check whether the MAC
2027 	 * address has been changed by the PF kernel ethdev driver; if so, the
2028 	 * VF driver should configure the hardware with the new MAC address
2029 	 * during the hardware configuration recovery stage of the reset.
2030 	 */
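	/*
	 * Illustrative host-side command only (placeholder interface, VF index
	 * and MAC values, not taken from this driver):
	 *   ip link set <pf_netdev> vf <vf_id> mac <new_mac>
	 */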
2031 	ret = hns3vf_get_host_mac_addr(hw);
2032 	if (ret)
2033 		return ret;
2034 
2035 	hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
2036 	ret = rte_is_zero_ether_addr(hw_mac);
2037 	if (ret) {
2038 		rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
2039 	} else {
2040 		ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
2041 		if (!ret) {
2042 			rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
2043 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
2044 					      &hw->data->mac_addrs[0]);
2045 			hns3_warn(hw, "Default MAC address has been changed to:"
2046 				  " %s by the host PF kernel ethdev driver",
2047 				  mac_str);
2048 		}
2049 	}
2050 
2051 	return 0;
2052 }
2053 
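/*
 * Replay the user configuration after a reset: MAC and multicast addresses,
 * promiscuous mode, VLAN settings, Rx interrupts and GRO. Restart the queues
 * if the port was started, then report the VF as alive to the PF. On failure
 * the MAC tables configured so far are rolled back.
 */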
2054 static int
hns3vf_restore_conf(struct hns3_adapter * hns)2055 hns3vf_restore_conf(struct hns3_adapter *hns)
2056 {
2057 	struct hns3_hw *hw = &hns->hw;
2058 	int ret;
2059 
2060 	ret = hns3vf_check_default_mac_change(hw);
2061 	if (ret)
2062 		return ret;
2063 
2064 	ret = hns3_configure_all_mac_addr(hns, false);
2065 	if (ret)
2066 		return ret;
2067 
2068 	ret = hns3_configure_all_mc_mac_addr(hns, false);
2069 	if (ret)
2070 		goto err_mc_mac;
2071 
2072 	ret = hns3vf_restore_promisc(hns);
2073 	if (ret)
2074 		goto err_vlan_table;
2075 
2076 	ret = hns3vf_restore_vlan_conf(hns);
2077 	if (ret)
2078 		goto err_vlan_table;
2079 
2080 	ret = hns3vf_get_port_base_vlan_filter_state(hw);
2081 	if (ret)
2082 		goto err_vlan_table;
2083 
2084 	ret = hns3_restore_rx_interrupt(hw);
2085 	if (ret)
2086 		goto err_vlan_table;
2087 
2088 	ret = hns3_restore_gro_conf(hw);
2089 	if (ret)
2090 		goto err_vlan_table;
2091 
2092 	if (hw->adapter_state == HNS3_NIC_STARTED) {
2093 		ret = hns3vf_do_start(hns, false);
2094 		if (ret)
2095 			goto err_vlan_table;
2096 		hns3_info(hw, "hns3vf dev restart successful!");
2097 	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
2098 		hw->adapter_state = HNS3_NIC_CONFIGURED;
2099 
2100 	ret = hns3vf_set_alive(hw, true);
2101 	if (ret) {
2102 		hns3_err(hw, "failed to send alive to PF: %d", ret);
2103 		goto err_vlan_table;
2104 	}
2105 
2106 	return 0;
2107 
2108 err_vlan_table:
2109 	hns3_configure_all_mc_mac_addr(hns, true);
2110 err_mc_mac:
2111 	hns3_configure_all_mac_addr(hns, true);
2112 	return ret;
2113 }
2114 
2115 static enum hns3_reset_level
hns3vf_get_reset_level(struct hns3_hw * hw,uint64_t * levels)2116 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
2117 {
2118 	enum hns3_reset_level reset_level;
2119 
2120 	/* return the highest priority reset level amongst all */
2121 	if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
2122 		reset_level = HNS3_VF_RESET;
2123 	else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
2124 		reset_level = HNS3_VF_FULL_RESET;
2125 	else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
2126 		reset_level = HNS3_VF_PF_FUNC_RESET;
2127 	else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
2128 		reset_level = HNS3_VF_FUNC_RESET;
2129 	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
2130 		reset_level = HNS3_FLR_RESET;
2131 	else
2132 		reset_level = HNS3_NONE_RESET;
2133 
2134 	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
2135 		return HNS3_NONE_RESET;
2136 
2137 	return reset_level;
2138 }
2139 
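/*
 * Deferred reset task: if the vector0 interrupt appears to have been lost
 * while handling was deferred, re-run the interrupt handler, then drive the
 * pending reset through hns3_reset_process() and warn when the processing
 * takes longer than HNS3_RESET_PROCESS_MS.
 */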
2140 static void
hns3vf_reset_service(void * param)2141 hns3vf_reset_service(void *param)
2142 {
2143 	struct hns3_adapter *hns = (struct hns3_adapter *)param;
2144 	struct hns3_hw *hw = &hns->hw;
2145 	enum hns3_reset_level reset_level;
2146 	struct timeval tv_delta;
2147 	struct timeval tv_start;
2148 	struct timeval tv;
2149 	uint64_t msec;
2150 
2151 	/*
2152 	 * The interrupt was not triggered within the delay time and may have
2153 	 * been lost. It is necessary to handle the interrupt here to recover
2154 	 * from the error.
2155 	 */
2156 	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2157 			    SCHEDULE_DEFERRED) {
2158 		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
2159 				 __ATOMIC_RELAXED);
2160 		hns3_err(hw, "Handling interrupts in delayed tasks");
2161 		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
2162 		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2163 		if (reset_level == HNS3_NONE_RESET) {
2164 			hns3_err(hw, "No reset level is set, try global reset");
2165 			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
2166 		}
2167 	}
2168 	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
2169 
2170 	/*
2171 	 * Hardware reset has been notified, we now have to poll & check if
2172 	 * hardware has actually completed the reset sequence.
2173 	 */
2174 	reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2175 	if (reset_level != HNS3_NONE_RESET) {
2176 		hns3_clock_gettime(&tv_start);
2177 		hns3_reset_process(hns, reset_level);
2178 		hns3_clock_gettime(&tv);
2179 		timersub(&tv, &tv_start, &tv_delta);
2180 		msec = hns3_clock_calctime_ms(&tv_delta);
2181 		if (msec > HNS3_RESET_PROCESS_MS)
2182 			hns3_err(hw, "%d handle long time delta %" PRIu64
2183 				 " ms time=%ld.%.6ld",
2184 				 hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
2185 	}
2186 }
2187 
2188 static int
hns3vf_reinit_dev(struct hns3_adapter * hns)2189 hns3vf_reinit_dev(struct hns3_adapter *hns)
2190 {
2191 	struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
2192 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2193 	struct hns3_hw *hw = &hns->hw;
2194 	int ret;
2195 
2196 	if (hw->reset.level == HNS3_VF_FULL_RESET) {
2197 		rte_intr_disable(pci_dev->intr_handle);
2198 		ret = hns3vf_set_bus_master(pci_dev, true);
2199 		if (ret < 0) {
2200 			hns3_err(hw, "failed to set pci bus master, ret = %d", ret);
2201 			return ret;
2202 		}
2203 	}
2204 
2205 	/* Firmware command initialize */
2206 	ret = hns3_cmd_init(hw);
2207 	if (ret) {
2208 		hns3_err(hw, "Failed to init cmd: %d", ret);
2209 		return ret;
2210 	}
2211 
2212 	if (hw->reset.level == HNS3_VF_FULL_RESET) {
2213 		/*
2214 		 * UIO enables MSI-X by writing to the PCIe configuration space,
2215 		 * while vfio_pci enables MSI-X in rte_intr_enable.
2216 		 */
2217 		if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO ||
2218 		    pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) {
2219 			if (hns3vf_enable_msix(pci_dev, true))
2220 				hns3_err(hw, "Failed to enable msix");
2221 		}
2222 
2223 		rte_intr_enable(pci_dev->intr_handle);
2224 	}
2225 
2226 	ret = hns3_reset_all_tqps(hns);
2227 	if (ret) {
2228 		hns3_err(hw, "Failed to reset all queues: %d", ret);
2229 		return ret;
2230 	}
2231 
2232 	ret = hns3vf_init_hardware(hns);
2233 	if (ret) {
2234 		hns3_err(hw, "Failed to init hardware: %d", ret);
2235 		return ret;
2236 	}
2237 
2238 	return 0;
2239 }
2240 
2241 static const struct eth_dev_ops hns3vf_eth_dev_ops = {
2242 	.dev_configure      = hns3vf_dev_configure,
2243 	.dev_start          = hns3vf_dev_start,
2244 	.dev_stop           = hns3vf_dev_stop,
2245 	.dev_close          = hns3vf_dev_close,
2246 	.mtu_set            = hns3vf_dev_mtu_set,
2247 	.promiscuous_enable = hns3vf_dev_promiscuous_enable,
2248 	.promiscuous_disable = hns3vf_dev_promiscuous_disable,
2249 	.allmulticast_enable = hns3vf_dev_allmulticast_enable,
2250 	.allmulticast_disable = hns3vf_dev_allmulticast_disable,
2251 	.stats_get          = hns3_stats_get,
2252 	.stats_reset        = hns3_stats_reset,
2253 	.xstats_get         = hns3_dev_xstats_get,
2254 	.xstats_get_names   = hns3_dev_xstats_get_names,
2255 	.xstats_reset       = hns3_dev_xstats_reset,
2256 	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
2257 	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
2258 	.dev_infos_get      = hns3_dev_infos_get,
2259 	.fw_version_get     = hns3_fw_version_get,
2260 	.rx_queue_setup     = hns3_rx_queue_setup,
2261 	.tx_queue_setup     = hns3_tx_queue_setup,
2262 	.rx_queue_release   = hns3_dev_rx_queue_release,
2263 	.tx_queue_release   = hns3_dev_tx_queue_release,
2264 	.rx_queue_start     = hns3_dev_rx_queue_start,
2265 	.rx_queue_stop      = hns3_dev_rx_queue_stop,
2266 	.tx_queue_start     = hns3_dev_tx_queue_start,
2267 	.tx_queue_stop      = hns3_dev_tx_queue_stop,
2268 	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
2269 	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
2270 	.rxq_info_get       = hns3_rxq_info_get,
2271 	.txq_info_get       = hns3_txq_info_get,
2272 	.rx_burst_mode_get  = hns3_rx_burst_mode_get,
2273 	.tx_burst_mode_get  = hns3_tx_burst_mode_get,
2274 	.mac_addr_add       = hns3_add_mac_addr,
2275 	.mac_addr_remove    = hns3_remove_mac_addr,
2276 	.mac_addr_set       = hns3vf_set_default_mac_addr,
2277 	.set_mc_addr_list   = hns3_set_mc_mac_addr_list,
2278 	.link_update        = hns3vf_dev_link_update,
2279 	.rss_hash_update    = hns3_dev_rss_hash_update,
2280 	.rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
2281 	.reta_update        = hns3_dev_rss_reta_update,
2282 	.reta_query         = hns3_dev_rss_reta_query,
2283 	.flow_ops_get       = hns3_dev_flow_ops_get,
2284 	.vlan_filter_set    = hns3vf_vlan_filter_set,
2285 	.vlan_offload_set   = hns3vf_vlan_offload_set,
2286 	.get_reg            = hns3_get_regs,
2287 	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
2288 	.tx_done_cleanup    = hns3_tx_done_cleanup,
2289 	.eth_dev_priv_dump  = hns3_eth_dev_priv_dump,
2290 };
2291 
2292 static const struct hns3_reset_ops hns3vf_reset_ops = {
2293 	.reset_service       = hns3vf_reset_service,
2294 	.stop_service        = hns3vf_stop_service,
2295 	.prepare_reset       = hns3vf_prepare_reset,
2296 	.wait_hardware_ready = hns3vf_wait_hardware_ready,
2297 	.reinit_dev          = hns3vf_reinit_dev,
2298 	.restore_conf        = hns3vf_restore_conf,
2299 	.start_service       = hns3vf_start_service,
2300 };
2301 
2302 static void
hns3vf_init_hw_ops(struct hns3_hw * hw)2303 hns3vf_init_hw_ops(struct hns3_hw *hw)
2304 {
2305 	hw->ops.add_mc_mac_addr = hns3vf_add_mc_mac_addr;
2306 	hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr;
2307 	hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr;
2308 	hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr;
2309 	hw->ops.bind_ring_with_vector = hns3vf_bind_ring_with_vector;
2310 }
2311 
2312 static int
hns3vf_dev_init(struct rte_eth_dev * eth_dev)2313 hns3vf_dev_init(struct rte_eth_dev *eth_dev)
2314 {
2315 	struct hns3_adapter *hns = eth_dev->data->dev_private;
2316 	struct hns3_hw *hw = &hns->hw;
2317 	int ret;
2318 
2319 	PMD_INIT_FUNC_TRACE();
2320 
2321 	hns3_flow_init(eth_dev);
2322 
2323 	hns3_set_rxtx_function(eth_dev);
2324 	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
2325 	eth_dev->rx_queue_count = hns3_rx_queue_count;
2326 	ret = hns3_mp_init(eth_dev);
2327 	if (ret)
2328 		goto err_mp_init;
2329 
2330 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2331 		hns3_tx_push_init(eth_dev);
2332 		return 0;
2333 	}
2334 
2335 	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
2336 	hns->is_vf = true;
2337 	hw->data = eth_dev->data;
2338 	hns3_parse_devargs(eth_dev);
2339 
2340 	ret = hns3_reset_init(hw);
2341 	if (ret)
2342 		goto err_init_reset;
2343 	hw->reset.ops = &hns3vf_reset_ops;
2344 
2345 	hns3vf_init_hw_ops(hw);
2346 	ret = hns3vf_init_vf(eth_dev);
2347 	if (ret) {
2348 		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
2349 		goto err_init_vf;
2350 	}
2351 
2352 	ret = hns3_init_mac_addrs(eth_dev);
2353 	if (ret != 0)
2354 		goto err_init_mac_addrs;
2355 
2356 	hw->adapter_state = HNS3_NIC_INITIALIZED;
2357 
2358 	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2359 			    SCHEDULE_PENDING) {
2360 		hns3_err(hw, "Reschedule reset service after dev_init");
2361 		hns3_schedule_reset(hns);
2362 	} else {
2363 		/* IMP will wait for the ready flag before reset */
2364 		hns3_notify_reset_ready(hw, false);
2365 	}
2366 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
2367 			  eth_dev);
2368 	return 0;
2369 
2370 err_init_mac_addrs:
2371 	hns3vf_uninit_vf(eth_dev);
2372 
2373 err_init_vf:
2374 	rte_free(hw->reset.wait_data);
2375 
2376 err_init_reset:
2377 	hns3_mp_uninit(eth_dev);
2378 
2379 err_mp_init:
2380 	eth_dev->dev_ops = NULL;
2381 	eth_dev->rx_pkt_burst = NULL;
2382 	eth_dev->rx_descriptor_status = NULL;
2383 	eth_dev->tx_pkt_burst = NULL;
2384 	eth_dev->tx_pkt_prepare = NULL;
2385 	eth_dev->tx_descriptor_status = NULL;
2386 
2387 	return ret;
2388 }
2389 
2390 static int
hns3vf_dev_uninit(struct rte_eth_dev * eth_dev)2391 hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
2392 {
2393 	struct hns3_adapter *hns = eth_dev->data->dev_private;
2394 	struct hns3_hw *hw = &hns->hw;
2395 
2396 	PMD_INIT_FUNC_TRACE();
2397 
2398 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2399 		hns3_mp_uninit(eth_dev);
2400 		return 0;
2401 	}
2402 
2403 	if (hw->adapter_state < HNS3_NIC_CLOSING)
2404 		hns3vf_dev_close(eth_dev);
2405 
2406 	hw->adapter_state = HNS3_NIC_REMOVED;
2407 	return 0;
2408 }
2409 
2410 static int
eth_hns3vf_pci_probe(struct rte_pci_driver * pci_drv __rte_unused,struct rte_pci_device * pci_dev)2411 eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2412 		     struct rte_pci_device *pci_dev)
2413 {
2414 	return rte_eth_dev_pci_generic_probe(pci_dev,
2415 					     sizeof(struct hns3_adapter),
2416 					     hns3vf_dev_init);
2417 }
2418 
2419 static int
eth_hns3vf_pci_remove(struct rte_pci_device * pci_dev)2420 eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
2421 {
2422 	return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
2423 }
2424 
2425 static const struct rte_pci_id pci_id_hns3vf_map[] = {
2426 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
2427 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
2428 	{ .vendor_id = 0, }, /* sentinel */
2429 };
2430 
2431 static struct rte_pci_driver rte_hns3vf_pmd = {
2432 	.id_table = pci_id_hns3vf_map,
2433 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2434 	.probe = eth_hns3vf_pci_probe,
2435 	.remove = eth_hns3vf_pci_remove,
2436 };
2437 
2438 RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
2439 RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
2440 RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
2441 RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
2442 		HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
2443 		HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
2444 		HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
2445 		HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16_t> ");
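/*
 * Illustrative EAL usage of the devargs registered above (example PCI
 * address; assumes the HNS3_DEVARG_* macros expand to the lowercase key
 * strings shown, as defined elsewhere in the driver):
 *   dpdk-testpmd -a 0000:bd:01.0,rx_func_hint=vec,mbx_time_limit_ms=600 -- -i
 */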
2446