/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>

#include <rte_alarm.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_tailq.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_svf.h"
#include "nicvf_logs.h"

static int nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
			  bool cleanup);
static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);

RTE_LOG_REGISTER(nicvf_logtype_mbox, pmd.net.thunderx.mbox, NOTICE);
RTE_LOG_REGISTER(nicvf_logtype_init, pmd.net.thunderx.init, NOTICE);
RTE_LOG_REGISTER(nicvf_logtype_driver, pmd.net.thunderx.driver, NOTICE);

static void
nicvf_link_status_update(struct nicvf *nic,
			 struct rte_eth_link *link)
{
	memset(link, 0, sizeof(*link));

	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_AUTONEG;
}

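/*
 * The VF has no dedicated link-state interrupt; instead a periodic EAL
 * alarm polls the mailbox interrupt status. On a BGX link change the
 * cached link state is pushed to ethdev and an LSC event is raised for
 * applications that enabled intr_conf.lsc. The handler re-arms itself,
 * so polling continues at NICVF_INTR_POLL_INTERVAL_MS granularity until
 * the alarm is cancelled via nicvf_periodic_alarm_stop().
 */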
static void
nicvf_interrupt(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_link link;

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (dev->data->dev_conf.intr_conf.lsc) {
			nicvf_link_status_update(nic, &link);
			rte_eth_linkstatus_set(dev, &link);

			rte_eth_dev_callback_process(dev,
						     RTE_ETH_EVENT_INTR_LSC,
						     NULL);
		}
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				nicvf_interrupt, dev);
}

static void
nicvf_vf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	nicvf_reg_poll_interrupts(nic);

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				nicvf_vf_interrupt, nic);
}

static int
nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}

static int
nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_cancel(fn, arg);
}

/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_CHECK_TIME 90   /* 9s (90 * 100ms) in total */
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int i;

	PMD_INIT_FUNC_TRACE();

	if (wait_to_complete) {
		/* rte_eth_link_get() might need to wait up to 9 seconds */
		for (i = 0; i < MAX_CHECK_TIME; i++) {
			nicvf_link_status_update(nic, &link);
			if (link.link_status == ETH_LINK_UP)
				break;
			rte_delay_ms(CHECK_INTERVAL);
		}
	} else {
		nicvf_link_status_update(nic, &link);
	}

	return rte_eth_linkstatus_set(dev, &link);
}

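/*
 * The MTU checks below derive the L2 frame size as
 * mtu + NIC_HW_L2_OVERHEAD and validate it against both the hardware
 * limits (NIC_HW_MIN_FRS .. NIC_HW_MAX_FRS) and the Rx buffer budget:
 * without scattered Rx the whole frame must fit one mbuf data room;
 * with scattered Rx it may span up to NIC_HW_MAX_SEGS buffers. The new
 * MTU is then pushed to hardware via the PF mailbox and mirrored into
 * every secondary VF (sqs) so their Rx paths stay consistent.
 */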
static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
	size_t i;
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->dev_started && !dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg>  >= max_frame */
	if (dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > RTE_ETHER_MAX_LEN)
		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (nicvf_mbox_update_hw_max_frs(nic, mtu))
		return -EINVAL;

	/* Update max_rx_pkt_len */
	rxmode->max_rx_pkt_len = mtu + RTE_ETHER_HDR_LEN;
	nic->mtu = mtu;

	for (i = 0; i < nic->sqs_count; i++)
		nic->snicvf[i]->mtu = mtu;

	return 0;
}

static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL) {
		regs->length = nicvf_reg_get_count();
		regs->width = THUNDERX_REG_BYTES;
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
		(regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}

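/*
 * Port stats are gathered in two passes: per-queue counters are read
 * for the primary VF's queue range and then for every attached
 * secondary VF (snicvf), with the per-qset hardware index recovered by
 * folding the ethdev queue number modulo the qset queue count. Only the
 * first RTE_ETHDEV_QUEUE_STAT_CNTRS queues can be reported per ethdev,
 * so higher queue indices are silently skipped.
 */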
static int
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	size_t i;

	/* RX queue indices for the first VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/* Reading per RX ring stats */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* TX queue indices for the first VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Reading per TX ring stats */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];

		if (snic == NULL)
			break;

		/* RX queue indices for a secondary VF */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);

		/* Reading per RX ring stats */
		for (qidx = rx_start; qidx <= rx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_rx_qstats(snic, &rx_qstats,
					       qidx % MAX_RCV_QUEUES_PER_QS);
			stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
			stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
		}

		/* TX queue indices for a secondary VF */
		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		/* Reading per TX ring stats */
		for (qidx = tx_start; qidx <= tx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_tx_qstats(snic, &tx_qstats,
					       qidx % MAX_SND_QUEUES_PER_QS);
			stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
			stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
		}
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;

	return 0;
}

static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_common[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_tunnel[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;

	copied = sizeof(ptypes_common);
	memcpy(ptypes, ptypes_common, copied);
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		memcpy((char *)ptypes + copied, ptypes_tunnel,
			sizeof(ptypes_tunnel));
		copied += sizeof(ptypes_tunnel);
	}

	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));

	/* All Ptypes are supported in all Rx functions. */
	return ptypes;
}

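/*
 * Stats reset builds per-queue bitmasks where each queue contributes
 * two bits (0x3 << (qidx * 2)), presumably selecting both hardware stat
 * words kept per queue. The primary VF additionally clears its
 * port-level Rx/Tx counters through the 0x3FFF and 0x1F masks; the
 * secondary VFs only clear queue counters, again folded to per-qset
 * indices.
 */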
static int
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	int ret;

	/* Reset all primary nic counters */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
	for (i = rx_start; i <= rx_end; i++)
		rxqs |= (0x3 << (i * 2));

	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
	for (i = tx_start; i <= tx_end; i++)
		txqs |= (0x3 << (i * 2));

	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
	if (ret != 0)
		return ret;

	/* Reset secondary nic queue counters */
	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];
		/* Use a distinct index for the queue walks: reusing 'i'
		 * would clobber the outer secondary-VF iterator.
		 */
		int qidx;

		if (snic == NULL)
			break;

		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
		for (qidx = rx_start; qidx <= rx_end; qidx++)
			rxqs |= (0x3 << ((qidx % MAX_CMP_QUEUES_PER_QS) * 2));

		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		for (qidx = tx_start; qidx <= tx_end; qidx++)
			txqs |= (0x3 << ((qidx % MAX_SND_QUEUES_PER_QS) * 2));

		ret = nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static int
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

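/*
 * Translate ethdev RSS hash flags into the hardware RSS configuration
 * bits and back. IPv4 and IPv6 share a single RSS_IP_ENA bit, so the
 * reverse mapping necessarily reports both families. Tunnel hash bits
 * are honoured only when the hardware advertises tunnel parsing
 * (NICVF_CAP_TUNNEL_PARSING).
 */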
static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}

static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
				ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
				ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}
	return ethdev_rss;
}

static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		PMD_DRV_LOG(ERR,
			    "The size of the configured hash lookup table "
			    "(%u) doesn't match the size the hardware supports "
			    "(%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table, group by group */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] =
					tbl[i * RTE_RETA_GROUP_SIZE + j];
	}

	return 0;
}

static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
			"(%u) doesn't match the size the hardware supports "
			"(%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table, group by group */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[i * RTE_RETA_GROUP_SIZE + j] =
					reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}

static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}

static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		PMD_DRV_LOG(ERR, "Hash key size mismatch %u",
			    rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}

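/*
 * The ring allocators below reserve DMA memzones sized for the maximum
 * supported ring (CMP_QUEUE_SZ_MAX, SND_QUEUE_SZ_MAX, RBDR_QUEUE_SZ_MAX)
 * rather than the requested descriptor count, presumably so a later
 * reconfiguration with a larger ring can reuse the same zone
 * (rte_eth_dma_zone_reserve() returns an already existing zone). The
 * requested count only determines qlen_mask and must be a power of two.
 */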
static int
nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->iova;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "sq",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->iova;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		      uint32_t desc_cnt, uint32_t buffsz)
{
	struct nicvf_rbdr *rbdr;
	const struct rte_memzone *rz;
	uint32_t ring_size;

	assert(nic->rbdr == NULL);
	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
				  RTE_CACHE_LINE_SIZE, nic->node);
	if (rbdr == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
		return -ENOMEM;
	}

	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
				      nicvf_netdev_qidx(nic, 0), ring_size,
				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
		rte_free(rbdr);
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rbdr->phys = rz->iova;
	rbdr->tail = 0;
	rbdr->next_tail = 0;
	rbdr->desc = rz->addr;
	rbdr->buffsz = buffsz;
	rbdr->qlen_mask = desc_cnt - 1;
	rbdr->rbdr_status =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
	rbdr->rbdr_door =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;

	nic->rbdr = rbdr;
	return 0;
}

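/*
 * Teardown path for receive buffers that were pre-charged into the RBDR
 * but never handed to the application. An RBDR entry holds only the
 * buffer IOVA, so the owning mbuf is recovered through the per-rxq
 * physical-to-virtual offset and returned to its mempool; the
 * precharge_cnt bookkeeping decides which rxq's pool gets the buffer
 * back. The walk from head to tail covers every still-posted entry.
 */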
static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
			nicvf_iova_addr_t phy)
{
	uint16_t qidx;
	void *obj;
	struct nicvf_rxq *rxq;
	uint16_t rx_start, rx_end;

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		if (rxq->precharge_cnt) {
			obj = (void *)nicvf_mbuff_phy2virt(phy,
							   rxq->mbuf_phys_off);
			rte_mempool_put(rxq->pool, obj);
			rxq->precharge_cnt--;
			break;
		}
	}
}

static inline void
nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
{
	uint32_t qlen_mask, head;
	struct rbdr_entry_t *entry;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	qlen_mask = rbdr->qlen_mask;
	head = rbdr->head;
	while (head != rbdr->tail) {
		entry = rbdr->desc + head;
		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
		head++;
		head = head & qlen_mask;
	}
}

static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}

static inline int
nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	txq->pool = NULL;
	ret = nicvf_qset_sq_config(nic, qidx, txq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_sq_error;
	}

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return ret;

config_sq_error:
	nicvf_qset_sq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_tx_queue_release_mbufs(txq);
	nicvf_tx_queue_reset(txq);

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return ret;
}

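/*
 * CPI (channel parse index, in the ThunderX mailbox naming) controls
 * how received traffic is steered to receive queues. It is
 * reprogrammed through the PF mailbox whenever the set of started Rx
 * queues changes; with CPI_ALG_NONE the actual queue spreading is left
 * to the RSS configuration and indirection table set up below.
 */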
static inline int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}

static inline int
nicvf_configure_rss(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t rsshf;
	int ret = -EINVAL;

	rsshf = nicvf_rss_ethdev_to_nic(nic,
			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
		    dev->data->dev_conf.rxmode.mq_mode,
		    dev->data->nb_rx_queues,
		    dev->data->dev_conf.lpbk_mode, rsshf);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		ret = nicvf_rss_term(nic);
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);

	return ret;
}

static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
				RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				     NIC_MAX_RSS_IDR_TBL_SIZE);
}

static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}

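/*
 * Burst-function selection. The Tx path has two variants: a simple
 * single-segment routine and a multi-segment one; the latter is chosen
 * as soon as any queue enables DEV_TX_OFFLOAD_MULTI_SEGS. The free
 * method logged afterwards depends on DEV_TX_OFFLOAD_MBUF_FAST_FREE,
 * which was resolved per queue at setup time.
 */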
static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
	struct nicvf_txq *txq = NULL;
	size_t i;
	bool multiseg = false;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
			multiseg = true;
			break;
		}
	}

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (multiseg) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts;
	}

	if (!txq)
		return;

	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
	else
		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
}

static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	const eth_rx_burst_t rx_burst_func[2][2][2] = {
	/* [NORMAL/SCATTER] [CKSUM/NO_CKSUM] [VLAN_STRIP/NO_VLAN_STRIP] */
		[0][0][0] = nicvf_recv_pkts_no_offload,
		[0][0][1] = nicvf_recv_pkts_vlan_strip,
		[0][1][0] = nicvf_recv_pkts_cksum,
		[0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
		[1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
		[1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
		[1][1][0] = nicvf_recv_pkts_multiseg_cksum,
		[1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
	};

	dev->rx_pkt_burst =
		rx_burst_func[dev->data->scattered_rx]
			[nic->offload_cksum][nic->vlan_strip];
}

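/*
 * The queue setup and start/stop hooks below map an ethdev-level queue
 * index onto the owning VF: indices beyond MAX_SND_QUEUES_PER_QS (or
 * MAX_RCV_QUEUES_PER_QS on the Rx side) select a secondary qset
 * (snicvf) and are reduced modulo the per-qset queue count before
 * touching hardware registers.
 */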
static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	bool is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t offloads;

	PMD_INIT_FUNC_TRACE();

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
		socket_id, nic->node);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				tx_conf->tx_free_thresh :
				NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
		tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				nicvf_netdev_qidx(nic, qidx));
		nicvf_dev_tx_queue_release(
			dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
					RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
			     nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
	txq->offloads = offloads;

	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
		(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
				NICVF_TX_FREE_MPOOL_THRESH :
				tx_conf->tx_free_thresh);
		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
	} else {
		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
				nb_desc * sizeof(struct rte_mbuf *),
				RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
			" phys=0x%" PRIx64 " offloads=0x%" PRIx64,
			nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
			txq->phys, txq->offloads);

	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static inline void
nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
{
	uint32_t rxq_cnt;
	uint32_t nb_pkts, released_pkts = 0;
	uint32_t refill_cnt = 0;
	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];

	if (dev->rx_pkt_burst == NULL)
		return;

	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
				nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
					NICVF_MAX_RX_FREE_THRESH);
		PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
		while (nb_pkts) {
			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
			released_pkts++;
		}
	}

	refill_cnt += nicvf_dev_rbdr_refill(dev,
			nicvf_netdev_qidx(rxq->nic, rxq->queue_id));

	PMD_DRV_LOG(INFO, "free_cnt=%d  refill_cnt=%d",
		    released_pkts, refill_cnt);
}

static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}

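/*
 * Starting an Rx queue programs both the receive queue (RQ) and its
 * completion queue (CQ) against the shared RBDR of the owning qset. On
 * failure, the error path unwinds through the fallthrough labels,
 * reclaiming whichever stages had already been configured so the
 * hardware never holds a half-initialized queue.
 */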
static inline int
nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret;

	assert(qidx < MAX_RCV_QUEUES_PER_QS);

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	/* Update rbdr pointer to all rxq */
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	rxq->shared_rbdr = nic->rbdr;

	ret = nicvf_qset_rq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_rq_error;
	}
	ret = nicvf_qset_cq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_cq_error;
	}

	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

config_cq_error:
	nicvf_qset_cq_reclaim(nic, qidx);
config_rq_error:
	nicvf_qset_rq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret, other_error;

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_rq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error = ret;
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_rx_queue_release_mbufs(dev, rxq);
	nicvf_rx_queue_reset(rxq);

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error |= ret;
	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return other_error;
}

static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(rx_queue);
}

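/*
 * The ethdev-level Rx queue start/stop hooks re-run the CPI and RSS
 * RETA configuration after each state change, since traffic steering
 * must only reference queues that are currently started.
 */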
1171a9643ea8Slogwang static int
nicvf_dev_rx_queue_start(struct rte_eth_dev * dev,uint16_t qidx)1172a9643ea8Slogwang nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1173a9643ea8Slogwang {
11742bfe3f2eSlogwang 	struct nicvf *nic = nicvf_pmd_priv(dev);
1175a9643ea8Slogwang 	int ret;
1176a9643ea8Slogwang 
11772bfe3f2eSlogwang 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
11782bfe3f2eSlogwang 		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
11792bfe3f2eSlogwang 
11802bfe3f2eSlogwang 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
11812bfe3f2eSlogwang 
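	/*
	 * Worked example of the mapping above (assuming
	 * MAX_RCV_QUEUES_PER_QS is 8): port-level qidx 11 selects the
	 * secondary qset snicvf[11 / 8 - 1] = snicvf[0] and becomes
	 * queue 11 % 8 = 3 within that qset.
	 */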
11822bfe3f2eSlogwang 	ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
1183a9643ea8Slogwang 	if (ret)
1184a9643ea8Slogwang 		return ret;
1185a9643ea8Slogwang 
1186a9643ea8Slogwang 	ret = nicvf_configure_cpi(dev);
1187a9643ea8Slogwang 	if (ret)
1188a9643ea8Slogwang 		return ret;
1189a9643ea8Slogwang 
1190a9643ea8Slogwang 	return nicvf_configure_rss_reta(dev);
1191a9643ea8Slogwang }
1192a9643ea8Slogwang 
1193a9643ea8Slogwang static int
1194a9643ea8Slogwang nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1195a9643ea8Slogwang {
1196a9643ea8Slogwang 	int ret;
11972bfe3f2eSlogwang 	struct nicvf *nic = nicvf_pmd_priv(dev);
1198a9643ea8Slogwang 
11992bfe3f2eSlogwang 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
12002bfe3f2eSlogwang 		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
12012bfe3f2eSlogwang 
12022bfe3f2eSlogwang 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
12032bfe3f2eSlogwang 
12042bfe3f2eSlogwang 	ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
1205a9643ea8Slogwang 	ret |= nicvf_configure_cpi(dev);
1206a9643ea8Slogwang 	ret |= nicvf_configure_rss_reta(dev);
1207a9643ea8Slogwang 	return ret;
1208a9643ea8Slogwang }
1209a9643ea8Slogwang 
1210a9643ea8Slogwang static int
1211a9643ea8Slogwang nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1212a9643ea8Slogwang {
12132bfe3f2eSlogwang 	struct nicvf *nic = nicvf_pmd_priv(dev);
12142bfe3f2eSlogwang 
12152bfe3f2eSlogwang 	if (qidx >= MAX_SND_QUEUES_PER_QS)
12162bfe3f2eSlogwang 		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
12172bfe3f2eSlogwang 
12182bfe3f2eSlogwang 	qidx = qidx % MAX_SND_QUEUES_PER_QS;
12192bfe3f2eSlogwang 
12202bfe3f2eSlogwang 	return nicvf_vf_start_tx_queue(dev, nic, qidx);
1221a9643ea8Slogwang }
1222a9643ea8Slogwang 
1223a9643ea8Slogwang static int
1224a9643ea8Slogwang nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1225a9643ea8Slogwang {
12262bfe3f2eSlogwang 	struct nicvf *nic = nicvf_pmd_priv(dev);
12272bfe3f2eSlogwang 
12282bfe3f2eSlogwang 	if (qidx >= MAX_SND_QUEUES_PER_QS)
12292bfe3f2eSlogwang 		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
12302bfe3f2eSlogwang 
12312bfe3f2eSlogwang 	qidx = qidx % MAX_SND_QUEUES_PER_QS;
12322bfe3f2eSlogwang 
12332bfe3f2eSlogwang 	return nicvf_vf_stop_tx_queue(dev, nic, qidx);
12342bfe3f2eSlogwang }
12352bfe3f2eSlogwang 
12362bfe3f2eSlogwang static inline void
12372bfe3f2eSlogwang nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
12382bfe3f2eSlogwang {
12392bfe3f2eSlogwang 	uintptr_t p;
12402bfe3f2eSlogwang 	struct rte_mbuf mb_def;
1241d30ea906Sjfb8856606 	struct nicvf *nic = rxq->nic;
12422bfe3f2eSlogwang 
12432bfe3f2eSlogwang 	RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
12442bfe3f2eSlogwang 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
12452bfe3f2eSlogwang 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
12462bfe3f2eSlogwang 				offsetof(struct rte_mbuf, data_off) != 2);
12472bfe3f2eSlogwang 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
12482bfe3f2eSlogwang 				offsetof(struct rte_mbuf, data_off) != 4);
12492bfe3f2eSlogwang 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
12502bfe3f2eSlogwang 				offsetof(struct rte_mbuf, data_off) != 6);
1251d30ea906Sjfb8856606 	RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) -
1252d30ea906Sjfb8856606 				offsetof(struct nicvf_rxq,
1253d30ea906Sjfb8856606 					rxq_fastpath_data_start) > 128);
12542bfe3f2eSlogwang 	mb_def.nb_segs = 1;
1255d30ea906Sjfb8856606 	mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes);
12562bfe3f2eSlogwang 	mb_def.port = rxq->port_id;
12572bfe3f2eSlogwang 	rte_mbuf_refcnt_set(&mb_def, 1);
12582bfe3f2eSlogwang 
12592bfe3f2eSlogwang 	/* Prevent compiler reordering: rearm_data covers previous fields */
12602bfe3f2eSlogwang 	rte_compiler_barrier();
12612bfe3f2eSlogwang 	p = (uintptr_t)&mb_def.rearm_data;
12622bfe3f2eSlogwang 	rxq->mbuf_initializer.value = *(uint64_t *)p;
1263a9643ea8Slogwang }
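
/*
 * Illustrative sketch (an assumption about the fast path, not code from
 * this file): the 64-bit initializer built above lets the RX path rearm
 * data_off, refcnt, nb_segs and port of a received mbuf with one store
 * instead of four separate writes, roughly:
 *
 *	*(uint64_t *)(&mbuf->rearm_data) = rxq->mbuf_initializer.value;
 */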
1264a9643ea8Slogwang 
1265a9643ea8Slogwang static int
1266a9643ea8Slogwang nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1267a9643ea8Slogwang 			 uint16_t nb_desc, unsigned int socket_id,
1268a9643ea8Slogwang 			 const struct rte_eth_rxconf *rx_conf,
1269a9643ea8Slogwang 			 struct rte_mempool *mp)
1270a9643ea8Slogwang {
1271a9643ea8Slogwang 	uint16_t rx_free_thresh;
1272a9643ea8Slogwang 	struct nicvf_rxq *rxq;
1273a9643ea8Slogwang 	struct nicvf *nic = nicvf_pmd_priv(dev);
1274d30ea906Sjfb8856606 	uint64_t offloads;
1275d30ea906Sjfb8856606 	uint32_t buffsz;
1276d30ea906Sjfb8856606 	struct rte_pktmbuf_pool_private *mbp_priv;
1277a9643ea8Slogwang 
1278a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1279a9643ea8Slogwang 
1280d30ea906Sjfb8856606 	/* Validate first skip (skip_data_bytes) against the mbuf data room */
1281d30ea906Sjfb8856606 	mbp_priv = rte_mempool_get_priv(mp);
1282d30ea906Sjfb8856606 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1283d30ea906Sjfb8856606 	if (buffsz < (uint32_t)(nic->skip_bytes)) {
1284d30ea906Sjfb8856606 		PMD_INIT_LOG(ERR, "First skip is more than configured buffer size");
1285d30ea906Sjfb8856606 		return -EINVAL;
1286d30ea906Sjfb8856606 	}
1287d30ea906Sjfb8856606 
12882bfe3f2eSlogwang 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
12892bfe3f2eSlogwang 		nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
12902bfe3f2eSlogwang 
12912bfe3f2eSlogwang 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
12922bfe3f2eSlogwang 
1293a9643ea8Slogwang 	/* Socket id check */
1294a9643ea8Slogwang 	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1295a9643ea8Slogwang 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1296a9643ea8Slogwang 		socket_id, nic->node);
1297a9643ea8Slogwang 
12982bfe3f2eSlogwang 	/* Mempool memory must be contiguous, i.e. a single memory segment */
1299a9643ea8Slogwang 	if (mp->nb_mem_chunks != 1) {
13002bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
13012bfe3f2eSlogwang 		return -EINVAL;
13022bfe3f2eSlogwang 	}
13032bfe3f2eSlogwang 
13042bfe3f2eSlogwang 	/* Mempool memory must be physically contiguous */
1305d30ea906Sjfb8856606 	if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
13062bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1307a9643ea8Slogwang 		return -EINVAL;
1308a9643ea8Slogwang 	}
1309a9643ea8Slogwang 
1310a9643ea8Slogwang 	/* Rx deferred start is not supported */
1311a9643ea8Slogwang 	if (rx_conf->rx_deferred_start) {
1312a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1313a9643ea8Slogwang 		return -EINVAL;
1314a9643ea8Slogwang 	}
1315a9643ea8Slogwang 
1316a9643ea8Slogwang 	/* Round up nb_desc to an available qsize and validate the max number of desc */
1317a9643ea8Slogwang 	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1318a9643ea8Slogwang 	if (nb_desc == 0) {
1319a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "nb_desc is beyond the available HW CQ qsize");
1320a9643ea8Slogwang 		return -EINVAL;
1321a9643ea8Slogwang 	}
1322a9643ea8Slogwang 
1324a9643ea8Slogwang 	/* Check rx_free_thresh upper bound */
1325a9643ea8Slogwang 	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1326a9643ea8Slogwang 				rx_conf->rx_free_thresh :
1327a9643ea8Slogwang 				NICVF_DEFAULT_RX_FREE_THRESH);
1328a9643ea8Slogwang 	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1329a9643ea8Slogwang 		rx_free_thresh >= nb_desc * .75) {
1330a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Invalid rx_free_thresh %d, must be < 75%% of nb_desc",
1331a9643ea8Slogwang 				rx_free_thresh);
1332a9643ea8Slogwang 		return -EINVAL;
1333a9643ea8Slogwang 	}
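	/*
	 * Worked example of the check above: with nb_desc = 1024, any
	 * caller-supplied rx_free_thresh must stay below 768 (75% of the
	 * ring) and within NICVF_MAX_RX_FREE_THRESH.
	 */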
1334a9643ea8Slogwang 
1335a9643ea8Slogwang 	/* Free memory prior to re-allocation if needed */
13362bfe3f2eSlogwang 	if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
1337a9643ea8Slogwang 		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
13382bfe3f2eSlogwang 				nicvf_netdev_qidx(nic, qidx));
13392bfe3f2eSlogwang 		nicvf_dev_rx_queue_release(
13402bfe3f2eSlogwang 			dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
13412bfe3f2eSlogwang 		dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1342a9643ea8Slogwang 	}
1343a9643ea8Slogwang 
1344a9643ea8Slogwang 	/* Allocate rxq memory */
1345a9643ea8Slogwang 	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1346a9643ea8Slogwang 					RTE_CACHE_LINE_SIZE, nic->node);
1347a9643ea8Slogwang 	if (rxq == NULL) {
13482bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
13492bfe3f2eSlogwang 			     nicvf_netdev_qidx(nic, qidx));
1350a9643ea8Slogwang 		return -ENOMEM;
1351a9643ea8Slogwang 	}
1352a9643ea8Slogwang 
1353a9643ea8Slogwang 	rxq->nic = nic;
1354a9643ea8Slogwang 	rxq->pool = mp;
1355a9643ea8Slogwang 	rxq->queue_id = qidx;
1356a9643ea8Slogwang 	rxq->port_id = dev->data->port_id;
1357a9643ea8Slogwang 	rxq->rx_free_thresh = rx_free_thresh;
1358a9643ea8Slogwang 	rxq->rx_drop_en = rx_conf->rx_drop_en;
1359a9643ea8Slogwang 	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1360a9643ea8Slogwang 	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1361a9643ea8Slogwang 	rxq->precharge_cnt = 0;
13622bfe3f2eSlogwang 
13632bfe3f2eSlogwang 	if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
13642bfe3f2eSlogwang 		rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
13652bfe3f2eSlogwang 	else
1366a9643ea8Slogwang 		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
1367a9643ea8Slogwang 
13682bfe3f2eSlogwang 	nicvf_rxq_mbuf_setup(rxq);
13692bfe3f2eSlogwang 
1370a9643ea8Slogwang 	/* Alloc completion queue */
13712bfe3f2eSlogwang 	if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1372a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
1373a9643ea8Slogwang 		nicvf_dev_rx_queue_release(rxq);
1374a9643ea8Slogwang 		return -ENOMEM;
1375a9643ea8Slogwang 	}
1376a9643ea8Slogwang 
1377a9643ea8Slogwang 	nicvf_rx_queue_reset(rxq);
1378a9643ea8Slogwang 
1379d30ea906Sjfb8856606 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1380d30ea906Sjfb8856606 	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
1381d30ea906Sjfb8856606 			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
13822bfe3f2eSlogwang 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
1383d30ea906Sjfb8856606 			rte_mempool_avail_count(mp), rxq->phys, offloads);
1384a9643ea8Slogwang 
13852bfe3f2eSlogwang 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
13862bfe3f2eSlogwang 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
13872bfe3f2eSlogwang 		RTE_ETH_QUEUE_STATE_STOPPED;
1388a9643ea8Slogwang 	return 0;
1389a9643ea8Slogwang }
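
/*
 * Hypothetical usage sketch (application side, not part of this driver):
 * a mempool whose data room is RTE_PKTMBUF_HEADROOM + 2048 satisfies the
 * constraints enforced above and in nicvf_dev_start() (IOVA-contiguous,
 * buffer size a multiple of 128), provided the pool fits in one memory
 * chunk of the configured hugepages:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *		256, 0, RTE_PKTMBUF_HEADROOM + 2048, rte_socket_id());
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL, mp);
 */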
1390a9643ea8Slogwang 
13914418919fSjohnjiang static int
1392a9643ea8Slogwang nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1393a9643ea8Slogwang {
1394a9643ea8Slogwang 	struct nicvf *nic = nicvf_pmd_priv(dev);
13952bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1396a9643ea8Slogwang 
1397a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1398a9643ea8Slogwang 
13992bfe3f2eSlogwang 	/* Autonegotiation may be disabled */
14002bfe3f2eSlogwang 	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
14012bfe3f2eSlogwang 	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
14022bfe3f2eSlogwang 				 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
14032bfe3f2eSlogwang 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
14042bfe3f2eSlogwang 		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
14052bfe3f2eSlogwang 
14064418919fSjohnjiang 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
14074418919fSjohnjiang 	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
14082bfe3f2eSlogwang 	dev_info->max_rx_queues =
14092bfe3f2eSlogwang 			(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
14102bfe3f2eSlogwang 	dev_info->max_tx_queues =
14112bfe3f2eSlogwang 			(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1412a9643ea8Slogwang 	dev_info->max_mac_addrs = 1;
14132bfe3f2eSlogwang 	dev_info->max_vfs = pci_dev->max_vfs;
1414a9643ea8Slogwang 
1415d30ea906Sjfb8856606 	dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1416d30ea906Sjfb8856606 	dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1417d30ea906Sjfb8856606 	dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1418d30ea906Sjfb8856606 	dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1419a9643ea8Slogwang 
1420a9643ea8Slogwang 	dev_info->reta_size = nic->rss_info.rss_size;
1421a9643ea8Slogwang 	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1422a9643ea8Slogwang 	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1423a9643ea8Slogwang 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1424a9643ea8Slogwang 		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1425a9643ea8Slogwang 
1426a9643ea8Slogwang 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1427a9643ea8Slogwang 		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1428a9643ea8Slogwang 		.rx_drop_en = 0,
1429a9643ea8Slogwang 	};
1430a9643ea8Slogwang 
1431a9643ea8Slogwang 	dev_info->default_txconf = (struct rte_eth_txconf) {
1432a9643ea8Slogwang 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1433d30ea906Sjfb8856606 		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
1434d30ea906Sjfb8856606 			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
1435d30ea906Sjfb8856606 			DEV_TX_OFFLOAD_UDP_CKSUM          |
1436d30ea906Sjfb8856606 			DEV_TX_OFFLOAD_TCP_CKSUM,
1437a9643ea8Slogwang 	};
14384418919fSjohnjiang 
14394418919fSjohnjiang 	return 0;
1440a9643ea8Slogwang }
1441a9643ea8Slogwang 
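/*
 * RBDR precharge callback: each invocation hands back the IOVA of one
 * freshly allocated receive buffer, walking this VF's rxqs so the buffer
 * count stays balanced across their pools; returns 0 once every rxq has
 * reached its quota or the pools run dry.
 */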
14422bfe3f2eSlogwang static nicvf_iova_addr_t
14432bfe3f2eSlogwang rbdr_rte_mempool_get(void *dev, void *opaque)
1444a9643ea8Slogwang {
1445a9643ea8Slogwang 	uint16_t qidx;
1446a9643ea8Slogwang 	uintptr_t mbuf;
1447a9643ea8Slogwang 	struct nicvf_rxq *rxq;
14482bfe3f2eSlogwang 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
14492bfe3f2eSlogwang 	struct nicvf *nic = (struct nicvf *)opaque;
14502bfe3f2eSlogwang 	uint16_t rx_start, rx_end;
1451a9643ea8Slogwang 
14522bfe3f2eSlogwang 	/* Get queue ranges for this VF */
14532bfe3f2eSlogwang 	nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
14542bfe3f2eSlogwang 
14552bfe3f2eSlogwang 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
14562bfe3f2eSlogwang 		rxq = eth_dev->data->rx_queues[qidx];
1457a9643ea8Slogwang 		/* Maintain equal buffer count across all pools */
1458a9643ea8Slogwang 		if (rxq->precharge_cnt >= rxq->qlen_mask)
1459a9643ea8Slogwang 			continue;
1460a9643ea8Slogwang 		rxq->precharge_cnt++;
1461a9643ea8Slogwang 		mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
1462a9643ea8Slogwang 		if (mbuf)
1463a9643ea8Slogwang 			return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
1464a9643ea8Slogwang 	}
1465a9643ea8Slogwang 	return 0;
1466a9643ea8Slogwang }
1467a9643ea8Slogwang 
1468a9643ea8Slogwang static int
14692bfe3f2eSlogwang nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
1470a9643ea8Slogwang {
1471a9643ea8Slogwang 	int ret;
14722bfe3f2eSlogwang 	uint16_t qidx, data_off;
1473a9643ea8Slogwang 	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
1474a9643ea8Slogwang 	uint64_t mbuf_phys_off = 0;
1475a9643ea8Slogwang 	struct nicvf_rxq *rxq;
1476a9643ea8Slogwang 	struct rte_mbuf *mbuf;
14772bfe3f2eSlogwang 	uint16_t rx_start, rx_end;
14782bfe3f2eSlogwang 	uint16_t tx_start, tx_end;
1479d30ea906Sjfb8856606 	int mask;
1480a9643ea8Slogwang 
1481a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1482a9643ea8Slogwang 
1483a9643ea8Slogwang 	/* Userspace process exited without proper shutdown in last run */
1484a9643ea8Slogwang 	if (nicvf_qset_rbdr_active(nic, 0))
14852bfe3f2eSlogwang 		nicvf_vf_stop(dev, nic, false);
14862bfe3f2eSlogwang 
14872bfe3f2eSlogwang 	/* Get queue ranges for this VF */
14882bfe3f2eSlogwang 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1489a9643ea8Slogwang 
1490a9643ea8Slogwang 	/*
1491a9643ea8Slogwang 	 * The thunderx nicvf PMD can support more than one pool per port only when
1492a9643ea8Slogwang 	 * 1) the data payload size is the same across all pools in a given port,
1493a9643ea8Slogwang 	 * AND
1494a9643ea8Slogwang 	 * 2) all mbufs in the pools come from the same hugepage,
1495a9643ea8Slogwang 	 * AND
1496a9643ea8Slogwang 	 * 3) the mbuf metadata size is the same across all pools in a given port.
1497a9643ea8Slogwang 	 *
1498a9643ea8Slogwang 	 * This is to support existing applications that use multiple pools per port,
1499a9643ea8Slogwang 	 * but the purpose of using multiple pools for QoS is not addressed.
1500a9643ea8Slogwang 	 *
1501a9643ea8Slogwang 	 */
1502a9643ea8Slogwang 
1503a9643ea8Slogwang 	/* Validate mempool attributes */
15042bfe3f2eSlogwang 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1505a9643ea8Slogwang 		rxq = dev->data->rx_queues[qidx];
1506a9643ea8Slogwang 		rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
1507a9643ea8Slogwang 		mbuf = rte_pktmbuf_alloc(rxq->pool);
1508a9643ea8Slogwang 		if (mbuf == NULL) {
15092bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
15102bfe3f2eSlogwang 				     "pool=%s",
15112bfe3f2eSlogwang 				     nic->vf_id, qidx, rxq->pool->name);
1512a9643ea8Slogwang 			return -ENOMEM;
1513a9643ea8Slogwang 		}
15142bfe3f2eSlogwang 		data_off = nicvf_mbuff_meta_length(mbuf);
15152bfe3f2eSlogwang 		data_off += RTE_PKTMBUF_HEADROOM;
1516a9643ea8Slogwang 		rte_pktmbuf_free(mbuf);
1517a9643ea8Slogwang 
15182bfe3f2eSlogwang 		if (data_off % RTE_CACHE_LINE_SIZE) {
15192bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
15202bfe3f2eSlogwang 				rxq->pool->name, data_off,
15212bfe3f2eSlogwang 				data_off % RTE_CACHE_LINE_SIZE);
15222bfe3f2eSlogwang 			return -EINVAL;
15232bfe3f2eSlogwang 		}
15242bfe3f2eSlogwang 		rxq->mbuf_phys_off -= data_off;
1525d30ea906Sjfb8856606 		rxq->mbuf_phys_off -= nic->skip_bytes;
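		/*
		 * Illustrative numbers (assuming the default 128-byte
		 * struct rte_mbuf, no mbuf private area, the default
		 * 128-byte RTE_PKTMBUF_HEADROOM and skip_bytes = 0):
		 * data_off is 256, so mbuf_phys_off is rebased by 256
		 * bytes, letting the fast path convert between a buffer
		 * IOVA and its mbuf virtual address with one addition.
		 */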
15262bfe3f2eSlogwang 
1527a9643ea8Slogwang 		if (mbuf_phys_off == 0)
1528a9643ea8Slogwang 			mbuf_phys_off = rxq->mbuf_phys_off;
1529a9643ea8Slogwang 		if (mbuf_phys_off != rxq->mbuf_phys_off) {
15302bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "pool params not same, %s VF%d %"
15312bfe3f2eSlogwang 				     PRIx64, rxq->pool->name, nic->vf_id,
15322bfe3f2eSlogwang 				     mbuf_phys_off);
1533a9643ea8Slogwang 			return -EINVAL;
1534a9643ea8Slogwang 		}
1535a9643ea8Slogwang 	}
1536a9643ea8Slogwang 
1537a9643ea8Slogwang 	/* Check the level of buffers in the pool */
1538a9643ea8Slogwang 	total_rxq_desc = 0;
15392bfe3f2eSlogwang 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1540a9643ea8Slogwang 		rxq = dev->data->rx_queues[qidx];
1541a9643ea8Slogwang 		/* Count total numbers of rxq descs */
1542a9643ea8Slogwang 		total_rxq_desc += rxq->qlen_mask + 1;
1543a9643ea8Slogwang 		exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
15442bfe3f2eSlogwang 		exp_buffs *= dev->data->nb_rx_queues;
1545a9643ea8Slogwang 		if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
1546a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
1547a9643ea8Slogwang 				     rxq->pool->name,
1548a9643ea8Slogwang 				     rte_mempool_avail_count(rxq->pool),
1549a9643ea8Slogwang 				     exp_buffs);
1550a9643ea8Slogwang 			return -ENOENT;
1551a9643ea8Slogwang 		}
1552a9643ea8Slogwang 	}
1553a9643ea8Slogwang 
1554a9643ea8Slogwang 	/* Check RBDR desc overflow */
1555a9643ea8Slogwang 	ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1556a9643ea8Slogwang 	if (ret == 0) {
15572bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce the number of desc "
15582bfe3f2eSlogwang 			     "VF%d", nic->vf_id);
1559a9643ea8Slogwang 		return -ENOMEM;
1560a9643ea8Slogwang 	}
1561a9643ea8Slogwang 
1562a9643ea8Slogwang 	/* Enable qset */
1563a9643ea8Slogwang 	ret = nicvf_qset_config(nic);
1564a9643ea8Slogwang 	if (ret) {
15652bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
15662bfe3f2eSlogwang 			     nic->vf_id);
1567a9643ea8Slogwang 		return ret;
1568a9643ea8Slogwang 	}
1569a9643ea8Slogwang 
1570a9643ea8Slogwang 	/* Allocate RBDR and RBDR ring desc */
1571a9643ea8Slogwang 	nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
15722bfe3f2eSlogwang 	ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
1573a9643ea8Slogwang 	if (ret) {
15742bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr "
15752bfe3f2eSlogwang 			     "VF%d", nic->vf_id);
1576a9643ea8Slogwang 		goto qset_reclaim;
1577a9643ea8Slogwang 	}
1578a9643ea8Slogwang 
1579a9643ea8Slogwang 	/* Enable and configure RBDR registers */
1580a9643ea8Slogwang 	ret = nicvf_qset_rbdr_config(nic, 0);
1581a9643ea8Slogwang 	if (ret) {
15822bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
15832bfe3f2eSlogwang 			     nic->vf_id);
1584a9643ea8Slogwang 		goto qset_rbdr_free;
1585a9643ea8Slogwang 	}
1586a9643ea8Slogwang 
1587a9643ea8Slogwang 	/* Fill rte_mempool buffers in RBDR pool and precharge it */
15882bfe3f2eSlogwang 	ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
15892bfe3f2eSlogwang 					total_rxq_desc);
1590a9643ea8Slogwang 	if (ret) {
15912bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
15922bfe3f2eSlogwang 			     nic->vf_id);
1593a9643ea8Slogwang 		goto qset_rbdr_reclaim;
1594a9643ea8Slogwang 	}
1595a9643ea8Slogwang 
15962bfe3f2eSlogwang 	PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
15972bfe3f2eSlogwang 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
1598a9643ea8Slogwang 
1599a9643ea8Slogwang 	/* Configure VLAN Strip */
1600d30ea906Sjfb8856606 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1601d30ea906Sjfb8856606 		ETH_VLAN_EXTEND_MASK;
1602d30ea906Sjfb8856606 	ret = nicvf_vlan_offload_config(dev, mask);
1603a9643ea8Slogwang 
16042bfe3f2eSlogwang 	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
16052bfe3f2eSlogwang 	 * to a 64-bit memory address. The alignment creates a hole in the mbuf
16062bfe3f2eSlogwang 	 * (between the end of headroom and the packet data start). Newer HW
16072bfe3f2eSlogwang 	 * revisions provide an option to disable this L3 alignment and make
16082bfe3f2eSlogwang 	 * the mbuf layout look more like that of other NICs. For better
16092bfe3f2eSlogwang 	 * application compatibility, disable L3 alignment on the hardware
16102bfe3f2eSlogwang 	 * revisions that support doing so.
16112bfe3f2eSlogwang 	 */
16122bfe3f2eSlogwang 	nicvf_apad_config(nic, false);
16132bfe3f2eSlogwang 
16142bfe3f2eSlogwang 	/* Get queue ranges for this VF */
16152bfe3f2eSlogwang 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
16162bfe3f2eSlogwang 
1617a9643ea8Slogwang 	/* Configure TX queues */
16182bfe3f2eSlogwang 	for (qidx = tx_start; qidx <= tx_end; qidx++) {
16192bfe3f2eSlogwang 		ret = nicvf_vf_start_tx_queue(dev, nic,
16202bfe3f2eSlogwang 			qidx % MAX_SND_QUEUES_PER_QS);
1621a9643ea8Slogwang 		if (ret)
1622a9643ea8Slogwang 			goto start_txq_error;
1623a9643ea8Slogwang 	}
1624a9643ea8Slogwang 
16252bfe3f2eSlogwang 	/* Configure RX queues */
16262bfe3f2eSlogwang 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
16272bfe3f2eSlogwang 		ret = nicvf_vf_start_rx_queue(dev, nic,
16282bfe3f2eSlogwang 			qidx % MAX_RCV_QUEUES_PER_QS);
16292bfe3f2eSlogwang 		if (ret)
16302bfe3f2eSlogwang 			goto start_rxq_error;
16312bfe3f2eSlogwang 	}
16322bfe3f2eSlogwang 
16332bfe3f2eSlogwang 	if (!nic->sqs_mode) {
1634a9643ea8Slogwang 		/* Configure CPI algorithm */
1635a9643ea8Slogwang 		ret = nicvf_configure_cpi(dev);
1636a9643ea8Slogwang 		if (ret)
1637a9643ea8Slogwang 			goto start_txq_error;
1638a9643ea8Slogwang 
16392bfe3f2eSlogwang 		ret = nicvf_mbox_get_rss_size(nic);
16402bfe3f2eSlogwang 		if (ret) {
16412bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "Failed to get rss table size");
16422bfe3f2eSlogwang 			goto qset_rss_error;
16432bfe3f2eSlogwang 		}
16442bfe3f2eSlogwang 
1645a9643ea8Slogwang 		/* Configure RSS */
1646a9643ea8Slogwang 		ret = nicvf_configure_rss(dev);
1647a9643ea8Slogwang 		if (ret)
1648a9643ea8Slogwang 			goto qset_rss_error;
16492bfe3f2eSlogwang 	}
16502bfe3f2eSlogwang 
16512bfe3f2eSlogwang 	/* Done; let the PF turn the BGX's RX and TX switches to the ON position */
16522bfe3f2eSlogwang 	nicvf_mbox_cfg_done(nic);
16532bfe3f2eSlogwang 	return 0;
16542bfe3f2eSlogwang 
16552bfe3f2eSlogwang qset_rss_error:
16562bfe3f2eSlogwang 	nicvf_rss_term(nic);
16572bfe3f2eSlogwang start_rxq_error:
16582bfe3f2eSlogwang 	for (qidx = rx_start; qidx <= rx_end; qidx++)
16592bfe3f2eSlogwang 		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
16602bfe3f2eSlogwang start_txq_error:
16612bfe3f2eSlogwang 	for (qidx = tx_start; qidx <= tx_end; qidx++)
16622bfe3f2eSlogwang 		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
16632bfe3f2eSlogwang qset_rbdr_reclaim:
16642bfe3f2eSlogwang 	nicvf_qset_rbdr_reclaim(nic, 0);
16652bfe3f2eSlogwang 	nicvf_rbdr_release_mbufs(dev, nic);
16662bfe3f2eSlogwang qset_rbdr_free:
16672bfe3f2eSlogwang 	if (nic->rbdr) {
16682bfe3f2eSlogwang 		rte_free(nic->rbdr);
16692bfe3f2eSlogwang 		nic->rbdr = NULL;
16702bfe3f2eSlogwang 	}
16712bfe3f2eSlogwang qset_reclaim:
16722bfe3f2eSlogwang 	nicvf_qset_reclaim(nic);
16732bfe3f2eSlogwang 	return ret;
16742bfe3f2eSlogwang }
16752bfe3f2eSlogwang 
16762bfe3f2eSlogwang static int
16772bfe3f2eSlogwang nicvf_dev_start(struct rte_eth_dev *dev)
16782bfe3f2eSlogwang {
16792bfe3f2eSlogwang 	uint16_t qidx;
16802bfe3f2eSlogwang 	int ret;
16812bfe3f2eSlogwang 	size_t i;
16822bfe3f2eSlogwang 	struct nicvf *nic = nicvf_pmd_priv(dev);
16832bfe3f2eSlogwang 	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
16842bfe3f2eSlogwang 	uint16_t mtu;
16852bfe3f2eSlogwang 	uint32_t buffsz = 0, rbdrsz = 0;
16862bfe3f2eSlogwang 	struct rte_pktmbuf_pool_private *mbp_priv;
16872bfe3f2eSlogwang 	struct nicvf_rxq *rxq;
16882bfe3f2eSlogwang 
16892bfe3f2eSlogwang 	PMD_INIT_FUNC_TRACE();
16902bfe3f2eSlogwang 
16912bfe3f2eSlogwang 	/* This function must be called for a primary device */
16922bfe3f2eSlogwang 	assert_primary(nic);
16932bfe3f2eSlogwang 
16942bfe3f2eSlogwang 	/* Validate RBDR buff size */
16952bfe3f2eSlogwang 	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
16962bfe3f2eSlogwang 		rxq = dev->data->rx_queues[qidx];
16972bfe3f2eSlogwang 		mbp_priv = rte_mempool_get_priv(rxq->pool);
16982bfe3f2eSlogwang 		buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
16992bfe3f2eSlogwang 		if (buffsz % 128) {
17002bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
17012bfe3f2eSlogwang 			return -EINVAL;
17022bfe3f2eSlogwang 		}
17032bfe3f2eSlogwang 		if (rbdrsz == 0)
17042bfe3f2eSlogwang 			rbdrsz = buffsz;
17052bfe3f2eSlogwang 		if (rbdrsz != buffsz) {
17062bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
17072bfe3f2eSlogwang 				     qidx, rbdrsz, buffsz);
17082bfe3f2eSlogwang 			return -EINVAL;
17092bfe3f2eSlogwang 		}
17102bfe3f2eSlogwang 	}
1711a9643ea8Slogwang 
1712a9643ea8Slogwang 	/* Configure loopback */
1713a9643ea8Slogwang 	ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
1714a9643ea8Slogwang 	if (ret) {
1715a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
17162bfe3f2eSlogwang 		return ret;
1717a9643ea8Slogwang 	}
1718a9643ea8Slogwang 
1719a9643ea8Slogwang 	/* Reset all statistics counters attached to this port */
1720a9643ea8Slogwang 	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
1721a9643ea8Slogwang 	if (ret) {
1722a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
17232bfe3f2eSlogwang 		return ret;
1724a9643ea8Slogwang 	}
1725a9643ea8Slogwang 
1726a9643ea8Slogwang 	/* Setup scatter mode if needed by jumbo */
1727a9643ea8Slogwang 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
1728a9643ea8Slogwang 					    2 * VLAN_TAG_SIZE > buffsz)
1729a9643ea8Slogwang 		dev->data->scattered_rx = 1;
1730d30ea906Sjfb8856606 	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
1731a9643ea8Slogwang 		dev->data->scattered_rx = 1;
1732a9643ea8Slogwang 
1733a9643ea8Slogwang 	/* Setup MTU based on max_rx_pkt_len or default */
1734d30ea906Sjfb8856606 	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
1735a9643ea8Slogwang 		dev->data->dev_conf.rxmode.max_rx_pkt_len
17364418919fSjohnjiang 			-  RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;
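	/*
	 * Example: with DEV_RX_OFFLOAD_JUMBO_FRAME set and
	 * max_rx_pkt_len = 9000, the derived MTU is
	 * 9000 - RTE_ETHER_HDR_LEN (14) = 8986; otherwise the standard
	 * RTE_ETHER_MTU of 1500 is used.
	 */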
1737a9643ea8Slogwang 
1738a9643ea8Slogwang 	if (nicvf_dev_set_mtu(dev, mtu)) {
1739a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
1740a9643ea8Slogwang 		return -EBUSY;
1741a9643ea8Slogwang 	}
1742a9643ea8Slogwang 
17432bfe3f2eSlogwang 	ret = nicvf_vf_start(dev, nic, rbdrsz);
17442bfe3f2eSlogwang 	if (ret != 0)
17452bfe3f2eSlogwang 		return ret;
17462bfe3f2eSlogwang 
17472bfe3f2eSlogwang 	for (i = 0; i < nic->sqs_count; i++) {
17482bfe3f2eSlogwang 		assert(nic->snicvf[i]);
17492bfe3f2eSlogwang 
17502bfe3f2eSlogwang 		ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
17512bfe3f2eSlogwang 		if (ret != 0)
17522bfe3f2eSlogwang 			return ret;
17532bfe3f2eSlogwang 	}
17542bfe3f2eSlogwang 
1755d30ea906Sjfb8856606 	/* Configure callbacks based on offloads */
1756a9643ea8Slogwang 	nicvf_set_tx_function(dev);
1757a9643ea8Slogwang 	nicvf_set_rx_function(dev);
1758a9643ea8Slogwang 
1759a9643ea8Slogwang 	return 0;
1760a9643ea8Slogwang }
1761a9643ea8Slogwang 
1762a9643ea8Slogwang static void
17632bfe3f2eSlogwang nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
1764a9643ea8Slogwang {
17652bfe3f2eSlogwang 	size_t i;
1766a9643ea8Slogwang 	int ret;
1767a9643ea8Slogwang 	struct nicvf *nic = nicvf_pmd_priv(dev);
1768a9643ea8Slogwang 
1769a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1770*2d9fd380Sjfb8856606 	dev->data->dev_started = 0;
1771a9643ea8Slogwang 
17722bfe3f2eSlogwang 	/* Tear down secondary VFs first */
17732bfe3f2eSlogwang 	for (i = 0; i < nic->sqs_count; i++) {
17742bfe3f2eSlogwang 		if (!nic->snicvf[i])
17752bfe3f2eSlogwang 			continue;
17762bfe3f2eSlogwang 
17772bfe3f2eSlogwang 		nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
17782bfe3f2eSlogwang 	}
17792bfe3f2eSlogwang 
17802bfe3f2eSlogwang 	/* Stop the primary VF now */
17812bfe3f2eSlogwang 	nicvf_vf_stop(dev, nic, cleanup);
1782a9643ea8Slogwang 
1783a9643ea8Slogwang 	/* Disable loopback */
1784a9643ea8Slogwang 	ret = nicvf_loopback_config(nic, 0);
1785a9643ea8Slogwang 	if (ret)
1786a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
1787a9643ea8Slogwang 
17882bfe3f2eSlogwang 	/* Reclaim CPI configuration */
17892bfe3f2eSlogwang 	ret = nicvf_mbox_config_cpi(nic, 0);
17902bfe3f2eSlogwang 	if (ret)
17912bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
17922bfe3f2eSlogwang }
17932bfe3f2eSlogwang 
1794*2d9fd380Sjfb8856606 static int
17952bfe3f2eSlogwang nicvf_dev_stop(struct rte_eth_dev *dev)
17962bfe3f2eSlogwang {
17972bfe3f2eSlogwang 	PMD_INIT_FUNC_TRACE();
17982bfe3f2eSlogwang 
17992bfe3f2eSlogwang 	nicvf_dev_stop_cleanup(dev, false);
1800*2d9fd380Sjfb8856606 
1801*2d9fd380Sjfb8856606 	return 0;
18022bfe3f2eSlogwang }
18032bfe3f2eSlogwang 
18042bfe3f2eSlogwang static void
18052bfe3f2eSlogwang nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
18062bfe3f2eSlogwang {
18072bfe3f2eSlogwang 	int ret;
18082bfe3f2eSlogwang 	uint16_t qidx;
18092bfe3f2eSlogwang 	uint16_t tx_start, tx_end;
18102bfe3f2eSlogwang 	uint16_t rx_start, rx_end;
18112bfe3f2eSlogwang 
18122bfe3f2eSlogwang 	PMD_INIT_FUNC_TRACE();
18132bfe3f2eSlogwang 
18142bfe3f2eSlogwang 	if (cleanup) {
18152bfe3f2eSlogwang 		/* Let PF make the BGX's RX and TX switches to OFF position */
18162bfe3f2eSlogwang 		nicvf_mbox_shutdown(nic);
18172bfe3f2eSlogwang 	}
18182bfe3f2eSlogwang 
1819a9643ea8Slogwang 	/* Disable VLAN Strip */
1820a9643ea8Slogwang 	nicvf_vlan_hw_strip(nic, 0);
1821a9643ea8Slogwang 
18222bfe3f2eSlogwang 	/* Get queue ranges for this VF */
18232bfe3f2eSlogwang 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
18242bfe3f2eSlogwang 
18252bfe3f2eSlogwang 	for (qidx = tx_start; qidx <= tx_end; qidx++)
18262bfe3f2eSlogwang 		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
18272bfe3f2eSlogwang 
18282bfe3f2eSlogwang 	/* Get queue ranges for this VF */
18292bfe3f2eSlogwang 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1830a9643ea8Slogwang 
1831a9643ea8Slogwang 	/* Reclaim rq */
18322bfe3f2eSlogwang 	for (qidx = rx_start; qidx <= rx_end; qidx++)
18332bfe3f2eSlogwang 		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1834a9643ea8Slogwang 
1835a9643ea8Slogwang 	/* Reclaim RBDR */
1836a9643ea8Slogwang 	ret = nicvf_qset_rbdr_reclaim(nic, 0);
1837a9643ea8Slogwang 	if (ret)
1838a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
1839a9643ea8Slogwang 
1840a9643ea8Slogwang 	/* Move all charged buffers in RBDR back to pool */
1841a9643ea8Slogwang 	if (nic->rbdr != NULL)
18422bfe3f2eSlogwang 		nicvf_rbdr_release_mbufs(dev, nic);
1843a9643ea8Slogwang 
1844a9643ea8Slogwang 	/* Disable qset */
18452bfe3f2eSlogwang 	ret = nicvf_qset_reclaim(nic);
1846a9643ea8Slogwang 	if (ret)
1847a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
1848a9643ea8Slogwang 
1849a9643ea8Slogwang 	/* Disable all interrupts */
1850a9643ea8Slogwang 	nicvf_disable_all_interrupts(nic);
1851a9643ea8Slogwang 
1852a9643ea8Slogwang 	/* Free RBDR SW structure */
1853a9643ea8Slogwang 	if (nic->rbdr) {
1854a9643ea8Slogwang 		rte_free(nic->rbdr);
1855a9643ea8Slogwang 		nic->rbdr = NULL;
1856a9643ea8Slogwang 	}
1857a9643ea8Slogwang }
1858a9643ea8Slogwang 
1859*2d9fd380Sjfb8856606 static int
1860a9643ea8Slogwang nicvf_dev_close(struct rte_eth_dev *dev)
1861a9643ea8Slogwang {
18622bfe3f2eSlogwang 	size_t i;
1863a9643ea8Slogwang 	struct nicvf *nic = nicvf_pmd_priv(dev);
1864a9643ea8Slogwang 
1865a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1866*2d9fd380Sjfb8856606 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1867*2d9fd380Sjfb8856606 		return 0;
1868a9643ea8Slogwang 
18692bfe3f2eSlogwang 	nicvf_dev_stop_cleanup(dev, true);
18702bfe3f2eSlogwang 	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
18712bfe3f2eSlogwang 
18722bfe3f2eSlogwang 	for (i = 0; i < nic->sqs_count; i++) {
18732bfe3f2eSlogwang 		if (!nic->snicvf[i])
18742bfe3f2eSlogwang 			continue;
18752bfe3f2eSlogwang 
18762bfe3f2eSlogwang 		nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
18772bfe3f2eSlogwang 	}
1878*2d9fd380Sjfb8856606 
1879*2d9fd380Sjfb8856606 	return 0;
18802bfe3f2eSlogwang }
18812bfe3f2eSlogwang 
18822bfe3f2eSlogwang static int
18832bfe3f2eSlogwang nicvf_request_sqs(struct nicvf *nic)
18842bfe3f2eSlogwang {
18852bfe3f2eSlogwang 	size_t i;
18862bfe3f2eSlogwang 
18872bfe3f2eSlogwang 	assert_primary(nic);
18882bfe3f2eSlogwang 	assert(nic->sqs_count > 0);
18892bfe3f2eSlogwang 	assert(nic->sqs_count <= MAX_SQS_PER_VF);
18902bfe3f2eSlogwang 
18912bfe3f2eSlogwang 	/* Set the number of Rx/Tx queues in each of the SQ sets */
18922bfe3f2eSlogwang 	for (i = 0; i < nic->sqs_count; i++) {
18932bfe3f2eSlogwang 		if (nicvf_svf_empty())
18942bfe3f2eSlogwang 			rte_panic("Cannot assign sufficient number of "
18952bfe3f2eSlogwang 				  "secondary queues to primary VF%" PRIu8 "\n",
18962bfe3f2eSlogwang 				  nic->vf_id);
18972bfe3f2eSlogwang 
18982bfe3f2eSlogwang 		nic->snicvf[i] = nicvf_svf_pop();
18992bfe3f2eSlogwang 		nic->snicvf[i]->sqs_id = i;
19002bfe3f2eSlogwang 	}
19012bfe3f2eSlogwang 
19022bfe3f2eSlogwang 	return nicvf_mbox_request_sqs(nic);
1903a9643ea8Slogwang }
1904a9643ea8Slogwang 
1905a9643ea8Slogwang static int
1906a9643ea8Slogwang nicvf_dev_configure(struct rte_eth_dev *dev)
1907a9643ea8Slogwang {
19082bfe3f2eSlogwang 	struct rte_eth_dev_data *data = dev->data;
19092bfe3f2eSlogwang 	struct rte_eth_conf *conf = &data->dev_conf;
1910a9643ea8Slogwang 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
1911a9643ea8Slogwang 	struct rte_eth_txmode *txmode = &conf->txmode;
1912a9643ea8Slogwang 	struct nicvf *nic = nicvf_pmd_priv(dev);
19132bfe3f2eSlogwang 	uint8_t cqcount;
1914a9643ea8Slogwang 
1915a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1916a9643ea8Slogwang 
19174418919fSjohnjiang 	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
19184418919fSjohnjiang 		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
19194418919fSjohnjiang 
1920a9643ea8Slogwang 	if (!rte_eal_has_hugepages()) {
1921a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "Huge page is not configured");
1922a9643ea8Slogwang 		return -EINVAL;
1923a9643ea8Slogwang 	}
1924a9643ea8Slogwang 
1925a9643ea8Slogwang 	if (txmode->mq_mode) {
1926a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1927a9643ea8Slogwang 		return -EINVAL;
1928a9643ea8Slogwang 	}
1929a9643ea8Slogwang 
1930a9643ea8Slogwang 	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1931a9643ea8Slogwang 		rxmode->mq_mode != ETH_MQ_RX_RSS) {
1932a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
1933a9643ea8Slogwang 		return -EINVAL;
1934a9643ea8Slogwang 	}
1935a9643ea8Slogwang 
1936a9643ea8Slogwang 	if (rxmode->split_hdr_size) {
1937a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1938a9643ea8Slogwang 		return -EINVAL;
1939a9643ea8Slogwang 	}
1940a9643ea8Slogwang 
1941a9643ea8Slogwang 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1942a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1943a9643ea8Slogwang 		return -EINVAL;
1944a9643ea8Slogwang 	}
1945a9643ea8Slogwang 
1946a9643ea8Slogwang 	if (conf->dcb_capability_en) {
1947a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "DCB enable not supported");
1948a9643ea8Slogwang 		return -EINVAL;
1949a9643ea8Slogwang 	}
1950a9643ea8Slogwang 
1951a9643ea8Slogwang 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1952a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "Flow director not supported");
1953a9643ea8Slogwang 		return -EINVAL;
1954a9643ea8Slogwang 	}
1955a9643ea8Slogwang 
19562bfe3f2eSlogwang 	assert_primary(nic);
19572bfe3f2eSlogwang 	NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
19582bfe3f2eSlogwang 	cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
19592bfe3f2eSlogwang 	if (cqcount > MAX_RCV_QUEUES_PER_QS) {
19602bfe3f2eSlogwang 		nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
19612bfe3f2eSlogwang 		nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
19622bfe3f2eSlogwang 	} else {
19632bfe3f2eSlogwang 		nic->sqs_count = 0;
19642bfe3f2eSlogwang 	}
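	/*
	 * Worked example of the qset math above (assuming
	 * MAX_RCV_QUEUES_PER_QS is 8): 20 configured queues round up to 24,
	 * giving 24 / 8 - 1 = 2 secondary qsets on top of the primary VF.
	 */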
19652bfe3f2eSlogwang 
19662bfe3f2eSlogwang 	assert(nic->sqs_count <= MAX_SQS_PER_VF);
19672bfe3f2eSlogwang 
19682bfe3f2eSlogwang 	if (nic->sqs_count > 0) {
19692bfe3f2eSlogwang 		if (nicvf_request_sqs(nic)) {
19702bfe3f2eSlogwang 			rte_panic("Cannot assign sufficient number of "
19712bfe3f2eSlogwang 				  "secondary queues to PORT%d VF%" PRIu8 "\n",
19722bfe3f2eSlogwang 				  dev->data->port_id, nic->vf_id);
19732bfe3f2eSlogwang 		}
19742bfe3f2eSlogwang 	}
19752bfe3f2eSlogwang 
1976d30ea906Sjfb8856606 	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
1977d30ea906Sjfb8856606 		nic->offload_cksum = 1;
1978d30ea906Sjfb8856606 
1979a9643ea8Slogwang 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
1980a9643ea8Slogwang 		dev->data->port_id, nicvf_hw_cap(nic));
1981a9643ea8Slogwang 
1982a9643ea8Slogwang 	return 0;
1983a9643ea8Slogwang }
1984a9643ea8Slogwang 
1985*2d9fd380Sjfb8856606 static int
1986*2d9fd380Sjfb8856606 nicvf_dev_set_link_up(struct rte_eth_dev *dev)
1987*2d9fd380Sjfb8856606 {
1988*2d9fd380Sjfb8856606 	struct nicvf *nic = nicvf_pmd_priv(dev);
1989*2d9fd380Sjfb8856606 	int rc, i;
1990*2d9fd380Sjfb8856606 
1991*2d9fd380Sjfb8856606 	rc = nicvf_mbox_set_link_up_down(nic, true);
1992*2d9fd380Sjfb8856606 	if (rc)
1993*2d9fd380Sjfb8856606 		goto done;
1994*2d9fd380Sjfb8856606 
1995*2d9fd380Sjfb8856606 	/* Start tx queues  */
1996*2d9fd380Sjfb8856606 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1997*2d9fd380Sjfb8856606 		nicvf_dev_tx_queue_start(dev, i);
1998*2d9fd380Sjfb8856606 
1999*2d9fd380Sjfb8856606 done:
2000*2d9fd380Sjfb8856606 	return rc;
2001*2d9fd380Sjfb8856606 }
2002*2d9fd380Sjfb8856606 
2003*2d9fd380Sjfb8856606 static int
2004*2d9fd380Sjfb8856606 nicvf_dev_set_link_down(struct rte_eth_dev *dev)
2005*2d9fd380Sjfb8856606 {
2006*2d9fd380Sjfb8856606 	struct nicvf *nic = nicvf_pmd_priv(dev);
2007*2d9fd380Sjfb8856606 	int i;
2008*2d9fd380Sjfb8856606 
2009*2d9fd380Sjfb8856606 	/* Stop tx queues  */
2010*2d9fd380Sjfb8856606 	for (i = 0; i < dev->data->nb_tx_queues; i++)
2011*2d9fd380Sjfb8856606 		nicvf_dev_tx_queue_stop(dev, i);
2012*2d9fd380Sjfb8856606 
2013*2d9fd380Sjfb8856606 	return nicvf_mbox_set_link_up_down(nic, false);
2014*2d9fd380Sjfb8856606 }
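
/*
 * Note: these hooks back rte_eth_dev_set_link_up()/_down(); besides
 * toggling the link through the PF mailbox they start or stop every Tx
 * queue so nothing is queued against a downed link.
 */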
2015*2d9fd380Sjfb8856606 
2016a9643ea8Slogwang /* Initialize and register the driver with the DPDK application */
2017a9643ea8Slogwang static const struct eth_dev_ops nicvf_eth_dev_ops = {
2018a9643ea8Slogwang 	.dev_configure            = nicvf_dev_configure,
2019a9643ea8Slogwang 	.dev_start                = nicvf_dev_start,
2020a9643ea8Slogwang 	.dev_stop                 = nicvf_dev_stop,
2021a9643ea8Slogwang 	.link_update              = nicvf_dev_link_update,
2022a9643ea8Slogwang 	.dev_close                = nicvf_dev_close,
2023a9643ea8Slogwang 	.stats_get                = nicvf_dev_stats_get,
2024a9643ea8Slogwang 	.stats_reset              = nicvf_dev_stats_reset,
2025a9643ea8Slogwang 	.promiscuous_enable       = nicvf_dev_promisc_enable,
2026a9643ea8Slogwang 	.dev_infos_get            = nicvf_dev_info_get,
2027a9643ea8Slogwang 	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
2028a9643ea8Slogwang 	.mtu_set                  = nicvf_dev_set_mtu,
2029d30ea906Sjfb8856606 	.vlan_offload_set         = nicvf_vlan_offload_set,
2030a9643ea8Slogwang 	.reta_update              = nicvf_dev_reta_update,
2031a9643ea8Slogwang 	.reta_query               = nicvf_dev_reta_query,
2032a9643ea8Slogwang 	.rss_hash_update          = nicvf_dev_rss_hash_update,
2033a9643ea8Slogwang 	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
2034a9643ea8Slogwang 	.rx_queue_start           = nicvf_dev_rx_queue_start,
2035a9643ea8Slogwang 	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
2036a9643ea8Slogwang 	.tx_queue_start           = nicvf_dev_tx_queue_start,
2037a9643ea8Slogwang 	.tx_queue_stop            = nicvf_dev_tx_queue_stop,
2038a9643ea8Slogwang 	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
2039a9643ea8Slogwang 	.rx_queue_release         = nicvf_dev_rx_queue_release,
2040a9643ea8Slogwang 	.tx_queue_setup           = nicvf_dev_tx_queue_setup,
2041a9643ea8Slogwang 	.tx_queue_release         = nicvf_dev_tx_queue_release,
2042*2d9fd380Sjfb8856606 	.dev_set_link_up          = nicvf_dev_set_link_up,
2043*2d9fd380Sjfb8856606 	.dev_set_link_down        = nicvf_dev_set_link_down,
2044a9643ea8Slogwang 	.get_reg                  = nicvf_dev_get_regs,
2045a9643ea8Slogwang };
2046a9643ea8Slogwang 
2047a9643ea8Slogwang static int
2048d30ea906Sjfb8856606 nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
2049d30ea906Sjfb8856606 {
2050d30ea906Sjfb8856606 	struct rte_eth_rxmode *rxmode;
2051d30ea906Sjfb8856606 	struct nicvf *nic = nicvf_pmd_priv(dev);
2052d30ea906Sjfb8856606 	rxmode = &dev->data->dev_conf.rxmode;
2053d30ea906Sjfb8856606 	if (mask & ETH_VLAN_STRIP_MASK) {
2054d30ea906Sjfb8856606 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2055d30ea906Sjfb8856606 			nicvf_vlan_hw_strip(nic, true);
2056d30ea906Sjfb8856606 		else
2057d30ea906Sjfb8856606 			nicvf_vlan_hw_strip(nic, false);
2058d30ea906Sjfb8856606 	}
2059d30ea906Sjfb8856606 
2060d30ea906Sjfb8856606 	return 0;
2061d30ea906Sjfb8856606 }
2062d30ea906Sjfb8856606 
2063d30ea906Sjfb8856606 static int
2064d30ea906Sjfb8856606 nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2065d30ea906Sjfb8856606 {
2066d30ea906Sjfb8856606 	nicvf_vlan_offload_config(dev, mask);
2067d30ea906Sjfb8856606 
2068d30ea906Sjfb8856606 	return 0;
2069d30ea906Sjfb8856606 }
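
/*
 * Applications reach the handler above via rte_eth_dev_set_vlan_offload();
 * only the ETH_VLAN_STRIP_MASK bit is acted upon here, while the filter
 * and extend bits are accepted but ignored by this PMD.
 */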
2070d30ea906Sjfb8856606 
2071d30ea906Sjfb8856606 static inline int
2072d30ea906Sjfb8856606 nicvf_set_first_skip(struct rte_eth_dev *dev)
2073d30ea906Sjfb8856606 {
2074d30ea906Sjfb8856606 	int bytes_to_skip = 0;
2075d30ea906Sjfb8856606 	int ret = 0;
2076d30ea906Sjfb8856606 	unsigned int i;
2077d30ea906Sjfb8856606 	struct rte_kvargs *kvlist;
2078d30ea906Sjfb8856606 	static const char *const skip[] = {
2079d30ea906Sjfb8856606 		SKIP_DATA_BYTES,
2080d30ea906Sjfb8856606 		NULL};
2081d30ea906Sjfb8856606 	struct nicvf *nic = nicvf_pmd_priv(dev);
2082d30ea906Sjfb8856606 
2083d30ea906Sjfb8856606 	if (!dev->device->devargs) {
2084d30ea906Sjfb8856606 		nicvf_first_skip_config(nic, 0);
2085d30ea906Sjfb8856606 		return ret;
2086d30ea906Sjfb8856606 	}
2087d30ea906Sjfb8856606 
2088d30ea906Sjfb8856606 	kvlist = rte_kvargs_parse(dev->device->devargs->args, skip);
2089d30ea906Sjfb8856606 	if (!kvlist)
2090d30ea906Sjfb8856606 		return -EINVAL;
2091d30ea906Sjfb8856606 
2092d30ea906Sjfb8856606 	if (kvlist->count == 0)
2093d30ea906Sjfb8856606 		goto exit;
2094d30ea906Sjfb8856606 
2095d30ea906Sjfb8856606 	for (i = 0; i != kvlist->count; ++i) {
2096d30ea906Sjfb8856606 		const struct rte_kvargs_pair *pair = &kvlist->pairs[i];
2097d30ea906Sjfb8856606 
2098d30ea906Sjfb8856606 		if (!strcmp(pair->key, SKIP_DATA_BYTES))
2099d30ea906Sjfb8856606 			bytes_to_skip = atoi(pair->value);
2100d30ea906Sjfb8856606 	}
2101d30ea906Sjfb8856606 
2102d30ea906Sjfb8856606 	/* 128 bytes amounts to one cache line */
2103d30ea906Sjfb8856606 	if (bytes_to_skip >= 0 && bytes_to_skip < 128) {
2104d30ea906Sjfb8856606 		if (!(bytes_to_skip % 8)) {
2105d30ea906Sjfb8856606 			nicvf_first_skip_config(nic, (bytes_to_skip / 8));
2106d30ea906Sjfb8856606 			nic->skip_bytes = bytes_to_skip;
2107d30ea906Sjfb8856606 			goto kvlist_free;
2108d30ea906Sjfb8856606 		} else {
2109d30ea906Sjfb8856606 			PMD_INIT_LOG(ERR, "skip_data_bytes should be multiple of 8");
2110d30ea906Sjfb8856606 			ret = -EINVAL;
2111d30ea906Sjfb8856606 			goto exit;
2112d30ea906Sjfb8856606 		}
2113d30ea906Sjfb8856606 	} else {
2114d30ea906Sjfb8856606 		PMD_INIT_LOG(ERR, "skip_data_bytes should be less than 128");
2115d30ea906Sjfb8856606 		ret = -EINVAL;
2116d30ea906Sjfb8856606 		goto exit;
2117d30ea906Sjfb8856606 	}
2118d30ea906Sjfb8856606 exit:
2119d30ea906Sjfb8856606 	nicvf_first_skip_config(nic, 0);
2120d30ea906Sjfb8856606 kvlist_free:
2121d30ea906Sjfb8856606 	rte_kvargs_free(kvlist);
2122d30ea906Sjfb8856606 	return ret;
2123d30ea906Sjfb8856606 }
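
/*
 * Hypothetical usage sketch: the skip_data_bytes devarg parsed above is
 * passed on the EAL command line when allowing the device, e.g. (PCI
 * address purely illustrative):
 *
 *	dpdk-testpmd -a 0002:01:00.2,skip_data_bytes=8 -- -i
 *
 * Per the checks above, the value must be a non-negative multiple of 8
 * below 128 (one cache line).
 */
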
2124d30ea906Sjfb8856606 static int
21254b05018fSfengbojiang nicvf_eth_dev_uninit(struct rte_eth_dev *dev)
21264b05018fSfengbojiang {
21274b05018fSfengbojiang 	PMD_INIT_FUNC_TRACE();
21284b05018fSfengbojiang 	nicvf_dev_close(dev);
21294b05018fSfengbojiang 	return 0;
21304b05018fSfengbojiang }
21314b05018fSfengbojiang static int
2132a9643ea8Slogwang nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
2133a9643ea8Slogwang {
2134a9643ea8Slogwang 	int ret;
2135a9643ea8Slogwang 	struct rte_pci_device *pci_dev;
2136a9643ea8Slogwang 	struct nicvf *nic = nicvf_pmd_priv(eth_dev);
2137a9643ea8Slogwang 
2138a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
2139a9643ea8Slogwang 
2140a9643ea8Slogwang 	eth_dev->dev_ops = &nicvf_eth_dev_ops;
2141*2d9fd380Sjfb8856606 	eth_dev->rx_queue_count = nicvf_dev_rx_queue_count;
2142a9643ea8Slogwang 
2143a9643ea8Slogwang 	/* For secondary processes, the primary has done all the work */
2144a9643ea8Slogwang 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
21452bfe3f2eSlogwang 		if (nic) {
2146a9643ea8Slogwang 			/* Setup callbacks for secondary process */
2147a9643ea8Slogwang 			nicvf_set_tx_function(eth_dev);
2148a9643ea8Slogwang 			nicvf_set_rx_function(eth_dev);
2149a9643ea8Slogwang 			return 0;
21502bfe3f2eSlogwang 		} else {
21512bfe3f2eSlogwang 			/* If nic == NULL then this is a secondary VF, so the
21522bfe3f2eSlogwang 			 * ethdev needs to be released by the caller */
21532bfe3f2eSlogwang 			return ENOTSUP;
21542bfe3f2eSlogwang 		}
2155a9643ea8Slogwang 	}
2156a9643ea8Slogwang 
21572bfe3f2eSlogwang 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2158a9643ea8Slogwang 	rte_eth_copy_pci_info(eth_dev, pci_dev);
2159*2d9fd380Sjfb8856606 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2160a9643ea8Slogwang 
2161a9643ea8Slogwang 	nic->device_id = pci_dev->id.device_id;
2162a9643ea8Slogwang 	nic->vendor_id = pci_dev->id.vendor_id;
2163a9643ea8Slogwang 	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
2164a9643ea8Slogwang 	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2165a9643ea8Slogwang 
2166a9643ea8Slogwang 	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
2167a9643ea8Slogwang 			pci_dev->id.vendor_id, pci_dev->id.device_id,
2168a9643ea8Slogwang 			pci_dev->addr.domain, pci_dev->addr.bus,
2169a9643ea8Slogwang 			pci_dev->addr.devid, pci_dev->addr.function);
2170a9643ea8Slogwang 
2171a9643ea8Slogwang 	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
2172a9643ea8Slogwang 	if (!nic->reg_base) {
2173a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to map BAR0");
2174a9643ea8Slogwang 		ret = -ENODEV;
2175a9643ea8Slogwang 		goto fail;
2176a9643ea8Slogwang 	}
2177a9643ea8Slogwang 
2178a9643ea8Slogwang 	nicvf_disable_all_interrupts(nic);
2179a9643ea8Slogwang 
21802bfe3f2eSlogwang 	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
2181a9643ea8Slogwang 	if (ret) {
2182a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to start period alarm");
2183a9643ea8Slogwang 		goto fail;
2184a9643ea8Slogwang 	}
2185a9643ea8Slogwang 
2186a9643ea8Slogwang 	ret = nicvf_mbox_check_pf_ready(nic);
2187a9643ea8Slogwang 	if (ret) {
2188a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
2189a9643ea8Slogwang 		goto alarm_fail;
2190a9643ea8Slogwang 	} else {
2191a9643ea8Slogwang 		PMD_INIT_LOG(INFO,
2192a9643ea8Slogwang 			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
2193a9643ea8Slogwang 			nic->node, nic->vf_id,
2194a9643ea8Slogwang 			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
2195a9643ea8Slogwang 			nic->sqs_mode ? "true" : "false",
2196a9643ea8Slogwang 			nic->loopback_supported ? "true" : "false"
2197a9643ea8Slogwang 			);
2198a9643ea8Slogwang 	}
2199a9643ea8Slogwang 
22002bfe3f2eSlogwang 	ret = nicvf_base_init(nic);
22012bfe3f2eSlogwang 	if (ret) {
22022bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
22032bfe3f2eSlogwang 		goto malloc_fail;
22042bfe3f2eSlogwang 	}
22052bfe3f2eSlogwang 
2206a9643ea8Slogwang 	if (nic->sqs_mode) {
22072bfe3f2eSlogwang 		/* Push nic to stack of secondary vfs */
22082bfe3f2eSlogwang 		nicvf_svf_push(nic);
22092bfe3f2eSlogwang 
22102bfe3f2eSlogwang 		/* Steal nic pointer from the device for further reuse */
22112bfe3f2eSlogwang 		eth_dev->data->dev_private = NULL;
22122bfe3f2eSlogwang 
22132bfe3f2eSlogwang 		nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
22142bfe3f2eSlogwang 		ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
22152bfe3f2eSlogwang 		if (ret) {
22162bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "Failed to start period alarm");
22172bfe3f2eSlogwang 			goto fail;
22182bfe3f2eSlogwang 		}
22192bfe3f2eSlogwang 
22202bfe3f2eSlogwang 		/* Detach port by returning positive error number */
22212bfe3f2eSlogwang 		return ENOTSUP;
2222a9643ea8Slogwang 	}
2223a9643ea8Slogwang 
22244418919fSjohnjiang 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
22254418919fSjohnjiang 					RTE_ETHER_ADDR_LEN, 0);
2226a9643ea8Slogwang 	if (eth_dev->data->mac_addrs == NULL) {
2227a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
2228a9643ea8Slogwang 		ret = -ENOMEM;
2229a9643ea8Slogwang 		goto alarm_fail;
2230a9643ea8Slogwang 	}
22314418919fSjohnjiang 	if (rte_is_zero_ether_addr((struct rte_ether_addr *)nic->mac_addr))
22324418919fSjohnjiang 		rte_eth_random_addr(&nic->mac_addr[0]);
2233a9643ea8Slogwang 
22344418919fSjohnjiang 	rte_ether_addr_copy((struct rte_ether_addr *)nic->mac_addr,
2235a9643ea8Slogwang 			&eth_dev->data->mac_addrs[0]);
2236a9643ea8Slogwang 
2237a9643ea8Slogwang 	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
2238a9643ea8Slogwang 	if (ret) {
2239a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to set mac addr");
2240a9643ea8Slogwang 		goto malloc_fail;
2241a9643ea8Slogwang 	}
2242a9643ea8Slogwang 
2243d30ea906Sjfb8856606 	ret = nicvf_set_first_skip(eth_dev);
2244d30ea906Sjfb8856606 	if (ret) {
2245d30ea906Sjfb8856606 		PMD_INIT_LOG(ERR, "Failed to configure first skip");
2246d30ea906Sjfb8856606 		goto malloc_fail;
2247d30ea906Sjfb8856606 	}
2248a9643ea8Slogwang 	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
2249a9643ea8Slogwang 		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
2250a9643ea8Slogwang 		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
2251a9643ea8Slogwang 		nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
2252a9643ea8Slogwang 
2253a9643ea8Slogwang 	return 0;
2254a9643ea8Slogwang 
2255a9643ea8Slogwang malloc_fail:
2256a9643ea8Slogwang 	rte_free(eth_dev->data->mac_addrs);
22574b05018fSfengbojiang 	eth_dev->data->mac_addrs = NULL;
2258a9643ea8Slogwang alarm_fail:
22592bfe3f2eSlogwang 	nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2260a9643ea8Slogwang fail:
2261a9643ea8Slogwang 	return ret;
2262a9643ea8Slogwang }
2263a9643ea8Slogwang 
2264a9643ea8Slogwang static const struct rte_pci_id pci_id_nicvf_map[] = {
2265a9643ea8Slogwang 	{
2266a9643ea8Slogwang 		.class_id = RTE_CLASS_ANY_ID,
2267a9643ea8Slogwang 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
22682bfe3f2eSlogwang 		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
2269a9643ea8Slogwang 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
22702bfe3f2eSlogwang 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
2271a9643ea8Slogwang 	},
2272a9643ea8Slogwang 	{
2273a9643ea8Slogwang 		.class_id = RTE_CLASS_ANY_ID,
2274a9643ea8Slogwang 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
22752bfe3f2eSlogwang 		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2276a9643ea8Slogwang 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
22772bfe3f2eSlogwang 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
22782bfe3f2eSlogwang 	},
22792bfe3f2eSlogwang 	{
22802bfe3f2eSlogwang 		.class_id = RTE_CLASS_ANY_ID,
22812bfe3f2eSlogwang 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
22822bfe3f2eSlogwang 		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
22832bfe3f2eSlogwang 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
22842bfe3f2eSlogwang 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
22852bfe3f2eSlogwang 	},
22862bfe3f2eSlogwang 	{
22872bfe3f2eSlogwang 		.class_id = RTE_CLASS_ANY_ID,
22882bfe3f2eSlogwang 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
22892bfe3f2eSlogwang 		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
22902bfe3f2eSlogwang 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
22912bfe3f2eSlogwang 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
2292a9643ea8Slogwang 	},
2293a9643ea8Slogwang 	{
2294a9643ea8Slogwang 		.vendor_id = 0,
2295a9643ea8Slogwang 	},
2296a9643ea8Slogwang };
2297a9643ea8Slogwang 
22982bfe3f2eSlogwang static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
22992bfe3f2eSlogwang 	struct rte_pci_device *pci_dev)
2300a9643ea8Slogwang {
23012bfe3f2eSlogwang 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
23022bfe3f2eSlogwang 		nicvf_eth_dev_init);
2303a9643ea8Slogwang }
2304a9643ea8Slogwang 
23052bfe3f2eSlogwang static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
23062bfe3f2eSlogwang {
23074b05018fSfengbojiang 	return rte_eth_dev_pci_generic_remove(pci_dev, nicvf_eth_dev_uninit);
23082bfe3f2eSlogwang }
23092bfe3f2eSlogwang 
23102bfe3f2eSlogwang static struct rte_pci_driver rte_nicvf_pmd = {
23112bfe3f2eSlogwang 	.id_table = pci_id_nicvf_map,
23122bfe3f2eSlogwang 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
23132bfe3f2eSlogwang 			RTE_PCI_DRV_INTR_LSC,
23142bfe3f2eSlogwang 	.probe = nicvf_eth_pci_probe,
23152bfe3f2eSlogwang 	.remove = nicvf_eth_pci_remove,
2316a9643ea8Slogwang };
2317a9643ea8Slogwang 
23182bfe3f2eSlogwang RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
23192bfe3f2eSlogwang RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
23202bfe3f2eSlogwang RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
2321d30ea906Sjfb8856606 RTE_PMD_REGISTER_PARAM_STRING(net_thunderx, SKIP_DATA_BYTES "=<int>");
2322