/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
#define ETH_NULL_PACKET_NO_RX_ARG	"no-rx"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};

struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

RTE_LOG_REGISTER(eth_null_logtype, pmd.net.null, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

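/*
 * "Received" packets are simply mbufs allocated from the queue's mempool;
 * their payload is left untouched, only lengths, port and counters are set.
 */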
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

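/*
 * Same as eth_null_rx(), but additionally copies the queue's dummy packet
 * into each mbuf's data area (selected with the "copy" devarg).
 */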
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

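/* Rx burst used with the "no-rx" devarg: the port never receives anything. */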
static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}

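/* Tx burst: every mbuf is freed immediately and only counted. */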
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

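/*
 * Same as eth_null_tx(), but the packet data is first copied into the
 * queue's dummy buffer, so the payload is actually read before the mbuf
 * is dropped.
 */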
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return 0;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

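/*
 * Queue setup only records the mempool and allocates a per-queue dummy
 * packet buffer of the configured packet size; no hardware is involved.
 */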
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

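/* Tx queue setup mirrors Rx queue setup, minus the mempool. */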
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}

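/*
 * Per-queue counters are aggregated into the port totals; only the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues are reported individually.
 */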
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
			rte_socket_id());

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

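/* Control-path callbacks exposed to the ethdev layer. */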
static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

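/*
 * Allocate the ethdev, fill pmd_internals from the parsed devargs and
 * select the Rx/Tx burst functions matching the copy/no-rx options.
 */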
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int no_rx;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	no_rx = (unsigned int)strtoul(a, NULL, 0);
	if (no_rx != 0 && no_rx != 1)
		return -1;

	*(unsigned int *)extra_args = no_rx;
	return 0;
}

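/*
 * Probe entry point: in secondary processes the existing port is attached
 * and only the burst functions are re-selected; in the primary process the
 * devargs are parsed and a new null ethdev is created.
 */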
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"Both %s and %s arguments at the same time not supported",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -EINVAL;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");
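
/*
 * Illustrative usage (not part of this file): the driver is instantiated
 * through EAL --vdev arguments, e.g.
 *
 *   testpmd -l 0-1 --vdev=net_null0 --vdev=net_null1,size=128,copy=1 -- -i
 *
 * "size" sets the synthetic packet length, "copy" enables the memcpy
 * Rx/Tx paths and "no-rx" turns the port into a pure sink that never
 * receives anything.
 */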