xref: /f-stack/dpdk/lib/librte_ethdev/rte_ethdev.c (revision 5edfaa42)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <inttypes.h>
16 #include <netinet/in.h>
17 
18 #include <rte_byteorder.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_eal.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37 #include <rte_kvargs.h>
38 #include <rte_class.h>
39 #include <rte_ether.h>
40 #include <rte_telemetry.h>
41 
42 #include "rte_ethdev_trace.h"
43 #include "rte_ethdev.h"
44 #include "rte_ethdev_driver.h"
45 #include "ethdev_profile.h"
46 #include "ethdev_private.h"
47 
48 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
49 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
50 
51 /* spinlock for eth device callbacks */
52 static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
53 
54 /* spinlock for add/remove rx callbacks */
55 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
56 
57 /* spinlock for add/remove tx callbacks */
58 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
59 
60 /* spinlock for shared data allocation */
61 static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
62 
63 /* store statistics names and their offsets in the stats structure */
64 struct rte_eth_xstats_name_off {
65 	char name[RTE_ETH_XSTATS_NAME_SIZE];
66 	unsigned offset;
67 };
68 
69 /* Shared memory between primary and secondary processes. */
70 static struct {
71 	uint64_t next_owner_id;
72 	rte_spinlock_t ownership_lock;
73 	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
74 } *eth_dev_shared_data;
75 
76 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
77 	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
78 	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
79 	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
80 	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
81 	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
82 	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
83 	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
84 	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
85 		rx_nombuf)},
86 };
87 
88 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)
89 
90 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
91 	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
92 	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
93 	{"errors", offsetof(struct rte_eth_stats, q_errors)},
94 };
95 
96 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)
97 
98 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
99 	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
100 	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
101 };
102 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
103 
104 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
105 	{ DEV_RX_OFFLOAD_##_name, #_name }
106 
107 #define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
108 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
109 
110 static const struct {
111 	uint64_t offload;
112 	const char *name;
113 } eth_dev_rx_offload_names[] = {
114 	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
115 	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
116 	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
117 	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
118 	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
119 	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
120 	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
121 	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
122 	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
123 	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
124 	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
125 	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
126 	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
127 	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
128 	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
129 	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
130 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
131 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
132 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
133 	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
134 };
135 
136 #undef RTE_RX_OFFLOAD_BIT2STR
137 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
138 
139 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
140 	{ DEV_TX_OFFLOAD_##_name, #_name }
141 
142 static const struct {
143 	uint64_t offload;
144 	const char *name;
145 } eth_dev_tx_offload_names[] = {
146 	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
147 	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
148 	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
149 	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
150 	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
151 	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
152 	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
153 	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
154 	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
155 	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
156 	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
157 	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
158 	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
159 	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
160 	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
161 	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
162 	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
163 	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
164 	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
165 	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
166 	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
167 	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
168 };
169 
170 #undef RTE_TX_OFFLOAD_BIT2STR
171 
172 /**
173  * The user application callback description.
174  *
175  * It contains callback address to be registered by user application,
176  * the pointer to the parameters for callback, and the event type.
177  */
178 struct rte_eth_dev_callback {
179 	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
180 	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
181 	void *cb_arg;                           /**< Parameter for callback */
182 	void *ret_param;                        /**< Return parameter */
183 	enum rte_eth_event_type event;          /**< Interrupt event type */
184 	uint32_t active;                        /**< Callback is executing */
185 };
186 
187 enum {
188 	STAT_QMAP_TX = 0,
189 	STAT_QMAP_RX
190 };
191 
192 int
193 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
194 {
195 	int ret;
196 	struct rte_devargs devargs = {.args = NULL};
197 	const char *bus_param_key;
198 	char *bus_str = NULL;
199 	char *cls_str = NULL;
200 	int str_size;
201 
202 	memset(iter, 0, sizeof(*iter));
203 
204 	/*
205 	 * The devargs string may use various syntaxes:
206 	 *   - 0000:08:00.0,representor=[1-3]
207 	 *   - pci:0000:06:00.0,representor=[0,5]
208 	 *   - class=eth,mac=00:11:22:33:44:55
209 	 * A new syntax is in development (not yet supported):
210 	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
211 	 */
212 
213 	/*
214 	 * Handle pure class filter (i.e. without any bus-level argument),
215 	 * from future new syntax.
216 	 * rte_devargs_parse() does not yet support the new syntax,
217 	 * which is why this simple case is temporarily parsed here.
218 	 */
219 #define iter_anybus_str "class=eth,"
220 	if (strncmp(devargs_str, iter_anybus_str,
221 			strlen(iter_anybus_str)) == 0) {
222 		iter->cls_str = devargs_str + strlen(iter_anybus_str);
223 		goto end;
224 	}
225 
226 	/* Split bus, device and parameters. */
227 	ret = rte_devargs_parse(&devargs, devargs_str);
228 	if (ret != 0)
229 		goto error;
230 
231 	/*
232 	 * Assume parameters of old syntax can match only at ethdev level.
233 	 * Extra parameters will be ignored, thanks to "+" prefix.
234 	 */
235 	str_size = strlen(devargs.args) + 2;
236 	cls_str = malloc(str_size);
237 	if (cls_str == NULL) {
238 		ret = -ENOMEM;
239 		goto error;
240 	}
241 	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
242 	if (ret != str_size - 1) {
243 		ret = -EINVAL;
244 		goto error;
245 	}
246 	iter->cls_str = cls_str;
247 	free(devargs.args); /* allocated by rte_devargs_parse() */
248 	devargs.args = NULL;
249 
250 	iter->bus = devargs.bus;
251 	if (iter->bus->dev_iterate == NULL) {
252 		ret = -ENOTSUP;
253 		goto error;
254 	}
255 
256 	/* Convert bus args to new syntax for use with new API dev_iterate. */
257 	if (strcmp(iter->bus->name, "vdev") == 0) {
258 		bus_param_key = "name";
259 	} else if (strcmp(iter->bus->name, "pci") == 0) {
260 		bus_param_key = "addr";
261 	} else {
262 		ret = -ENOTSUP;
263 		goto error;
264 	}
265 	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
266 	bus_str = malloc(str_size);
267 	if (bus_str == NULL) {
268 		ret = -ENOMEM;
269 		goto error;
270 	}
271 	ret = snprintf(bus_str, str_size, "%s=%s",
272 			bus_param_key, devargs.name);
273 	if (ret != str_size - 1) {
274 		ret = -EINVAL;
275 		goto error;
276 	}
277 	iter->bus_str = bus_str;
278 
279 end:
280 	iter->cls = rte_class_find_by_name("eth");
281 	return 0;
282 
283 error:
284 	if (ret == -ENOTSUP)
285 		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
286 				iter->bus->name);
287 	free(devargs.args);
288 	free(bus_str);
289 	free(cls_str);
290 	return ret;
291 }
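
/*
 * Illustrative usage sketch (not part of the original file): iterating
 * over the ethdev ports matching a devargs string, using one of the
 * syntaxes documented above. The MAC value is a made-up example.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (id = rte_eth_iterator_next(&iter);
 *		     id != RTE_MAX_ETHPORTS;
 *		     id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", id);
 *	}
 *
 * rte_eth_iterator_cleanup() is only required when the loop is left early;
 * a completed iteration releases the iterator internally (see
 * rte_eth_iterator_next() below).
 */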
292 
293 uint16_t
294 rte_eth_iterator_next(struct rte_dev_iterator *iter)
295 {
296 	if (iter->cls == NULL) /* invalid ethdev iterator */
297 		return RTE_MAX_ETHPORTS;
298 
299 	do { /* loop to try all matching rte_device */
300 		/* If not pure ethdev filter and */
301 		if (iter->bus != NULL &&
302 				/* not in middle of rte_eth_dev iteration, */
303 				iter->class_device == NULL) {
304 			/* get next rte_device to try. */
305 			iter->device = iter->bus->dev_iterate(
306 					iter->device, iter->bus_str, iter);
307 			if (iter->device == NULL)
308 				break; /* no more rte_device candidate */
309 		}
310 		/* The device matches the bus part; now check the ethdev part. */
311 		iter->class_device = iter->cls->dev_iterate(
312 				iter->class_device, iter->cls_str, iter);
313 		if (iter->class_device != NULL)
314 			return eth_dev_to_id(iter->class_device); /* match */
315 	} while (iter->bus != NULL); /* need to try next rte_device */
316 
317 	/* No more ethdev port to iterate. */
318 	rte_eth_iterator_cleanup(iter);
319 	return RTE_MAX_ETHPORTS;
320 }
321 
322 void
323 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
324 {
325 	if (iter->bus_str == NULL)
326 		return; /* nothing to free in pure class filter */
327 	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
328 	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
329 	memset(iter, 0, sizeof(*iter));
330 }
331 
332 uint16_t
333 rte_eth_find_next(uint16_t port_id)
334 {
335 	while (port_id < RTE_MAX_ETHPORTS &&
336 			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
337 		port_id++;
338 
339 	if (port_id >= RTE_MAX_ETHPORTS)
340 		return RTE_MAX_ETHPORTS;
341 
342 	return port_id;
343 }
344 
345 /*
346  * Macro to iterate over all valid ports for internal usage.
347  * Note: RTE_ETH_FOREACH_DEV is different because it skips owned ports.
348  */
349 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \
350 	for (port_id = rte_eth_find_next(0); \
351 	     port_id < RTE_MAX_ETHPORTS; \
352 	     port_id = rte_eth_find_next(port_id + 1))
353 
354 uint16_t
355 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
356 {
357 	port_id = rte_eth_find_next(port_id);
358 	while (port_id < RTE_MAX_ETHPORTS &&
359 			rte_eth_devices[port_id].device != parent)
360 		port_id = rte_eth_find_next(port_id + 1);
361 
362 	return port_id;
363 }
364 
365 uint16_t
366 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
367 {
368 	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
369 	return rte_eth_find_next_of(port_id,
370 			rte_eth_devices[ref_port_id].device);
371 }
372 
373 static void
374 eth_dev_shared_data_prepare(void)
375 {
376 	const unsigned flags = 0;
377 	const struct rte_memzone *mz;
378 
379 	rte_spinlock_lock(&eth_dev_shared_data_lock);
380 
381 	if (eth_dev_shared_data == NULL) {
382 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
383 			/* Allocate port data and ownership shared memory. */
384 			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
385 					sizeof(*eth_dev_shared_data),
386 					rte_socket_id(), flags);
387 		} else
388 			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
389 		if (mz == NULL)
390 			rte_panic("Cannot allocate ethdev shared data\n");
391 
392 		eth_dev_shared_data = mz->addr;
393 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
394 			eth_dev_shared_data->next_owner_id =
395 					RTE_ETH_DEV_NO_OWNER + 1;
396 			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
397 			memset(eth_dev_shared_data->data, 0,
398 			       sizeof(eth_dev_shared_data->data));
399 		}
400 	}
401 
402 	rte_spinlock_unlock(&eth_dev_shared_data_lock);
403 }
404 
405 static bool
406 eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
407 {
408 	return ethdev->data->name[0] != '\0';
409 }
410 
411 static struct rte_eth_dev *
412 eth_dev_allocated(const char *name)
413 {
414 	uint16_t i;
415 
416 	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);
417 
418 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
419 		if (rte_eth_devices[i].data != NULL &&
420 		    strcmp(rte_eth_devices[i].data->name, name) == 0)
421 			return &rte_eth_devices[i];
422 	}
423 	return NULL;
424 }
425 
426 struct rte_eth_dev *
427 rte_eth_dev_allocated(const char *name)
428 {
429 	struct rte_eth_dev *ethdev;
430 
431 	eth_dev_shared_data_prepare();
432 
433 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
434 
435 	ethdev = eth_dev_allocated(name);
436 
437 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
438 
439 	return ethdev;
440 }
441 
442 static uint16_t
443 eth_dev_find_free_port(void)
444 {
445 	uint16_t i;
446 
447 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
448 		/* Using shared name field to find a free port. */
449 		if (eth_dev_shared_data->data[i].name[0] == '\0') {
450 			RTE_ASSERT(rte_eth_devices[i].state ==
451 				   RTE_ETH_DEV_UNUSED);
452 			return i;
453 		}
454 	}
455 	return RTE_MAX_ETHPORTS;
456 }
457 
458 static struct rte_eth_dev *
459 eth_dev_get(uint16_t port_id)
460 {
461 	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
462 
463 	eth_dev->data = &eth_dev_shared_data->data[port_id];
464 
465 	return eth_dev;
466 }
467 
468 struct rte_eth_dev *
469 rte_eth_dev_allocate(const char *name)
470 {
471 	uint16_t port_id;
472 	struct rte_eth_dev *eth_dev = NULL;
473 	size_t name_len;
474 
475 	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
476 	if (name_len == 0) {
477 		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
478 		return NULL;
479 	}
480 
481 	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
482 		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
483 		return NULL;
484 	}
485 
486 	eth_dev_shared_data_prepare();
487 
488 	/* Synchronize port creation between primary and secondary processes. */
489 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
490 
491 	if (eth_dev_allocated(name) != NULL) {
492 		RTE_ETHDEV_LOG(ERR,
493 			"Ethernet device with name %s already allocated\n",
494 			name);
495 		goto unlock;
496 	}
497 
498 	port_id = eth_dev_find_free_port();
499 	if (port_id == RTE_MAX_ETHPORTS) {
500 		RTE_ETHDEV_LOG(ERR,
501 			"Reached maximum number of Ethernet ports\n");
502 		goto unlock;
503 	}
504 
505 	eth_dev = eth_dev_get(port_id);
506 	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
507 	eth_dev->data->port_id = port_id;
508 	eth_dev->data->mtu = RTE_ETHER_MTU;
509 	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
510 
511 unlock:
512 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
513 
514 	return eth_dev;
515 }
516 
517 /*
518  * Attach to a port already registered by the primary process, which
519  * ensures that the same device gets the same port id in both
520  * the primary and secondary processes.
521  */
522 struct rte_eth_dev *
523 rte_eth_dev_attach_secondary(const char *name)
524 {
525 	uint16_t i;
526 	struct rte_eth_dev *eth_dev = NULL;
527 
528 	eth_dev_shared_data_prepare();
529 
530 	/* Synchronize port attachment to primary port creation and release. */
531 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
532 
533 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
534 		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
535 			break;
536 	}
537 	if (i == RTE_MAX_ETHPORTS) {
538 		RTE_ETHDEV_LOG(ERR,
539 			"Device %s is not driven by the primary process\n",
540 			name);
541 	} else {
542 		eth_dev = eth_dev_get(i);
543 		RTE_ASSERT(eth_dev->data->port_id == i);
544 	}
545 
546 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
547 	return eth_dev;
548 }
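
/*
 * Illustrative sketch (not part of the original file): a PMD probe routine
 * typically chooses between the two allocation paths above based on the
 * process type. Everything after the NULL check is driver specific.
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		eth_dev = rte_eth_dev_allocate(name);
 *	else
 *		eth_dev = rte_eth_dev_attach_secondary(name);
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 *	... set dev_ops, rx_pkt_burst, tx_pkt_burst, dev_private, etc. ...
 */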
549 
550 int
551 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
552 {
553 	if (eth_dev == NULL)
554 		return -EINVAL;
555 
556 	eth_dev_shared_data_prepare();
557 
558 	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
559 		rte_eth_dev_callback_process(eth_dev,
560 				RTE_ETH_EVENT_DESTROY, NULL);
561 
562 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
563 
564 	eth_dev->state = RTE_ETH_DEV_UNUSED;
565 	eth_dev->device = NULL;
566 	eth_dev->process_private = NULL;
567 	eth_dev->intr_handle = NULL;
568 	eth_dev->rx_pkt_burst = NULL;
569 	eth_dev->tx_pkt_burst = NULL;
570 	eth_dev->tx_pkt_prepare = NULL;
571 	eth_dev->rx_queue_count = NULL;
572 	eth_dev->rx_descriptor_done = NULL;
573 	eth_dev->rx_descriptor_status = NULL;
574 	eth_dev->tx_descriptor_status = NULL;
575 	eth_dev->dev_ops = NULL;
576 
577 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
578 		rte_free(eth_dev->data->rx_queues);
579 		rte_free(eth_dev->data->tx_queues);
580 		rte_free(eth_dev->data->mac_addrs);
581 		rte_free(eth_dev->data->hash_mac_addrs);
582 		rte_free(eth_dev->data->dev_private);
583 		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
584 		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
585 	}
586 
587 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
588 
589 	return 0;
590 }
591 
592 int
593 rte_eth_dev_is_valid_port(uint16_t port_id)
594 {
595 	if (port_id >= RTE_MAX_ETHPORTS ||
596 	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
597 		return 0;
598 	else
599 		return 1;
600 }
601 
602 static int
603 eth_is_valid_owner_id(uint64_t owner_id)
604 {
605 	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
606 	    eth_dev_shared_data->next_owner_id <= owner_id)
607 		return 0;
608 	return 1;
609 }
610 
611 uint64_t
612 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
613 {
614 	port_id = rte_eth_find_next(port_id);
615 	while (port_id < RTE_MAX_ETHPORTS &&
616 			rte_eth_devices[port_id].data->owner.id != owner_id)
617 		port_id = rte_eth_find_next(port_id + 1);
618 
619 	return port_id;
620 }
621 
622 int
623 rte_eth_dev_owner_new(uint64_t *owner_id)
624 {
625 	eth_dev_shared_data_prepare();
626 
627 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
628 
629 	*owner_id = eth_dev_shared_data->next_owner_id++;
630 
631 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
632 	return 0;
633 }
634 
635 static int
636 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
637 		       const struct rte_eth_dev_owner *new_owner)
638 {
639 	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
640 	struct rte_eth_dev_owner *port_owner;
641 
642 	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
643 		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
644 			port_id);
645 		return -ENODEV;
646 	}
647 
648 	if (!eth_is_valid_owner_id(new_owner->id) &&
649 	    !eth_is_valid_owner_id(old_owner_id)) {
650 		RTE_ETHDEV_LOG(ERR,
651 			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
652 		       old_owner_id, new_owner->id);
653 		return -EINVAL;
654 	}
655 
656 	port_owner = &rte_eth_devices[port_id].data->owner;
657 	if (port_owner->id != old_owner_id) {
658 		RTE_ETHDEV_LOG(ERR,
659 			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
660 			port_id, port_owner->name, port_owner->id);
661 		return -EPERM;
662 	}
663 
664 	/* can not truncate (same structure) */
665 	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
666 
667 	port_owner->id = new_owner->id;
668 
669 	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
670 		port_id, new_owner->name, new_owner->id);
671 
672 	return 0;
673 }
674 
675 int
676 rte_eth_dev_owner_set(const uint16_t port_id,
677 		      const struct rte_eth_dev_owner *owner)
678 {
679 	int ret;
680 
681 	eth_dev_shared_data_prepare();
682 
683 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
684 
685 	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
686 
687 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
688 	return ret;
689 }
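
/*
 * Illustrative sketch (not part of the original file): claiming a port with
 * the ownership API; "my-app" is an arbitrary example owner name.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my-app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... the port is now skipped by RTE_ETH_FOREACH_DEV in other
 *		    components until it is unset or the owner is deleted ...
 *	}
 */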
690 
691 int
692 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
693 {
694 	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
695 			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
696 	int ret;
697 
698 	eth_dev_shared_data_prepare();
699 
700 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
701 
702 	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
703 
704 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
705 	return ret;
706 }
707 
708 int
709 rte_eth_dev_owner_delete(const uint64_t owner_id)
710 {
711 	uint16_t port_id;
712 	int ret = 0;
713 
714 	eth_dev_shared_data_prepare();
715 
716 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
717 
718 	if (eth_is_valid_owner_id(owner_id)) {
719 		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
720 			if (rte_eth_devices[port_id].data->owner.id == owner_id)
721 				memset(&rte_eth_devices[port_id].data->owner, 0,
722 				       sizeof(struct rte_eth_dev_owner));
723 		RTE_ETHDEV_LOG(NOTICE,
724 			"All port owners owned by %016"PRIx64" identifier have been removed\n",
725 			owner_id);
726 	} else {
727 		RTE_ETHDEV_LOG(ERR,
728 			       "Invalid owner id=%016"PRIx64"\n",
729 			       owner_id);
730 		ret = -EINVAL;
731 	}
732 
733 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
734 
735 	return ret;
736 }
737 
738 int
739 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
740 {
741 	int ret = 0;
742 	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
743 
744 	eth_dev_shared_data_prepare();
745 
746 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
747 
748 	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
749 		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
750 			port_id);
751 		ret = -ENODEV;
752 	} else {
753 		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
754 	}
755 
756 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
757 	return ret;
758 }
759 
760 int
761 rte_eth_dev_socket_id(uint16_t port_id)
762 {
763 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
764 	return rte_eth_devices[port_id].data->numa_node;
765 }
766 
767 void *
768 rte_eth_dev_get_sec_ctx(uint16_t port_id)
769 {
770 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
771 	return rte_eth_devices[port_id].security_ctx;
772 }
773 
774 uint16_t
775 rte_eth_dev_count_avail(void)
776 {
777 	uint16_t p;
778 	uint16_t count;
779 
780 	count = 0;
781 
782 	RTE_ETH_FOREACH_DEV(p)
783 		count++;
784 
785 	return count;
786 }
787 
788 uint16_t
789 rte_eth_dev_count_total(void)
790 {
791 	uint16_t port, count = 0;
792 
793 	RTE_ETH_FOREACH_VALID_DEV(port)
794 		count++;
795 
796 	return count;
797 }
798 
799 int
800 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
801 {
802 	char *tmp;
803 
804 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
805 
806 	if (name == NULL) {
807 		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
808 		return -EINVAL;
809 	}
810 
811 	/* Don't check 'rte_eth_devices[i].data' here,
812 	 * because it might be overwritten by a VDEV PMD. */
813 	tmp = eth_dev_shared_data->data[port_id].name;
814 	strcpy(name, tmp);
815 	return 0;
816 }
817 
818 int
819 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
820 {
821 	uint16_t pid;
822 
823 	if (name == NULL) {
824 		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
825 		return -EINVAL;
826 	}
827 
828 	RTE_ETH_FOREACH_VALID_DEV(pid)
829 		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
830 			*port_id = pid;
831 			return 0;
832 		}
833 
834 	return -ENODEV;
835 }
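
/*
 * Illustrative sketch (not part of the original file): resolving a port id
 * from a device name, for instance one taken from the command line;
 * "0000:06:00.0" is an example PCI device name.
 *
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_get_port_by_name("0000:06:00.0", &port_id) != 0)
 *		return -ENODEV;
 */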
836 
837 static int
838 eth_err(uint16_t port_id, int ret)
839 {
840 	if (ret == 0)
841 		return 0;
842 	if (rte_eth_dev_is_removed(port_id))
843 		return -EIO;
844 	return ret;
845 }
846 
847 static int
848 eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
849 {
850 	uint16_t old_nb_queues = dev->data->nb_rx_queues;
851 	void **rxq;
852 	unsigned i;
853 
854 	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
855 		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
856 				sizeof(dev->data->rx_queues[0]) * nb_queues,
857 				RTE_CACHE_LINE_SIZE);
858 		if (dev->data->rx_queues == NULL) {
859 			dev->data->nb_rx_queues = 0;
860 			return -(ENOMEM);
861 		}
862 	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
863 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
864 
865 		rxq = dev->data->rx_queues;
866 
867 		for (i = nb_queues; i < old_nb_queues; i++)
868 			(*dev->dev_ops->rx_queue_release)(rxq[i]);
869 		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
870 				RTE_CACHE_LINE_SIZE);
871 		if (rxq == NULL)
872 			return -(ENOMEM);
873 		if (nb_queues > old_nb_queues) {
874 			uint16_t new_qs = nb_queues - old_nb_queues;
875 
876 			memset(rxq + old_nb_queues, 0,
877 				sizeof(rxq[0]) * new_qs);
878 		}
879 
880 		dev->data->rx_queues = rxq;
881 
882 	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
883 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
884 
885 		rxq = dev->data->rx_queues;
886 
887 		for (i = nb_queues; i < old_nb_queues; i++)
888 			(*dev->dev_ops->rx_queue_release)(rxq[i]);
889 
890 		rte_free(dev->data->rx_queues);
891 		dev->data->rx_queues = NULL;
892 	}
893 	dev->data->nb_rx_queues = nb_queues;
894 	return 0;
895 }
896 
897 static int
898 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
899 {
900 	uint16_t port_id;
901 
902 	if (rx_queue_id >= dev->data->nb_rx_queues) {
903 		port_id = dev->data->port_id;
904 		RTE_ETHDEV_LOG(ERR,
905 			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
906 			       rx_queue_id, port_id);
907 		return -EINVAL;
908 	}
909 
910 	if (dev->data->rx_queues[rx_queue_id] == NULL) {
911 		port_id = dev->data->port_id;
912 		RTE_ETHDEV_LOG(ERR,
913 			       "Queue %u of device with port_id=%u has not been setup\n",
914 			       rx_queue_id, port_id);
915 		return -EINVAL;
916 	}
917 
918 	return 0;
919 }
920 
921 static int
922 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
923 {
924 	uint16_t port_id;
925 
926 	if (tx_queue_id >= dev->data->nb_tx_queues) {
927 		port_id = dev->data->port_id;
928 		RTE_ETHDEV_LOG(ERR,
929 			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
930 			       tx_queue_id, port_id);
931 		return -EINVAL;
932 	}
933 
934 	if (dev->data->tx_queues[tx_queue_id] == NULL) {
935 		port_id = dev->data->port_id;
936 		RTE_ETHDEV_LOG(ERR,
937 			       "Queue %u of device with port_id=%u has not been setup\n",
938 			       tx_queue_id, port_id);
939 		return -EINVAL;
940 	}
941 
942 	return 0;
943 }
944 
945 int
946 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
947 {
948 	struct rte_eth_dev *dev;
949 	int ret;
950 
951 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
952 
953 	dev = &rte_eth_devices[port_id];
954 	if (!dev->data->dev_started) {
955 		RTE_ETHDEV_LOG(ERR,
956 			"Port %u must be started before starting any queue\n",
957 			port_id);
958 		return -EINVAL;
959 	}
960 
961 	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
962 	if (ret != 0)
963 		return ret;
964 
965 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
966 
967 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
968 		RTE_ETHDEV_LOG(INFO,
969 			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
970 			rx_queue_id, port_id);
971 		return -EINVAL;
972 	}
973 
974 	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
975 		RTE_ETHDEV_LOG(INFO,
976 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
977 			rx_queue_id, port_id);
978 		return 0;
979 	}
980 
981 	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
982 							     rx_queue_id));
983 
984 }
985 
986 int
987 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
988 {
989 	struct rte_eth_dev *dev;
990 	int ret;
991 
992 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
993 
994 	dev = &rte_eth_devices[port_id];
995 
996 	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
997 	if (ret != 0)
998 		return ret;
999 
1000 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
1001 
1002 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
1003 		RTE_ETHDEV_LOG(INFO,
1004 			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1005 			rx_queue_id, port_id);
1006 		return -EINVAL;
1007 	}
1008 
1009 	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1010 		RTE_ETHDEV_LOG(INFO,
1011 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1012 			rx_queue_id, port_id);
1013 		return 0;
1014 	}
1015 
1016 	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
1017 
1018 }
1019 
1020 int
1021 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
1022 {
1023 	struct rte_eth_dev *dev;
1024 	int ret;
1025 
1026 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1027 
1028 	dev = &rte_eth_devices[port_id];
1029 	if (!dev->data->dev_started) {
1030 		RTE_ETHDEV_LOG(ERR,
1031 			"Port %u must be started before starting any queue\n",
1032 			port_id);
1033 		return -EINVAL;
1034 	}
1035 
1036 	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
1037 	if (ret != 0)
1038 		return ret;
1039 
1040 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
1041 
1042 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1043 		RTE_ETHDEV_LOG(INFO,
1044 			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1045 			tx_queue_id, port_id);
1046 		return -EINVAL;
1047 	}
1048 
1049 	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
1050 		RTE_ETHDEV_LOG(INFO,
1051 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
1052 			tx_queue_id, port_id);
1053 		return 0;
1054 	}
1055 
1056 	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
1057 }
1058 
1059 int
1060 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
1061 {
1062 	struct rte_eth_dev *dev;
1063 	int ret;
1064 
1065 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1066 
1067 	dev = &rte_eth_devices[port_id];
1068 
1069 	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
1070 	if (ret != 0)
1071 		return ret;
1072 
1073 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
1074 
1075 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1076 		RTE_ETHDEV_LOG(INFO,
1077 			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1078 			tx_queue_id, port_id);
1079 		return -EINVAL;
1080 	}
1081 
1082 	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1083 		RTE_ETHDEV_LOG(INFO,
1084 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1085 			tx_queue_id, port_id);
1086 		return 0;
1087 	}
1088 
1089 	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
1090 
1091 }
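
/*
 * Illustrative sketch (not part of the original file): the per-queue
 * start/stop calls above are typically combined with deferred queue start.
 * Assuming the PMD supports it, a queue set up with rx_deferred_start = 1
 * stays stopped after rte_eth_dev_start() and is brought up explicitly;
 * nb_rxd, socket_id and mb_pool are application-chosen placeholders.
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_deferred_start = 1;
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, &rxconf, mb_pool);
 *	rte_eth_dev_start(port_id);
 *	rte_eth_dev_rx_queue_start(port_id, 0);
 */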
1092 
1093 static int
1094 eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
1095 {
1096 	uint16_t old_nb_queues = dev->data->nb_tx_queues;
1097 	void **txq;
1098 	unsigned i;
1099 
1100 	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
1101 		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
1102 						   sizeof(dev->data->tx_queues[0]) * nb_queues,
1103 						   RTE_CACHE_LINE_SIZE);
1104 		if (dev->data->tx_queues == NULL) {
1105 			dev->data->nb_tx_queues = 0;
1106 			return -(ENOMEM);
1107 		}
1108 	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
1109 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1110 
1111 		txq = dev->data->tx_queues;
1112 
1113 		for (i = nb_queues; i < old_nb_queues; i++)
1114 			(*dev->dev_ops->tx_queue_release)(txq[i]);
1115 		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1116 				  RTE_CACHE_LINE_SIZE);
1117 		if (txq == NULL)
1118 			return -ENOMEM;
1119 		if (nb_queues > old_nb_queues) {
1120 			uint16_t new_qs = nb_queues - old_nb_queues;
1121 
1122 			memset(txq + old_nb_queues, 0,
1123 			       sizeof(txq[0]) * new_qs);
1124 		}
1125 
1126 		dev->data->tx_queues = txq;
1127 
1128 	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
1129 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1130 
1131 		txq = dev->data->tx_queues;
1132 
1133 		for (i = nb_queues; i < old_nb_queues; i++)
1134 			(*dev->dev_ops->tx_queue_release)(txq[i]);
1135 
1136 		rte_free(dev->data->tx_queues);
1137 		dev->data->tx_queues = NULL;
1138 	}
1139 	dev->data->nb_tx_queues = nb_queues;
1140 	return 0;
1141 }
1142 
1143 uint32_t
1144 rte_eth_speed_bitflag(uint32_t speed, int duplex)
1145 {
1146 	switch (speed) {
1147 	case ETH_SPEED_NUM_10M:
1148 		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
1149 	case ETH_SPEED_NUM_100M:
1150 		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
1151 	case ETH_SPEED_NUM_1G:
1152 		return ETH_LINK_SPEED_1G;
1153 	case ETH_SPEED_NUM_2_5G:
1154 		return ETH_LINK_SPEED_2_5G;
1155 	case ETH_SPEED_NUM_5G:
1156 		return ETH_LINK_SPEED_5G;
1157 	case ETH_SPEED_NUM_10G:
1158 		return ETH_LINK_SPEED_10G;
1159 	case ETH_SPEED_NUM_20G:
1160 		return ETH_LINK_SPEED_20G;
1161 	case ETH_SPEED_NUM_25G:
1162 		return ETH_LINK_SPEED_25G;
1163 	case ETH_SPEED_NUM_40G:
1164 		return ETH_LINK_SPEED_40G;
1165 	case ETH_SPEED_NUM_50G:
1166 		return ETH_LINK_SPEED_50G;
1167 	case ETH_SPEED_NUM_56G:
1168 		return ETH_LINK_SPEED_56G;
1169 	case ETH_SPEED_NUM_100G:
1170 		return ETH_LINK_SPEED_100G;
1171 	case ETH_SPEED_NUM_200G:
1172 		return ETH_LINK_SPEED_200G;
1173 	default:
1174 		return 0;
1175 	}
1176 }
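
/*
 * Illustrative sketch (not part of the original file): the helper above is
 * typically used to build the link_speeds bitmap of rte_eth_conf when an
 * application wants a fixed speed instead of autonegotiation.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */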
1177 
1178 const char *
1179 rte_eth_dev_rx_offload_name(uint64_t offload)
1180 {
1181 	const char *name = "UNKNOWN";
1182 	unsigned int i;
1183 
1184 	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
1185 		if (offload == eth_dev_rx_offload_names[i].offload) {
1186 			name = eth_dev_rx_offload_names[i].name;
1187 			break;
1188 		}
1189 	}
1190 
1191 	return name;
1192 }
1193 
1194 const char *
1195 rte_eth_dev_tx_offload_name(uint64_t offload)
1196 {
1197 	const char *name = "UNKNOWN";
1198 	unsigned int i;
1199 
1200 	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
1201 		if (offload == eth_dev_tx_offload_names[i].offload) {
1202 			name = eth_dev_tx_offload_names[i].name;
1203 			break;
1204 		}
1205 	}
1206 
1207 	return name;
1208 }
1209 
1210 static inline int
1211 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
1212 		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
1213 {
1214 	int ret = 0;
1215 
1216 	if (dev_info_size == 0) {
1217 		if (config_size != max_rx_pkt_len) {
1218 			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
1219 				       " %u != %u is not allowed\n",
1220 				       port_id, config_size, max_rx_pkt_len);
1221 			ret = -EINVAL;
1222 		}
1223 	} else if (config_size > dev_info_size) {
1224 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1225 			       "> max allowed value %u\n", port_id, config_size,
1226 			       dev_info_size);
1227 		ret = -EINVAL;
1228 	} else if (config_size < RTE_ETHER_MIN_LEN) {
1229 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1230 			       "< min allowed value %u\n", port_id, config_size,
1231 			       (unsigned int)RTE_ETHER_MIN_LEN);
1232 		ret = -EINVAL;
1233 	}
1234 	return ret;
1235 }
1236 
1237 /*
1238  * Validate offloads that are requested through rte_eth_dev_configure against
1239  * the offloads successfully set by the ethernet device.
1240  *
1241  * @param port_id
1242  *   The port identifier of the Ethernet device.
1243  * @param req_offloads
1244  *   The offloads that have been requested through `rte_eth_dev_configure`.
1245  * @param set_offloads
1246  *   The offloads successfully set by the ethernet device.
1247  * @param offload_type
1248  *   The offload type i.e. Rx/Tx string.
1249  * @param offload_name
1250  *   The function that prints the offload name.
1251  * @return
1252  *   - (0) if validation successful.
1253  *   - (-EINVAL) if requested offload has been silently disabled.
1254  *
1255  */
1256 static int
1257 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
1258 		  uint64_t set_offloads, const char *offload_type,
1259 		  const char *(*offload_name)(uint64_t))
1260 {
1261 	uint64_t offloads_diff = req_offloads ^ set_offloads;
1262 	uint64_t offload;
1263 	int ret = 0;
1264 
1265 	while (offloads_diff != 0) {
1266 		/* Check if any offload is requested but not enabled. */
1267 		offload = 1ULL << __builtin_ctzll(offloads_diff);
1268 		if (offload & req_offloads) {
1269 			RTE_ETHDEV_LOG(ERR,
1270 				"Port %u failed to enable %s offload %s\n",
1271 				port_id, offload_type, offload_name(offload));
1272 			ret = -EINVAL;
1273 		}
1274 
1275 		/* Check if offload couldn't be disabled. */
1276 		if (offload & set_offloads) {
1277 			RTE_ETHDEV_LOG(DEBUG,
1278 				"Port %u %s offload %s is not requested but enabled\n",
1279 				port_id, offload_type, offload_name(offload));
1280 		}
1281 
1282 		offloads_diff &= ~offload;
1283 	}
1284 
1285 	return ret;
1286 }
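
/*
 * Worked example (illustrative, not from the original file): if the
 * application requested DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH
 * but the driver kept only DEV_RX_OFFLOAD_TCP_CKSUM in dev_conf, the XOR
 * above leaves the RSS_HASH bit set; since that bit is in req_offloads the
 * function logs an error and returns -EINVAL. A bit set only in
 * set_offloads (an offload the driver enabled on its own) is merely
 * reported at DEBUG level.
 */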
1287 
1288 int
1289 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1290 		      const struct rte_eth_conf *dev_conf)
1291 {
1292 	struct rte_eth_dev *dev;
1293 	struct rte_eth_dev_info dev_info;
1294 	struct rte_eth_conf orig_conf;
1295 	int diag;
1296 	int ret;
1297 
1298 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1299 
1300 	dev = &rte_eth_devices[port_id];
1301 
1302 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1303 
1304 	if (dev->data->dev_started) {
1305 		RTE_ETHDEV_LOG(ERR,
1306 			"Port %u must be stopped to allow configuration\n",
1307 			port_id);
1308 		return -EBUSY;
1309 	}
1310 
1311 	/* Store the original config, as a rollback is required on failure */
1312 	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1313 
1314 	/*
1315 	 * Copy the dev_conf parameter into the dev structure.
1316 	 * rte_eth_dev_info_get() requires dev_conf, so copy it before getting dev_info.
1317 	 */
1318 	if (dev_conf != &dev->data->dev_conf)
1319 		memcpy(&dev->data->dev_conf, dev_conf,
1320 		       sizeof(dev->data->dev_conf));
1321 
1322 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1323 	if (ret != 0)
1324 		goto rollback;
1325 
1326 	/* If number of queues specified by application for both Rx and Tx is
1327 	 * zero, use driver preferred values. This cannot be done individually
1328 	 * as it is valid for either Tx or Rx (but not both) to be zero.
1329 	 * If the driver does not provide any preferred values, fall back on
1330 	 * EAL defaults.
1331 	 */
1332 	if (nb_rx_q == 0 && nb_tx_q == 0) {
1333 		nb_rx_q = dev_info.default_rxportconf.nb_queues;
1334 		if (nb_rx_q == 0)
1335 			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1336 		nb_tx_q = dev_info.default_txportconf.nb_queues;
1337 		if (nb_tx_q == 0)
1338 			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1339 	}
1340 
1341 	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1342 		RTE_ETHDEV_LOG(ERR,
1343 			"Number of RX queues requested (%u) is greater than max supported (%d)\n",
1344 			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1345 		ret = -EINVAL;
1346 		goto rollback;
1347 	}
1348 
1349 	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1350 		RTE_ETHDEV_LOG(ERR,
1351 			"Number of TX queues requested (%u) is greater than max supported (%d)\n",
1352 			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1353 		ret = -EINVAL;
1354 		goto rollback;
1355 	}
1356 
1357 	/*
1358 	 * Check that the numbers of RX and TX queues are not greater
1359 	 * than the maximum number of RX and TX queues supported by the
1360 	 * configured device.
1361 	 */
1362 	if (nb_rx_q > dev_info.max_rx_queues) {
1363 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1364 			port_id, nb_rx_q, dev_info.max_rx_queues);
1365 		ret = -EINVAL;
1366 		goto rollback;
1367 	}
1368 
1369 	if (nb_tx_q > dev_info.max_tx_queues) {
1370 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1371 			port_id, nb_tx_q, dev_info.max_tx_queues);
1372 		ret = -EINVAL;
1373 		goto rollback;
1374 	}
1375 
1376 	/* Check that the device supports requested interrupts */
1377 	if ((dev_conf->intr_conf.lsc == 1) &&
1378 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1379 		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1380 			dev->device->driver->name);
1381 		ret = -EINVAL;
1382 		goto rollback;
1383 	}
1384 	if ((dev_conf->intr_conf.rmv == 1) &&
1385 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1386 		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1387 			dev->device->driver->name);
1388 		ret = -EINVAL;
1389 		goto rollback;
1390 	}
1391 
1392 	/*
1393 	 * If jumbo frames are enabled, check that the maximum RX packet
1394 	 * length is supported by the configured device.
1395 	 */
1396 	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1397 		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1398 			RTE_ETHDEV_LOG(ERR,
1399 				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1400 				port_id, dev_conf->rxmode.max_rx_pkt_len,
1401 				dev_info.max_rx_pktlen);
1402 			ret = -EINVAL;
1403 			goto rollback;
1404 		} else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1405 			RTE_ETHDEV_LOG(ERR,
1406 				"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1407 				port_id, dev_conf->rxmode.max_rx_pkt_len,
1408 				(unsigned int)RTE_ETHER_MIN_LEN);
1409 			ret = -EINVAL;
1410 			goto rollback;
1411 		}
1412 	} else {
1413 		if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
1414 			dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
1415 			/* Use default value */
1416 			dev->data->dev_conf.rxmode.max_rx_pkt_len =
1417 							RTE_ETHER_MAX_LEN;
1418 	}
1419 
1420 	/*
1421 	 * If LRO is enabled, check that the maximum aggregated packet
1422 	 * size is supported by the configured device.
1423 	 */
1424 	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1425 		if (dev_conf->rxmode.max_lro_pkt_size == 0)
1426 			dev->data->dev_conf.rxmode.max_lro_pkt_size =
1427 				dev->data->dev_conf.rxmode.max_rx_pkt_len;
1428 		ret = eth_dev_check_lro_pkt_size(port_id,
1429 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
1430 				dev->data->dev_conf.rxmode.max_rx_pkt_len,
1431 				dev_info.max_lro_pkt_size);
1432 		if (ret != 0)
1433 			goto rollback;
1434 	}
1435 
1436 	/* Any requested offloading must be within its device capabilities */
1437 	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1438 	     dev_conf->rxmode.offloads) {
1439 		RTE_ETHDEV_LOG(ERR,
1440 			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1441 			"capabilities 0x%"PRIx64" in %s()\n",
1442 			port_id, dev_conf->rxmode.offloads,
1443 			dev_info.rx_offload_capa,
1444 			__func__);
1445 		ret = -EINVAL;
1446 		goto rollback;
1447 	}
1448 	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1449 	     dev_conf->txmode.offloads) {
1450 		RTE_ETHDEV_LOG(ERR,
1451 			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1452 			"capabilities 0x%"PRIx64" in %s()\n",
1453 			port_id, dev_conf->txmode.offloads,
1454 			dev_info.tx_offload_capa,
1455 			__func__);
1456 		ret = -EINVAL;
1457 		goto rollback;
1458 	}
1459 
1460 	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1461 		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1462 
1463 	/* Check that device supports requested rss hash functions. */
1464 	if ((dev_info.flow_type_rss_offloads |
1465 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1466 	    dev_info.flow_type_rss_offloads) {
1467 		RTE_ETHDEV_LOG(ERR,
1468 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1469 			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1470 			dev_info.flow_type_rss_offloads);
1471 		ret = -EINVAL;
1472 		goto rollback;
1473 	}
1474 
1475 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1476 	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1477 	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1478 		RTE_ETHDEV_LOG(ERR,
1479 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1480 			port_id,
1481 			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1482 		ret = -EINVAL;
1483 		goto rollback;
1484 	}
1485 
1486 	/*
1487 	 * Setup new number of RX/TX queues and reconfigure device.
1488 	 */
1489 	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1490 	if (diag != 0) {
1491 		RTE_ETHDEV_LOG(ERR,
1492 			"Port%u eth_dev_rx_queue_config = %d\n",
1493 			port_id, diag);
1494 		ret = diag;
1495 		goto rollback;
1496 	}
1497 
1498 	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1499 	if (diag != 0) {
1500 		RTE_ETHDEV_LOG(ERR,
1501 			"Port%u eth_dev_tx_queue_config = %d\n",
1502 			port_id, diag);
1503 		eth_dev_rx_queue_config(dev, 0);
1504 		ret = diag;
1505 		goto rollback;
1506 	}
1507 
1508 	diag = (*dev->dev_ops->dev_configure)(dev);
1509 	if (diag != 0) {
1510 		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1511 			port_id, diag);
1512 		ret = eth_err(port_id, diag);
1513 		goto reset_queues;
1514 	}
1515 
1516 	/* Initialize Rx profiling if enabled at compilation time. */
1517 	diag = __rte_eth_dev_profile_init(port_id, dev);
1518 	if (diag != 0) {
1519 		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1520 			port_id, diag);
1521 		ret = eth_err(port_id, diag);
1522 		goto reset_queues;
1523 	}
1524 
1525 	/* Validate Rx offloads. */
1526 	diag = eth_dev_validate_offloads(port_id,
1527 			dev_conf->rxmode.offloads,
1528 			dev->data->dev_conf.rxmode.offloads, "Rx",
1529 			rte_eth_dev_rx_offload_name);
1530 	if (diag != 0) {
1531 		ret = diag;
1532 		goto reset_queues;
1533 	}
1534 
1535 	/* Validate Tx offloads. */
1536 	diag = eth_dev_validate_offloads(port_id,
1537 			dev_conf->txmode.offloads,
1538 			dev->data->dev_conf.txmode.offloads, "Tx",
1539 			rte_eth_dev_tx_offload_name);
1540 	if (diag != 0) {
1541 		ret = diag;
1542 		goto reset_queues;
1543 	}
1544 
1545 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1546 	return 0;
1547 reset_queues:
1548 	eth_dev_rx_queue_config(dev, 0);
1549 	eth_dev_tx_queue_config(dev, 0);
1550 rollback:
1551 	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1552 
1553 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1554 	return ret;
1555 }
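
/*
 * Illustrative sketch (not part of the original file) of the usual
 * configure/setup/start sequence built on top of the function above;
 * queue counts, descriptor numbers and mb_pool are application-chosen
 * placeholder values.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *		return -1;
 *	if (rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL, mb_pool) != 0)
 *		return -1;
 *	if (rte_eth_tx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL) != 0)
 *		return -1;
 *	return rte_eth_dev_start(port_id);
 */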
1556 
1557 void
1558 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1559 {
1560 	if (dev->data->dev_started) {
1561 		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1562 			dev->data->port_id);
1563 		return;
1564 	}
1565 
1566 	eth_dev_rx_queue_config(dev, 0);
1567 	eth_dev_tx_queue_config(dev, 0);
1568 
1569 	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1570 }
1571 
1572 static void
1573 eth_dev_mac_restore(struct rte_eth_dev *dev,
1574 			struct rte_eth_dev_info *dev_info)
1575 {
1576 	struct rte_ether_addr *addr;
1577 	uint16_t i;
1578 	uint32_t pool = 0;
1579 	uint64_t pool_mask;
1580 
1581 	/* replay MAC address configuration including default MAC */
1582 	addr = &dev->data->mac_addrs[0];
1583 	if (*dev->dev_ops->mac_addr_set != NULL)
1584 		(*dev->dev_ops->mac_addr_set)(dev, addr);
1585 	else if (*dev->dev_ops->mac_addr_add != NULL)
1586 		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1587 
1588 	if (*dev->dev_ops->mac_addr_add != NULL) {
1589 		for (i = 1; i < dev_info->max_mac_addrs; i++) {
1590 			addr = &dev->data->mac_addrs[i];
1591 
1592 			/* skip zero address */
1593 			if (rte_is_zero_ether_addr(addr))
1594 				continue;
1595 
1596 			pool = 0;
1597 			pool_mask = dev->data->mac_pool_sel[i];
1598 
1599 			do {
1600 				if (pool_mask & 1ULL)
1601 					(*dev->dev_ops->mac_addr_add)(dev,
1602 						addr, i, pool);
1603 				pool_mask >>= 1;
1604 				pool++;
1605 			} while (pool_mask);
1606 		}
1607 	}
1608 }
1609 
1610 static int
1611 eth_dev_config_restore(struct rte_eth_dev *dev,
1612 		struct rte_eth_dev_info *dev_info, uint16_t port_id)
1613 {
1614 	int ret;
1615 
1616 	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1617 		eth_dev_mac_restore(dev, dev_info);
1618 
1619 	/* replay promiscuous configuration */
1620 	/*
1621 	 * use the callbacks directly since we don't need the port_id check and
1622 	 * want to bypass the same-value check
1623 	 */
1624 	if (rte_eth_promiscuous_get(port_id) == 1 &&
1625 	    *dev->dev_ops->promiscuous_enable != NULL) {
1626 		ret = eth_err(port_id,
1627 			      (*dev->dev_ops->promiscuous_enable)(dev));
1628 		if (ret != 0 && ret != -ENOTSUP) {
1629 			RTE_ETHDEV_LOG(ERR,
1630 				"Failed to enable promiscuous mode for device (port %u): %s\n",
1631 				port_id, rte_strerror(-ret));
1632 			return ret;
1633 		}
1634 	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
1635 		   *dev->dev_ops->promiscuous_disable != NULL) {
1636 		ret = eth_err(port_id,
1637 			      (*dev->dev_ops->promiscuous_disable)(dev));
1638 		if (ret != 0 && ret != -ENOTSUP) {
1639 			RTE_ETHDEV_LOG(ERR,
1640 				"Failed to disable promiscuous mode for device (port %u): %s\n",
1641 				port_id, rte_strerror(-ret));
1642 			return ret;
1643 		}
1644 	}
1645 
1646 	/* replay all multicast configuration */
1647 	/*
1648 	 * use the callbacks directly since we don't need the port_id check and
1649 	 * want to bypass the same-value check
1650 	 */
1651 	if (rte_eth_allmulticast_get(port_id) == 1 &&
1652 	    *dev->dev_ops->allmulticast_enable != NULL) {
1653 		ret = eth_err(port_id,
1654 			      (*dev->dev_ops->allmulticast_enable)(dev));
1655 		if (ret != 0 && ret != -ENOTSUP) {
1656 			RTE_ETHDEV_LOG(ERR,
1657 				"Failed to enable allmulticast mode for device (port %u): %s\n",
1658 				port_id, rte_strerror(-ret));
1659 			return ret;
1660 		}
1661 	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
1662 		   *dev->dev_ops->allmulticast_disable != NULL) {
1663 		ret = eth_err(port_id,
1664 			      (*dev->dev_ops->allmulticast_disable)(dev));
1665 		if (ret != 0 && ret != -ENOTSUP) {
1666 			RTE_ETHDEV_LOG(ERR,
1667 				"Failed to disable allmulticast mode for device (port %u): %s\n",
1668 				port_id, rte_strerror(-ret));
1669 			return ret;
1670 		}
1671 	}
1672 
1673 	return 0;
1674 }
1675 
1676 int
1677 rte_eth_dev_start(uint16_t port_id)
1678 {
1679 	struct rte_eth_dev *dev;
1680 	struct rte_eth_dev_info dev_info;
1681 	int diag;
1682 	int ret, ret_stop;
1683 
1684 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1685 
1686 	dev = &rte_eth_devices[port_id];
1687 
1688 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1689 
1690 	if (dev->data->dev_started != 0) {
1691 		RTE_ETHDEV_LOG(INFO,
1692 			"Device with port_id=%"PRIu16" already started\n",
1693 			port_id);
1694 		return 0;
1695 	}
1696 
1697 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1698 	if (ret != 0)
1699 		return ret;
1700 
1701 	/* Let's restore the MAC now if the device does not support live change */
1702 	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1703 		eth_dev_mac_restore(dev, &dev_info);
1704 
1705 	diag = (*dev->dev_ops->dev_start)(dev);
1706 	if (diag == 0)
1707 		dev->data->dev_started = 1;
1708 	else
1709 		return eth_err(port_id, diag);
1710 
1711 	ret = eth_dev_config_restore(dev, &dev_info, port_id);
1712 	if (ret != 0) {
1713 		RTE_ETHDEV_LOG(ERR,
1714 			"Error during restoring configuration for device (port %u): %s\n",
1715 			port_id, rte_strerror(-ret));
1716 		ret_stop = rte_eth_dev_stop(port_id);
1717 		if (ret_stop != 0) {
1718 			RTE_ETHDEV_LOG(ERR,
1719 				"Failed to stop device (port %u): %s\n",
1720 				port_id, rte_strerror(-ret_stop));
1721 		}
1722 
1723 		return ret;
1724 	}
1725 
1726 	if (dev->data->dev_conf.intr_conf.lsc == 0) {
1727 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1728 		(*dev->dev_ops->link_update)(dev, 0);
1729 	}
1730 
1731 	rte_ethdev_trace_start(port_id);
1732 	return 0;
1733 }
1734 
1735 int
1736 rte_eth_dev_stop(uint16_t port_id)
1737 {
1738 	struct rte_eth_dev *dev;
1739 	int ret;
1740 
1741 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1742 	dev = &rte_eth_devices[port_id];
1743 
1744 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1745 
1746 	if (dev->data->dev_started == 0) {
1747 		RTE_ETHDEV_LOG(INFO,
1748 			"Device with port_id=%"PRIu16" already stopped\n",
1749 			port_id);
1750 		return 0;
1751 	}
1752 
1753 	dev->data->dev_started = 0;
1754 	ret = (*dev->dev_ops->dev_stop)(dev);
1755 	rte_ethdev_trace_stop(port_id, ret);
1756 
1757 	return ret;
1758 }
1759 
1760 int
1761 rte_eth_dev_set_link_up(uint16_t port_id)
1762 {
1763 	struct rte_eth_dev *dev;
1764 
1765 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1766 
1767 	dev = &rte_eth_devices[port_id];
1768 
1769 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1770 	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1771 }
1772 
1773 int
1774 rte_eth_dev_set_link_down(uint16_t port_id)
1775 {
1776 	struct rte_eth_dev *dev;
1777 
1778 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1779 
1780 	dev = &rte_eth_devices[port_id];
1781 
1782 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1783 	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1784 }
1785 
1786 int
1787 rte_eth_dev_close(uint16_t port_id)
1788 {
1789 	struct rte_eth_dev *dev;
1790 	int firsterr, binerr;
1791 	int *lasterr = &firsterr;
1792 
1793 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1794 	dev = &rte_eth_devices[port_id];
1795 
1796 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1797 	*lasterr = (*dev->dev_ops->dev_close)(dev);
1798 	if (*lasterr != 0)
1799 		lasterr = &binerr;
1800 
1801 	rte_ethdev_trace_close(port_id);
1802 	*lasterr = rte_eth_dev_release_port(dev);
1803 
1804 	return eth_err(port_id, firsterr);
1805 }
1806 
1807 int
1808 rte_eth_dev_reset(uint16_t port_id)
1809 {
1810 	struct rte_eth_dev *dev;
1811 	int ret;
1812 
1813 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1814 	dev = &rte_eth_devices[port_id];
1815 
1816 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1817 
1818 	ret = rte_eth_dev_stop(port_id);
1819 	if (ret != 0) {
1820 		RTE_ETHDEV_LOG(ERR,
1821 			"Failed to stop device (port %u) before reset: %s - ignore\n",
1822 			port_id, rte_strerror(-ret));
1823 	}
1824 	ret = dev->dev_ops->dev_reset(dev);
1825 
1826 	return eth_err(port_id, ret);
1827 }
1828 
1829 int
1830 rte_eth_dev_is_removed(uint16_t port_id)
1831 {
1832 	struct rte_eth_dev *dev;
1833 	int ret;
1834 
1835 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1836 
1837 	dev = &rte_eth_devices[port_id];
1838 
1839 	if (dev->state == RTE_ETH_DEV_REMOVED)
1840 		return 1;
1841 
1842 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1843 
1844 	ret = dev->dev_ops->is_removed(dev);
1845 	if (ret != 0)
1846 		/* Device is physically removed. */
1847 		dev->state = RTE_ETH_DEV_REMOVED;
1848 
1849 	return ret;
1850 }
1851 
1852 static int
1853 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1854 			     uint16_t n_seg, uint32_t *mbp_buf_size,
1855 			     const struct rte_eth_dev_info *dev_info)
1856 {
1857 	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1858 	struct rte_mempool *mp_first;
1859 	uint32_t offset_mask;
1860 	uint16_t seg_idx;
1861 
1862 	if (n_seg > seg_capa->max_nseg) {
1863 		RTE_ETHDEV_LOG(ERR,
1864 			       "Requested Rx segments %u exceed supported %u\n",
1865 			       n_seg, seg_capa->max_nseg);
1866 		return -EINVAL;
1867 	}
1868 	/*
1869 	 * Check the sizes and offsets against buffer sizes
1870 	 * for each segment specified in extended configuration.
1871 	 */
1872 	mp_first = rx_seg[0].mp;
1873 	offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1874 	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1875 		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1876 		uint32_t length = rx_seg[seg_idx].length;
1877 		uint32_t offset = rx_seg[seg_idx].offset;
1878 
1879 		if (mpl == NULL) {
1880 			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1881 			return -EINVAL;
1882 		}
1883 		if (seg_idx != 0 && mp_first != mpl &&
1884 		    seg_capa->multi_pools == 0) {
1885 			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1886 			return -ENOTSUP;
1887 		}
1888 		if (offset != 0) {
1889 			if (seg_capa->offset_allowed == 0) {
1890 				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1891 				return -ENOTSUP;
1892 			}
1893 			if (offset & offset_mask) {
1894 				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
1895 					       offset,
1896 					       seg_capa->offset_align_log2);
1897 				return -EINVAL;
1898 			}
1899 		}
1900 		if (mpl->private_data_size <
1901 			sizeof(struct rte_pktmbuf_pool_private)) {
1902 			RTE_ETHDEV_LOG(ERR,
1903 				       "%s private_data_size %u < %u\n",
1904 				       mpl->name, mpl->private_data_size,
1905 				       (unsigned int)sizeof
1906 					(struct rte_pktmbuf_pool_private));
1907 			return -ENOSPC;
1908 		}
1909 		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
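		/*
		 * RTE_PKTMBUF_HEADROOM is added to the user offset only for
		 * the first segment; later segments use the user-provided
		 * offset as-is.
		 */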
1910 		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1911 		length = length != 0 ? length : *mbp_buf_size;
1912 		if (*mbp_buf_size < length + offset) {
1913 			RTE_ETHDEV_LOG(ERR,
1914 				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1915 				       mpl->name, *mbp_buf_size,
1916 				       length + offset, length, offset);
1917 			return -EINVAL;
1918 		}
1919 	}
1920 	return 0;
1921 }
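
/*
 * Configuration sketch (illustrative only): receiving each packet split into
 * a protocol-header segment from "hdr_pool" and a payload segment from
 * "pay_pool" (both assumed to be existing pktmbuf pools), with "dev_info"
 * assumed to be filled by rte_eth_dev_info_get(). A zero segment length
 * means the pool's full data room, as handled by the check above.
 *
 *	union rte_eth_rxseg segs[2] = {
 *		{ .split = { .mp = hdr_pool, .length = 128 } },
 *		{ .split = { .mp = pay_pool, .length = 0 } },
 *	};
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *	int rc;
 *
 *	rxconf.rx_seg = segs;
 *	rxconf.rx_nseg = RTE_DIM(segs);
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rc = rte_eth_rx_queue_setup(port_id, 0, 512,
 *				    rte_eth_dev_socket_id(port_id),
 *				    &rxconf, NULL);
 */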
1922 
1923 int
1924 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1925 		       uint16_t nb_rx_desc, unsigned int socket_id,
1926 		       const struct rte_eth_rxconf *rx_conf,
1927 		       struct rte_mempool *mp)
1928 {
1929 	int ret;
1930 	uint32_t mbp_buf_size;
1931 	struct rte_eth_dev *dev;
1932 	struct rte_eth_dev_info dev_info;
1933 	struct rte_eth_rxconf local_conf;
1934 	void **rxq;
1935 
1936 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1937 
1938 	dev = &rte_eth_devices[port_id];
1939 	if (rx_queue_id >= dev->data->nb_rx_queues) {
1940 		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1941 		return -EINVAL;
1942 	}
1943 
1944 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1945 
1946 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1947 	if (ret != 0)
1948 		return ret;
1949 
1950 	if (mp != NULL) {
1951 		/* Single pool configuration check. */
1952 		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1953 			RTE_ETHDEV_LOG(ERR,
1954 				       "Ambiguous segment configuration\n");
1955 			return -EINVAL;
1956 		}
1957 		/*
1958 		 * Check the size of the mbuf data buffer; this value
1959 		 * must be provided in the private data of the memory pool.
1960 		 * First check that the memory pool has valid private data.
1961 		 */
1962 		if (mp->private_data_size <
1963 				sizeof(struct rte_pktmbuf_pool_private)) {
1964 			RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1965 				mp->name, mp->private_data_size,
1966 				(unsigned int)
1967 				sizeof(struct rte_pktmbuf_pool_private));
1968 			return -ENOSPC;
1969 		}
1970 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1971 		if (mbp_buf_size < dev_info.min_rx_bufsize +
1972 				   RTE_PKTMBUF_HEADROOM) {
1973 			RTE_ETHDEV_LOG(ERR,
1974 				       "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1975 				       mp->name, mbp_buf_size,
1976 				       RTE_PKTMBUF_HEADROOM +
1977 				       dev_info.min_rx_bufsize,
1978 				       RTE_PKTMBUF_HEADROOM,
1979 				       dev_info.min_rx_bufsize);
1980 			return -EINVAL;
1981 		}
1982 	} else {
1983 		const struct rte_eth_rxseg_split *rx_seg;
1984 		uint16_t n_seg;
1985 
1986 		/* Extended multi-segment configuration check. */
1987 		if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
1988 			RTE_ETHDEV_LOG(ERR,
1989 				       "Memory pool is null and no extended configuration provided\n");
1990 			return -EINVAL;
1991 		}
1992 
1993 		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
1994 		n_seg = rx_conf->rx_nseg;
1995 
1996 		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1997 			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
1998 							   &mbp_buf_size,
1999 							   &dev_info);
2000 			if (ret != 0)
2001 				return ret;
2002 		} else {
2003 			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2004 			return -EINVAL;
2005 		}
2006 	}
2007 
2008 	/* Use default specified by driver, if nb_rx_desc is zero */
2009 	if (nb_rx_desc == 0) {
2010 		nb_rx_desc = dev_info.default_rxportconf.ring_size;
2011 		/* If driver default is also zero, fall back on EAL default */
2012 		if (nb_rx_desc == 0)
2013 			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2014 	}
2015 
2016 	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2017 			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2018 			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2019 
2020 		RTE_ETHDEV_LOG(ERR,
2021 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2022 			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2023 			dev_info.rx_desc_lim.nb_min,
2024 			dev_info.rx_desc_lim.nb_align);
2025 		return -EINVAL;
2026 	}
2027 
2028 	if (dev->data->dev_started &&
2029 		!(dev_info.dev_capa &
2030 			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2031 		return -EBUSY;
2032 
2033 	if (dev->data->dev_started &&
2034 		(dev->data->rx_queue_state[rx_queue_id] !=
2035 			RTE_ETH_QUEUE_STATE_STOPPED))
2036 		return -EBUSY;
2037 
2038 	rxq = dev->data->rx_queues;
2039 	if (rxq[rx_queue_id]) {
2040 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2041 					-ENOTSUP);
2042 		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2043 		rxq[rx_queue_id] = NULL;
2044 	}
2045 
2046 	if (rx_conf == NULL)
2047 		rx_conf = &dev_info.default_rxconf;
2048 
2049 	local_conf = *rx_conf;
2050 
2051 	/*
2052 	 * If an offload has already been enabled in
2053 	 * rte_eth_dev_configure(), it is enabled on all queues,
2054 	 * so there is no need to enable it for this queue again.
2055 	 * The local_conf.offloads passed to the underlying PMD only
2056 	 * carries the offloads that are enabled on this queue alone
2057 	 * and not on all queues.
2058 	 */
2059 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2060 
2061 	/*
2062 	 * Offloads newly added for this queue are those not enabled in
2063 	 * rte_eth_dev_configure(), and they must be per-queue offloads.
2064 	 * A pure per-port offload cannot be enabled on one queue while
2065 	 * disabled on another, so it cannot be newly added for a single
2066 	 * queue unless it has already been enabled in
2067 	 * rte_eth_dev_configure().
2068 	 */
2069 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2070 	     local_conf.offloads) {
2071 		RTE_ETHDEV_LOG(ERR,
2072 			"Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2073 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2074 			port_id, rx_queue_id, local_conf.offloads,
2075 			dev_info.rx_queue_offload_capa,
2076 			__func__);
2077 		return -EINVAL;
2078 	}
2079 
2080 	/*
2081 	 * If LRO is enabled, check that the maximum aggregated packet
2082 	 * size is supported by the configured device.
2083 	 */
2084 	if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2085 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2086 			dev->data->dev_conf.rxmode.max_lro_pkt_size =
2087 				dev->data->dev_conf.rxmode.max_rx_pkt_len;
2088 		int ret = eth_dev_check_lro_pkt_size(port_id,
2089 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
2090 				dev->data->dev_conf.rxmode.max_rx_pkt_len,
2091 				dev_info.max_lro_pkt_size);
2092 		if (ret != 0)
2093 			return ret;
2094 	}
2095 
2096 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2097 					      socket_id, &local_conf, mp);
2098 	if (!ret) {
2099 		if (!dev->data->min_rx_buf_size ||
2100 		    dev->data->min_rx_buf_size > mbp_buf_size)
2101 			dev->data->min_rx_buf_size = mbp_buf_size;
2102 	}
2103 
2104 	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2105 		rx_conf, ret);
2106 	return eth_err(port_id, ret);
2107 }
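
/*
 * Usage sketch (illustrative only): the common single-mempool case, letting
 * driver defaults apply by passing a NULL rx_conf. The pool name, sizes and
 * queue index are arbitrary example values.
 *
 *	struct rte_mempool *mb_pool;
 *	int rc;
 *
 *	mb_pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *					  RTE_MBUF_DEFAULT_BUF_SIZE,
 *					  rte_eth_dev_socket_id(port_id));
 *	if (mb_pool == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 *	rc = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *				    rte_eth_dev_socket_id(port_id),
 *				    NULL, mb_pool);
 *	if (rc < 0)
 *		rte_exit(EXIT_FAILURE, "Rx queue setup failed: %d\n", rc);
 */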
2108 
2109 int
2110 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2111 			       uint16_t nb_rx_desc,
2112 			       const struct rte_eth_hairpin_conf *conf)
2113 {
2114 	int ret;
2115 	struct rte_eth_dev *dev;
2116 	struct rte_eth_hairpin_cap cap;
2117 	void **rxq;
2118 	int i;
2119 	int count;
2120 
2121 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2122 
2123 	dev = &rte_eth_devices[port_id];
2124 	if (rx_queue_id >= dev->data->nb_rx_queues) {
2125 		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2126 		return -EINVAL;
2127 	}
2128 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2129 	if (ret != 0)
2130 		return ret;
2131 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2132 				-ENOTSUP);
2133 	/* if nb_rx_desc is zero use max number of desc from the driver. */
2134 	if (nb_rx_desc == 0)
2135 		nb_rx_desc = cap.max_nb_desc;
2136 	if (nb_rx_desc > cap.max_nb_desc) {
2137 		RTE_ETHDEV_LOG(ERR,
2138 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2139 			nb_rx_desc, cap.max_nb_desc);
2140 		return -EINVAL;
2141 	}
2142 	if (conf->peer_count > cap.max_rx_2_tx) {
2143 		RTE_ETHDEV_LOG(ERR,
2144 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2145 			conf->peer_count, cap.max_rx_2_tx);
2146 		return -EINVAL;
2147 	}
2148 	if (conf->peer_count == 0) {
2149 		RTE_ETHDEV_LOG(ERR,
2150 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2151 			conf->peer_count);
2152 		return -EINVAL;
2153 	}
2154 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2155 	     cap.max_nb_queues != UINT16_MAX; i++) {
2156 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2157 			count++;
2158 	}
2159 	if (count > cap.max_nb_queues) {
2160 		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2161 			       cap.max_nb_queues);
2162 		return -EINVAL;
2163 	}
2164 	if (dev->data->dev_started)
2165 		return -EBUSY;
2166 	rxq = dev->data->rx_queues;
2167 	if (rxq[rx_queue_id] != NULL) {
2168 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2169 					-ENOTSUP);
2170 		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2171 		rxq[rx_queue_id] = NULL;
2172 	}
2173 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2174 						      nb_rx_desc, conf);
2175 	if (ret == 0)
2176 		dev->data->rx_queue_state[rx_queue_id] =
2177 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2178 	return eth_err(port_id, ret);
2179 }
2180 
2181 int
2182 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2183 		       uint16_t nb_tx_desc, unsigned int socket_id,
2184 		       const struct rte_eth_txconf *tx_conf)
2185 {
2186 	struct rte_eth_dev *dev;
2187 	struct rte_eth_dev_info dev_info;
2188 	struct rte_eth_txconf local_conf;
2189 	void **txq;
2190 	int ret;
2191 
2192 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2193 
2194 	dev = &rte_eth_devices[port_id];
2195 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2196 		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2197 		return -EINVAL;
2198 	}
2199 
2200 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2201 
2202 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2203 	if (ret != 0)
2204 		return ret;
2205 
2206 	/* Use default specified by driver, if nb_tx_desc is zero */
2207 	if (nb_tx_desc == 0) {
2208 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2209 		/* If driver default is zero, fall back on EAL default */
2210 		if (nb_tx_desc == 0)
2211 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2212 	}
2213 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2214 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2215 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2216 		RTE_ETHDEV_LOG(ERR,
2217 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2218 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2219 			dev_info.tx_desc_lim.nb_min,
2220 			dev_info.tx_desc_lim.nb_align);
2221 		return -EINVAL;
2222 	}
2223 
2224 	if (dev->data->dev_started &&
2225 		!(dev_info.dev_capa &
2226 			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2227 		return -EBUSY;
2228 
2229 	if (dev->data->dev_started &&
2230 		(dev->data->tx_queue_state[tx_queue_id] !=
2231 			RTE_ETH_QUEUE_STATE_STOPPED))
2232 		return -EBUSY;
2233 
2234 	txq = dev->data->tx_queues;
2235 	if (txq[tx_queue_id]) {
2236 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2237 					-ENOTSUP);
2238 		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2239 		txq[tx_queue_id] = NULL;
2240 	}
2241 
2242 	if (tx_conf == NULL)
2243 		tx_conf = &dev_info.default_txconf;
2244 
2245 	local_conf = *tx_conf;
2246 
2247 	/*
2248 	 * If an offload has already been enabled in
2249 	 * rte_eth_dev_configure(), it is enabled on all queues,
2250 	 * so there is no need to enable it for this queue again.
2251 	 * The local_conf.offloads passed to the underlying PMD only
2252 	 * carries the offloads that are enabled on this queue alone
2253 	 * and not on all queues.
2254 	 */
2255 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2256 
2257 	/*
2258 	 * Offloads newly added for this queue are those not enabled in
2259 	 * rte_eth_dev_configure(), and they must be per-queue offloads.
2260 	 * A pure per-port offload cannot be enabled on one queue while
2261 	 * disabled on another, so it cannot be newly added for a single
2262 	 * queue unless it has already been enabled in
2263 	 * rte_eth_dev_configure().
2264 	 */
2265 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2266 	     local_conf.offloads) {
2267 		RTE_ETHDEV_LOG(ERR,
2268 			"Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2269 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2270 			port_id, tx_queue_id, local_conf.offloads,
2271 			dev_info.tx_queue_offload_capa,
2272 			__func__);
2273 		return -EINVAL;
2274 	}
2275 
2276 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2277 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2278 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2279 }
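
/*
 * Usage sketch (illustrative only): a matching Tx queue using driver
 * defaults; per-queue offloads could instead be set in a local copy of
 * dev_info.default_txconf and passed in place of NULL.
 *
 *	int rc;
 *
 *	rc = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *				    rte_eth_dev_socket_id(port_id), NULL);
 *	if (rc < 0)
 *		rte_exit(EXIT_FAILURE, "Tx queue setup failed: %d\n", rc);
 */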
2280 
2281 int
2282 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2283 			       uint16_t nb_tx_desc,
2284 			       const struct rte_eth_hairpin_conf *conf)
2285 {
2286 	struct rte_eth_dev *dev;
2287 	struct rte_eth_hairpin_cap cap;
2288 	void **txq;
2289 	int i;
2290 	int count;
2291 	int ret;
2292 
2293 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2294 	dev = &rte_eth_devices[port_id];
2295 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2296 		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2297 		return -EINVAL;
2298 	}
2299 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2300 	if (ret != 0)
2301 		return ret;
2302 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2303 				-ENOTSUP);
2304 	/* if nb_tx_desc is zero use max number of desc from the driver. */
2305 	if (nb_tx_desc == 0)
2306 		nb_tx_desc = cap.max_nb_desc;
2307 	if (nb_tx_desc > cap.max_nb_desc) {
2308 		RTE_ETHDEV_LOG(ERR,
2309 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2310 			nb_tx_desc, cap.max_nb_desc);
2311 		return -EINVAL;
2312 	}
2313 	if (conf->peer_count > cap.max_tx_2_rx) {
2314 		RTE_ETHDEV_LOG(ERR,
2315 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2316 			conf->peer_count, cap.max_tx_2_rx);
2317 		return -EINVAL;
2318 	}
2319 	if (conf->peer_count == 0) {
2320 		RTE_ETHDEV_LOG(ERR,
2321 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2322 			conf->peer_count);
2323 		return -EINVAL;
2324 	}
2325 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2326 	     cap.max_nb_queues != UINT16_MAX; i++) {
2327 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2328 			count++;
2329 	}
2330 	if (count > cap.max_nb_queues) {
2331 		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2332 			       cap.max_nb_queues);
2333 		return -EINVAL;
2334 	}
2335 	if (dev->data->dev_started)
2336 		return -EBUSY;
2337 	txq = dev->data->tx_queues;
2338 	if (txq[tx_queue_id] != NULL) {
2339 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2340 					-ENOTSUP);
2341 		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2342 		txq[tx_queue_id] = NULL;
2343 	}
2344 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2345 		(dev, tx_queue_id, nb_tx_desc, conf);
2346 	if (ret == 0)
2347 		dev->data->tx_queue_state[tx_queue_id] =
2348 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2349 	return eth_err(port_id, ret);
2350 }
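
/*
 * Usage sketch (illustrative only): a single-port hairpin pair where Rx
 * hairpin queue "hp_q" peers Tx hairpin queue "hp_q" on the same port.
 * "hp_q" is a hypothetical queue index that must be included in the queue
 * counts passed to rte_eth_dev_configure() and left unused by the regular
 * queue setup calls.
 *
 *	struct rte_eth_hairpin_conf hp_conf = { .peer_count = 1 };
 *	int rc;
 *
 *	hp_conf.peers[0].port = port_id;
 *	hp_conf.peers[0].queue = hp_q;
 *	rc = rte_eth_rx_hairpin_queue_setup(port_id, hp_q, 0, &hp_conf);
 *	if (rc == 0)
 *		rc = rte_eth_tx_hairpin_queue_setup(port_id, hp_q, 0,
 *						    &hp_conf);
 */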
2351 
2352 int
2353 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2354 {
2355 	struct rte_eth_dev *dev;
2356 	int ret;
2357 
2358 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2359 	dev = &rte_eth_devices[tx_port];
2360 	if (dev->data->dev_started == 0) {
2361 		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2362 		return -EBUSY;
2363 	}
2364 
2365 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2366 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2367 	if (ret != 0)
2368 		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2369 			       " to Rx %d (%d - all ports)\n",
2370 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2371 
2372 	return ret;
2373 }
2374 
2375 int
2376 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2377 {
2378 	struct rte_eth_dev *dev;
2379 	int ret;
2380 
2381 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2382 	dev = &rte_eth_devices[tx_port];
2383 	if (dev->data->dev_started == 0) {
2384 		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2385 		return -EBUSY;
2386 	}
2387 
2388 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2389 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2390 	if (ret != 0)
2391 		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2392 			       " from Rx %d (%d - all ports)\n",
2393 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2394 
2395 	return ret;
2396 }
2397 
2398 int
2399 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2400 			       size_t len, uint32_t direction)
2401 {
2402 	struct rte_eth_dev *dev;
2403 	int ret;
2404 
2405 	if (peer_ports == NULL || len == 0)
2406 		return -EINVAL;
2407 
2408 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2409 	dev = &rte_eth_devices[port_id];
2410 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2411 				-ENOTSUP);
2412 
2413 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2414 						      len, direction);
2415 	if (ret < 0)
2416 		RTE_ETHDEV_LOG(ERR, "Failed to get hairpin peer %s ports of port %d\n",
2417 			       direction ? "Rx" : "Tx", port_id);
2418 
2419 	return ret;
2420 }
2421 
2422 void
2423 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2424 		void *userdata __rte_unused)
2425 {
2426 	rte_pktmbuf_free_bulk(pkts, unsent);
2427 }
2428 
2429 void
2430 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2431 		void *userdata)
2432 {
2433 	uint64_t *count = userdata;
2434 
2435 	rte_pktmbuf_free_bulk(pkts, unsent);
2436 	*count += unsent;
2437 }
2438 
2439 int
2440 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2441 		buffer_tx_error_fn cbfn, void *userdata)
2442 {
2443 	buffer->error_callback = cbfn;
2444 	buffer->error_userdata = userdata;
2445 	return 0;
2446 }
2447 
2448 int
2449 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2450 {
2451 	int ret = 0;
2452 
2453 	if (buffer == NULL)
2454 		return -EINVAL;
2455 
2456 	buffer->size = size;
2457 	if (buffer->error_callback == NULL) {
2458 		ret = rte_eth_tx_buffer_set_err_callback(
2459 			buffer, rte_eth_tx_buffer_drop_callback, NULL);
2460 	}
2461 
2462 	return ret;
2463 }
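
/*
 * Usage sketch (illustrative only): a per-queue Tx buffer that counts
 * dropped packets through the callback above. "BURST" is a hypothetical
 * burst size (e.g. 32) and "mbuf" a packet produced by the datapath.
 *
 *	struct rte_eth_dev_tx_buffer *buf;
 *	static uint64_t dropped;
 *
 *	buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(BURST),
 *				 0, rte_eth_dev_socket_id(port_id));
 *	if (buf == NULL || rte_eth_tx_buffer_init(buf, BURST) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot set up Tx buffer\n");
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &dropped);
 *
 * In the datapath, packets are then queued and the buffer drained with:
 *
 *	rte_eth_tx_buffer(port_id, 0, buf, mbuf);
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);
 */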
2464 
2465 int
2466 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2467 {
2468 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2469 	int ret;
2470 
2471 	/* Validate Input Data. Bail if not valid or not supported. */
2472 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2473 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2474 
2475 	/* Call driver to free pending mbufs. */
2476 	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2477 					       free_cnt);
2478 	return eth_err(port_id, ret);
2479 }
2480 
2481 int
2482 rte_eth_promiscuous_enable(uint16_t port_id)
2483 {
2484 	struct rte_eth_dev *dev;
2485 	int diag = 0;
2486 
2487 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2488 	dev = &rte_eth_devices[port_id];
2489 
2490 	if (dev->data->promiscuous == 1)
2491 		return 0;
2492 
2493 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2494 
2495 	diag = (*dev->dev_ops->promiscuous_enable)(dev);
2496 	dev->data->promiscuous = (diag == 0) ? 1 : 0;
2497 
2498 	return eth_err(port_id, diag);
2499 }
2500 
2501 int
2502 rte_eth_promiscuous_disable(uint16_t port_id)
2503 {
2504 	struct rte_eth_dev *dev;
2505 	int diag = 0;
2506 
2507 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2508 	dev = &rte_eth_devices[port_id];
2509 
2510 	if (dev->data->promiscuous == 0)
2511 		return 0;
2512 
2513 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2514 
2515 	dev->data->promiscuous = 0;
2516 	diag = (*dev->dev_ops->promiscuous_disable)(dev);
2517 	if (diag != 0)
2518 		dev->data->promiscuous = 1;
2519 
2520 	return eth_err(port_id, diag);
2521 }
2522 
2523 int
2524 rte_eth_promiscuous_get(uint16_t port_id)
2525 {
2526 	struct rte_eth_dev *dev;
2527 
2528 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2529 
2530 	dev = &rte_eth_devices[port_id];
2531 	return dev->data->promiscuous;
2532 }
2533 
2534 int
2535 rte_eth_allmulticast_enable(uint16_t port_id)
2536 {
2537 	struct rte_eth_dev *dev;
2538 	int diag;
2539 
2540 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2541 	dev = &rte_eth_devices[port_id];
2542 
2543 	if (dev->data->all_multicast == 1)
2544 		return 0;
2545 
2546 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2547 	diag = (*dev->dev_ops->allmulticast_enable)(dev);
2548 	dev->data->all_multicast = (diag == 0) ? 1 : 0;
2549 
2550 	return eth_err(port_id, diag);
2551 }
2552 
2553 int
2554 rte_eth_allmulticast_disable(uint16_t port_id)
2555 {
2556 	struct rte_eth_dev *dev;
2557 	int diag;
2558 
2559 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2560 	dev = &rte_eth_devices[port_id];
2561 
2562 	if (dev->data->all_multicast == 0)
2563 		return 0;
2564 
2565 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2566 	dev->data->all_multicast = 0;
2567 	diag = (*dev->dev_ops->allmulticast_disable)(dev);
2568 	if (diag != 0)
2569 		dev->data->all_multicast = 1;
2570 
2571 	return eth_err(port_id, diag);
2572 }
2573 
2574 int
2575 rte_eth_allmulticast_get(uint16_t port_id)
2576 {
2577 	struct rte_eth_dev *dev;
2578 
2579 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2580 
2581 	dev = &rte_eth_devices[port_id];
2582 	return dev->data->all_multicast;
2583 }
2584 
2585 int
2586 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2587 {
2588 	struct rte_eth_dev *dev;
2589 
2590 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2591 	dev = &rte_eth_devices[port_id];
2592 
2593 	if (dev->data->dev_conf.intr_conf.lsc &&
2594 	    dev->data->dev_started)
2595 		rte_eth_linkstatus_get(dev, eth_link);
2596 	else {
2597 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2598 		(*dev->dev_ops->link_update)(dev, 1);
2599 		*eth_link = dev->data->dev_link;
2600 	}
2601 
2602 	return 0;
2603 }
2604 
2605 int
2606 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2607 {
2608 	struct rte_eth_dev *dev;
2609 
2610 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2611 	dev = &rte_eth_devices[port_id];
2612 
2613 	if (dev->data->dev_conf.intr_conf.lsc &&
2614 	    dev->data->dev_started)
2615 		rte_eth_linkstatus_get(dev, eth_link);
2616 	else {
2617 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2618 		(*dev->dev_ops->link_update)(dev, 0);
2619 		*eth_link = dev->data->dev_link;
2620 	}
2621 
2622 	return 0;
2623 }
2624 
2625 const char *
2626 rte_eth_link_speed_to_str(uint32_t link_speed)
2627 {
2628 	switch (link_speed) {
2629 	case ETH_SPEED_NUM_NONE: return "None";
2630 	case ETH_SPEED_NUM_10M:  return "10 Mbps";
2631 	case ETH_SPEED_NUM_100M: return "100 Mbps";
2632 	case ETH_SPEED_NUM_1G:   return "1 Gbps";
2633 	case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2634 	case ETH_SPEED_NUM_5G:   return "5 Gbps";
2635 	case ETH_SPEED_NUM_10G:  return "10 Gbps";
2636 	case ETH_SPEED_NUM_20G:  return "20 Gbps";
2637 	case ETH_SPEED_NUM_25G:  return "25 Gbps";
2638 	case ETH_SPEED_NUM_40G:  return "40 Gbps";
2639 	case ETH_SPEED_NUM_50G:  return "50 Gbps";
2640 	case ETH_SPEED_NUM_56G:  return "56 Gbps";
2641 	case ETH_SPEED_NUM_100G: return "100 Gbps";
2642 	case ETH_SPEED_NUM_200G: return "200 Gbps";
2643 	case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2644 	default: return "Invalid";
2645 	}
2646 }
2647 
2648 int
2649 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2650 {
2651 	if (eth_link->link_status == ETH_LINK_DOWN)
2652 		return snprintf(str, len, "Link down");
2653 	else
2654 		return snprintf(str, len, "Link up at %s %s %s",
2655 			rte_eth_link_speed_to_str(eth_link->link_speed),
2656 			(eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2657 			"FDX" : "HDX",
2658 			(eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2659 			"Autoneg" : "Fixed");
2660 }
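
/*
 * Usage sketch (illustrative only): printing the current link status without
 * waiting for the link to resolve; any buffer large enough for the formatted
 * text can be used.
 *
 *	struct rte_eth_link link;
 *	char text[60];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(text, sizeof(text), &link);
 *		printf("Port %u: %s\n", port_id, text);
 *	}
 */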
2661 
2662 int
2663 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2664 {
2665 	struct rte_eth_dev *dev;
2666 
2667 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2668 
2669 	dev = &rte_eth_devices[port_id];
2670 	memset(stats, 0, sizeof(*stats));
2671 
2672 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2673 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2674 	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2675 }
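
/*
 * Usage sketch (illustrative only): reading the basic per-port counters.
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx %"PRIu64" pkts, tx %"PRIu64" pkts, missed %"PRIu64"\n",
 *		       st.ipackets, st.opackets, st.imissed);
 */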
2676 
2677 int
2678 rte_eth_stats_reset(uint16_t port_id)
2679 {
2680 	struct rte_eth_dev *dev;
2681 	int ret;
2682 
2683 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2684 	dev = &rte_eth_devices[port_id];
2685 
2686 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2687 	ret = (*dev->dev_ops->stats_reset)(dev);
2688 	if (ret != 0)
2689 		return eth_err(port_id, ret);
2690 
2691 	dev->data->rx_mbuf_alloc_failed = 0;
2692 
2693 	return 0;
2694 }
2695 
2696 static inline int
2697 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2698 {
2699 	uint16_t nb_rxqs, nb_txqs;
2700 	int count;
2701 
2702 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2703 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2704 
2705 	count = RTE_NB_STATS;
2706 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2707 		count += nb_rxqs * RTE_NB_RXQ_STATS;
2708 		count += nb_txqs * RTE_NB_TXQ_STATS;
2709 	}
2710 
2711 	return count;
2712 }
2713 
2714 static int
2715 eth_dev_get_xstats_count(uint16_t port_id)
2716 {
2717 	struct rte_eth_dev *dev;
2718 	int count;
2719 
2720 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2721 	dev = &rte_eth_devices[port_id];
2722 	if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2723 		count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2724 				NULL, 0);
2725 		if (count < 0)
2726 			return eth_err(port_id, count);
2727 	}
2728 	if (dev->dev_ops->xstats_get_names != NULL) {
2729 		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2730 		if (count < 0)
2731 			return eth_err(port_id, count);
2732 	} else
2733 		count = 0;
2734 
2735 
2736 	count += eth_dev_get_xstats_basic_count(dev);
2737 
2738 	return count;
2739 }
2740 
2741 int
2742 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2743 		uint64_t *id)
2744 {
2745 	int cnt_xstats, idx_xstat;
2746 
2747 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2748 
2749 	if (!id) {
2750 		RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2751 		return -ENOMEM;
2752 	}
2753 
2754 	if (!xstat_name) {
2755 		RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2756 		return -ENOMEM;
2757 	}
2758 
2759 	/* Get count */
2760 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2761 	if (cnt_xstats < 0) {
2762 		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2763 		return -ENODEV;
2764 	}
2765 
2766 	/* Get id-name lookup table */
2767 	struct rte_eth_xstat_name xstats_names[cnt_xstats];
2768 
2769 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2770 			port_id, xstats_names, cnt_xstats, NULL)) {
2771 		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2772 		return -1;
2773 	}
2774 
2775 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2776 		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2777 			*id = idx_xstat;
2778 			return 0;
2779 		}
2780 	}
2781 
2782 	return -EINVAL;
2783 }
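
/*
 * Usage sketch (illustrative only): resolving one counter by name and then
 * reading it by id; "rx_good_packets" is one of the basic statistics names
 * registered in eth_dev_stats_strings.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *					  &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %"PRIu64"\n", value);
 */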
2784 
2785 /* retrieve basic stats names */
2786 static int
2787 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2788 	struct rte_eth_xstat_name *xstats_names)
2789 {
2790 	int cnt_used_entries = 0;
2791 	uint32_t idx, id_queue;
2792 	uint16_t num_q;
2793 
2794 	for (idx = 0; idx < RTE_NB_STATS; idx++) {
2795 		strlcpy(xstats_names[cnt_used_entries].name,
2796 			eth_dev_stats_strings[idx].name,
2797 			sizeof(xstats_names[0].name));
2798 		cnt_used_entries++;
2799 	}
2800 
2801 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2802 		return cnt_used_entries;
2803 
2804 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2805 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2806 		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2807 			snprintf(xstats_names[cnt_used_entries].name,
2808 				sizeof(xstats_names[0].name),
2809 				"rx_q%u_%s",
2810 				id_queue, eth_dev_rxq_stats_strings[idx].name);
2811 			cnt_used_entries++;
2812 		}
2813 
2814 	}
2815 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2816 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2817 		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2818 			snprintf(xstats_names[cnt_used_entries].name,
2819 				sizeof(xstats_names[0].name),
2820 				"tx_q%u_%s",
2821 				id_queue, eth_dev_txq_stats_strings[idx].name);
2822 			cnt_used_entries++;
2823 		}
2824 	}
2825 	return cnt_used_entries;
2826 }
2827 
2828 /* retrieve ethdev extended statistics names */
2829 int
2830 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2831 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
2832 	uint64_t *ids)
2833 {
2834 	struct rte_eth_xstat_name *xstats_names_copy;
2835 	unsigned int no_basic_stat_requested = 1;
2836 	unsigned int no_ext_stat_requested = 1;
2837 	unsigned int expected_entries;
2838 	unsigned int basic_count;
2839 	struct rte_eth_dev *dev;
2840 	unsigned int i;
2841 	int ret;
2842 
2843 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2844 	dev = &rte_eth_devices[port_id];
2845 
2846 	basic_count = eth_dev_get_xstats_basic_count(dev);
2847 	ret = eth_dev_get_xstats_count(port_id);
2848 	if (ret < 0)
2849 		return ret;
2850 	expected_entries = (unsigned int)ret;
2851 
2852 	/* Return max number of stats if no ids given */
2853 	if (!ids) {
2854 		if (!xstats_names)
2855 			return expected_entries;
2856 		else if (xstats_names && size < expected_entries)
2857 			return expected_entries;
2858 	}
2859 
2860 	if (ids && !xstats_names)
2861 		return -EINVAL;
2862 
2863 	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2864 		uint64_t ids_copy[size];
2865 
2866 		for (i = 0; i < size; i++) {
2867 			if (ids[i] < basic_count) {
2868 				no_basic_stat_requested = 0;
2869 				break;
2870 			}
2871 
2872 			/*
2873 			 * Convert ids to xstats ids that PMD knows.
2874 			 * ids known by user are basic + extended stats.
2875 			 */
2876 			ids_copy[i] = ids[i] - basic_count;
2877 		}
2878 
2879 		if (no_basic_stat_requested)
2880 			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2881 					xstats_names, ids_copy, size);
2882 	}
2883 
2884 	/* Retrieve all stats */
2885 	if (!ids) {
2886 		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2887 				expected_entries);
2888 		if (num_stats < 0 || num_stats > (int)expected_entries)
2889 			return num_stats;
2890 		else
2891 			return expected_entries;
2892 	}
2893 
2894 	xstats_names_copy = calloc(expected_entries,
2895 		sizeof(struct rte_eth_xstat_name));
2896 
2897 	if (!xstats_names_copy) {
2898 		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2899 		return -ENOMEM;
2900 	}
2901 
2902 	if (ids) {
2903 		for (i = 0; i < size; i++) {
2904 			if (ids[i] >= basic_count) {
2905 				no_ext_stat_requested = 0;
2906 				break;
2907 			}
2908 		}
2909 	}
2910 
2911 	/* Fill xstats_names_copy structure */
2912 	if (ids && no_ext_stat_requested) {
2913 		eth_basic_stats_get_names(dev, xstats_names_copy);
2914 	} else {
2915 		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2916 			expected_entries);
2917 		if (ret < 0) {
2918 			free(xstats_names_copy);
2919 			return ret;
2920 		}
2921 	}
2922 
2923 	/* Filter stats */
2924 	for (i = 0; i < size; i++) {
2925 		if (ids[i] >= expected_entries) {
2926 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2927 			free(xstats_names_copy);
2928 			return -1;
2929 		}
2930 		xstats_names[i] = xstats_names_copy[ids[i]];
2931 	}
2932 
2933 	free(xstats_names_copy);
2934 	return size;
2935 }
2936 
2937 int
2938 rte_eth_xstats_get_names(uint16_t port_id,
2939 	struct rte_eth_xstat_name *xstats_names,
2940 	unsigned int size)
2941 {
2942 	struct rte_eth_dev *dev;
2943 	int cnt_used_entries;
2944 	int cnt_expected_entries;
2945 	int cnt_driver_entries;
2946 
2947 	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2948 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
2949 			(int)size < cnt_expected_entries)
2950 		return cnt_expected_entries;
2951 
2952 	/* port_id checked in eth_dev_get_xstats_count() */
2953 	dev = &rte_eth_devices[port_id];
2954 
2955 	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2956 
2957 	if (dev->dev_ops->xstats_get_names != NULL) {
2958 		/* If there are any driver-specific xstats, append them
2959 		 * to end of list.
2960 		 */
2961 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2962 			dev,
2963 			xstats_names + cnt_used_entries,
2964 			size - cnt_used_entries);
2965 		if (cnt_driver_entries < 0)
2966 			return eth_err(port_id, cnt_driver_entries);
2967 		cnt_used_entries += cnt_driver_entries;
2968 	}
2969 
2970 	return cnt_used_entries;
2971 }
2972 
2973 
2974 static int
2975 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2976 {
2977 	struct rte_eth_dev *dev;
2978 	struct rte_eth_stats eth_stats;
2979 	unsigned int count = 0, i, q;
2980 	uint64_t val, *stats_ptr;
2981 	uint16_t nb_rxqs, nb_txqs;
2982 	int ret;
2983 
2984 	ret = rte_eth_stats_get(port_id, &eth_stats);
2985 	if (ret < 0)
2986 		return ret;
2987 
2988 	dev = &rte_eth_devices[port_id];
2989 
2990 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2991 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2992 
2993 	/* global stats */
2994 	for (i = 0; i < RTE_NB_STATS; i++) {
2995 		stats_ptr = RTE_PTR_ADD(&eth_stats,
2996 					eth_dev_stats_strings[i].offset);
2997 		val = *stats_ptr;
2998 		xstats[count++].value = val;
2999 	}
3000 
3001 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3002 		return count;
3003 
3004 	/* per-rxq stats */
3005 	for (q = 0; q < nb_rxqs; q++) {
3006 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3007 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3008 					eth_dev_rxq_stats_strings[i].offset +
3009 					q * sizeof(uint64_t));
3010 			val = *stats_ptr;
3011 			xstats[count++].value = val;
3012 		}
3013 	}
3014 
3015 	/* per-txq stats */
3016 	for (q = 0; q < nb_txqs; q++) {
3017 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3018 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3019 					eth_dev_txq_stats_strings[i].offset +
3020 					q * sizeof(uint64_t));
3021 			val = *stats_ptr;
3022 			xstats[count++].value = val;
3023 		}
3024 	}
3025 	return count;
3026 }
3027 
3028 /* retrieve ethdev extended statistics */
3029 int
3030 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3031 			 uint64_t *values, unsigned int size)
3032 {
3033 	unsigned int no_basic_stat_requested = 1;
3034 	unsigned int no_ext_stat_requested = 1;
3035 	unsigned int num_xstats_filled;
3036 	unsigned int basic_count;
3037 	uint16_t expected_entries;
3038 	struct rte_eth_dev *dev;
3039 	unsigned int i;
3040 	int ret;
3041 
3042 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3043 	ret = eth_dev_get_xstats_count(port_id);
3044 	if (ret < 0)
3045 		return ret;
3046 	expected_entries = (uint16_t)ret;
3047 	struct rte_eth_xstat xstats[expected_entries];
3048 	dev = &rte_eth_devices[port_id];
3049 	basic_count = eth_dev_get_xstats_basic_count(dev);
3050 
3051 	/* Return max number of stats if no ids given */
3052 	if (!ids) {
3053 		if (!values)
3054 			return expected_entries;
3055 		else if (values && size < expected_entries)
3056 			return expected_entries;
3057 	}
3058 
3059 	if (ids && !values)
3060 		return -EINVAL;
3061 
3062 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3063 		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
3064 		uint64_t ids_copy[size];
3065 
3066 		for (i = 0; i < size; i++) {
3067 			if (ids[i] < basic_count) {
3068 				no_basic_stat_requested = 0;
3069 				break;
3070 			}
3071 
3072 			/*
3073 			 * Convert ids to xstats ids that PMD knows.
3074 			 * ids known by user are basic + extended stats.
3075 			 */
3076 			ids_copy[i] = ids[i] - basic_count;
3077 		}
3078 
3079 		if (no_basic_stat_requested)
3080 			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3081 					values, size);
3082 	}
3083 
3084 	if (ids) {
3085 		for (i = 0; i < size; i++) {
3086 			if (ids[i] >= basic_count) {
3087 				no_ext_stat_requested = 0;
3088 				break;
3089 			}
3090 		}
3091 	}
3092 
3093 	/* Fill the xstats structure */
3094 	if (ids && no_ext_stat_requested)
3095 		ret = eth_basic_stats_get(port_id, xstats);
3096 	else
3097 		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3098 
3099 	if (ret < 0)
3100 		return ret;
3101 	num_xstats_filled = (unsigned int)ret;
3102 
3103 	/* Return all stats */
3104 	if (!ids) {
3105 		for (i = 0; i < num_xstats_filled; i++)
3106 			values[i] = xstats[i].value;
3107 		return expected_entries;
3108 	}
3109 
3110 	/* Filter stats */
3111 	for (i = 0; i < size; i++) {
3112 		if (ids[i] >= expected_entries) {
3113 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3114 			return -1;
3115 		}
3116 		values[i] = xstats[ids[i]].value;
3117 	}
3118 	return size;
3119 }
3120 
3121 int
3122 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3123 	unsigned int n)
3124 {
3125 	struct rte_eth_dev *dev;
3126 	unsigned int count = 0, i;
3127 	signed int xcount = 0;
3128 	uint16_t nb_rxqs, nb_txqs;
3129 	int ret;
3130 
3131 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3132 
3133 	dev = &rte_eth_devices[port_id];
3134 
3135 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3136 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3137 
3138 	/* Return generic statistics */
3139 	count = RTE_NB_STATS;
3140 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3141 		count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3142 
3143 	/* implemented by the driver */
3144 	if (dev->dev_ops->xstats_get != NULL) {
3145 		/* Retrieve the xstats from the driver at the end of the
3146 		 * xstats struct.
3147 		 */
3148 		xcount = (*dev->dev_ops->xstats_get)(dev,
3149 				     xstats ? xstats + count : NULL,
3150 				     (n > count) ? n - count : 0);
3151 
3152 		if (xcount < 0)
3153 			return eth_err(port_id, xcount);
3154 	}
3155 
3156 	if (n < count + xcount || xstats == NULL)
3157 		return count + xcount;
3158 
3159 	/* now fill the xstats structure */
3160 	ret = eth_basic_stats_get(port_id, xstats);
3161 	if (ret < 0)
3162 		return ret;
3163 	count = ret;
3164 
3165 	for (i = 0; i < count; i++)
3166 		xstats[i].id = i;
3167 	/* add an offset to driver-specific stats */
3168 	for ( ; i < count + xcount; i++)
3169 		xstats[i].id += count;
3170 
3171 	return count + xcount;
3172 }
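
/*
 * Usage sketch (illustrative only): the two-call pattern for dumping every
 * extended statistic of a port.
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *		struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *		int i, got;
 *
 *		if (vals != NULL && names != NULL &&
 *		    rte_eth_xstats_get_names(port_id, names, n) > 0) {
 *			got = rte_eth_xstats_get(port_id, vals, n);
 *			for (i = 0; i < got; i++)
 *				printf("%s: %"PRIu64"\n",
 *				       names[vals[i].id].name, vals[i].value);
 *		}
 *		free(vals);
 *		free(names);
 *	}
 */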
3173 
3174 /* reset ethdev extended statistics */
3175 int
3176 rte_eth_xstats_reset(uint16_t port_id)
3177 {
3178 	struct rte_eth_dev *dev;
3179 
3180 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3181 	dev = &rte_eth_devices[port_id];
3182 
3183 	/* implemented by the driver */
3184 	if (dev->dev_ops->xstats_reset != NULL)
3185 		return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3186 
3187 	/* fallback to default */
3188 	return rte_eth_stats_reset(port_id);
3189 }
3190 
3191 static int
3192 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3193 		uint8_t stat_idx, uint8_t is_rx)
3194 {
3195 	struct rte_eth_dev *dev;
3196 
3197 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3198 
3199 	dev = &rte_eth_devices[port_id];
3200 
3201 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3202 
3203 	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3204 		return -EINVAL;
3205 
3206 	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3207 		return -EINVAL;
3208 
3209 	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3210 		return -EINVAL;
3211 
3212 	return (*dev->dev_ops->queue_stats_mapping_set)
3213 			(dev, queue_id, stat_idx, is_rx);
3214 }
3215 
3216 
3217 int
3218 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3219 		uint8_t stat_idx)
3220 {
3221 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3222 						tx_queue_id,
3223 						stat_idx, STAT_QMAP_TX));
3224 }
3225 
3226 
3227 int
3228 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3229 		uint8_t stat_idx)
3230 {
3231 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3232 						rx_queue_id,
3233 						stat_idx, STAT_QMAP_RX));
3234 }
3235 
3236 int
3237 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3238 {
3239 	struct rte_eth_dev *dev;
3240 
3241 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3242 	dev = &rte_eth_devices[port_id];
3243 
3244 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3245 	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3246 							fw_version, fw_size));
3247 }
3248 
3249 int
3250 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3251 {
3252 	struct rte_eth_dev *dev;
3253 	const struct rte_eth_desc_lim lim = {
3254 		.nb_max = UINT16_MAX,
3255 		.nb_min = 0,
3256 		.nb_align = 1,
3257 		.nb_seg_max = UINT16_MAX,
3258 		.nb_mtu_seg_max = UINT16_MAX,
3259 	};
3260 	int diag;
3261 
3262 	/*
3263 	 * Init dev_info before the port_id check since the caller may not
3264 	 * check the return status and cannot know whether the get succeeded.
3265 	 */
3266 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3267 	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3268 
3269 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3270 	dev = &rte_eth_devices[port_id];
3271 
3272 	dev_info->rx_desc_lim = lim;
3273 	dev_info->tx_desc_lim = lim;
3274 	dev_info->device = dev->device;
3275 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3276 	dev_info->max_mtu = UINT16_MAX;
3277 
3278 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3279 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3280 	if (diag != 0) {
3281 		/* Cleanup already filled in device information */
3282 		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3283 		return eth_err(port_id, diag);
3284 	}
3285 
3286 	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3287 	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3288 			RTE_MAX_QUEUES_PER_PORT);
3289 	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3290 			RTE_MAX_QUEUES_PER_PORT);
3291 
3292 	dev_info->driver_name = dev->device->driver->name;
3293 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3294 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3295 
3296 	dev_info->dev_flags = &dev->data->dev_flags;
3297 
3298 	return 0;
3299 }
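
/*
 * Usage sketch (illustrative only): clamping requested queue counts to the
 * limits reported above before configuring the port. "nb_rxq" and "nb_txq"
 * are hypothetical application parameters.
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_conf port_conf = { 0 };
 *	int rc;
 *
 *	rc = rte_eth_dev_info_get(port_id, &dev_info);
 *	if (rc == 0) {
 *		nb_rxq = RTE_MIN(nb_rxq, dev_info.max_rx_queues);
 *		nb_txq = RTE_MIN(nb_txq, dev_info.max_tx_queues);
 *		rc = rte_eth_dev_configure(port_id, nb_rxq, nb_txq,
 *					   &port_conf);
 *	}
 */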
3300 
3301 int
3302 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3303 				 uint32_t *ptypes, int num)
3304 {
3305 	int i, j;
3306 	struct rte_eth_dev *dev;
3307 	const uint32_t *all_ptypes;
3308 
3309 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3310 	dev = &rte_eth_devices[port_id];
3311 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3312 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3313 
3314 	if (!all_ptypes)
3315 		return 0;
3316 
3317 	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3318 		if (all_ptypes[i] & ptype_mask) {
3319 			if (j < num)
3320 				ptypes[j] = all_ptypes[i];
3321 			j++;
3322 		}
3323 
3324 	return j;
3325 }
3326 
3327 int
3328 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3329 				 uint32_t *set_ptypes, unsigned int num)
3330 {
3331 	const uint32_t valid_ptype_masks[] = {
3332 		RTE_PTYPE_L2_MASK,
3333 		RTE_PTYPE_L3_MASK,
3334 		RTE_PTYPE_L4_MASK,
3335 		RTE_PTYPE_TUNNEL_MASK,
3336 		RTE_PTYPE_INNER_L2_MASK,
3337 		RTE_PTYPE_INNER_L3_MASK,
3338 		RTE_PTYPE_INNER_L4_MASK,
3339 	};
3340 	const uint32_t *all_ptypes;
3341 	struct rte_eth_dev *dev;
3342 	uint32_t unused_mask;
3343 	unsigned int i, j;
3344 	int ret;
3345 
3346 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3347 	dev = &rte_eth_devices[port_id];
3348 
3349 	if (num > 0 && set_ptypes == NULL)
3350 		return -EINVAL;
3351 
3352 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3353 			*dev->dev_ops->dev_ptypes_set == NULL) {
3354 		ret = 0;
3355 		goto ptype_unknown;
3356 	}
3357 
3358 	if (ptype_mask == 0) {
3359 		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3360 				ptype_mask);
3361 		goto ptype_unknown;
3362 	}
3363 
3364 	unused_mask = ptype_mask;
3365 	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3366 		uint32_t mask = ptype_mask & valid_ptype_masks[i];
3367 		if (mask && mask != valid_ptype_masks[i]) {
3368 			ret = -EINVAL;
3369 			goto ptype_unknown;
3370 		}
3371 		unused_mask &= ~valid_ptype_masks[i];
3372 	}
3373 
3374 	if (unused_mask) {
3375 		ret = -EINVAL;
3376 		goto ptype_unknown;
3377 	}
3378 
3379 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3380 	if (all_ptypes == NULL) {
3381 		ret = 0;
3382 		goto ptype_unknown;
3383 	}
3384 
3385 	/*
3386 	 * Accommodate as many set_ptypes as possible. If the supplied
3387 	 * set_ptypes array is insufficient, fill it partially.
3388 	 */
3389 	for (i = 0, j = 0; set_ptypes != NULL &&
3390 				(all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3391 		if (ptype_mask & all_ptypes[i]) {
3392 			if (j < num - 1) {
3393 				set_ptypes[j] = all_ptypes[i];
3394 				j++;
3395 				continue;
3396 			}
3397 			break;
3398 		}
3399 	}
3400 
3401 	if (set_ptypes != NULL && j < num)
3402 		set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3403 
3404 	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3405 
3406 ptype_unknown:
3407 	if (num > 0)
3408 		set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3409 
3410 	return ret;
3411 }
3412 
3413 int
3414 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3415 {
3416 	struct rte_eth_dev *dev;
3417 
3418 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3419 	dev = &rte_eth_devices[port_id];
3420 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3421 
3422 	return 0;
3423 }
3424 
3425 int
3426 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3427 {
3428 	struct rte_eth_dev *dev;
3429 
3430 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3431 
3432 	dev = &rte_eth_devices[port_id];
3433 	*mtu = dev->data->mtu;
3434 	return 0;
3435 }
3436 
3437 int
3438 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3439 {
3440 	int ret;
3441 	struct rte_eth_dev_info dev_info;
3442 	struct rte_eth_dev *dev;
3443 
3444 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3445 	dev = &rte_eth_devices[port_id];
3446 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3447 
3448 	/*
3449 	 * Check if the device supports dev_infos_get; if it does not,
3450 	 * skip the min_mtu/max_mtu validation here, as it requires values
3451 	 * that are populated by rte_eth_dev_info_get(), which in turn
3452 	 * relies on dev->dev_ops->dev_infos_get.
3453 	 */
3454 	if (*dev->dev_ops->dev_infos_get != NULL) {
3455 		ret = rte_eth_dev_info_get(port_id, &dev_info);
3456 		if (ret != 0)
3457 			return ret;
3458 
3459 		if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3460 			return -EINVAL;
3461 	}
3462 
3463 	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3464 	if (!ret)
3465 		dev->data->mtu = mtu;
3466 
3467 	return eth_err(port_id, ret);
3468 }
3469 
3470 int
3471 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3472 {
3473 	struct rte_eth_dev *dev;
3474 	int ret;
3475 
3476 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3477 	dev = &rte_eth_devices[port_id];
3478 	if (!(dev->data->dev_conf.rxmode.offloads &
3479 	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
3480 		RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3481 			port_id);
3482 		return -ENOSYS;
3483 	}
3484 
3485 	if (vlan_id > 4095) {
3486 		RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3487 			port_id, vlan_id);
3488 		return -EINVAL;
3489 	}
3490 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3491 
3492 	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3493 	if (ret == 0) {
3494 		struct rte_vlan_filter_conf *vfc;
3495 		int vidx;
3496 		int vbit;
3497 
3498 		vfc = &dev->data->vlan_filter_conf;
3499 		vidx = vlan_id / 64;
3500 		vbit = vlan_id % 64;
3501 
3502 		if (on)
3503 			vfc->ids[vidx] |= UINT64_C(1) << vbit;
3504 		else
3505 			vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3506 	}
3507 
3508 	return eth_err(port_id, ret);
3509 }
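
/*
 * Usage sketch (illustrative only): VLAN filtering must be requested as an
 * Rx offload when the port is configured; afterwards individual VLAN IDs
 * (here 100, an arbitrary example) can be added to the filter. "nb_rxq" and
 * "nb_txq" are hypothetical application parameters.
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *	int rc;
 *
 *	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *	rc = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *	if (rc == 0)
 *		rc = rte_eth_dev_vlan_filter(port_id, 100, 1);
 */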
3510 
3511 int
3512 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3513 				    int on)
3514 {
3515 	struct rte_eth_dev *dev;
3516 
3517 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3518 	dev = &rte_eth_devices[port_id];
3519 	if (rx_queue_id >= dev->data->nb_rx_queues) {
3520 		RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3521 		return -EINVAL;
3522 	}
3523 
3524 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3525 	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3526 
3527 	return 0;
3528 }
3529 
3530 int
3531 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3532 				enum rte_vlan_type vlan_type,
3533 				uint16_t tpid)
3534 {
3535 	struct rte_eth_dev *dev;
3536 
3537 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3538 	dev = &rte_eth_devices[port_id];
3539 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3540 
3541 	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3542 							       tpid));
3543 }
3544 
3545 int
3546 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3547 {
3548 	struct rte_eth_dev_info dev_info;
3549 	struct rte_eth_dev *dev;
3550 	int ret = 0;
3551 	int mask = 0;
3552 	int cur, org = 0;
3553 	uint64_t orig_offloads;
3554 	uint64_t dev_offloads;
3555 	uint64_t new_offloads;
3556 
3557 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3558 	dev = &rte_eth_devices[port_id];
3559 
3560 	/* save original values in case of failure */
3561 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
3562 	dev_offloads = orig_offloads;
3563 
3564 	/* check which option changed by application */
3565 	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3566 	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3567 	if (cur != org) {
3568 		if (cur)
3569 			dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3570 		else
3571 			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3572 		mask |= ETH_VLAN_STRIP_MASK;
3573 	}
3574 
3575 	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3576 	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3577 	if (cur != org) {
3578 		if (cur)
3579 			dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3580 		else
3581 			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3582 		mask |= ETH_VLAN_FILTER_MASK;
3583 	}
3584 
3585 	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3586 	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3587 	if (cur != org) {
3588 		if (cur)
3589 			dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3590 		else
3591 			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3592 		mask |= ETH_VLAN_EXTEND_MASK;
3593 	}
3594 
3595 	cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3596 	org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3597 	if (cur != org) {
3598 		if (cur)
3599 			dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3600 		else
3601 			dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3602 		mask |= ETH_QINQ_STRIP_MASK;
3603 	}
3604 
3605 	/* no change */
3606 	if (mask == 0)
3607 		return ret;
3608 
3609 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3610 	if (ret != 0)
3611 		return ret;
3612 
3613 	/* Rx VLAN offloading must be within its device capabilities */
3614 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3615 		new_offloads = dev_offloads & ~orig_offloads;
3616 		RTE_ETHDEV_LOG(ERR,
3617 			"Ethdev port_id=%u requested newly added VLAN offloads "
3618 			"0x%" PRIx64 " must be within Rx offloads capabilities "
3619 			"0x%" PRIx64 " in %s()\n",
3620 			port_id, new_offloads, dev_info.rx_offload_capa,
3621 			__func__);
3622 		return -EINVAL;
3623 	}
3624 
3625 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3626 	dev->data->dev_conf.rxmode.offloads = dev_offloads;
3627 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3628 	if (ret) {
3629 		/* hit an error restore  original values */
3630 		/* hit an error, restore original values */
3631 	}
3632 
3633 	return eth_err(port_id, ret);
3634 }
3635 
3636 int
3637 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3638 {
3639 	struct rte_eth_dev *dev;
3640 	uint64_t *dev_offloads;
3641 	int ret = 0;
3642 
3643 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3644 	dev = &rte_eth_devices[port_id];
3645 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3646 
3647 	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3648 		ret |= ETH_VLAN_STRIP_OFFLOAD;
3649 
3650 	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3651 		ret |= ETH_VLAN_FILTER_OFFLOAD;
3652 
3653 	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3654 		ret |= ETH_VLAN_EXTEND_OFFLOAD;
3655 
3656 	if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3657 		ret |= ETH_QINQ_STRIP_OFFLOAD;
3658 
3659 	return ret;
3660 }
3661 
3662 int
3663 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3664 {
3665 	struct rte_eth_dev *dev;
3666 
3667 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3668 	dev = &rte_eth_devices[port_id];
3669 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3670 
3671 	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3672 }
3673 
3674 int
3675 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3676 {
3677 	struct rte_eth_dev *dev;
3678 
3679 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3680 	dev = &rte_eth_devices[port_id];
3681 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3682 	memset(fc_conf, 0, sizeof(*fc_conf));
3683 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3684 }
3685 
3686 int
3687 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3688 {
3689 	struct rte_eth_dev *dev;
3690 
3691 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3692 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3693 		RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3694 		return -EINVAL;
3695 	}
3696 
3697 	dev = &rte_eth_devices[port_id];
3698 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3699 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3700 }
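
/*
 * Illustrative usage sketch (not part of the library): request symmetric
 * (Rx + Tx) flow control.  The watermarks and pause time are placeholder
 * values; the real limits are device specific.
 *
 *	struct rte_eth_fc_conf fc_conf = {
 *		.mode = RTE_FC_FULL,
 *		.high_water = 1024,
 *		.low_water = 512,
 *		.pause_time = 100,
 *		.send_xon = 1,
 *	};
 *	int rc = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */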
3701 
3702 int
3703 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3704 				   struct rte_eth_pfc_conf *pfc_conf)
3705 {
3706 	struct rte_eth_dev *dev;
3707 
3708 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3709 	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3710 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3711 		return -EINVAL;
3712 	}
3713 
3714 	dev = &rte_eth_devices[port_id];
3715 	/* High water, low water validation are device specific */
3716 	if (*dev->dev_ops->priority_flow_ctrl_set)
3717 		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3718 					(dev, pfc_conf));
3719 	return -ENOTSUP;
3720 }
3721 
3722 static int
3723 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3724 			uint16_t reta_size)
3725 {
3726 	uint16_t i, num;
3727 
3728 	if (!reta_conf)
3729 		return -EINVAL;
3730 
3731 	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3732 	for (i = 0; i < num; i++) {
3733 		if (reta_conf[i].mask)
3734 			return 0;
3735 	}
3736 
3737 	return -EINVAL;
3738 }
3739 
3740 static int
3741 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3742 			 uint16_t reta_size,
3743 			 uint16_t max_rxq)
3744 {
3745 	uint16_t i, idx, shift;
3746 
3747 	if (!reta_conf)
3748 		return -EINVAL;
3749 
3750 	if (max_rxq == 0) {
3751 		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3752 		return -EINVAL;
3753 	}
3754 
3755 	for (i = 0; i < reta_size; i++) {
3756 		idx = i / RTE_RETA_GROUP_SIZE;
3757 		shift = i % RTE_RETA_GROUP_SIZE;
3758 		if ((reta_conf[idx].mask & (1ULL << shift)) &&
3759 			(reta_conf[idx].reta[shift] >= max_rxq)) {
3760 			RTE_ETHDEV_LOG(ERR,
3761 				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3762 				idx, shift,
3763 				reta_conf[idx].reta[shift], max_rxq);
3764 			return -EINVAL;
3765 		}
3766 	}
3767 
3768 	return 0;
3769 }
3770 
3771 int
3772 rte_eth_dev_rss_reta_update(uint16_t port_id,
3773 			    struct rte_eth_rss_reta_entry64 *reta_conf,
3774 			    uint16_t reta_size)
3775 {
3776 	struct rte_eth_dev *dev;
3777 	int ret;
3778 
3779 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3780 	/* Check mask bits */
3781 	ret = eth_check_reta_mask(reta_conf, reta_size);
3782 	if (ret < 0)
3783 		return ret;
3784 
3785 	dev = &rte_eth_devices[port_id];
3786 
3787 	/* Check entry value */
3788 	ret = eth_check_reta_entry(reta_conf, reta_size,
3789 				dev->data->nb_rx_queues);
3790 	if (ret < 0)
3791 		return ret;
3792 
3793 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3794 	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3795 							     reta_size));
3796 }
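
/*
 * Illustrative usage sketch (not part of the library): fill a redirection
 * table of dev_info.reta_size entries so traffic is spread evenly over
 * nb_rx_queues queues.  Every group of RTE_RETA_GROUP_SIZE entries carries
 * its own 64-bit update mask, which is what eth_check_reta_mask() above
 * requires to be non-zero for at least one group.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *		reta_conf[idx].mask |= 1ULL << shift;
 *		reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *	}
 *	rc = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */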
3797 
3798 int
3799 rte_eth_dev_rss_reta_query(uint16_t port_id,
3800 			   struct rte_eth_rss_reta_entry64 *reta_conf,
3801 			   uint16_t reta_size)
3802 {
3803 	struct rte_eth_dev *dev;
3804 	int ret;
3805 
3806 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3807 
3808 	/* Check mask bits */
3809 	ret = eth_check_reta_mask(reta_conf, reta_size);
3810 	if (ret < 0)
3811 		return ret;
3812 
3813 	dev = &rte_eth_devices[port_id];
3814 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3815 	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3816 							    reta_size));
3817 }
3818 
3819 int
3820 rte_eth_dev_rss_hash_update(uint16_t port_id,
3821 			    struct rte_eth_rss_conf *rss_conf)
3822 {
3823 	struct rte_eth_dev *dev;
3824 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3825 	int ret;
3826 
3827 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3828 
3829 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3830 	if (ret != 0)
3831 		return ret;
3832 
3833 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3834 
3835 	dev = &rte_eth_devices[port_id];
3836 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3837 	    dev_info.flow_type_rss_offloads) {
3838 		RTE_ETHDEV_LOG(ERR,
3839 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3840 			port_id, rss_conf->rss_hf,
3841 			dev_info.flow_type_rss_offloads);
3842 		return -EINVAL;
3843 	}
3844 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3845 	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3846 								 rss_conf));
3847 }
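
/*
 * Illustrative usage sketch (not part of the library): narrow the RSS hash
 * to IP and TCP fields.  rss_key is left NULL so only the hash function
 * selection changes; key handling with a NULL key is PMD dependent.
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *	};
 *	rc = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */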
3848 
3849 int
3850 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3851 			      struct rte_eth_rss_conf *rss_conf)
3852 {
3853 	struct rte_eth_dev *dev;
3854 
3855 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3856 	dev = &rte_eth_devices[port_id];
3857 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3858 	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3859 								   rss_conf));
3860 }
3861 
3862 int
3863 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3864 				struct rte_eth_udp_tunnel *udp_tunnel)
3865 {
3866 	struct rte_eth_dev *dev;
3867 
3868 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3869 	if (udp_tunnel == NULL) {
3870 		RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3871 		return -EINVAL;
3872 	}
3873 
3874 	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3875 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3876 		return -EINVAL;
3877 	}
3878 
3879 	dev = &rte_eth_devices[port_id];
3880 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3881 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3882 								udp_tunnel));
3883 }
3884 
3885 int
3886 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3887 				   struct rte_eth_udp_tunnel *udp_tunnel)
3888 {
3889 	struct rte_eth_dev *dev;
3890 
3891 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3892 	dev = &rte_eth_devices[port_id];
3893 
3894 	if (udp_tunnel == NULL) {
3895 		RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3896 		return -EINVAL;
3897 	}
3898 
3899 	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3900 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3901 		return -EINVAL;
3902 	}
3903 
3904 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3905 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3906 								udp_tunnel));
3907 }
3908 
3909 int
3910 rte_eth_led_on(uint16_t port_id)
3911 {
3912 	struct rte_eth_dev *dev;
3913 
3914 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3915 	dev = &rte_eth_devices[port_id];
3916 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3917 	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3918 }
3919 
3920 int
3921 rte_eth_led_off(uint16_t port_id)
3922 {
3923 	struct rte_eth_dev *dev;
3924 
3925 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3926 	dev = &rte_eth_devices[port_id];
3927 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3928 	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3929 }
3930 
3931 int
3932 rte_eth_fec_get_capability(uint16_t port_id,
3933 			   struct rte_eth_fec_capa *speed_fec_capa,
3934 			   unsigned int num)
3935 {
3936 	struct rte_eth_dev *dev;
3937 	int ret;
3938 
3939 	if (speed_fec_capa == NULL && num > 0)
3940 		return -EINVAL;
3941 
3942 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3943 	dev = &rte_eth_devices[port_id];
3944 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
3945 	ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
3946 
3947 	return ret;
3948 }
3949 
3950 int
3951 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
3952 {
3953 	struct rte_eth_dev *dev;
3954 
3955 	if (fec_capa == NULL)
3956 		return -EINVAL;
3957 
3958 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3959 	dev = &rte_eth_devices[port_id];
3960 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
3961 	return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
3962 }
3963 
3964 int
3965 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
3966 {
3967 	struct rte_eth_dev *dev;
3968 
3969 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3970 	dev = &rte_eth_devices[port_id];
3971 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
3972 	return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
3973 }
3974 
3975 /*
3976  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3977  * an empty spot.
3978  */
3979 static int
3980 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3981 {
3982 	struct rte_eth_dev_info dev_info;
3983 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3984 	unsigned i;
3985 	int ret;
3986 
3987 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3988 	if (ret != 0)
3989 		return -1;
3990 
3991 	for (i = 0; i < dev_info.max_mac_addrs; i++)
3992 		if (memcmp(addr, &dev->data->mac_addrs[i],
3993 				RTE_ETHER_ADDR_LEN) == 0)
3994 			return i;
3995 
3996 	return -1;
3997 }
3998 
3999 static const struct rte_ether_addr null_mac_addr;
4000 
4001 int
4002 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4003 			uint32_t pool)
4004 {
4005 	struct rte_eth_dev *dev;
4006 	int index;
4007 	uint64_t pool_mask;
4008 	int ret;
4009 
4010 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4011 	dev = &rte_eth_devices[port_id];
4012 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4013 
4014 	if (rte_is_zero_ether_addr(addr)) {
4015 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4016 			port_id);
4017 		return -EINVAL;
4018 	}
4019 	if (pool >= ETH_64_POOLS) {
4020 		RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4021 		return -EINVAL;
4022 	}
4023 
4024 	index = eth_dev_get_mac_addr_index(port_id, addr);
4025 	if (index < 0) {
4026 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4027 		if (index < 0) {
4028 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4029 				port_id);
4030 			return -ENOSPC;
4031 		}
4032 	} else {
4033 		pool_mask = dev->data->mac_pool_sel[index];
4034 
4035 		/* If both the MAC address and pool are already there, do nothing */
4036 		if (pool_mask & (1ULL << pool))
4037 			return 0;
4038 	}
4039 
4040 	/* Update NIC */
4041 	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4042 
4043 	if (ret == 0) {
4044 		/* Update address in NIC data structure */
4045 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4046 
4047 		/* Update pool bitmap in NIC data structure */
4048 		dev->data->mac_pool_sel[index] |= (1ULL << pool);
4049 	}
4050 
4051 	return eth_err(port_id, ret);
4052 }
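
/*
 * Illustrative usage sketch (not part of the library): add a secondary,
 * locally administered unicast address to pool 0 of a port.  The address
 * bytes below are placeholders.
 *
 *	struct rte_ether_addr extra_addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *	rc = rte_eth_dev_mac_addr_add(port_id, &extra_addr, 0);
 */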
4053 
4054 int
4055 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4056 {
4057 	struct rte_eth_dev *dev;
4058 	int index;
4059 
4060 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4061 	dev = &rte_eth_devices[port_id];
4062 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4063 
4064 	index = eth_dev_get_mac_addr_index(port_id, addr);
4065 	if (index == 0) {
4066 		RTE_ETHDEV_LOG(ERR,
4067 			"Port %u: Cannot remove default MAC address\n",
4068 			port_id);
4069 		return -EADDRINUSE;
4070 	} else if (index < 0)
4071 		return 0;  /* Do nothing if address wasn't found */
4072 
4073 	/* Update NIC */
4074 	(*dev->dev_ops->mac_addr_remove)(dev, index);
4075 
4076 	/* Update address in NIC data structure */
4077 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4078 
4079 	/* reset pool bitmap */
4080 	dev->data->mac_pool_sel[index] = 0;
4081 
4082 	return 0;
4083 }
4084 
4085 int
4086 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4087 {
4088 	struct rte_eth_dev *dev;
4089 	int ret;
4090 
4091 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4092 
4093 	if (!rte_is_valid_assigned_ether_addr(addr))
4094 		return -EINVAL;
4095 
4096 	dev = &rte_eth_devices[port_id];
4097 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4098 
4099 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4100 	if (ret < 0)
4101 		return ret;
4102 
4103 	/* Update default address in NIC data structure */
4104 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4105 
4106 	return 0;
4107 }
4108 
4109 
4110 /*
4111  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4112  * an empty spot.
4113  */
4114 static int
4115 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4116 		const struct rte_ether_addr *addr)
4117 {
4118 	struct rte_eth_dev_info dev_info;
4119 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4120 	unsigned i;
4121 	int ret;
4122 
4123 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4124 	if (ret != 0)
4125 		return -1;
4126 
4127 	if (!dev->data->hash_mac_addrs)
4128 		return -1;
4129 
4130 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4131 		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4132 			RTE_ETHER_ADDR_LEN) == 0)
4133 			return i;
4134 
4135 	return -1;
4136 }
4137 
4138 int
4139 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4140 				uint8_t on)
4141 {
4142 	int index;
4143 	int ret;
4144 	struct rte_eth_dev *dev;
4145 
4146 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4147 
4148 	dev = &rte_eth_devices[port_id];
4149 	if (rte_is_zero_ether_addr(addr)) {
4150 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4151 			port_id);
4152 		return -EINVAL;
4153 	}
4154 
4155 	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4156 	/* Check if it's already there, and do nothing */
4157 	if ((index >= 0) && on)
4158 		return 0;
4159 
4160 	if (index < 0) {
4161 		if (!on) {
4162 			RTE_ETHDEV_LOG(ERR,
4163 				"Port %u: the MAC address was not set in UTA\n",
4164 				port_id);
4165 			return -EINVAL;
4166 		}
4167 
4168 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4169 		if (index < 0) {
4170 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4171 				port_id);
4172 			return -ENOSPC;
4173 		}
4174 	}
4175 
4176 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4177 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4178 	if (ret == 0) {
4179 		/* Update address in NIC data structure */
4180 		if (on)
4181 			rte_ether_addr_copy(addr,
4182 					&dev->data->hash_mac_addrs[index]);
4183 		else
4184 			rte_ether_addr_copy(&null_mac_addr,
4185 					&dev->data->hash_mac_addrs[index]);
4186 	}
4187 
4188 	return eth_err(port_id, ret);
4189 }
4190 
4191 int
4192 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4193 {
4194 	struct rte_eth_dev *dev;
4195 
4196 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4197 
4198 	dev = &rte_eth_devices[port_id];
4199 
4200 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4201 	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4202 								       on));
4203 }
4204 
4205 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4206 					uint16_t tx_rate)
4207 {
4208 	struct rte_eth_dev *dev;
4209 	struct rte_eth_dev_info dev_info;
4210 	struct rte_eth_link link;
4211 	int ret;
4212 
4213 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4214 
4215 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4216 	if (ret != 0)
4217 		return ret;
4218 
4219 	dev = &rte_eth_devices[port_id];
4220 	link = dev->data->dev_link;
4221 
4222 	if (queue_idx > dev_info.max_tx_queues) {
4223 		RTE_ETHDEV_LOG(ERR,
4224 			"Set queue rate limit:port %u: invalid queue id=%u\n",
4225 			port_id, queue_idx);
4226 		return -EINVAL;
4227 	}
4228 
4229 	if (tx_rate > link.link_speed) {
4230 		RTE_ETHDEV_LOG(ERR,
4231 			"Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
4232 			tx_rate, link.link_speed);
4233 		return -EINVAL;
4234 	}
4235 
4236 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4237 	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4238 							queue_idx, tx_rate));
4239 }
4240 
4241 int
4242 rte_eth_mirror_rule_set(uint16_t port_id,
4243 			struct rte_eth_mirror_conf *mirror_conf,
4244 			uint8_t rule_id, uint8_t on)
4245 {
4246 	struct rte_eth_dev *dev;
4247 
4248 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4249 	if (mirror_conf->rule_type == 0) {
4250 		RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
4251 		return -EINVAL;
4252 	}
4253 
4254 	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4255 		RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4256 			ETH_64_POOLS - 1);
4257 		return -EINVAL;
4258 	}
4259 
4260 	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4261 	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4262 	    (mirror_conf->pool_mask == 0)) {
4263 		RTE_ETHDEV_LOG(ERR,
4264 			"Invalid mirror pool, pool mask can not be 0\n");
4265 		return -EINVAL;
4266 	}
4267 
4268 	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4269 	    mirror_conf->vlan.vlan_mask == 0) {
4270 		RTE_ETHDEV_LOG(ERR,
4271 			"Invalid vlan mask, vlan mask can not be 0\n");
4272 		return -EINVAL;
4273 	}
4274 
4275 	dev = &rte_eth_devices[port_id];
4276 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4277 
4278 	return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4279 						mirror_conf, rule_id, on));
4280 }
4281 
4282 int
4283 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4284 {
4285 	struct rte_eth_dev *dev;
4286 
4287 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4288 
4289 	dev = &rte_eth_devices[port_id];
4290 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4291 
4292 	return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
4293 								   rule_id));
4294 }
4295 
4296 RTE_INIT(eth_dev_init_cb_lists)
4297 {
4298 	uint16_t i;
4299 
4300 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4301 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4302 }
4303 
4304 int
4305 rte_eth_dev_callback_register(uint16_t port_id,
4306 			enum rte_eth_event_type event,
4307 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4308 {
4309 	struct rte_eth_dev *dev;
4310 	struct rte_eth_dev_callback *user_cb;
4311 	uint16_t next_port;
4312 	uint16_t last_port;
4313 
4314 	if (!cb_fn)
4315 		return -EINVAL;
4316 
4317 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4318 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4319 		return -EINVAL;
4320 	}
4321 
4322 	if (port_id == RTE_ETH_ALL) {
4323 		next_port = 0;
4324 		last_port = RTE_MAX_ETHPORTS - 1;
4325 	} else {
4326 		next_port = last_port = port_id;
4327 	}
4328 
4329 	rte_spinlock_lock(&eth_dev_cb_lock);
4330 
4331 	do {
4332 		dev = &rte_eth_devices[next_port];
4333 
4334 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4335 			if (user_cb->cb_fn == cb_fn &&
4336 				user_cb->cb_arg == cb_arg &&
4337 				user_cb->event == event) {
4338 				break;
4339 			}
4340 		}
4341 
4342 		/* create a new callback. */
4343 		if (user_cb == NULL) {
4344 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4345 				sizeof(struct rte_eth_dev_callback), 0);
4346 			if (user_cb != NULL) {
4347 				user_cb->cb_fn = cb_fn;
4348 				user_cb->cb_arg = cb_arg;
4349 				user_cb->event = event;
4350 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4351 						  user_cb, next);
4352 			} else {
4353 				rte_spinlock_unlock(&eth_dev_cb_lock);
4354 				rte_eth_dev_callback_unregister(port_id, event,
4355 								cb_fn, cb_arg);
4356 				return -ENOMEM;
4357 			}
4358 
4359 		}
4360 	} while (++next_port <= last_port);
4361 
4362 	rte_spinlock_unlock(&eth_dev_cb_lock);
4363 	return 0;
4364 }
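
/*
 * Illustrative usage sketch (not part of the library): register one link
 * status change callback on all ports.  The callback follows the
 * rte_eth_dev_cb_fn prototype.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: link status change event %d\n", port_id, event);
 *		return 0;
 *	}
 *
 *	rc = rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *					   lsc_event_cb, NULL);
 */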
4365 
4366 int
4367 rte_eth_dev_callback_unregister(uint16_t port_id,
4368 			enum rte_eth_event_type event,
4369 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4370 {
4371 	int ret;
4372 	struct rte_eth_dev *dev;
4373 	struct rte_eth_dev_callback *cb, *next;
4374 	uint16_t next_port;
4375 	uint16_t last_port;
4376 
4377 	if (!cb_fn)
4378 		return -EINVAL;
4379 
4380 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4381 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4382 		return -EINVAL;
4383 	}
4384 
4385 	if (port_id == RTE_ETH_ALL) {
4386 		next_port = 0;
4387 		last_port = RTE_MAX_ETHPORTS - 1;
4388 	} else {
4389 		next_port = last_port = port_id;
4390 	}
4391 
4392 	rte_spinlock_lock(&eth_dev_cb_lock);
4393 
4394 	do {
4395 		dev = &rte_eth_devices[next_port];
4396 		ret = 0;
4397 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4398 		     cb = next) {
4399 
4400 			next = TAILQ_NEXT(cb, next);
4401 
4402 			if (cb->cb_fn != cb_fn || cb->event != event ||
4403 			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4404 				continue;
4405 
4406 			/*
4407 			 * if this callback is not executing right now,
4408 			 * then remove it.
4409 			 */
4410 			if (cb->active == 0) {
4411 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4412 				rte_free(cb);
4413 			} else {
4414 				ret = -EAGAIN;
4415 			}
4416 		}
4417 	} while (++next_port <= last_port);
4418 
4419 	rte_spinlock_unlock(&eth_dev_cb_lock);
4420 	return ret;
4421 }
4422 
4423 int
4424 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4425 	enum rte_eth_event_type event, void *ret_param)
4426 {
4427 	struct rte_eth_dev_callback *cb_lst;
4428 	struct rte_eth_dev_callback dev_cb;
4429 	int rc = 0;
4430 
4431 	rte_spinlock_lock(&eth_dev_cb_lock);
4432 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4433 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4434 			continue;
4435 		dev_cb = *cb_lst;
4436 		cb_lst->active = 1;
4437 		if (ret_param != NULL)
4438 			dev_cb.ret_param = ret_param;
4439 
4440 		rte_spinlock_unlock(&eth_dev_cb_lock);
4441 		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4442 				dev_cb.cb_arg, dev_cb.ret_param);
4443 		rte_spinlock_lock(&eth_dev_cb_lock);
4444 		cb_lst->active = 0;
4445 	}
4446 	rte_spinlock_unlock(&eth_dev_cb_lock);
4447 	return rc;
4448 }
4449 
4450 void
4451 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4452 {
4453 	if (dev == NULL)
4454 		return;
4455 
4456 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4457 
4458 	dev->state = RTE_ETH_DEV_ATTACHED;
4459 }
4460 
4461 int
4462 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4463 {
4464 	uint32_t vec;
4465 	struct rte_eth_dev *dev;
4466 	struct rte_intr_handle *intr_handle;
4467 	uint16_t qid;
4468 	int rc;
4469 
4470 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4471 
4472 	dev = &rte_eth_devices[port_id];
4473 
4474 	if (!dev->intr_handle) {
4475 		RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4476 		return -ENOTSUP;
4477 	}
4478 
4479 	intr_handle = dev->intr_handle;
4480 	if (!intr_handle->intr_vec) {
4481 		RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4482 		return -EPERM;
4483 	}
4484 
4485 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4486 		vec = intr_handle->intr_vec[qid];
4487 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4488 		if (rc && rc != -EEXIST) {
4489 			RTE_ETHDEV_LOG(ERR,
4490 				"p %u q %u rx ctl error op %d epfd %d vec %u\n",
4491 				port_id, qid, op, epfd, vec);
4492 		}
4493 	}
4494 
4495 	return 0;
4496 }
4497 
4498 int
4499 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4500 {
4501 	struct rte_intr_handle *intr_handle;
4502 	struct rte_eth_dev *dev;
4503 	unsigned int efd_idx;
4504 	uint32_t vec;
4505 	int fd;
4506 
4507 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4508 
4509 	dev = &rte_eth_devices[port_id];
4510 
4511 	if (queue_id >= dev->data->nb_rx_queues) {
4512 		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4513 		return -1;
4514 	}
4515 
4516 	if (!dev->intr_handle) {
4517 		RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4518 		return -1;
4519 	}
4520 
4521 	intr_handle = dev->intr_handle;
4522 	if (!intr_handle->intr_vec) {
4523 		RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4524 		return -1;
4525 	}
4526 
4527 	vec = intr_handle->intr_vec[queue_id];
4528 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4529 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4530 	fd = intr_handle->efds[efd_idx];
4531 
4532 	return fd;
4533 }
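
/*
 * Illustrative usage sketch (not part of the library): fetch the event fd
 * behind an Rx queue interrupt and watch it from an application-owned epoll
 * instance ("app_epfd" is assumed to exist and <sys/epoll.h> to be included),
 * then arm the interrupt.
 *
 *	int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, queue_id);
 *	if (fd >= 0) {
 *		struct epoll_event ev = {
 *			.events = EPOLLIN,
 *			.data.fd = fd,
 *		};
 *		epoll_ctl(app_epfd, EPOLL_CTL_ADD, fd, &ev);
 *		rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	}
 */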
4534 
4535 static inline int
4536 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4537 		const char *ring_name)
4538 {
4539 	return snprintf(name, len, "eth_p%d_q%d_%s",
4540 			port_id, queue_id, ring_name);
4541 }
4542 
4543 const struct rte_memzone *
4544 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4545 			 uint16_t queue_id, size_t size, unsigned align,
4546 			 int socket_id)
4547 {
4548 	char z_name[RTE_MEMZONE_NAMESIZE];
4549 	const struct rte_memzone *mz;
4550 	int rc;
4551 
4552 	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4553 			queue_id, ring_name);
4554 	if (rc >= RTE_MEMZONE_NAMESIZE) {
4555 		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4556 		rte_errno = ENAMETOOLONG;
4557 		return NULL;
4558 	}
4559 
4560 	mz = rte_memzone_lookup(z_name);
4561 	if (mz) {
4562 		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4563 				size > mz->len ||
4564 				((uintptr_t)mz->addr & (align - 1)) != 0) {
4565 			RTE_ETHDEV_LOG(ERR,
4566 				"memzone %s does not justify the requested attributes\n",
4567 				mz->name);
4568 			return NULL;
4569 		}
4570 
4571 		return mz;
4572 	}
4573 
4574 	return rte_memzone_reserve_aligned(z_name, size, socket_id,
4575 			RTE_MEMZONE_IOVA_CONTIG, align);
4576 }
4577 
4578 int
4579 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4580 		uint16_t queue_id)
4581 {
4582 	char z_name[RTE_MEMZONE_NAMESIZE];
4583 	const struct rte_memzone *mz;
4584 	int rc = 0;
4585 
4586 	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4587 			queue_id, ring_name);
4588 	if (rc >= RTE_MEMZONE_NAMESIZE) {
4589 		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4590 		return -ENAMETOOLONG;
4591 	}
4592 
4593 	mz = rte_memzone_lookup(z_name);
4594 	if (mz)
4595 		rc = rte_memzone_free(mz);
4596 	else
4597 		rc = -ENOENT;
4598 
4599 	return rc;
4600 }
4601 
4602 int
4603 rte_eth_dev_create(struct rte_device *device, const char *name,
4604 	size_t priv_data_size,
4605 	ethdev_bus_specific_init ethdev_bus_specific_init,
4606 	void *bus_init_params,
4607 	ethdev_init_t ethdev_init, void *init_params)
4608 {
4609 	struct rte_eth_dev *ethdev;
4610 	int retval;
4611 
4612 	RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4613 
4614 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4615 		ethdev = rte_eth_dev_allocate(name);
4616 		if (!ethdev)
4617 			return -ENODEV;
4618 
4619 		if (priv_data_size) {
4620 			ethdev->data->dev_private = rte_zmalloc_socket(
4621 				name, priv_data_size, RTE_CACHE_LINE_SIZE,
4622 				device->numa_node);
4623 
4624 			if (!ethdev->data->dev_private) {
4625 				RTE_ETHDEV_LOG(ERR,
4626 					"failed to allocate private data\n");
4627 				retval = -ENOMEM;
4628 				goto probe_failed;
4629 			}
4630 		}
4631 	} else {
4632 		ethdev = rte_eth_dev_attach_secondary(name);
4633 		if (!ethdev) {
4634 			RTE_ETHDEV_LOG(ERR,
4635 				"secondary process attach failed, ethdev doesn't exist\n");
4636 			return -ENODEV;
4637 		}
4638 	}
4639 
4640 	ethdev->device = device;
4641 
4642 	if (ethdev_bus_specific_init) {
4643 		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4644 		if (retval) {
4645 			RTE_ETHDEV_LOG(ERR,
4646 				"ethdev bus specific initialisation failed\n");
4647 			goto probe_failed;
4648 		}
4649 	}
4650 
4651 	retval = ethdev_init(ethdev, init_params);
4652 	if (retval) {
4653 		RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4654 		goto probe_failed;
4655 	}
4656 
4657 	rte_eth_dev_probing_finish(ethdev);
4658 
4659 	return retval;
4660 
4661 probe_failed:
4662 	rte_eth_dev_release_port(ethdev);
4663 	return retval;
4664 }
4665 
4666 int
4667 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4668 	ethdev_uninit_t ethdev_uninit)
4669 {
4670 	int ret;
4671 
4672 	ethdev = rte_eth_dev_allocated(ethdev->data->name);
4673 	if (!ethdev)
4674 		return -ENODEV;
4675 
4676 	RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4677 
4678 	ret = ethdev_uninit(ethdev);
4679 	if (ret)
4680 		return ret;
4681 
4682 	return rte_eth_dev_release_port(ethdev);
4683 }
4684 
4685 int
4686 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4687 			  int epfd, int op, void *data)
4688 {
4689 	uint32_t vec;
4690 	struct rte_eth_dev *dev;
4691 	struct rte_intr_handle *intr_handle;
4692 	int rc;
4693 
4694 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4695 
4696 	dev = &rte_eth_devices[port_id];
4697 	if (queue_id >= dev->data->nb_rx_queues) {
4698 		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4699 		return -EINVAL;
4700 	}
4701 
4702 	if (!dev->intr_handle) {
4703 		RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4704 		return -ENOTSUP;
4705 	}
4706 
4707 	intr_handle = dev->intr_handle;
4708 	if (!intr_handle->intr_vec) {
4709 		RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4710 		return -EPERM;
4711 	}
4712 
4713 	vec = intr_handle->intr_vec[queue_id];
4714 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4715 	if (rc && rc != -EEXIST) {
4716 		RTE_ETHDEV_LOG(ERR,
4717 			"p %u q %u rx ctl error op %d epfd %d vec %u\n",
4718 			port_id, queue_id, op, epfd, vec);
4719 		return rc;
4720 	}
4721 
4722 	return 0;
4723 }
4724 
4725 int
4726 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4727 			   uint16_t queue_id)
4728 {
4729 	struct rte_eth_dev *dev;
4730 	int ret;
4731 
4732 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4733 
4734 	dev = &rte_eth_devices[port_id];
4735 
4736 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4737 	if (ret != 0)
4738 		return ret;
4739 
4740 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4741 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4742 								queue_id));
4743 }
4744 
4745 int
4746 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4747 			    uint16_t queue_id)
4748 {
4749 	struct rte_eth_dev *dev;
4750 	int ret;
4751 
4752 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4753 
4754 	dev = &rte_eth_devices[port_id];
4755 
4756 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4757 	if (ret != 0)
4758 		return ret;
4759 
4760 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4761 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4762 								queue_id));
4763 }
4764 
4765 
4766 const struct rte_eth_rxtx_callback *
4767 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4768 		rte_rx_callback_fn fn, void *user_param)
4769 {
4770 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4771 	rte_errno = ENOTSUP;
4772 	return NULL;
4773 #endif
4774 	struct rte_eth_dev *dev;
4775 
4776 	/* check input parameters */
4777 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4778 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4779 		rte_errno = EINVAL;
4780 		return NULL;
4781 	}
4782 	dev = &rte_eth_devices[port_id];
4783 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4784 		rte_errno = EINVAL;
4785 		return NULL;
4786 	}
4787 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4788 
4789 	if (cb == NULL) {
4790 		rte_errno = ENOMEM;
4791 		return NULL;
4792 	}
4793 
4794 	cb->fn.rx = fn;
4795 	cb->param = user_param;
4796 
4797 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4798 	/* Add the callbacks in fifo order. */
4799 	struct rte_eth_rxtx_callback *tail =
4800 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4801 
4802 	if (!tail) {
4803 		/* Stores to cb->fn and cb->param should complete before
4804 		 * cb is visible to data plane.
4805 		 */
4806 		__atomic_store_n(
4807 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4808 			cb, __ATOMIC_RELEASE);
4809 
4810 	} else {
4811 		while (tail->next)
4812 			tail = tail->next;
4813 		/* Stores to cb->fn and cb->param should complete before
4814 		 * cb is visible to data plane.
4815 		 */
4816 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4817 	}
4818 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4819 
4820 	return cb;
4821 }
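
/*
 * Illustrative usage sketch (not part of the library): count packets seen on
 * one Rx queue with a post-Rx callback.  The callback follows the
 * rte_rx_callback_fn prototype; "rx_pkt_count" is a hypothetical counter
 * owned by the application.
 *
 *	static uint16_t
 *	count_rx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		      uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *rx_pkt_count = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*rx_pkt_count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	cb = rte_eth_add_rx_callback(port_id, queue_id, count_rx_pkts,
 *				     &rx_pkt_count);
 */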
4822 
4823 const struct rte_eth_rxtx_callback *
4824 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4825 		rte_rx_callback_fn fn, void *user_param)
4826 {
4827 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4828 	rte_errno = ENOTSUP;
4829 	return NULL;
4830 #endif
4831 	/* check input parameters */
4832 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4833 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4834 		rte_errno = EINVAL;
4835 		return NULL;
4836 	}
4837 
4838 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4839 
4840 	if (cb == NULL) {
4841 		rte_errno = ENOMEM;
4842 		return NULL;
4843 	}
4844 
4845 	cb->fn.rx = fn;
4846 	cb->param = user_param;
4847 
4848 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4849 	/* Add the callbacks at first position */
4850 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4851 	/* Stores to cb->fn, cb->param and cb->next should complete before
4852 	 * cb is visible to data plane threads.
4853 	 */
4854 	__atomic_store_n(
4855 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4856 		cb, __ATOMIC_RELEASE);
4857 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4858 
4859 	return cb;
4860 }
4861 
4862 const struct rte_eth_rxtx_callback *
4863 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4864 		rte_tx_callback_fn fn, void *user_param)
4865 {
4866 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4867 	rte_errno = ENOTSUP;
4868 	return NULL;
4869 #endif
4870 	struct rte_eth_dev *dev;
4871 
4872 	/* check input parameters */
4873 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4874 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4875 		rte_errno = EINVAL;
4876 		return NULL;
4877 	}
4878 
4879 	dev = &rte_eth_devices[port_id];
4880 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4881 		rte_errno = EINVAL;
4882 		return NULL;
4883 	}
4884 
4885 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4886 
4887 	if (cb == NULL) {
4888 		rte_errno = ENOMEM;
4889 		return NULL;
4890 	}
4891 
4892 	cb->fn.tx = fn;
4893 	cb->param = user_param;
4894 
4895 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
4896 	/* Add the callbacks in fifo order. */
4897 	struct rte_eth_rxtx_callback *tail =
4898 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4899 
4900 	if (!tail) {
4901 		/* Stores to cb->fn and cb->param should complete before
4902 		 * cb is visible to data plane.
4903 		 */
4904 		__atomic_store_n(
4905 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4906 			cb, __ATOMIC_RELEASE);
4907 
4908 	} else {
4909 		while (tail->next)
4910 			tail = tail->next;
4911 		/* Stores to cb->fn and cb->param should complete before
4912 		 * cb is visible to data plane.
4913 		 */
4914 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4915 	}
4916 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4917 
4918 	return cb;
4919 }
4920 
4921 int
4922 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4923 		const struct rte_eth_rxtx_callback *user_cb)
4924 {
4925 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4926 	return -ENOTSUP;
4927 #endif
4928 	/* Check input parameters. */
4929 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4930 	if (user_cb == NULL ||
4931 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4932 		return -EINVAL;
4933 
4934 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4935 	struct rte_eth_rxtx_callback *cb;
4936 	struct rte_eth_rxtx_callback **prev_cb;
4937 	int ret = -EINVAL;
4938 
4939 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4940 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
4941 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
4942 		cb = *prev_cb;
4943 		if (cb == user_cb) {
4944 			/* Remove the user cb from the callback list. */
4945 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4946 			ret = 0;
4947 			break;
4948 		}
4949 	}
4950 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4951 
4952 	return ret;
4953 }
4954 
4955 int
4956 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4957 		const struct rte_eth_rxtx_callback *user_cb)
4958 {
4959 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4960 	return -ENOTSUP;
4961 #endif
4962 	/* Check input parameters. */
4963 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4964 	if (user_cb == NULL ||
4965 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4966 		return -EINVAL;
4967 
4968 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4969 	int ret = -EINVAL;
4970 	struct rte_eth_rxtx_callback *cb;
4971 	struct rte_eth_rxtx_callback **prev_cb;
4972 
4973 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
4974 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4975 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
4976 		cb = *prev_cb;
4977 		if (cb == user_cb) {
4978 			/* Remove the user cb from the callback list. */
4979 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4980 			ret = 0;
4981 			break;
4982 		}
4983 	}
4984 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4985 
4986 	return ret;
4987 }
4988 
4989 int
4990 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4991 	struct rte_eth_rxq_info *qinfo)
4992 {
4993 	struct rte_eth_dev *dev;
4994 
4995 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4996 
4997 	if (qinfo == NULL)
4998 		return -EINVAL;
4999 
5000 	dev = &rte_eth_devices[port_id];
5001 	if (queue_id >= dev->data->nb_rx_queues) {
5002 		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5003 		return -EINVAL;
5004 	}
5005 
5006 	if (dev->data->rx_queues == NULL ||
5007 			dev->data->rx_queues[queue_id] == NULL) {
5008 		RTE_ETHDEV_LOG(ERR,
5009 			       "Rx queue %"PRIu16" of device with port_id=%"
5010 			       PRIu16" has not been setup\n",
5011 			       queue_id, port_id);
5012 		return -EINVAL;
5013 	}
5014 
5015 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5016 		RTE_ETHDEV_LOG(INFO,
5017 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5018 			queue_id, port_id);
5019 		return -EINVAL;
5020 	}
5021 
5022 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5023 
5024 	memset(qinfo, 0, sizeof(*qinfo));
5025 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5026 	return 0;
5027 }
5028 
5029 int
5030 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5031 	struct rte_eth_txq_info *qinfo)
5032 {
5033 	struct rte_eth_dev *dev;
5034 
5035 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5036 
5037 	if (qinfo == NULL)
5038 		return -EINVAL;
5039 
5040 	dev = &rte_eth_devices[port_id];
5041 	if (queue_id >= dev->data->nb_tx_queues) {
5042 		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5043 		return -EINVAL;
5044 	}
5045 
5046 	if (dev->data->tx_queues == NULL ||
5047 			dev->data->tx_queues[queue_id] == NULL) {
5048 		RTE_ETHDEV_LOG(ERR,
5049 			       "Tx queue %"PRIu16" of device with port_id=%"
5050 			       PRIu16" has not been setup\n",
5051 			       queue_id, port_id);
5052 		return -EINVAL;
5053 	}
5054 
5055 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5056 		RTE_ETHDEV_LOG(INFO,
5057 			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5058 			queue_id, port_id);
5059 		return -EINVAL;
5060 	}
5061 
5062 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5063 
5064 	memset(qinfo, 0, sizeof(*qinfo));
5065 	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5066 
5067 	return 0;
5068 }
5069 
5070 int
5071 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5072 			  struct rte_eth_burst_mode *mode)
5073 {
5074 	struct rte_eth_dev *dev;
5075 
5076 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5077 
5078 	if (mode == NULL)
5079 		return -EINVAL;
5080 
5081 	dev = &rte_eth_devices[port_id];
5082 
5083 	if (queue_id >= dev->data->nb_rx_queues) {
5084 		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5085 		return -EINVAL;
5086 	}
5087 
5088 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5089 	memset(mode, 0, sizeof(*mode));
5090 	return eth_err(port_id,
5091 		       dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5092 }
5093 
5094 int
5095 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5096 			  struct rte_eth_burst_mode *mode)
5097 {
5098 	struct rte_eth_dev *dev;
5099 
5100 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5101 
5102 	if (mode == NULL)
5103 		return -EINVAL;
5104 
5105 	dev = &rte_eth_devices[port_id];
5106 
5107 	if (queue_id >= dev->data->nb_tx_queues) {
5108 		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5109 		return -EINVAL;
5110 	}
5111 
5112 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5113 	memset(mode, 0, sizeof(*mode));
5114 	return eth_err(port_id,
5115 		       dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5116 }
5117 
5118 int
5119 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5120 			     struct rte_ether_addr *mc_addr_set,
5121 			     uint32_t nb_mc_addr)
5122 {
5123 	struct rte_eth_dev *dev;
5124 
5125 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5126 
5127 	dev = &rte_eth_devices[port_id];
5128 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5129 	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5130 						mc_addr_set, nb_mc_addr));
5131 }
5132 
5133 int
5134 rte_eth_timesync_enable(uint16_t port_id)
5135 {
5136 	struct rte_eth_dev *dev;
5137 
5138 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5139 	dev = &rte_eth_devices[port_id];
5140 
5141 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5142 	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5143 }
5144 
5145 int
5146 rte_eth_timesync_disable(uint16_t port_id)
5147 {
5148 	struct rte_eth_dev *dev;
5149 
5150 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5151 	dev = &rte_eth_devices[port_id];
5152 
5153 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5154 	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5155 }
5156 
5157 int
5158 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5159 				   uint32_t flags)
5160 {
5161 	struct rte_eth_dev *dev;
5162 
5163 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5164 	dev = &rte_eth_devices[port_id];
5165 
5166 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5167 	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5168 				(dev, timestamp, flags));
5169 }
5170 
5171 int
5172 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5173 				   struct timespec *timestamp)
5174 {
5175 	struct rte_eth_dev *dev;
5176 
5177 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5178 	dev = &rte_eth_devices[port_id];
5179 
5180 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5181 	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5182 				(dev, timestamp));
5183 }
5184 
5185 int
5186 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5187 {
5188 	struct rte_eth_dev *dev;
5189 
5190 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5191 	dev = &rte_eth_devices[port_id];
5192 
5193 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5194 	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
5195 								      delta));
5196 }
5197 
5198 int
5199 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5200 {
5201 	struct rte_eth_dev *dev;
5202 
5203 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5204 	dev = &rte_eth_devices[port_id];
5205 
5206 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5207 	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5208 								timestamp));
5209 }
5210 
5211 int
5212 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5213 {
5214 	struct rte_eth_dev *dev;
5215 
5216 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5217 	dev = &rte_eth_devices[port_id];
5218 
5219 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5220 	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5221 								timestamp));
5222 }
5223 
5224 int
5225 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5226 {
5227 	struct rte_eth_dev *dev;
5228 
5229 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5230 	dev = &rte_eth_devices[port_id];
5231 
5232 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5233 	return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5234 }
5235 
5236 int
5237 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5238 {
5239 	struct rte_eth_dev *dev;
5240 
5241 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5242 
5243 	dev = &rte_eth_devices[port_id];
5244 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5245 	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5246 }
5247 
5248 int
5249 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5250 {
5251 	struct rte_eth_dev *dev;
5252 
5253 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5254 
5255 	dev = &rte_eth_devices[port_id];
5256 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5257 	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5258 }
5259 
5260 int
5261 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5262 {
5263 	struct rte_eth_dev *dev;
5264 
5265 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5266 
5267 	dev = &rte_eth_devices[port_id];
5268 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5269 	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5270 }
5271 
5272 int
5273 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5274 {
5275 	struct rte_eth_dev *dev;
5276 
5277 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5278 
5279 	dev = &rte_eth_devices[port_id];
5280 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5281 	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5282 }
5283 
5284 int
5285 rte_eth_dev_get_module_info(uint16_t port_id,
5286 			    struct rte_eth_dev_module_info *modinfo)
5287 {
5288 	struct rte_eth_dev *dev;
5289 
5290 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5291 
5292 	dev = &rte_eth_devices[port_id];
5293 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5294 	return (*dev->dev_ops->get_module_info)(dev, modinfo);
5295 }
5296 
5297 int
5298 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5299 			      struct rte_dev_eeprom_info *info)
5300 {
5301 	struct rte_eth_dev *dev;
5302 
5303 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5304 
5305 	dev = &rte_eth_devices[port_id];
5306 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5307 	return (*dev->dev_ops->get_module_eeprom)(dev, info);
5308 }
5309 
5310 int
5311 rte_eth_dev_get_dcb_info(uint16_t port_id,
5312 			     struct rte_eth_dcb_info *dcb_info)
5313 {
5314 	struct rte_eth_dev *dev;
5315 
5316 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5317 
5318 	dev = &rte_eth_devices[port_id];
5319 	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5320 
5321 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5322 	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5323 }
5324 
5325 static void
5326 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5327 		const struct rte_eth_desc_lim *desc_lim)
5328 {
5329 	if (desc_lim->nb_align != 0)
5330 		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5331 
5332 	if (desc_lim->nb_max != 0)
5333 		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5334 
5335 	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5336 }
5337 
5338 int
5339 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5340 				 uint16_t *nb_rx_desc,
5341 				 uint16_t *nb_tx_desc)
5342 {
5343 	struct rte_eth_dev_info dev_info;
5344 	int ret;
5345 
5346 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5347 
5348 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5349 	if (ret != 0)
5350 		return ret;
5351 
5352 	if (nb_rx_desc != NULL)
5353 		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5354 
5355 	if (nb_tx_desc != NULL)
5356 		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5357 
5358 	return 0;
5359 }
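
/*
 * Illustrative usage sketch (not part of the library): clamp application
 * defaults to the device descriptor limits before setting up the queues.
 * The initial values 1024/1024 and the mempool "mb_pool" are placeholders.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rc = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	if (rc == 0)
 *		rc = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *					    rte_socket_id(), NULL, mb_pool);
 */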
5360 
5361 int
5362 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5363 				   struct rte_eth_hairpin_cap *cap)
5364 {
5365 	struct rte_eth_dev *dev;
5366 
5367 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5368 
5369 	dev = &rte_eth_devices[port_id];
5370 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5371 	memset(cap, 0, sizeof(*cap));
5372 	return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5373 }
5374 
5375 int
5376 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5377 {
5378 	if (dev->data->rx_queue_state[queue_id] ==
5379 	    RTE_ETH_QUEUE_STATE_HAIRPIN)
5380 		return 1;
5381 	return 0;
5382 }
5383 
5384 int
5385 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5386 {
5387 	if (dev->data->tx_queue_state[queue_id] ==
5388 	    RTE_ETH_QUEUE_STATE_HAIRPIN)
5389 		return 1;
5390 	return 0;
5391 }
5392 
5393 int
5394 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5395 {
5396 	struct rte_eth_dev *dev;
5397 
5398 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5399 
5400 	if (pool == NULL)
5401 		return -EINVAL;
5402 
5403 	dev = &rte_eth_devices[port_id];
5404 
5405 	if (*dev->dev_ops->pool_ops_supported == NULL)
5406 		return 1; /* all pools are supported */
5407 
5408 	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5409 }
5410 
5411 /**
5412  * A set of values to describe the possible states of a switch domain.
5413  */
5414 enum rte_eth_switch_domain_state {
5415 	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5416 	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5417 };
5418 
5419 /**
5420  * Array of switch domains available for allocation. Array is sized to
5421  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5422  * ethdev ports in a single process.
5423  */
5424 static struct rte_eth_dev_switch {
5425 	enum rte_eth_switch_domain_state state;
5426 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5427 
5428 int
5429 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5430 {
5431 	uint16_t i;
5432 
5433 	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5434 
5435 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5436 		if (eth_dev_switch_domains[i].state ==
5437 			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5438 			eth_dev_switch_domains[i].state =
5439 				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5440 			*domain_id = i;
5441 			return 0;
5442 		}
5443 	}
5444 
5445 	return -ENOSPC;
5446 }
5447 
5448 int
5449 rte_eth_switch_domain_free(uint16_t domain_id)
5450 {
5451 	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5452 		domain_id >= RTE_MAX_ETHPORTS)
5453 		return -EINVAL;
5454 
5455 	if (eth_dev_switch_domains[domain_id].state !=
5456 		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5457 		return -EINVAL;
5458 
5459 	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5460 
5461 	return 0;
5462 }
5463 
5464 static int
5465 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5466 {
5467 	int state;
5468 	struct rte_kvargs_pair *pair;
5469 	char *letter;
5470 
5471 	arglist->str = strdup(str_in);
5472 	if (arglist->str == NULL)
5473 		return -ENOMEM;
5474 
5475 	letter = arglist->str;
5476 	state = 0;
5477 	arglist->count = 0;
5478 	pair = &arglist->pairs[0];
5479 	while (1) {
5480 		switch (state) {
5481 		case 0: /* Initial */
5482 			if (*letter == '=')
5483 				return -EINVAL;
5484 			else if (*letter == '\0')
5485 				return 0;
5486 
5487 			state = 1;
5488 			pair->key = letter;
5489 			/* fall-thru */
5490 
5491 		case 1: /* Parsing key */
5492 			if (*letter == '=') {
5493 				*letter = '\0';
5494 				pair->value = letter + 1;
5495 				state = 2;
5496 			} else if (*letter == ',' || *letter == '\0')
5497 				return -EINVAL;
5498 			break;
5499 
5500 
5501 		case 2: /* Parsing value */
5502 			if (*letter == '[')
5503 				state = 3;
5504 			else if (*letter == ',') {
5505 				*letter = '\0';
5506 				arglist->count++;
5507 				pair = &arglist->pairs[arglist->count];
5508 				state = 0;
5509 			} else if (*letter == '\0') {
5510 				letter--;
5511 				arglist->count++;
5512 				pair = &arglist->pairs[arglist->count];
5513 				state = 0;
5514 			}
5515 			break;
5516 
5517 		case 3: /* Parsing list */
5518 			if (*letter == ']')
5519 				state = 2;
5520 			else if (*letter == '\0')
5521 				return -EINVAL;
5522 			break;
5523 		}
5524 		letter++;
5525 	}
5526 }
5527 
5528 int
5529 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5530 {
5531 	struct rte_kvargs args;
5532 	struct rte_kvargs_pair *pair;
5533 	unsigned int i;
5534 	int result = 0;
5535 
5536 	memset(eth_da, 0, sizeof(*eth_da));
5537 
5538 	result = eth_dev_devargs_tokenise(&args, dargs);
5539 	if (result < 0)
5540 		goto parse_cleanup;
5541 
5542 	for (i = 0; i < args.count; i++) {
5543 		pair = &args.pairs[i];
5544 		if (strcmp("representor", pair->key) == 0) {
5545 			result = rte_eth_devargs_parse_list(pair->value,
5546 				rte_eth_devargs_parse_representor_ports,
5547 				eth_da);
5548 			if (result < 0)
5549 				goto parse_cleanup;
5550 		}
5551 	}
5552 
5553 parse_cleanup:
5554 	if (args.str)
5555 		free(args.str);
5556 
5557 	return result;
5558 }
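
/*
 * Illustrative example (not part of the library): the tokeniser above splits
 * a devargs string into key=value pairs and treats a bracketed value as one
 * token, so an input such as
 *
 *	"representor=[0,2-4]"
 *
 * yields a single "representor" pair whose value is then expanded by
 * rte_eth_devargs_parse_representor_ports() into the representor port ids of
 * struct rte_eth_devargs.
 */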
5559 
5560 static int
5561 eth_dev_handle_port_list(const char *cmd __rte_unused,
5562 		const char *params __rte_unused,
5563 		struct rte_tel_data *d)
5564 {
5565 	int port_id;
5566 
5567 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5568 	RTE_ETH_FOREACH_DEV(port_id)
5569 		rte_tel_data_add_array_int(d, port_id);
5570 	return 0;
5571 }
5572 
5573 static void
5574 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5575 		const char *stat_name)
5576 {
5577 	int q;
5578 	struct rte_tel_data *q_data = rte_tel_data_alloc();
	/* guard against allocation failure before the array is initialised */
	if (q_data == NULL)
		return;
5579 	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5580 	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5581 		rte_tel_data_add_array_u64(q_data, q_stats[q]);
5582 	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5583 }
5584 
5585 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5586 
5587 static int
5588 eth_dev_handle_port_stats(const char *cmd __rte_unused,
5589 		const char *params,
5590 		struct rte_tel_data *d)
5591 {
5592 	struct rte_eth_stats stats;
5593 	int port_id, ret;
5594 
5595 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5596 		return -1;
5597 
5598 	port_id = atoi(params);
5599 	if (!rte_eth_dev_is_valid_port(port_id))
5600 		return -1;
5601 
5602 	ret = rte_eth_stats_get(port_id, &stats);
5603 	if (ret < 0)
5604 		return -1;
5605 
5606 	rte_tel_data_start_dict(d);
5607 	ADD_DICT_STAT(stats, ipackets);
5608 	ADD_DICT_STAT(stats, opackets);
5609 	ADD_DICT_STAT(stats, ibytes);
5610 	ADD_DICT_STAT(stats, obytes);
5611 	ADD_DICT_STAT(stats, imissed);
5612 	ADD_DICT_STAT(stats, ierrors);
5613 	ADD_DICT_STAT(stats, oerrors);
5614 	ADD_DICT_STAT(stats, rx_nombuf);
5615 	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5616 	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5617 	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5618 	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5619 	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
5620 
5621 	return 0;
5622 }
5623 
5624 static int
5625 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
5626 		const char *params,
5627 		struct rte_tel_data *d)
5628 {
5629 	struct rte_eth_xstat *eth_xstats;
5630 	struct rte_eth_xstat_name *xstat_names;
5631 	int port_id, num_xstats;
5632 	int i, ret;
5633 	char *end_param;
5634 
5635 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5636 		return -1;
5637 
5638 	port_id = strtoul(params, &end_param, 0);
5639 	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
5642 	if (!rte_eth_dev_is_valid_port(port_id))
5643 		return -1;
5644 
5645 	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5646 	if (num_xstats < 0)
5647 		return -1;
5648 
5649 	/* use one malloc for both names and stats */
5650 	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5651 			sizeof(struct rte_eth_xstat_name)) * num_xstats);
5652 	if (eth_xstats == NULL)
5653 		return -1;
5654 	xstat_names = (void *)&eth_xstats[num_xstats];
5655 
5656 	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5657 	if (ret < 0 || ret > num_xstats) {
5658 		free(eth_xstats);
5659 		return -1;
5660 	}
5661 
5662 	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5663 	if (ret < 0 || ret > num_xstats) {
5664 		free(eth_xstats);
5665 		return -1;
5666 	}
5667 
5668 	rte_tel_data_start_dict(d);
5669 	for (i = 0; i < num_xstats; i++)
5670 		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5671 				eth_xstats[i].value);
	/* The names were copied into the telemetry dict, so free the buffer. */
	free(eth_xstats);
	return 0;
5673 }
5674 
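/*
 * Telemetry callback for /ethdev/link_status: 'params' holds the port ID.
 * Reports "DOWN", or "UP" together with the link speed and duplex mode.
 */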
5675 static int
5676 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
5677 		const char *params,
5678 		struct rte_tel_data *d)
5679 {
5680 	static const char *status_str = "status";
5681 	int ret, port_id;
5682 	struct rte_eth_link link;
5683 	char *end_param;
5684 
5685 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5686 		return -1;
5687 
5688 	port_id = strtoul(params, &end_param, 0);
5689 	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
5692 	if (!rte_eth_dev_is_valid_port(port_id))
5693 		return -1;
5694 
5695 	ret = rte_eth_link_get(port_id, &link);
5696 	if (ret < 0)
5697 		return -1;
5698 
5699 	rte_tel_data_start_dict(d);
5700 	if (!link.link_status) {
5701 		rte_tel_data_add_dict_string(d, status_str, "DOWN");
5702 		return 0;
5703 	}
5704 	rte_tel_data_add_dict_string(d, status_str, "UP");
5705 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5706 	rte_tel_data_add_dict_string(d, "duplex",
5707 			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5708 				"full-duplex" : "half-duplex");
5709 	return 0;
5710 }
5711 
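/*
 * Hairpin peer-queue helpers: each forwards the request to the dev_ops of
 * the given port so that the two ends of a cross-port hairpin binding can
 * exchange and apply peer queue information. Port validity is assumed to
 * have been checked by the caller.
 */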
5712 int
5713 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
5714 				  struct rte_hairpin_peer_info *cur_info,
5715 				  struct rte_hairpin_peer_info *peer_info,
5716 				  uint32_t direction)
5717 {
5718 	struct rte_eth_dev *dev;
5719 
	/* Current queue information is optional; peer information is mandatory. */
5721 	if (peer_info == NULL)
5722 		return -EINVAL;
5723 
5724 	/* No need to check the validity again. */
5725 	dev = &rte_eth_devices[peer_port];
5726 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
5727 				-ENOTSUP);
5728 
5729 	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
5730 					cur_info, peer_info, direction);
5731 }
5732 
5733 int
5734 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
5735 				struct rte_hairpin_peer_info *peer_info,
5736 				uint32_t direction)
5737 {
5738 	struct rte_eth_dev *dev;
5739 
5740 	if (peer_info == NULL)
5741 		return -EINVAL;
5742 
5743 	/* No need to check the validity again. */
5744 	dev = &rte_eth_devices[cur_port];
5745 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
5746 				-ENOTSUP);
5747 
5748 	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
5749 							peer_info, direction);
5750 }
5751 
5752 int
5753 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
5754 				  uint32_t direction)
5755 {
5756 	struct rte_eth_dev *dev;
5757 
5758 	/* No need to check the validity again. */
5759 	dev = &rte_eth_devices[cur_port];
5760 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
5761 				-ENOTSUP);
5762 
5763 	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
5764 							  direction);
5765 }
5766 
5767 RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
5768 
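/*
 * Register the ethdev telemetry commands at startup. For the per-port
 * commands the port ID is passed via 'params', i.e. the text following the
 * command name on the telemetry socket (typically sent as, for example,
 * "/ethdev/stats,0" from a telemetry client; shown here only as an
 * illustrative invocation).
 */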
5769 RTE_INIT(ethdev_init_telemetry)
5770 {
5771 	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
5772 			"Returns list of available ethdev ports. Takes no parameters");
5773 	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
5774 			"Returns the common stats for a port. Parameters: int port_id");
5775 	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
5776 			"Returns the extended stats for a port. Parameters: int port_id");
5777 	rte_telemetry_register_cmd("/ethdev/link_status",
5778 			eth_dev_handle_port_link_status,
5779 			"Returns the link status for a port. Parameters: int port_id");
5780 }
5781