/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name) \
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name) \
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle the pure class filter (i.e. without any bus-level argument)
	 * from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with the new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get the next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidates */
		}
		/* A device matches the bus part, need to check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev ports to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
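
/*
 * Usage sketch (illustrative, not part of the library): the three iterator
 * calls above are normally driven together, for example by a loop such as
 * the following. The devargs string is hypothetical.
 *
 *	struct rte_dev_iterator it;
 *	uint16_t pid;
 *
 *	if (rte_eth_iterator_init(&it, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (pid = rte_eth_iterator_next(&it);
 *		     pid != RTE_MAX_ETHPORTS;
 *		     pid = rte_eth_iterator_next(&it))
 *			printf("matched port %u\n", pid);
 *		// rte_eth_iterator_next() cleans up automatically once the
 *		// iteration is exhausted; rte_eth_iterator_cleanup() is only
 *		// needed when stopping early.
 *	}
 */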

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV differs because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* cannot truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
					sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
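
/*
 * Usage sketch (illustrative): a typical ownership handshake with the API
 * above. The port ID and the owner name "my-app" are hypothetical; the name
 * must fit in RTE_ETH_MAX_OWNER_NAME_LEN.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my-app" };
 *	uint16_t port_id = 0;	// hypothetical port
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		// the port is now skipped by RTE_ETH_FOREACH_DEV in other
 *		// parts of the application until ownership is released
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */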

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}
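
/*
 * Usage sketch (illustrative): resolving a port ID from its device name,
 * e.g. for a port created from a vdev argument. The name is hypothetical.
 *
 *	uint16_t pid;
 *
 *	if (rte_eth_dev_get_port_by_name("net_tap0", &pid) == 0)
 *		printf("net_tap0 is port %u\n", pid);
 */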

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been set up\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been set up\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
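
/*
 * Usage sketch (illustrative): the per-queue start/stop calls above are
 * mostly useful with deferred-start queues. A queue set up with
 * rx_conf.rx_deferred_start = 1 is left stopped by rte_eth_dev_start()
 * and must be started explicitly once the port is running:
 *
 *	if (rte_eth_dev_rx_queue_start(port_id, queue_id) != 0)
 *		handle_error();	// hypothetical error handler; failure means
 *				// a hairpin queue, a bad queue ID, a port
 *				// that is not started, or -ENOTSUP
 */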

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}
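
/*
 * Example (illustrative): building a link_speeds mask from the helper
 * above, advertising only 1G and 10G full duplex during autonegotiation.
 *
 *	uint32_t speeds =
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				      RTE_ETH_LINK_FULL_DUPLEX) |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_1G,
 *				      RTE_ETH_LINK_FULL_DUPLEX);
 *	// the mask can then be assigned to dev_conf.link_speeds before
 *	// calling rte_eth_dev_configure()
 */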

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation is successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
		uint64_t set_offloads, const char *offload_type,
		const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if the offload was enabled without being requested. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}
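
/*
 * Worked example (illustrative) of the XOR logic above: if the application
 * requested offloads 0b0110 and the device ended up with 0b0011, then
 * offloads_diff = 0b0101. Bit 2 (0b0100) is set in req_offloads but not in
 * set_offloads, so an error is logged and -EINVAL is returned; bit 0
 * (0b0001) is enabled without being requested, which is only logged at
 * DEBUG level and does not fail validation.
 */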

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}
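
/*
 * Worked example (illustrative): a device reporting max_rx_pktlen = 9728
 * and max_mtu = 9710 yields overhead_len = 9728 - 9710 = 18 bytes. When
 * the device does not report a usable max_mtu (UINT16_MAX), the generic
 * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes is assumed
 * instead; devices counting extra headers such as VLAN tags would report
 * a larger difference.
 */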

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any unexpected behaviour.
	 * It is set to 1 only when dev_configure() completes successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback is required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before the
	 * dev_info get.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If the number of queues specified by the application for both Rx
	 * and Tx is zero, use driver preferred values. This cannot be done
	 * individually as it is valid for either Tx or Rx (but not both) to
	 * be zero. If the driver does not provide any preferred values,
	 * fall back on EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" don't match Rx offload "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" don't match Tx offload "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that the device supports the requested RSS hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
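
/*
 * Usage sketch (illustrative): a minimal configuration of one Rx and one
 * Tx queue. All values shown are hypothetical application choices.
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE },
 *	};
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *		handle_error();	// hypothetical error handler
 *	// queues must then be set up with rte_eth_rx_queue_setup() /
 *	// rte_eth_tx_queue_setup() before rte_eth_dev_start()
 */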

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use the callbacks directly since we don't need the port_id check
	 * and want to bypass the same-value short cut
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use the callbacks directly since we don't need the port_id check
	 * and want to bypass the same-value short cut
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Restore the MAC now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error while restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
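
/*
 * Usage sketch (illustrative): the expected port lifecycle around
 * rte_eth_dev_start(), with hypothetical helpers and error handling.
 *
 *	if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) == 0 &&
 *	    setup_all_queues(port_id) == 0 &&	// hypothetical helper
 *	    rte_eth_dev_start(port_id) == 0) {
 *		// rte_eth_rx_burst()/rte_eth_tx_burst() may now run
 *		rte_eth_dev_stop(port_id);
 *		rte_eth_dev_close(port_id);
 *	}
 */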

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	dev->data->dev_started = 0;
	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			"Requested Rx segments %u exceed supported %u\n",
			n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in the extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					offset,
					seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
				sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				"%s private_data_size %u < %u\n",
				mpl->name, mpl->private_data_size,
				(unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				"%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				mpl->name, *mbp_buf_size,
				length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}
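
/*
 * Configuration sketch (illustrative): splitting each packet into a small
 * header segment and a payload segment taken from two different mempools,
 * assuming the device reports rx_seg_capa.multi_pools. Pool names and
 * lengths are hypothetical.
 *
 *	union rte_eth_rxseg segs[2] = {
 *		{ .split = { .mp = hdr_pool, .length = 128 } },
 *		{ .split = { .mp = pay_pool, .length = 0 } },  // 0 = rest
 *	};
 *	struct rte_eth_rxconf rxconf = {
 *		.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT,
 *		.rx_seg = segs,
 *		.rx_nseg = 2,
 *	};
 *	// pass rxconf with mp == NULL to rte_eth_rx_queue_setup()
 */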
1708
1709 int
rte_eth_rx_queue_setup(uint16_t port_id,uint16_t rx_queue_id,uint16_t nb_rx_desc,unsigned int socket_id,const struct rte_eth_rxconf * rx_conf,struct rte_mempool * mp)1710 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1711 uint16_t nb_rx_desc, unsigned int socket_id,
1712 const struct rte_eth_rxconf *rx_conf,
1713 struct rte_mempool *mp)
1714 {
1715 int ret;
1716 uint32_t mbp_buf_size;
1717 struct rte_eth_dev *dev;
1718 struct rte_eth_dev_info dev_info;
1719 struct rte_eth_rxconf local_conf;
1720
1721 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1722 dev = &rte_eth_devices[port_id];
1723
1724 if (rx_queue_id >= dev->data->nb_rx_queues) {
1725 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1726 return -EINVAL;
1727 }
1728
1729 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1730
1731 ret = rte_eth_dev_info_get(port_id, &dev_info);
1732 if (ret != 0)
1733 return ret;
1734
1735 if (mp != NULL) {
1736 /* Single pool configuration check. */
1737 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1738 RTE_ETHDEV_LOG(ERR,
1739 "Ambiguous segment configuration\n");
1740 return -EINVAL;
1741 }
1742 /*
1743 * Check the size of the mbuf data buffer, this value
1744 * must be provided in the private data of the memory pool.
1745 * First check that the memory pool(s) has a valid private data.
1746 */
1747 if (mp->private_data_size <
1748 sizeof(struct rte_pktmbuf_pool_private)) {
1749 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1750 mp->name, mp->private_data_size,
1751 (unsigned int)
1752 sizeof(struct rte_pktmbuf_pool_private));
1753 return -ENOSPC;
1754 }
1755 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1756 if (mbp_buf_size < dev_info.min_rx_bufsize +
1757 RTE_PKTMBUF_HEADROOM) {
1758 RTE_ETHDEV_LOG(ERR,
1759 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1760 mp->name, mbp_buf_size,
1761 RTE_PKTMBUF_HEADROOM +
1762 dev_info.min_rx_bufsize,
1763 RTE_PKTMBUF_HEADROOM,
1764 dev_info.min_rx_bufsize);
1765 return -EINVAL;
1766 }
1767 } else {
1768 const struct rte_eth_rxseg_split *rx_seg;
1769 uint16_t n_seg;
1770
1771 /* Extended multi-segment configuration check. */
1772 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
1773 RTE_ETHDEV_LOG(ERR,
1774 "Memory pool is null and no extended configuration provided\n");
1775 return -EINVAL;
1776 }
1777
1778 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
1779 n_seg = rx_conf->rx_nseg;
1780
1781 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1782 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
1783 &mbp_buf_size,
1784 &dev_info);
1785 if (ret != 0)
1786 return ret;
1787 } else {
1788 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
1789 return -EINVAL;
1790 }
1791 }
1792
1793 /* Use default specified by driver, if nb_rx_desc is zero */
1794 if (nb_rx_desc == 0) {
1795 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1796 /* If driver default is also zero, fall back on EAL default */
1797 if (nb_rx_desc == 0)
1798 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1799 }
1800
1801 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1802 nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1803 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1804
1805 RTE_ETHDEV_LOG(ERR,
1806 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
1807 nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1808 dev_info.rx_desc_lim.nb_min,
1809 dev_info.rx_desc_lim.nb_align);
1810 return -EINVAL;
1811 }
1812
1813 if (dev->data->dev_started &&
1814 !(dev_info.dev_capa &
1815 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1816 return -EBUSY;
1817
1818 if (dev->data->dev_started &&
1819 (dev->data->rx_queue_state[rx_queue_id] !=
1820 RTE_ETH_QUEUE_STATE_STOPPED))
1821 return -EBUSY;
1822
1823 eth_dev_rxq_release(dev, rx_queue_id);
1824
1825 if (rx_conf == NULL)
1826 rx_conf = &dev_info.default_rxconf;
1827
1828 local_conf = *rx_conf;
1829
1830 /*
1831 * If an offloading has already been enabled in
1832 * rte_eth_dev_configure(), it has been enabled on all queues,
1833 * so there is no need to enable it in this queue again.
1834 * The local_conf.offloads input to underlying PMD only carries
1835 * those offloadings which are only enabled on this queue and
1836 * not enabled on all queues.
1837 */
1838 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1839
1840 	/*
1841 	 * Offloads newly added for this queue are those not enabled in
1842 	 * rte_eth_dev_configure(), and they must be per-queue capable.
1843 	 * A pure per-port offload can't be enabled on one queue while
1844 	 * disabled on another. Nor can a pure per-port offload be
1845 	 * newly enabled for a queue if it hasn't already been
1846 	 * enabled in rte_eth_dev_configure().
1847 	 */
1848 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1849 local_conf.offloads) {
1850 RTE_ETHDEV_LOG(ERR,
1851 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1852 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1853 port_id, rx_queue_id, local_conf.offloads,
1854 dev_info.rx_queue_offload_capa,
1855 __func__);
1856 return -EINVAL;
1857 }
1858
1859 if (local_conf.share_group > 0 &&
1860 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
1861 RTE_ETHDEV_LOG(ERR,
1862 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
1863 port_id, rx_queue_id, local_conf.share_group);
1864 return -EINVAL;
1865 }
1866
1867 /*
1868 * If LRO is enabled, check that the maximum aggregated packet
1869 * size is supported by the configured device.
1870 */
1871 /* Get the real Ethernet overhead length */
1872 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1873 uint32_t overhead_len;
1874 uint32_t max_rx_pktlen;
1875 int ret;
1876
1877 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1878 dev_info.max_mtu);
1879 max_rx_pktlen = dev->data->mtu + overhead_len;
1880 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1881 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1882 ret = eth_dev_check_lro_pkt_size(port_id,
1883 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1884 max_rx_pktlen,
1885 dev_info.max_lro_pkt_size);
1886 if (ret != 0)
1887 return ret;
1888 }
1889
1890 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1891 socket_id, &local_conf, mp);
1892 if (!ret) {
1893 if (!dev->data->min_rx_buf_size ||
1894 dev->data->min_rx_buf_size > mbp_buf_size)
1895 dev->data->min_rx_buf_size = mbp_buf_size;
1896 }
1897
1898 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1899 rx_conf, ret);
1900 return eth_err(port_id, ret);
1901 }
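
/*
 * Illustrative usage sketch (not part of this file): a minimal Rx queue
 * setup for port 0. "NB_RXD" and "mb_pool" are assumptions supplied by
 * the application. Passing a NULL rx_conf selects the driver's
 * default_rxconf, and a non-NULL mempool selects the single-pool,
 * non-split path checked above; the mempool's data room must cover
 * RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize.
 *
 *	int rc = rte_eth_rx_queue_setup(0, 0, NB_RXD,
 *			rte_eth_dev_socket_id(0), NULL, mb_pool);
 */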
1902
1903 int
1904 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1905 uint16_t nb_rx_desc,
1906 const struct rte_eth_hairpin_conf *conf)
1907 {
1908 int ret;
1909 struct rte_eth_dev *dev;
1910 struct rte_eth_hairpin_cap cap;
1911 int i;
1912 int count;
1913
1914 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1915 dev = &rte_eth_devices[port_id];
1916
1917 if (rx_queue_id >= dev->data->nb_rx_queues) {
1918 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1919 return -EINVAL;
1920 }
1921
1922 if (conf == NULL) {
1923 RTE_ETHDEV_LOG(ERR,
1924 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
1925 port_id);
1926 return -EINVAL;
1927 }
1928
1929 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1930 if (ret != 0)
1931 return ret;
1932 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1933 -ENOTSUP);
1934 /* if nb_rx_desc is zero use max number of desc from the driver. */
1935 if (nb_rx_desc == 0)
1936 nb_rx_desc = cap.max_nb_desc;
1937 if (nb_rx_desc > cap.max_nb_desc) {
1938 RTE_ETHDEV_LOG(ERR,
1939 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
1940 nb_rx_desc, cap.max_nb_desc);
1941 return -EINVAL;
1942 }
1943 if (conf->peer_count > cap.max_rx_2_tx) {
1944 RTE_ETHDEV_LOG(ERR,
1945 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
1946 conf->peer_count, cap.max_rx_2_tx);
1947 return -EINVAL;
1948 }
1949 if (conf->peer_count == 0) {
1950 RTE_ETHDEV_LOG(ERR,
1951 "Invalid value for number of peers for Rx queue(=%u), should be: > 0",
1952 conf->peer_count);
1953 return -EINVAL;
1954 }
1955 for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1956 cap.max_nb_queues != UINT16_MAX; i++) {
1957 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1958 count++;
1959 }
1960 if (count > cap.max_nb_queues) {
1961 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d",
1962 cap.max_nb_queues);
1963 return -EINVAL;
1964 }
1965 if (dev->data->dev_started)
1966 return -EBUSY;
1967 eth_dev_rxq_release(dev, rx_queue_id);
1968 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1969 nb_rx_desc, conf);
1970 if (ret == 0)
1971 dev->data->rx_queue_state[rx_queue_id] =
1972 RTE_ETH_QUEUE_STATE_HAIRPIN;
1973 return eth_err(port_id, ret);
1974 }
1975
1976 int
1977 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1978 uint16_t nb_tx_desc, unsigned int socket_id,
1979 const struct rte_eth_txconf *tx_conf)
1980 {
1981 struct rte_eth_dev *dev;
1982 struct rte_eth_dev_info dev_info;
1983 struct rte_eth_txconf local_conf;
1984 int ret;
1985
1986 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1987 dev = &rte_eth_devices[port_id];
1988
1989 if (tx_queue_id >= dev->data->nb_tx_queues) {
1990 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
1991 return -EINVAL;
1992 }
1993
1994 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1995
1996 ret = rte_eth_dev_info_get(port_id, &dev_info);
1997 if (ret != 0)
1998 return ret;
1999
2000 /* Use default specified by driver, if nb_tx_desc is zero */
2001 if (nb_tx_desc == 0) {
2002 nb_tx_desc = dev_info.default_txportconf.ring_size;
2003 /* If driver default is zero, fall back on EAL default */
2004 if (nb_tx_desc == 0)
2005 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2006 }
2007 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2008 nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2009 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2010 RTE_ETHDEV_LOG(ERR,
2011 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2012 nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2013 dev_info.tx_desc_lim.nb_min,
2014 dev_info.tx_desc_lim.nb_align);
2015 return -EINVAL;
2016 }
2017
2018 if (dev->data->dev_started &&
2019 !(dev_info.dev_capa &
2020 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2021 return -EBUSY;
2022
2023 if (dev->data->dev_started &&
2024 (dev->data->tx_queue_state[tx_queue_id] !=
2025 RTE_ETH_QUEUE_STATE_STOPPED))
2026 return -EBUSY;
2027
2028 eth_dev_txq_release(dev, tx_queue_id);
2029
2030 if (tx_conf == NULL)
2031 tx_conf = &dev_info.default_txconf;
2032
2033 local_conf = *tx_conf;
2034
2035 	/*
2036 	 * If an offload has already been enabled in
2037 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2038 	 * so there is no need to enable it on this queue again.
2039 	 * The local_conf.offloads passed to the underlying PMD carries
2040 	 * only those offloads that are enabled on this queue but
2041 	 * not on all queues.
2042 	 */
2043 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2044
2045 	/*
2046 	 * Offloads newly added for this queue are those not enabled in
2047 	 * rte_eth_dev_configure(), and they must be per-queue capable.
2048 	 * A pure per-port offload can't be enabled on one queue while
2049 	 * disabled on another. Nor can a pure per-port offload be
2050 	 * newly enabled for a queue if it hasn't already been
2051 	 * enabled in rte_eth_dev_configure().
2052 	 */
2053 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2054 local_conf.offloads) {
2055 RTE_ETHDEV_LOG(ERR,
2056 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2057 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2058 port_id, tx_queue_id, local_conf.offloads,
2059 dev_info.tx_queue_offload_capa,
2060 __func__);
2061 return -EINVAL;
2062 }
2063
2064 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2065 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2066 tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2067 }
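
/*
 * Illustrative sketch (assumes the port was already configured via
 * rte_eth_dev_configure()): adding a per-queue Tx offload on top of the
 * port-wide configuration. Per the check above, only bits present in
 * dev_info.tx_queue_offload_capa may be newly enabled here.
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_txconf txq_conf;
 *
 *	rte_eth_dev_info_get(0, &info);
 *	txq_conf = info.default_txconf;
 *	if (info.tx_queue_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 *		txq_conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 *	rte_eth_tx_queue_setup(0, 0, 512, rte_eth_dev_socket_id(0),
 *			&txq_conf);
 */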
2068
2069 int
2070 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2071 uint16_t nb_tx_desc,
2072 const struct rte_eth_hairpin_conf *conf)
2073 {
2074 struct rte_eth_dev *dev;
2075 struct rte_eth_hairpin_cap cap;
2076 int i;
2077 int count;
2078 int ret;
2079
2080 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2081 dev = &rte_eth_devices[port_id];
2082
2083 if (tx_queue_id >= dev->data->nb_tx_queues) {
2084 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2085 return -EINVAL;
2086 }
2087
2088 if (conf == NULL) {
2089 RTE_ETHDEV_LOG(ERR,
2090 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2091 port_id);
2092 return -EINVAL;
2093 }
2094
2095 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2096 if (ret != 0)
2097 return ret;
2098 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2099 -ENOTSUP);
2100 	/* if nb_tx_desc is zero use max number of desc from the driver. */
2101 if (nb_tx_desc == 0)
2102 nb_tx_desc = cap.max_nb_desc;
2103 if (nb_tx_desc > cap.max_nb_desc) {
2104 RTE_ETHDEV_LOG(ERR,
2105 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2106 nb_tx_desc, cap.max_nb_desc);
2107 return -EINVAL;
2108 }
2109 if (conf->peer_count > cap.max_tx_2_rx) {
2110 RTE_ETHDEV_LOG(ERR,
2111 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2112 conf->peer_count, cap.max_tx_2_rx);
2113 return -EINVAL;
2114 }
2115 if (conf->peer_count == 0) {
2116 RTE_ETHDEV_LOG(ERR,
2117 "Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2118 conf->peer_count);
2119 return -EINVAL;
2120 }
2121 for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2122 cap.max_nb_queues != UINT16_MAX; i++) {
2123 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2124 count++;
2125 }
2126 if (count > cap.max_nb_queues) {
2127 RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d",
2128 cap.max_nb_queues);
2129 return -EINVAL;
2130 }
2131 if (dev->data->dev_started)
2132 return -EBUSY;
2133 eth_dev_txq_release(dev, tx_queue_id);
2134 ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2135 (dev, tx_queue_id, nb_tx_desc, conf);
2136 if (ret == 0)
2137 dev->data->tx_queue_state[tx_queue_id] =
2138 RTE_ETH_QUEUE_STATE_HAIRPIN;
2139 return eth_err(port_id, ret);
2140 }
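
/*
 * Illustrative sketch: a single-port hairpin pair, looping Rx queue
 * HAIRPIN_Q back to Tx queue HAIRPIN_Q on port 0. "HAIRPIN_Q" is an
 * assumption; it must be below the queue counts given to
 * rte_eth_dev_configure(), and both setups must happen before
 * rte_eth_dev_start().
 *
 *	struct rte_eth_hairpin_conf hp_conf = {
 *		.peer_count = 1,
 *		.peers[0] = { .port = 0, .queue = HAIRPIN_Q },
 *	};
 *
 *	rte_eth_rx_hairpin_queue_setup(0, HAIRPIN_Q, 0, &hp_conf);
 *	rte_eth_tx_hairpin_queue_setup(0, HAIRPIN_Q, 0, &hp_conf);
 */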
2141
2142 int
2143 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2144 {
2145 struct rte_eth_dev *dev;
2146 int ret;
2147
2148 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2149 dev = &rte_eth_devices[tx_port];
2150
2151 if (dev->data->dev_started == 0) {
2152 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2153 return -EBUSY;
2154 }
2155
2156 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2157 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2158 if (ret != 0)
2159 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2160 " to Rx %d (%d - all ports)\n",
2161 tx_port, rx_port, RTE_MAX_ETHPORTS);
2162
2163 return ret;
2164 }
2165
2166 int
2167 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2168 {
2169 struct rte_eth_dev *dev;
2170 int ret;
2171
2172 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2173 dev = &rte_eth_devices[tx_port];
2174
2175 if (dev->data->dev_started == 0) {
2176 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2177 return -EBUSY;
2178 }
2179
2180 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2181 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2182 if (ret != 0)
2183 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2184 " from Rx %d (%d - all ports)\n",
2185 tx_port, rx_port, RTE_MAX_ETHPORTS);
2186
2187 return ret;
2188 }
2189
2190 int
2191 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2192 size_t len, uint32_t direction)
2193 {
2194 struct rte_eth_dev *dev;
2195 int ret;
2196
2197 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2198 dev = &rte_eth_devices[port_id];
2199
2200 if (peer_ports == NULL) {
2201 RTE_ETHDEV_LOG(ERR,
2202 "Cannot get ethdev port %u hairpin peer ports to NULL\n",
2203 port_id);
2204 return -EINVAL;
2205 }
2206
2207 if (len == 0) {
2208 RTE_ETHDEV_LOG(ERR,
2209 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2210 port_id);
2211 return -EINVAL;
2212 }
2213
2214 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2215 -ENOTSUP);
2216
2217 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2218 len, direction);
2219 if (ret < 0)
2220 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n",
2221 port_id, direction ? "Rx" : "Tx");
2222
2223 return ret;
2224 }
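
/*
 * Illustrative sketch for two-port hairpin with manual binding
 * ("tx_port" is an assumption): once both ports are started, query the
 * Rx peers of the Tx port (a non-zero direction means Rx peers, per the
 * log message above) and bind to the first one.
 *
 *	uint16_t peers[RTE_MAX_ETHPORTS];
 *	int n = rte_eth_hairpin_get_peer_ports(tx_port, peers,
 *			RTE_DIM(peers), 1);
 *
 *	if (n > 0)
 *		rte_eth_hairpin_bind(tx_port, peers[0]);
 */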
2225
2226 void
2227 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2228 void *userdata __rte_unused)
2229 {
2230 rte_pktmbuf_free_bulk(pkts, unsent);
2231 }
2232
2233 void
2234 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2235 void *userdata)
2236 {
2237 uint64_t *count = userdata;
2238
2239 rte_pktmbuf_free_bulk(pkts, unsent);
2240 *count += unsent;
2241 }
2242
2243 int
2244 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2245 buffer_tx_error_fn cbfn, void *userdata)
2246 {
2247 if (buffer == NULL) {
2248 RTE_ETHDEV_LOG(ERR,
2249 "Cannot set Tx buffer error callback to NULL buffer\n");
2250 return -EINVAL;
2251 }
2252
2253 buffer->error_callback = cbfn;
2254 buffer->error_userdata = userdata;
2255 return 0;
2256 }
2257
2258 int
2259 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2260 {
2261 int ret = 0;
2262
2263 if (buffer == NULL) {
2264 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2265 return -EINVAL;
2266 }
2267
2268 buffer->size = size;
2269 if (buffer->error_callback == NULL) {
2270 ret = rte_eth_tx_buffer_set_err_callback(
2271 buffer, rte_eth_tx_buffer_drop_callback, NULL);
2272 }
2273
2274 return ret;
2275 }
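
/*
 * Illustrative sketch: a Tx buffer that counts dropped packets instead
 * of freeing them silently. "TX_BURST_SZ" is an assumption.
 *
 *	static uint64_t drop_count;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(TX_BURST_SZ), 0);
 *	if (buf != NULL) {
 *		rte_eth_tx_buffer_init(buf, TX_BURST_SZ);
 *		rte_eth_tx_buffer_set_err_callback(buf,
 *				rte_eth_tx_buffer_count_callback,
 *				&drop_count);
 *	}
 */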
2276
2277 int
2278 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2279 {
2280 struct rte_eth_dev *dev;
2281 int ret;
2282
2283 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2284 dev = &rte_eth_devices[port_id];
2285
2286 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2287
2288 /* Call driver to free pending mbufs. */
2289 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2290 free_cnt);
2291 return eth_err(port_id, ret);
2292 }
2293
2294 int
2295 rte_eth_promiscuous_enable(uint16_t port_id)
2296 {
2297 struct rte_eth_dev *dev;
2298 int diag = 0;
2299
2300 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2301 dev = &rte_eth_devices[port_id];
2302
2303 if (dev->data->promiscuous == 1)
2304 return 0;
2305
2306 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2307
2308 diag = (*dev->dev_ops->promiscuous_enable)(dev);
2309 dev->data->promiscuous = (diag == 0) ? 1 : 0;
2310
2311 return eth_err(port_id, diag);
2312 }
2313
2314 int
2315 rte_eth_promiscuous_disable(uint16_t port_id)
2316 {
2317 struct rte_eth_dev *dev;
2318 int diag = 0;
2319
2320 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2321 dev = &rte_eth_devices[port_id];
2322
2323 if (dev->data->promiscuous == 0)
2324 return 0;
2325
2326 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2327
2328 dev->data->promiscuous = 0;
2329 diag = (*dev->dev_ops->promiscuous_disable)(dev);
2330 if (diag != 0)
2331 dev->data->promiscuous = 1;
2332
2333 return eth_err(port_id, diag);
2334 }
2335
2336 int
2337 rte_eth_promiscuous_get(uint16_t port_id)
2338 {
2339 struct rte_eth_dev *dev;
2340
2341 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2342 dev = &rte_eth_devices[port_id];
2343
2344 return dev->data->promiscuous;
2345 }
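
/*
 * Illustrative sketch: since the cached flag above is only set when the
 * driver callback succeeds, a get after enable confirms the mode took
 * effect ("handle_promisc_failure" is a hypothetical handler).
 *
 *	if (rte_eth_promiscuous_enable(port_id) != 0 ||
 *	    rte_eth_promiscuous_get(port_id) != 1)
 *		handle_promisc_failure(port_id);
 */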
2346
2347 int
2348 rte_eth_allmulticast_enable(uint16_t port_id)
2349 {
2350 struct rte_eth_dev *dev;
2351 int diag;
2352
2353 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2354 dev = &rte_eth_devices[port_id];
2355
2356 if (dev->data->all_multicast == 1)
2357 return 0;
2358
2359 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2360 diag = (*dev->dev_ops->allmulticast_enable)(dev);
2361 dev->data->all_multicast = (diag == 0) ? 1 : 0;
2362
2363 return eth_err(port_id, diag);
2364 }
2365
2366 int
2367 rte_eth_allmulticast_disable(uint16_t port_id)
2368 {
2369 struct rte_eth_dev *dev;
2370 int diag;
2371
2372 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2373 dev = &rte_eth_devices[port_id];
2374
2375 if (dev->data->all_multicast == 0)
2376 return 0;
2377
2378 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2379 dev->data->all_multicast = 0;
2380 diag = (*dev->dev_ops->allmulticast_disable)(dev);
2381 if (diag != 0)
2382 dev->data->all_multicast = 1;
2383
2384 return eth_err(port_id, diag);
2385 }
2386
2387 int
2388 rte_eth_allmulticast_get(uint16_t port_id)
2389 {
2390 struct rte_eth_dev *dev;
2391
2392 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2393 dev = &rte_eth_devices[port_id];
2394
2395 return dev->data->all_multicast;
2396 }
2397
2398 int
2399 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2400 {
2401 struct rte_eth_dev *dev;
2402
2403 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2404 dev = &rte_eth_devices[port_id];
2405
2406 if (eth_link == NULL) {
2407 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2408 port_id);
2409 return -EINVAL;
2410 }
2411
2412 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2413 rte_eth_linkstatus_get(dev, eth_link);
2414 else {
2415 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2416 (*dev->dev_ops->link_update)(dev, 1);
2417 *eth_link = dev->data->dev_link;
2418 }
2419
2420 return 0;
2421 }
2422
2423 int
2424 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2425 {
2426 struct rte_eth_dev *dev;
2427
2428 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2429 dev = &rte_eth_devices[port_id];
2430
2431 if (eth_link == NULL) {
2432 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2433 port_id);
2434 return -EINVAL;
2435 }
2436
2437 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2438 rte_eth_linkstatus_get(dev, eth_link);
2439 else {
2440 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2441 (*dev->dev_ops->link_update)(dev, 0);
2442 *eth_link = dev->data->dev_link;
2443 }
2444
2445 return 0;
2446 }
2447
2448 const char *
2449 rte_eth_link_speed_to_str(uint32_t link_speed)
2450 {
2451 switch (link_speed) {
2452 case RTE_ETH_SPEED_NUM_NONE: return "None";
2453 case RTE_ETH_SPEED_NUM_10M: return "10 Mbps";
2454 case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
2455 case RTE_ETH_SPEED_NUM_1G: return "1 Gbps";
2456 case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2457 case RTE_ETH_SPEED_NUM_5G: return "5 Gbps";
2458 case RTE_ETH_SPEED_NUM_10G: return "10 Gbps";
2459 case RTE_ETH_SPEED_NUM_20G: return "20 Gbps";
2460 case RTE_ETH_SPEED_NUM_25G: return "25 Gbps";
2461 case RTE_ETH_SPEED_NUM_40G: return "40 Gbps";
2462 case RTE_ETH_SPEED_NUM_50G: return "50 Gbps";
2463 case RTE_ETH_SPEED_NUM_56G: return "56 Gbps";
2464 case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
2465 case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
2466 case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2467 default: return "Invalid";
2468 }
2469 }
2470
2471 int
2472 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2473 {
2474 if (str == NULL) {
2475 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2476 return -EINVAL;
2477 }
2478
2479 if (len == 0) {
2480 RTE_ETHDEV_LOG(ERR,
2481 "Cannot convert link to string with zero size\n");
2482 return -EINVAL;
2483 }
2484
2485 if (eth_link == NULL) {
2486 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2487 return -EINVAL;
2488 }
2489
2490 if (eth_link->link_status == RTE_ETH_LINK_DOWN)
2491 return snprintf(str, len, "Link down");
2492 else
2493 return snprintf(str, len, "Link up at %s %s %s",
2494 rte_eth_link_speed_to_str(eth_link->link_speed),
2495 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
2496 "FDX" : "HDX",
2497 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
2498 "Autoneg" : "Fixed");
2499 }
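
/*
 * Illustrative sketch: print the current link state without waiting for
 * link resolution. RTE_ETH_LINK_MAX_STR_LEN bounds the string produced
 * by rte_eth_link_to_str().
 *
 *	struct rte_eth_link link;
 *	char link_str[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *	    rte_eth_link_to_str(link_str, sizeof(link_str), &link) >= 0)
 *		printf("port %u: %s\n", port_id, link_str);
 */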
2500
2501 int
2502 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2503 {
2504 struct rte_eth_dev *dev;
2505
2506 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2507 dev = &rte_eth_devices[port_id];
2508
2509 if (stats == NULL) {
2510 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2511 port_id);
2512 return -EINVAL;
2513 }
2514
2515 memset(stats, 0, sizeof(*stats));
2516
2517 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2518 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2519 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2520 }
2521
2522 int
2523 rte_eth_stats_reset(uint16_t port_id)
2524 {
2525 struct rte_eth_dev *dev;
2526 int ret;
2527
2528 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2529 dev = &rte_eth_devices[port_id];
2530
2531 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2532 ret = (*dev->dev_ops->stats_reset)(dev);
2533 if (ret != 0)
2534 return eth_err(port_id, ret);
2535
2536 dev->data->rx_mbuf_alloc_failed = 0;
2537
2538 return 0;
2539 }
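
/*
 * Illustrative sketch: sample the basic counters, then clear them so
 * the next read starts from zero.
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0) {
 *		printf("rx %"PRIu64" tx %"PRIu64" missed %"PRIu64"\n",
 *				st.ipackets, st.opackets, st.imissed);
 *		rte_eth_stats_reset(port_id);
 *	}
 */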
2540
2541 static inline int
2542 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2543 {
2544 uint16_t nb_rxqs, nb_txqs;
2545 int count;
2546
2547 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2548 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2549
2550 count = RTE_NB_STATS;
2551 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2552 count += nb_rxqs * RTE_NB_RXQ_STATS;
2553 count += nb_txqs * RTE_NB_TXQ_STATS;
2554 }
2555
2556 return count;
2557 }
2558
2559 static int
2560 eth_dev_get_xstats_count(uint16_t port_id)
2561 {
2562 struct rte_eth_dev *dev;
2563 int count;
2564
2565 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2566 dev = &rte_eth_devices[port_id];
2567 if (dev->dev_ops->xstats_get_names != NULL) {
2568 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2569 if (count < 0)
2570 return eth_err(port_id, count);
2571 } else
2572 count = 0;
2573
2574
2575 count += eth_dev_get_xstats_basic_count(dev);
2576
2577 return count;
2578 }
2579
2580 int
2581 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2582 uint64_t *id)
2583 {
2584 int cnt_xstats, idx_xstat;
2585
2586 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2587
2588 if (xstat_name == NULL) {
2589 RTE_ETHDEV_LOG(ERR,
2590 "Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2591 port_id);
2592 return -ENOMEM;
2593 }
2594
2595 if (id == NULL) {
2596 RTE_ETHDEV_LOG(ERR,
2597 "Cannot get ethdev port %u xstats ID to NULL\n",
2598 port_id);
2599 return -ENOMEM;
2600 }
2601
2602 /* Get count */
2603 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2604 if (cnt_xstats < 0) {
2605 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2606 return -ENODEV;
2607 }
2608
2609 /* Get id-name lookup table */
2610 struct rte_eth_xstat_name xstats_names[cnt_xstats];
2611
2612 if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2613 port_id, xstats_names, cnt_xstats, NULL)) {
2614 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2615 return -1;
2616 }
2617
2618 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2619 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2620 *id = idx_xstat;
2621 return 0;
2622 		}
2623 }
2624
2625 return -EINVAL;
2626 }
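
/*
 * Illustrative sketch: read one extended statistic by name, here
 * "rx_good_packets" from the basic stats group.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *			&id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %"PRIu64"\n", value);
 */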
2627
2628 /* retrieve basic stats names */
2629 static int
2630 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2631 struct rte_eth_xstat_name *xstats_names)
2632 {
2633 int cnt_used_entries = 0;
2634 uint32_t idx, id_queue;
2635 uint16_t num_q;
2636
2637 for (idx = 0; idx < RTE_NB_STATS; idx++) {
2638 strlcpy(xstats_names[cnt_used_entries].name,
2639 eth_dev_stats_strings[idx].name,
2640 sizeof(xstats_names[0].name));
2641 cnt_used_entries++;
2642 }
2643
2644 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2645 return cnt_used_entries;
2646
2647 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2648 for (id_queue = 0; id_queue < num_q; id_queue++) {
2649 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2650 snprintf(xstats_names[cnt_used_entries].name,
2651 sizeof(xstats_names[0].name),
2652 "rx_q%u_%s",
2653 id_queue, eth_dev_rxq_stats_strings[idx].name);
2654 cnt_used_entries++;
2655 }
2656
2657 }
2658 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2659 for (id_queue = 0; id_queue < num_q; id_queue++) {
2660 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2661 snprintf(xstats_names[cnt_used_entries].name,
2662 sizeof(xstats_names[0].name),
2663 "tx_q%u_%s",
2664 id_queue, eth_dev_txq_stats_strings[idx].name);
2665 cnt_used_entries++;
2666 }
2667 }
2668 return cnt_used_entries;
2669 }
2670
2671 /* retrieve ethdev extended statistics names */
2672 int
2673 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2674 struct rte_eth_xstat_name *xstats_names, unsigned int size,
2675 uint64_t *ids)
2676 {
2677 struct rte_eth_xstat_name *xstats_names_copy;
2678 unsigned int no_basic_stat_requested = 1;
2679 unsigned int no_ext_stat_requested = 1;
2680 unsigned int expected_entries;
2681 unsigned int basic_count;
2682 struct rte_eth_dev *dev;
2683 unsigned int i;
2684 int ret;
2685
2686 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2687 dev = &rte_eth_devices[port_id];
2688
2689 basic_count = eth_dev_get_xstats_basic_count(dev);
2690 ret = eth_dev_get_xstats_count(port_id);
2691 if (ret < 0)
2692 return ret;
2693 expected_entries = (unsigned int)ret;
2694
2695 /* Return max number of stats if no ids given */
2696 if (!ids) {
2697 if (!xstats_names)
2698 return expected_entries;
2699 else if (xstats_names && size < expected_entries)
2700 return expected_entries;
2701 }
2702
2703 if (ids && !xstats_names)
2704 return -EINVAL;
2705
2706 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2707 uint64_t ids_copy[size];
2708
2709 for (i = 0; i < size; i++) {
2710 if (ids[i] < basic_count) {
2711 no_basic_stat_requested = 0;
2712 break;
2713 }
2714
2715 /*
2716 * Convert ids to xstats ids that PMD knows.
2717 * ids known by user are basic + extended stats.
2718 */
2719 ids_copy[i] = ids[i] - basic_count;
2720 }
2721
2722 if (no_basic_stat_requested)
2723 return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2724 ids_copy, xstats_names, size);
2725 }
2726
2727 /* Retrieve all stats */
2728 if (!ids) {
2729 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2730 expected_entries);
2731 if (num_stats < 0 || num_stats > (int)expected_entries)
2732 return num_stats;
2733 else
2734 return expected_entries;
2735 }
2736
2737 xstats_names_copy = calloc(expected_entries,
2738 sizeof(struct rte_eth_xstat_name));
2739
2740 if (!xstats_names_copy) {
2741 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2742 return -ENOMEM;
2743 }
2744
2745 if (ids) {
2746 for (i = 0; i < size; i++) {
2747 if (ids[i] >= basic_count) {
2748 no_ext_stat_requested = 0;
2749 break;
2750 }
2751 }
2752 }
2753
2754 /* Fill xstats_names_copy structure */
2755 if (ids && no_ext_stat_requested) {
2756 eth_basic_stats_get_names(dev, xstats_names_copy);
2757 } else {
2758 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2759 expected_entries);
2760 if (ret < 0) {
2761 free(xstats_names_copy);
2762 return ret;
2763 }
2764 }
2765
2766 /* Filter stats */
2767 for (i = 0; i < size; i++) {
2768 if (ids[i] >= expected_entries) {
2769 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2770 free(xstats_names_copy);
2771 return -1;
2772 }
2773 xstats_names[i] = xstats_names_copy[ids[i]];
2774 }
2775
2776 free(xstats_names_copy);
2777 return size;
2778 }
2779
2780 int
2781 rte_eth_xstats_get_names(uint16_t port_id,
2782 struct rte_eth_xstat_name *xstats_names,
2783 unsigned int size)
2784 {
2785 struct rte_eth_dev *dev;
2786 int cnt_used_entries;
2787 int cnt_expected_entries;
2788 int cnt_driver_entries;
2789
2790 cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2791 if (xstats_names == NULL || cnt_expected_entries < 0 ||
2792 (int)size < cnt_expected_entries)
2793 return cnt_expected_entries;
2794
2795 /* port_id checked in eth_dev_get_xstats_count() */
2796 dev = &rte_eth_devices[port_id];
2797
2798 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2799
2800 if (dev->dev_ops->xstats_get_names != NULL) {
2801 /* If there are any driver-specific xstats, append them
2802 * to end of list.
2803 */
2804 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2805 dev,
2806 xstats_names + cnt_used_entries,
2807 size - cnt_used_entries);
2808 if (cnt_driver_entries < 0)
2809 return eth_err(port_id, cnt_driver_entries);
2810 cnt_used_entries += cnt_driver_entries;
2811 }
2812
2813 return cnt_used_entries;
2814 }
2815
2816
2817 static int
2818 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2819 {
2820 struct rte_eth_dev *dev;
2821 struct rte_eth_stats eth_stats;
2822 unsigned int count = 0, i, q;
2823 uint64_t val, *stats_ptr;
2824 uint16_t nb_rxqs, nb_txqs;
2825 int ret;
2826
2827 	ret = rte_eth_stats_get(port_id, &eth_stats);
2828 if (ret < 0)
2829 return ret;
2830
2831 dev = &rte_eth_devices[port_id];
2832
2833 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2834 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2835
2836 /* global stats */
2837 for (i = 0; i < RTE_NB_STATS; i++) {
2838 		stats_ptr = RTE_PTR_ADD(&eth_stats,
2839 eth_dev_stats_strings[i].offset);
2840 val = *stats_ptr;
2841 xstats[count++].value = val;
2842 }
2843
2844 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2845 return count;
2846
2847 /* per-rxq stats */
2848 for (q = 0; q < nb_rxqs; q++) {
2849 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2850 			stats_ptr = RTE_PTR_ADD(&eth_stats,
2851 eth_dev_rxq_stats_strings[i].offset +
2852 q * sizeof(uint64_t));
2853 val = *stats_ptr;
2854 xstats[count++].value = val;
2855 }
2856 }
2857
2858 /* per-txq stats */
2859 for (q = 0; q < nb_txqs; q++) {
2860 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2861 			stats_ptr = RTE_PTR_ADD(&eth_stats,
2862 eth_dev_txq_stats_strings[i].offset +
2863 q * sizeof(uint64_t));
2864 val = *stats_ptr;
2865 xstats[count++].value = val;
2866 }
2867 }
2868 return count;
2869 }
2870
2871 /* retrieve ethdev extended statistics */
2872 int
2873 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2874 uint64_t *values, unsigned int size)
2875 {
2876 unsigned int no_basic_stat_requested = 1;
2877 unsigned int no_ext_stat_requested = 1;
2878 unsigned int num_xstats_filled;
2879 unsigned int basic_count;
2880 uint16_t expected_entries;
2881 struct rte_eth_dev *dev;
2882 unsigned int i;
2883 int ret;
2884
2885 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2886 dev = &rte_eth_devices[port_id];
2887
2888 ret = eth_dev_get_xstats_count(port_id);
2889 if (ret < 0)
2890 return ret;
2891 expected_entries = (uint16_t)ret;
2892 struct rte_eth_xstat xstats[expected_entries];
2893 basic_count = eth_dev_get_xstats_basic_count(dev);
2894
2895 /* Return max number of stats if no ids given */
2896 if (!ids) {
2897 if (!values)
2898 return expected_entries;
2899 else if (values && size < expected_entries)
2900 return expected_entries;
2901 }
2902
2903 if (ids && !values)
2904 return -EINVAL;
2905
2906 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2907 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
2908 uint64_t ids_copy[size];
2909
2910 for (i = 0; i < size; i++) {
2911 if (ids[i] < basic_count) {
2912 no_basic_stat_requested = 0;
2913 break;
2914 }
2915
2916 /*
2917 * Convert ids to xstats ids that PMD knows.
2918 * ids known by user are basic + extended stats.
2919 */
2920 ids_copy[i] = ids[i] - basic_count;
2921 }
2922
2923 if (no_basic_stat_requested)
2924 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2925 values, size);
2926 }
2927
2928 if (ids) {
2929 for (i = 0; i < size; i++) {
2930 if (ids[i] >= basic_count) {
2931 no_ext_stat_requested = 0;
2932 break;
2933 }
2934 }
2935 }
2936
2937 /* Fill the xstats structure */
2938 if (ids && no_ext_stat_requested)
2939 ret = eth_basic_stats_get(port_id, xstats);
2940 else
2941 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2942
2943 if (ret < 0)
2944 return ret;
2945 num_xstats_filled = (unsigned int)ret;
2946
2947 /* Return all stats */
2948 if (!ids) {
2949 for (i = 0; i < num_xstats_filled; i++)
2950 values[i] = xstats[i].value;
2951 return expected_entries;
2952 }
2953
2954 /* Filter stats */
2955 for (i = 0; i < size; i++) {
2956 if (ids[i] >= expected_entries) {
2957 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2958 return -1;
2959 }
2960 values[i] = xstats[ids[i]].value;
2961 }
2962 return size;
2963 }
2964
2965 int
2966 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2967 unsigned int n)
2968 {
2969 struct rte_eth_dev *dev;
2970 unsigned int count = 0, i;
2971 signed int xcount = 0;
2972 uint16_t nb_rxqs, nb_txqs;
2973 int ret;
2974
2975 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2976 dev = &rte_eth_devices[port_id];
2977
2978 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2979 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2980
2981 /* Return generic statistics */
2982 count = RTE_NB_STATS;
2983 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
2984 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
2985
2986 /* implemented by the driver */
2987 if (dev->dev_ops->xstats_get != NULL) {
2988 /* Retrieve the xstats from the driver at the end of the
2989 * xstats struct.
2990 */
2991 xcount = (*dev->dev_ops->xstats_get)(dev,
2992 xstats ? xstats + count : NULL,
2993 (n > count) ? n - count : 0);
2994
2995 if (xcount < 0)
2996 return eth_err(port_id, xcount);
2997 }
2998
2999 if (n < count + xcount || xstats == NULL)
3000 return count + xcount;
3001
3002 /* now fill the xstats structure */
3003 ret = eth_basic_stats_get(port_id, xstats);
3004 if (ret < 0)
3005 return ret;
3006 count = ret;
3007
3008 for (i = 0; i < count; i++)
3009 xstats[i].id = i;
3010 /* add an offset to driver-specific stats */
3011 for ( ; i < count + xcount; i++)
3012 xstats[i].id += count;
3013
3014 return count + xcount;
3015 }
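
/*
 * Illustrative sketch: the usual two-call pattern for dumping every
 * xstat; the first call with a NULL array returns the required size.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *		struct rte_eth_xstat_name *names =
 *				malloc(n * sizeof(*names));
 *
 *		if (xs != NULL && names != NULL &&
 *		    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *		    rte_eth_xstats_get(port_id, xs, n) == n)
 *			for (i = 0; i < n; i++)
 *				printf("%s: %"PRIu64"\n",
 *					names[xs[i].id].name, xs[i].value);
 *		free(xs);
 *		free(names);
 *	}
 */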
3016
3017 /* reset ethdev extended statistics */
3018 int
3019 rte_eth_xstats_reset(uint16_t port_id)
3020 {
3021 struct rte_eth_dev *dev;
3022
3023 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3024 dev = &rte_eth_devices[port_id];
3025
3026 /* implemented by the driver */
3027 if (dev->dev_ops->xstats_reset != NULL)
3028 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3029
3030 /* fallback to default */
3031 return rte_eth_stats_reset(port_id);
3032 }
3033
3034 static int
3035 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3036 uint8_t stat_idx, uint8_t is_rx)
3037 {
3038 struct rte_eth_dev *dev;
3039
3040 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3041 dev = &rte_eth_devices[port_id];
3042
3043 if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3044 return -EINVAL;
3045
3046 if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3047 return -EINVAL;
3048
3049 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3050 return -EINVAL;
3051
3052 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3053 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
3054 }
3055
3056 int
3057 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3058 uint8_t stat_idx)
3059 {
3060 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3061 tx_queue_id,
3062 stat_idx, STAT_QMAP_TX));
3063 }
3064
3065 int
3066 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3067 uint8_t stat_idx)
3068 {
3069 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3070 rx_queue_id,
3071 stat_idx, STAT_QMAP_RX));
3072 }
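
/*
 * Illustrative sketch: direct Tx queue 3's counters into per-queue
 * stats slot 0 (the slot must be below RTE_ETHDEV_QUEUE_STAT_CNTRS, as
 * checked above).
 *
 *	rte_eth_dev_set_tx_queue_stats_mapping(port_id, 3, 0);
 */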
3073
3074 int
3075 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3076 {
3077 struct rte_eth_dev *dev;
3078
3079 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3080 dev = &rte_eth_devices[port_id];
3081
3082 if (fw_version == NULL && fw_size > 0) {
3083 RTE_ETHDEV_LOG(ERR,
3084 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3085 port_id);
3086 return -EINVAL;
3087 }
3088
3089 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3090 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3091 fw_version, fw_size));
3092 }
3093
3094 int
3095 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3096 {
3097 struct rte_eth_dev *dev;
3098 const struct rte_eth_desc_lim lim = {
3099 .nb_max = UINT16_MAX,
3100 .nb_min = 0,
3101 .nb_align = 1,
3102 .nb_seg_max = UINT16_MAX,
3103 .nb_mtu_seg_max = UINT16_MAX,
3104 };
3105 int diag;
3106
3107 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3108 dev = &rte_eth_devices[port_id];
3109
3110 if (dev_info == NULL) {
3111 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3112 port_id);
3113 return -EINVAL;
3114 }
3115
3116 	/*
3117 	 * Init dev_info before invoking the driver callback so that a
3118 	 * caller that ignores the return status never reads stale fields.
3119 	 */
3120 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3121 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3122
3123 dev_info->rx_desc_lim = lim;
3124 dev_info->tx_desc_lim = lim;
3125 dev_info->device = dev->device;
3126 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3127 RTE_ETHER_CRC_LEN;
3128 dev_info->max_mtu = UINT16_MAX;
3129
3130 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3131 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3132 if (diag != 0) {
3133 /* Cleanup already filled in device information */
3134 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3135 return eth_err(port_id, diag);
3136 }
3137
3138 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3139 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3140 RTE_MAX_QUEUES_PER_PORT);
3141 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3142 RTE_MAX_QUEUES_PER_PORT);
3143
3144 dev_info->driver_name = dev->device->driver->name;
3145 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3146 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3147
3148 dev_info->dev_flags = &dev->data->dev_flags;
3149
3150 return 0;
3151 }
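
/*
 * Illustrative sketch: clamp an application-requested queue count
 * ("8" is an arbitrary example value) to what the device reports
 * before calling rte_eth_dev_configure().
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t nb_rxq = 8;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		nb_rxq = RTE_MIN(nb_rxq, info.max_rx_queues);
 */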
3152
3153 int
3154 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3155 {
3156 struct rte_eth_dev *dev;
3157
3158 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3159 dev = &rte_eth_devices[port_id];
3160
3161 if (dev_conf == NULL) {
3162 RTE_ETHDEV_LOG(ERR,
3163 "Cannot get ethdev port %u configuration to NULL\n",
3164 port_id);
3165 return -EINVAL;
3166 }
3167
3168 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3169
3170 return 0;
3171 }
3172
3173 int
3174 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3175 uint32_t *ptypes, int num)
3176 {
3177 int i, j;
3178 struct rte_eth_dev *dev;
3179 const uint32_t *all_ptypes;
3180
3181 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3182 dev = &rte_eth_devices[port_id];
3183
3184 if (ptypes == NULL && num > 0) {
3185 RTE_ETHDEV_LOG(ERR,
3186 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3187 port_id);
3188 return -EINVAL;
3189 }
3190
3191 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3192 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3193
3194 if (!all_ptypes)
3195 return 0;
3196
3197 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3198 if (all_ptypes[i] & ptype_mask) {
3199 if (j < num)
3200 ptypes[j] = all_ptypes[i];
3201 j++;
3202 }
3203
3204 return j;
3205 }
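
/*
 * Illustrative sketch: the two-call pattern for querying which L3
 * packet types the port can recognize.
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *ptypes = malloc(n * sizeof(*ptypes));
 *
 *		if (ptypes != NULL)
 *			n = rte_eth_dev_get_supported_ptypes(port_id,
 *					RTE_PTYPE_L3_MASK, ptypes, n);
 *		free(ptypes);
 *	}
 */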
3206
3207 int
3208 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3209 uint32_t *set_ptypes, unsigned int num)
3210 {
3211 const uint32_t valid_ptype_masks[] = {
3212 RTE_PTYPE_L2_MASK,
3213 RTE_PTYPE_L3_MASK,
3214 RTE_PTYPE_L4_MASK,
3215 RTE_PTYPE_TUNNEL_MASK,
3216 RTE_PTYPE_INNER_L2_MASK,
3217 RTE_PTYPE_INNER_L3_MASK,
3218 RTE_PTYPE_INNER_L4_MASK,
3219 };
3220 const uint32_t *all_ptypes;
3221 struct rte_eth_dev *dev;
3222 uint32_t unused_mask;
3223 unsigned int i, j;
3224 int ret;
3225
3226 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3227 dev = &rte_eth_devices[port_id];
3228
3229 if (num > 0 && set_ptypes == NULL) {
3230 RTE_ETHDEV_LOG(ERR,
3231 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3232 port_id);
3233 return -EINVAL;
3234 }
3235
3236 if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3237 *dev->dev_ops->dev_ptypes_set == NULL) {
3238 ret = 0;
3239 goto ptype_unknown;
3240 }
3241
3242 if (ptype_mask == 0) {
3243 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3244 ptype_mask);
3245 goto ptype_unknown;
3246 }
3247
3248 unused_mask = ptype_mask;
3249 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3250 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3251 if (mask && mask != valid_ptype_masks[i]) {
3252 ret = -EINVAL;
3253 goto ptype_unknown;
3254 }
3255 unused_mask &= ~valid_ptype_masks[i];
3256 }
3257
3258 if (unused_mask) {
3259 ret = -EINVAL;
3260 goto ptype_unknown;
3261 }
3262
3263 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3264 if (all_ptypes == NULL) {
3265 ret = 0;
3266 goto ptype_unknown;
3267 }
3268
3269 /*
3270 * Accommodate as many set_ptypes as possible. If the supplied
3271 	 * set_ptypes array is too small, fill it partially.
3272 */
3273 for (i = 0, j = 0; set_ptypes != NULL &&
3274 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3275 if (ptype_mask & all_ptypes[i]) {
3276 if (j < num - 1) {
3277 set_ptypes[j] = all_ptypes[i];
3278 j++;
3279 continue;
3280 }
3281 break;
3282 }
3283 }
3284
3285 if (set_ptypes != NULL && j < num)
3286 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3287
3288 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3289
3290 ptype_unknown:
3291 if (num > 0)
3292 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3293
3294 return ret;
3295 }
3296
3297 int
3298 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3299 unsigned int num)
3300 {
3301 int32_t ret;
3302 struct rte_eth_dev *dev;
3303 struct rte_eth_dev_info dev_info;
3304
3305 if (ma == NULL) {
3306 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
3307 return -EINVAL;
3308 }
3309
3310 /* will check for us that port_id is a valid one */
3311 ret = rte_eth_dev_info_get(port_id, &dev_info);
3312 if (ret != 0)
3313 return ret;
3314
3315 dev = &rte_eth_devices[port_id];
3316 num = RTE_MIN(dev_info.max_mac_addrs, num);
3317 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
3318
3319 return num;
3320 }
3321
3322 int
3323 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3324 {
3325 struct rte_eth_dev *dev;
3326
3327 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3328 dev = &rte_eth_devices[port_id];
3329
3330 if (mac_addr == NULL) {
3331 RTE_ETHDEV_LOG(ERR,
3332 "Cannot get ethdev port %u MAC address to NULL\n",
3333 port_id);
3334 return -EINVAL;
3335 }
3336
3337 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3338
3339 return 0;
3340 }
3341
3342 int
3343 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3344 {
3345 struct rte_eth_dev *dev;
3346
3347 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3348 dev = &rte_eth_devices[port_id];
3349
3350 if (mtu == NULL) {
3351 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3352 port_id);
3353 return -EINVAL;
3354 }
3355
3356 *mtu = dev->data->mtu;
3357 return 0;
3358 }
3359
3360 int
3361 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3362 {
3363 int ret;
3364 struct rte_eth_dev_info dev_info;
3365 struct rte_eth_dev *dev;
3366
3367 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3368 dev = &rte_eth_devices[port_id];
3369 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3370
3371 /*
3372 * Check if the device supports dev_infos_get, if it does not
3373 * skip min_mtu/max_mtu validation here as this requires values
3374 * that are populated within the call to rte_eth_dev_info_get()
3375 * which relies on dev->dev_ops->dev_infos_get.
3376 */
3377 if (*dev->dev_ops->dev_infos_get != NULL) {
3378 ret = rte_eth_dev_info_get(port_id, &dev_info);
3379 if (ret != 0)
3380 return ret;
3381
3382 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
3383 if (ret != 0)
3384 return ret;
3385 }
3386
3387 if (dev->data->dev_configured == 0) {
3388 RTE_ETHDEV_LOG(ERR,
3389 "Port %u must be configured before MTU set\n",
3390 port_id);
3391 return -EINVAL;
3392 }
3393
3394 ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3395 if (ret == 0)
3396 dev->data->mtu = mtu;
3397
3398 return eth_err(port_id, ret);
3399 }
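
/*
 * Illustrative sketch: raise the MTU for jumbo frames (9000 is an
 * arbitrary example value), staying within the driver-reported bounds
 * that eth_dev_validate_mtu() checks above.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0 &&
 *	    info.min_mtu <= 9000 && info.max_mtu >= 9000)
 *		rte_eth_dev_set_mtu(port_id, 9000);
 */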
3400
3401 int
3402 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3403 {
3404 struct rte_eth_dev *dev;
3405 int ret;
3406
3407 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3408 dev = &rte_eth_devices[port_id];
3409
3410 if (!(dev->data->dev_conf.rxmode.offloads &
3411 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
3412 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
3413 port_id);
3414 return -ENOSYS;
3415 }
3416
3417 if (vlan_id > 4095) {
3418 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3419 port_id, vlan_id);
3420 return -EINVAL;
3421 }
3422 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3423
3424 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3425 if (ret == 0) {
3426 struct rte_vlan_filter_conf *vfc;
3427 int vidx;
3428 int vbit;
3429
3430 vfc = &dev->data->vlan_filter_conf;
3431 vidx = vlan_id / 64;
3432 vbit = vlan_id % 64;
3433
3434 if (on)
3435 vfc->ids[vidx] |= RTE_BIT64(vbit);
3436 else
3437 vfc->ids[vidx] &= ~RTE_BIT64(vbit);
3438 }
3439
3440 return eth_err(port_id, ret);
3441 }
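
/*
 * Illustrative sketch: admit VLAN 100 on a port whose rxmode.offloads
 * already includes RTE_ETH_RX_OFFLOAD_VLAN_FILTER, as required by the
 * check above ("handle_vlan_failure" is a hypothetical handler).
 *
 *	if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *		handle_vlan_failure(port_id);
 */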
3442
3443 int
3444 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3445 int on)
3446 {
3447 struct rte_eth_dev *dev;
3448
3449 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3450 dev = &rte_eth_devices[port_id];
3451
3452 if (rx_queue_id >= dev->data->nb_rx_queues) {
3453 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3454 return -EINVAL;
3455 }
3456
3457 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3458 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3459
3460 return 0;
3461 }
3462
3463 int
3464 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3465 enum rte_vlan_type vlan_type,
3466 uint16_t tpid)
3467 {
3468 struct rte_eth_dev *dev;
3469
3470 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3471 dev = &rte_eth_devices[port_id];
3472
3473 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3474 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3475 tpid));
3476 }
3477
3478 int
3479 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3480 {
3481 struct rte_eth_dev_info dev_info;
3482 struct rte_eth_dev *dev;
3483 int ret = 0;
3484 int mask = 0;
3485 int cur, org = 0;
3486 uint64_t orig_offloads;
3487 uint64_t dev_offloads;
3488 uint64_t new_offloads;
3489
3490 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3491 dev = &rte_eth_devices[port_id];
3492
3493 /* save original values in case of failure */
3494 orig_offloads = dev->data->dev_conf.rxmode.offloads;
3495 dev_offloads = orig_offloads;
3496
3497 /* check which option changed by application */
3498 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
3499 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
3500 if (cur != org) {
3501 if (cur)
3502 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3503 else
3504 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3505 mask |= RTE_ETH_VLAN_STRIP_MASK;
3506 }
3507
3508 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
3509 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
3510 if (cur != org) {
3511 if (cur)
3512 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3513 else
3514 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3515 mask |= RTE_ETH_VLAN_FILTER_MASK;
3516 }
3517
3518 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
3519 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
3520 if (cur != org) {
3521 if (cur)
3522 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3523 else
3524 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3525 mask |= RTE_ETH_VLAN_EXTEND_MASK;
3526 }
3527
3528 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
3529 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
3530 if (cur != org) {
3531 if (cur)
3532 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3533 else
3534 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3535 mask |= RTE_ETH_QINQ_STRIP_MASK;
3536 }
3537
3538 	/* no change */
3539 if (mask == 0)
3540 return ret;
3541
3542 ret = rte_eth_dev_info_get(port_id, &dev_info);
3543 if (ret != 0)
3544 return ret;
3545
3546 /* Rx VLAN offloading must be within its device capabilities */
3547 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3548 new_offloads = dev_offloads & ~orig_offloads;
3549 RTE_ETHDEV_LOG(ERR,
3550 "Ethdev port_id=%u requested new added VLAN offloads "
3551 "0x%" PRIx64 " must be within Rx offloads capabilities "
3552 "0x%" PRIx64 " in %s()\n",
3553 port_id, new_offloads, dev_info.rx_offload_capa,
3554 __func__);
3555 return -EINVAL;
3556 }
3557
3558 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3559 dev->data->dev_conf.rxmode.offloads = dev_offloads;
3560 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3561 if (ret) {
3562 /* hit an error restore original values */
3563 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3564 }
3565
3566 return eth_err(port_id, ret);
3567 }
3568
3569 int
3570 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3571 {
3572 struct rte_eth_dev *dev;
3573 uint64_t *dev_offloads;
3574 int ret = 0;
3575
3576 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3577 dev = &rte_eth_devices[port_id];
3578 dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3579
3580 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
3581 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
3582
3583 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
3584 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
3585
3586 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
3587 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
3588
3589 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
3590 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
3591
3592 return ret;
3593 }
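
/*
 * Illustrative sketch: enable VLAN stripping at runtime without
 * disturbing the other VLAN offload bits, using read-modify-write.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0)
 *		rte_eth_dev_set_vlan_offload(port_id,
 *				mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
 */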
3594
3595 int
3596 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3597 {
3598 struct rte_eth_dev *dev;
3599
3600 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3601 dev = &rte_eth_devices[port_id];
3602
3603 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3604 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3605 }
3606
3607 int
3608 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3609 {
3610 struct rte_eth_dev *dev;
3611
3612 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3613 dev = &rte_eth_devices[port_id];
3614
3615 if (fc_conf == NULL) {
3616 RTE_ETHDEV_LOG(ERR,
3617 "Cannot get ethdev port %u flow control config to NULL\n",
3618 port_id);
3619 return -EINVAL;
3620 }
3621
3622 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3623 memset(fc_conf, 0, sizeof(*fc_conf));
3624 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3625 }
3626
3627 int
3628 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3629 {
3630 struct rte_eth_dev *dev;
3631
3632 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3633 dev = &rte_eth_devices[port_id];
3634
3635 if (fc_conf == NULL) {
3636 RTE_ETHDEV_LOG(ERR,
3637 "Cannot set ethdev port %u flow control from NULL config\n",
3638 port_id);
3639 return -EINVAL;
3640 }
3641
3642 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3643 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3644 return -EINVAL;
3645 }
3646
3647 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3648 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3649 }
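
/*
 * Illustrative sketch: switch a port to full (Rx+Tx) link-level flow
 * control with the read-modify-write pattern.
 *
 *	struct rte_eth_fc_conf fc;
 *
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *		fc.mode = RTE_ETH_FC_FULL;
 *		rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *	}
 */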
3650
3651 int
3652 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3653 struct rte_eth_pfc_conf *pfc_conf)
3654 {
3655 struct rte_eth_dev *dev;
3656
3657 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3658 dev = &rte_eth_devices[port_id];
3659
3660 if (pfc_conf == NULL) {
3661 RTE_ETHDEV_LOG(ERR,
3662 "Cannot set ethdev port %u priority flow control from NULL config\n",
3663 port_id);
3664 return -EINVAL;
3665 }
3666
3667 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3668 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3669 return -EINVAL;
3670 }
3671
3672 /* High water and low water validation are device-specific */
3673 if (*dev->dev_ops->priority_flow_ctrl_set)
3674 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3675 (dev, pfc_conf));
3676 return -ENOTSUP;
3677 }
3678
3679 static int
3680 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3681 struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3682 {
3683 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
3684 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3685 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
3686 RTE_ETHDEV_LOG(ERR,
3687 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
3688 pfc_queue_conf->rx_pause.tx_qid,
3689 dev_info->nb_tx_queues);
3690 return -EINVAL;
3691 }
3692
3693 if (pfc_queue_conf->rx_pause.tc >= tc_max) {
3694 RTE_ETHDEV_LOG(ERR,
3695 "PFC TC not in range for Rx pause requested:%d max:%d\n",
3696 pfc_queue_conf->rx_pause.tc, tc_max);
3697 return -EINVAL;
3698 }
3699 }
3700
3701 return 0;
3702 }
3703
3704 static int
3705 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3706 struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3707 {
3708 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
3709 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3710 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
3711 RTE_ETHDEV_LOG(ERR,
3712 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
3713 pfc_queue_conf->tx_pause.rx_qid,
3714 dev_info->nb_rx_queues);
3715 return -EINVAL;
3716 }
3717
3718 if (pfc_queue_conf->tx_pause.tc >= tc_max) {
3719 RTE_ETHDEV_LOG(ERR,
3720 "PFC TC not in range for Tx pause requested:%d max:%d\n",
3721 pfc_queue_conf->tx_pause.tc, tc_max);
3722 return -EINVAL;
3723 }
3724 }
3725
3726 return 0;
3727 }
3728
3729 int
3730 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
3731 struct rte_eth_pfc_queue_info *pfc_queue_info)
3732 {
3733 struct rte_eth_dev *dev;
3734
3735 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3736 dev = &rte_eth_devices[port_id];
3737
3738 if (pfc_queue_info == NULL) {
3739 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
3740 port_id);
3741 return -EINVAL;
3742 }
3743
3744 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3745 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3746 (dev, pfc_queue_info));
3747 return -ENOTSUP;
3748 }
3749
3750 int
3751 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
3752 struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3753 {
3754 struct rte_eth_pfc_queue_info pfc_info;
3755 struct rte_eth_dev_info dev_info;
3756 struct rte_eth_dev *dev;
3757 int ret;
3758
3759 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3760 dev = &rte_eth_devices[port_id];
3761
3762 if (pfc_queue_conf == NULL) {
3763 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
3764 port_id);
3765 return -EINVAL;
3766 }
3767
3768 ret = rte_eth_dev_info_get(port_id, &dev_info);
3769 if (ret != 0)
3770 return ret;
3771
3772 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
3773 if (ret != 0)
3774 return ret;
3775
3776 if (pfc_info.tc_max == 0) {
3777 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
3778 port_id);
3779 return -ENOTSUP;
3780 }
3781
3782 /* Check whether the requested mode is supported */
3783 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
3784 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
3785 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
3786 port_id);
3787 return -EINVAL;
3788 }
3789
3790 if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
3791 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
3792 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
3793 port_id);
3794 return -EINVAL;
3795 }
3796
3797 /* Validate Rx pause parameters */
3798 if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3799 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
3800 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
3801 pfc_queue_conf);
3802 if (ret != 0)
3803 return ret;
3804 }
3805
3806 /* Validate Tx pause parameters */
3807 if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3808 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
3809 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
3810 pfc_queue_conf);
3811 if (ret != 0)
3812 return ret;
3813 }
3814
3815 if (*dev->dev_ops->priority_flow_ctrl_queue_config)
3816 return eth_err(port_id,
3817 (*dev->dev_ops->priority_flow_ctrl_queue_config)(
3818 dev, pfc_queue_conf));
3819 return -ENOTSUP;
3820 }
3821
3822 static int
3823 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3824 uint16_t reta_size)
3825 {
3826 uint16_t i, num;
3827
3828 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
3829 for (i = 0; i < num; i++) {
3830 if (reta_conf[i].mask)
3831 return 0;
3832 }
3833
3834 return -EINVAL;
3835 }
3836
3837 static int
3838 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3839 uint16_t reta_size,
3840 uint16_t max_rxq)
3841 {
3842 uint16_t i, idx, shift;
3843
3844 if (max_rxq == 0) {
3845 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3846 return -EINVAL;
3847 }
3848
3849 for (i = 0; i < reta_size; i++) {
3850 idx = i / RTE_ETH_RETA_GROUP_SIZE;
3851 shift = i % RTE_ETH_RETA_GROUP_SIZE;
3852 if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
3853 (reta_conf[idx].reta[shift] >= max_rxq)) {
3854 RTE_ETHDEV_LOG(ERR,
3855 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3856 idx, shift,
3857 reta_conf[idx].reta[shift], max_rxq);
3858 return -EINVAL;
3859 }
3860 }
3861
3862 return 0;
3863 }
3864
3865 int
3866 rte_eth_dev_rss_reta_update(uint16_t port_id,
3867 struct rte_eth_rss_reta_entry64 *reta_conf,
3868 uint16_t reta_size)
3869 {
3870 enum rte_eth_rx_mq_mode mq_mode;
3871 struct rte_eth_dev *dev;
3872 int ret;
3873
3874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3875 dev = &rte_eth_devices[port_id];
3876
3877 if (reta_conf == NULL) {
3878 RTE_ETHDEV_LOG(ERR,
3879 "Cannot update ethdev port %u RSS RETA to NULL\n",
3880 port_id);
3881 return -EINVAL;
3882 }
3883
3884 if (reta_size == 0) {
3885 RTE_ETHDEV_LOG(ERR,
3886 "Cannot update ethdev port %u RSS RETA with zero size\n",
3887 port_id);
3888 return -EINVAL;
3889 }
3890
3891 /* Check mask bits */
3892 ret = eth_check_reta_mask(reta_conf, reta_size);
3893 if (ret < 0)
3894 return ret;
3895
3896 /* Check entry value */
3897 ret = eth_check_reta_entry(reta_conf, reta_size,
3898 dev->data->nb_rx_queues);
3899 if (ret < 0)
3900 return ret;
3901
3902 mq_mode = dev->data->dev_conf.rxmode.mq_mode;
3903 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
3904 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
3905 return -ENOTSUP;
3906 }
3907
3908 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3909 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3910 reta_size));
3911 }
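
/*
 * Usage sketch (editorial addition): callers pack the RETA in groups
 * of RTE_ETH_RETA_GROUP_SIZE entries and must set the per-group mask
 * bit for every entry they want applied, mirroring the idx/shift
 * decomposition in eth_check_reta_entry() above. RETA_CONF_SIZE is an
 * illustrative constant; reta_size and nb_rx_queues come from the
 * device configuration.
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (i = 0; i < reta_size; i++) {
 *         uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
 *         uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 *         reta_conf[idx].mask |= RTE_BIT64(shift);
 *         reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *     }
 *     ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */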
3912
3913 int
3914 rte_eth_dev_rss_reta_query(uint16_t port_id,
3915 struct rte_eth_rss_reta_entry64 *reta_conf,
3916 uint16_t reta_size)
3917 {
3918 struct rte_eth_dev *dev;
3919 int ret;
3920
3921 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3922 dev = &rte_eth_devices[port_id];
3923
3924 if (reta_conf == NULL) {
3925 RTE_ETHDEV_LOG(ERR,
3926 "Cannot query ethdev port %u RSS RETA from NULL config\n",
3927 port_id);
3928 return -EINVAL;
3929 }
3930
3931 /* Check mask bits */
3932 ret = eth_check_reta_mask(reta_conf, reta_size);
3933 if (ret < 0)
3934 return ret;
3935
3936 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3937 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3938 reta_size));
3939 }
3940
3941 int
3942 rte_eth_dev_rss_hash_update(uint16_t port_id,
3943 struct rte_eth_rss_conf *rss_conf)
3944 {
3945 struct rte_eth_dev *dev;
3946 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3947 enum rte_eth_rx_mq_mode mq_mode;
3948 int ret;
3949
3950 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3951 dev = &rte_eth_devices[port_id];
3952
3953 if (rss_conf == NULL) {
3954 RTE_ETHDEV_LOG(ERR,
3955 "Cannot update ethdev port %u RSS hash from NULL config\n",
3956 port_id);
3957 return -EINVAL;
3958 }
3959
3960 ret = rte_eth_dev_info_get(port_id, &dev_info);
3961 if (ret != 0)
3962 return ret;
3963
3964 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3965 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3966 dev_info.flow_type_rss_offloads) {
3967 RTE_ETHDEV_LOG(ERR,
3968 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3969 port_id, rss_conf->rss_hf,
3970 dev_info.flow_type_rss_offloads);
3971 return -EINVAL;
3972 }
3973
3974 mq_mode = dev->data->dev_conf.rxmode.mq_mode;
3975 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
3976 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
3977 return -ENOTSUP;
3978 }
3979
3980 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3981 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3982 rss_conf));
3983 }
3984
3985 int
3986 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3987 struct rte_eth_rss_conf *rss_conf)
3988 {
3989 struct rte_eth_dev *dev;
3990
3991 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3992 dev = &rte_eth_devices[port_id];
3993
3994 if (rss_conf == NULL) {
3995 RTE_ETHDEV_LOG(ERR,
3996 "Cannot get ethdev port %u RSS hash config to NULL\n",
3997 port_id);
3998 return -EINVAL;
3999 }
4000
4001 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
4002 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4003 rss_conf));
4004 }
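
/*
 * Usage sketch (editorial addition): query the active hash config and
 * narrow it, assuming RSS was enabled in the Rx multi-queue mode.
 * Leaving rss_key as NULL asks the driver not to return the key
 * itself, only the rss_hf flags.
 *
 *     struct rte_eth_rss_conf rss_conf = { .rss_key = NULL };
 *     if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) == 0) {
 *         rss_conf.rss_hf &= RTE_ETH_RSS_IP;
 *         (void)rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 *     }
 */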
4005
4006 int
4007 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4008 struct rte_eth_udp_tunnel *udp_tunnel)
4009 {
4010 struct rte_eth_dev *dev;
4011
4012 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4013 dev = &rte_eth_devices[port_id];
4014
4015 if (udp_tunnel == NULL) {
4016 RTE_ETHDEV_LOG(ERR,
4017 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4018 port_id);
4019 return -EINVAL;
4020 }
4021
4022 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4023 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4024 return -EINVAL;
4025 }
4026
4027 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
4028 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4029 udp_tunnel));
4030 }
4031
4032 int
4033 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4034 struct rte_eth_udp_tunnel *udp_tunnel)
4035 {
4036 struct rte_eth_dev *dev;
4037
4038 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4039 dev = &rte_eth_devices[port_id];
4040
4041 if (udp_tunnel == NULL) {
4042 RTE_ETHDEV_LOG(ERR,
4043 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4044 port_id);
4045 return -EINVAL;
4046 }
4047
4048 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4049 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4050 return -EINVAL;
4051 }
4052
4053 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
4054 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4055 udp_tunnel));
4056 }
4057
4058 int
4059 rte_eth_led_on(uint16_t port_id)
4060 {
4061 struct rte_eth_dev *dev;
4062
4063 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4064 dev = &rte_eth_devices[port_id];
4065
4066 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
4067 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4068 }
4069
4070 int
4071 rte_eth_led_off(uint16_t port_id)
4072 {
4073 struct rte_eth_dev *dev;
4074
4075 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4076 dev = &rte_eth_devices[port_id];
4077
4078 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
4079 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4080 }
4081
4082 int
4083 rte_eth_fec_get_capability(uint16_t port_id,
4084 struct rte_eth_fec_capa *speed_fec_capa,
4085 unsigned int num)
4086 {
4087 struct rte_eth_dev *dev;
4088 int ret;
4089
4090 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4091 dev = &rte_eth_devices[port_id];
4092
4093 if (speed_fec_capa == NULL && num > 0) {
4094 RTE_ETHDEV_LOG(ERR,
4095 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4096 port_id);
4097 return -EINVAL;
4098 }
4099
4100 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
4101 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4102
4103 return ret;
4104 }
4105
4106 int
4107 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4108 {
4109 struct rte_eth_dev *dev;
4110
4111 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4112 dev = &rte_eth_devices[port_id];
4113
4114 if (fec_capa == NULL) {
4115 RTE_ETHDEV_LOG(ERR,
4116 "Cannot get ethdev port %u current FEC mode to NULL\n",
4117 port_id);
4118 return -EINVAL;
4119 }
4120
4121 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
4122 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4123 }
4124
4125 int
4126 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4127 {
4128 struct rte_eth_dev *dev;
4129
4130 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4131 dev = &rte_eth_devices[port_id];
4132
4133 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
4134 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4135 }
4136
4137 /*
4138 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4139 * an empty spot.
4140 */
4141 static int
4142 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4143 {
4144 struct rte_eth_dev_info dev_info;
4145 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4146 unsigned i;
4147 int ret;
4148
4149 ret = rte_eth_dev_info_get(port_id, &dev_info);
4150 if (ret != 0)
4151 return -1;
4152
4153 for (i = 0; i < dev_info.max_mac_addrs; i++)
4154 if (memcmp(addr, &dev->data->mac_addrs[i],
4155 RTE_ETHER_ADDR_LEN) == 0)
4156 return i;
4157
4158 return -1;
4159 }
4160
4161 static const struct rte_ether_addr null_mac_addr;
4162
4163 int
4164 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4165 uint32_t pool)
4166 {
4167 struct rte_eth_dev *dev;
4168 int index;
4169 uint64_t pool_mask;
4170 int ret;
4171
4172 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4173 dev = &rte_eth_devices[port_id];
4174
4175 if (addr == NULL) {
4176 RTE_ETHDEV_LOG(ERR,
4177 "Cannot add ethdev port %u MAC address from NULL address\n",
4178 port_id);
4179 return -EINVAL;
4180 }
4181
4182 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4183
4184 if (rte_is_zero_ether_addr(addr)) {
4185 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4186 port_id);
4187 return -EINVAL;
4188 }
4189 if (pool >= RTE_ETH_64_POOLS) {
4190 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
4191 return -EINVAL;
4192 }
4193
4194 index = eth_dev_get_mac_addr_index(port_id, addr);
4195 if (index < 0) {
4196 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4197 if (index < 0) {
4198 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4199 port_id);
4200 return -ENOSPC;
4201 }
4202 } else {
4203 pool_mask = dev->data->mac_pool_sel[index];
4204
4205 /* If both the MAC address and pool are already present, do nothing */
4206 if (pool_mask & RTE_BIT64(pool))
4207 return 0;
4208 }
4209
4210 /* Update NIC */
4211 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4212
4213 if (ret == 0) {
4214 /* Update address in NIC data structure */
4215 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4216
4217 /* Update pool bitmap in NIC data structure */
4218 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
4219 }
4220
4221 return eth_err(port_id, ret);
4222 }
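
/*
 * Usage sketch (editorial addition): add a secondary, locally
 * administered unicast address; the pool argument selects a VMDq pool
 * on devices that support pools and is simply 0 otherwise.
 *
 *     struct rte_ether_addr mac = {
 *         .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *     ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */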
4223
4224 int
4225 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4226 {
4227 struct rte_eth_dev *dev;
4228 int index;
4229
4230 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4231 dev = &rte_eth_devices[port_id];
4232
4233 if (addr == NULL) {
4234 RTE_ETHDEV_LOG(ERR,
4235 "Cannot remove ethdev port %u MAC address from NULL address\n",
4236 port_id);
4237 return -EINVAL;
4238 }
4239
4240 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4241
4242 index = eth_dev_get_mac_addr_index(port_id, addr);
4243 if (index == 0) {
4244 RTE_ETHDEV_LOG(ERR,
4245 "Port %u: Cannot remove default MAC address\n",
4246 port_id);
4247 return -EADDRINUSE;
4248 } else if (index < 0)
4249 return 0; /* Do nothing if address wasn't found */
4250
4251 /* Update NIC */
4252 (*dev->dev_ops->mac_addr_remove)(dev, index);
4253
4254 /* Update address in NIC data structure */
4255 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4256
4257 /* reset pool bitmap */
4258 dev->data->mac_pool_sel[index] = 0;
4259
4260 return 0;
4261 }
4262
4263 int
4264 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4265 {
4266 struct rte_eth_dev *dev;
4267 int ret;
4268
4269 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4270 dev = &rte_eth_devices[port_id];
4271
4272 if (addr == NULL) {
4273 RTE_ETHDEV_LOG(ERR,
4274 "Cannot set ethdev port %u default MAC address from NULL address\n",
4275 port_id);
4276 return -EINVAL;
4277 }
4278
4279 if (!rte_is_valid_assigned_ether_addr(addr))
4280 return -EINVAL;
4281
4282 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4283
4284 ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4285 if (ret < 0)
4286 return ret;
4287
4288 /* Update default address in NIC data structure */
4289 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4290
4291 return 0;
4292 }
4293
4294
4295 /*
4296 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4297 * an empty spot.
4298 */
4299 static int
4300 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4301 const struct rte_ether_addr *addr)
4302 {
4303 struct rte_eth_dev_info dev_info;
4304 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4305 unsigned i;
4306 int ret;
4307
4308 ret = rte_eth_dev_info_get(port_id, &dev_info);
4309 if (ret != 0)
4310 return -1;
4311
4312 if (!dev->data->hash_mac_addrs)
4313 return -1;
4314
4315 for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4316 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4317 RTE_ETHER_ADDR_LEN) == 0)
4318 return i;
4319
4320 return -1;
4321 }
4322
4323 int
4324 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4325 uint8_t on)
4326 {
4327 int index;
4328 int ret;
4329 struct rte_eth_dev *dev;
4330
4331 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4332 dev = &rte_eth_devices[port_id];
4333
4334 if (addr == NULL) {
4335 RTE_ETHDEV_LOG(ERR,
4336 "Cannot set ethdev port %u unicast hash table from NULL address\n",
4337 port_id);
4338 return -EINVAL;
4339 }
4340
4341 if (rte_is_zero_ether_addr(addr)) {
4342 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4343 port_id);
4344 return -EINVAL;
4345 }
4346
4347 index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4348 /* If it's already there, do nothing */
4349 if ((index >= 0) && on)
4350 return 0;
4351
4352 if (index < 0) {
4353 if (!on) {
4354 RTE_ETHDEV_LOG(ERR,
4355 "Port %u: the MAC address was not set in UTA\n",
4356 port_id);
4357 return -EINVAL;
4358 }
4359
4360 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4361 if (index < 0) {
4362 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4363 port_id);
4364 return -ENOSPC;
4365 }
4366 }
4367
4368 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4369 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4370 if (ret == 0) {
4371 /* Update address in NIC data structure */
4372 if (on)
4373 rte_ether_addr_copy(addr,
4374 &dev->data->hash_mac_addrs[index]);
4375 else
4376 rte_ether_addr_copy(&null_mac_addr,
4377 &dev->data->hash_mac_addrs[index]);
4378 }
4379
4380 return eth_err(port_id, ret);
4381 }
4382
4383 int
4384 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4385 {
4386 struct rte_eth_dev *dev;
4387
4388 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4389 dev = &rte_eth_devices[port_id];
4390
4391 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4392 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4393 on));
4394 }
4395
4396 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4397 uint16_t tx_rate)
4398 {
4399 struct rte_eth_dev *dev;
4400 struct rte_eth_dev_info dev_info;
4401 struct rte_eth_link link;
4402 int ret;
4403
4404 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4405 dev = &rte_eth_devices[port_id];
4406
4407 ret = rte_eth_dev_info_get(port_id, &dev_info);
4408 if (ret != 0)
4409 return ret;
4410
4411 link = dev->data->dev_link;
4412
4413 if (queue_idx > dev_info.max_tx_queues) {
4414 RTE_ETHDEV_LOG(ERR,
4415 "Set queue rate limit:port %u: invalid queue ID=%u\n",
4416 port_id, queue_idx);
4417 return -EINVAL;
4418 }
4419
4420 if (tx_rate > link.link_speed) {
4421 RTE_ETHDEV_LOG(ERR,
4422 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
4423 tx_rate, link.link_speed);
4424 return -EINVAL;
4425 }
4426
4427 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4428 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4429 queue_idx, tx_rate));
4430 }
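
/*
 * Usage sketch (editorial addition): cap Tx queue 0 at 1000 Mbps; per
 * the checks above, the rate (expressed in Mbps) must not exceed the
 * current link speed.
 *
 *     ret = rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 */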
4431
4432 RTE_INIT(eth_dev_init_fp_ops)
4433 {
4434 uint32_t i;
4435
4436 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
4437 eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
4438 }
4439
4440 RTE_INIT(eth_dev_init_cb_lists)
4441 {
4442 uint16_t i;
4443
4444 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4445 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4446 }
4447
4448 int
4449 rte_eth_dev_callback_register(uint16_t port_id,
4450 enum rte_eth_event_type event,
4451 rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4452 {
4453 struct rte_eth_dev *dev;
4454 struct rte_eth_dev_callback *user_cb;
4455 uint16_t next_port;
4456 uint16_t last_port;
4457
4458 if (cb_fn == NULL) {
4459 RTE_ETHDEV_LOG(ERR,
4460 "Cannot register ethdev port %u callback from NULL\n",
4461 port_id);
4462 return -EINVAL;
4463 }
4464
4465 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4466 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4467 return -EINVAL;
4468 }
4469
4470 if (port_id == RTE_ETH_ALL) {
4471 next_port = 0;
4472 last_port = RTE_MAX_ETHPORTS - 1;
4473 } else {
4474 next_port = last_port = port_id;
4475 }
4476
4477 rte_spinlock_lock(&eth_dev_cb_lock);
4478
4479 do {
4480 dev = &rte_eth_devices[next_port];
4481
4482 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4483 if (user_cb->cb_fn == cb_fn &&
4484 user_cb->cb_arg == cb_arg &&
4485 user_cb->event == event) {
4486 break;
4487 }
4488 }
4489
4490 /* create a new callback. */
4491 if (user_cb == NULL) {
4492 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4493 sizeof(struct rte_eth_dev_callback), 0);
4494 if (user_cb != NULL) {
4495 user_cb->cb_fn = cb_fn;
4496 user_cb->cb_arg = cb_arg;
4497 user_cb->event = event;
4498 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4499 user_cb, next);
4500 } else {
4501 rte_spinlock_unlock(ð_dev_cb_lock);
4502 rte_eth_dev_callback_unregister(port_id, event,
4503 cb_fn, cb_arg);
4504 return -ENOMEM;
4505 }
4506
4507 }
4508 } while (++next_port <= last_port);
4509
4510 rte_spinlock_unlock(&eth_dev_cb_lock);
4511 return 0;
4512 }
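
/*
 * Usage sketch (editorial addition): register a link-status handler on
 * every port; link_event_cb is an illustrative name, not an existing
 * helper.
 *
 *     static int
 *     link_event_cb(uint16_t port, enum rte_eth_event_type type,
 *             void *cb_arg, void *ret_param)
 *     {
 *         RTE_SET_USED(cb_arg);
 *         RTE_SET_USED(ret_param);
 *         printf("port %u: event %d\n", port, (int)type);
 *         return 0;
 *     }
 *
 *     ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
 *             RTE_ETH_EVENT_INTR_LSC, link_event_cb, NULL);
 */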
4513
4514 int
4515 rte_eth_dev_callback_unregister(uint16_t port_id,
4516 enum rte_eth_event_type event,
4517 rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4518 {
4519 int ret;
4520 struct rte_eth_dev *dev;
4521 struct rte_eth_dev_callback *cb, *next;
4522 uint16_t next_port;
4523 uint16_t last_port;
4524
4525 if (cb_fn == NULL) {
4526 RTE_ETHDEV_LOG(ERR,
4527 "Cannot unregister ethdev port %u callback from NULL\n",
4528 port_id);
4529 return -EINVAL;
4530 }
4531
4532 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4533 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4534 return -EINVAL;
4535 }
4536
4537 if (port_id == RTE_ETH_ALL) {
4538 next_port = 0;
4539 last_port = RTE_MAX_ETHPORTS - 1;
4540 } else {
4541 next_port = last_port = port_id;
4542 }
4543
4544 rte_spinlock_lock(&eth_dev_cb_lock);
4545
4546 do {
4547 dev = &rte_eth_devices[next_port];
4548 ret = 0;
4549 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4550 cb = next) {
4551
4552 next = TAILQ_NEXT(cb, next);
4553
4554 if (cb->cb_fn != cb_fn || cb->event != event ||
4555 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4556 continue;
4557
4558 /*
4559 * if this callback is not executing right now,
4560 * then remove it.
4561 */
4562 if (cb->active == 0) {
4563 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4564 rte_free(cb);
4565 } else {
4566 ret = -EAGAIN;
4567 }
4568 }
4569 } while (++next_port <= last_port);
4570
4571 rte_spinlock_unlock(&eth_dev_cb_lock);
4572 return ret;
4573 }
4574
4575 int
4576 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4577 {
4578 uint32_t vec;
4579 struct rte_eth_dev *dev;
4580 struct rte_intr_handle *intr_handle;
4581 uint16_t qid;
4582 int rc;
4583
4584 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4585 dev = &rte_eth_devices[port_id];
4586
4587 if (!dev->intr_handle) {
4588 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4589 return -ENOTSUP;
4590 }
4591
4592 intr_handle = dev->intr_handle;
4593 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4594 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4595 return -EPERM;
4596 }
4597
4598 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4599 vec = rte_intr_vec_list_index_get(intr_handle, qid);
4600 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4601 if (rc && rc != -EEXIST) {
4602 RTE_ETHDEV_LOG(ERR,
4603 "p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4604 port_id, qid, op, epfd, vec);
4605 }
4606 }
4607
4608 return 0;
4609 }
4610
4611 int
4612 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4613 {
4614 struct rte_intr_handle *intr_handle;
4615 struct rte_eth_dev *dev;
4616 unsigned int efd_idx;
4617 uint32_t vec;
4618 int fd;
4619
4620 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4621 dev = &rte_eth_devices[port_id];
4622
4623 if (queue_id >= dev->data->nb_rx_queues) {
4624 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4625 return -1;
4626 }
4627
4628 if (!dev->intr_handle) {
4629 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4630 return -1;
4631 }
4632
4633 intr_handle = dev->intr_handle;
4634 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4635 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4636 return -1;
4637 }
4638
4639 vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4640 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4641 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4642 fd = rte_intr_efds_index_get(intr_handle, efd_idx);
4643
4644 return fd;
4645 }
4646
4647 int
4648 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4649 int epfd, int op, void *data)
4650 {
4651 uint32_t vec;
4652 struct rte_eth_dev *dev;
4653 struct rte_intr_handle *intr_handle;
4654 int rc;
4655
4656 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4657 dev = &rte_eth_devices[port_id];
4658
4659 if (queue_id >= dev->data->nb_rx_queues) {
4660 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4661 return -EINVAL;
4662 }
4663
4664 if (!dev->intr_handle) {
4665 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4666 return -ENOTSUP;
4667 }
4668
4669 intr_handle = dev->intr_handle;
4670 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4671 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4672 return -EPERM;
4673 }
4674
4675 vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4676 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4677 if (rc && rc != -EEXIST) {
4678 RTE_ETHDEV_LOG(ERR,
4679 "p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4680 port_id, queue_id, op, epfd, vec);
4681 return rc;
4682 }
4683
4684 return 0;
4685 }
4686
4687 int
4688 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4689 uint16_t queue_id)
4690 {
4691 struct rte_eth_dev *dev;
4692 int ret;
4693
4694 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4695 dev = &rte_eth_devices[port_id];
4696
4697 ret = eth_dev_validate_rx_queue(dev, queue_id);
4698 if (ret != 0)
4699 return ret;
4700
4701 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4702 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
4703 }
4704
4705 int
4706 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4707 uint16_t queue_id)
4708 {
4709 struct rte_eth_dev *dev;
4710 int ret;
4711
4712 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4713 dev = &rte_eth_devices[port_id];
4714
4715 ret = eth_dev_validate_rx_queue(dev, queue_id);
4716 if (ret != 0)
4717 return ret;
4718
4719 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4720 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
4721 }
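
/*
 * Usage sketch (editorial addition): the usual interrupt-driven Rx
 * pattern arms the queue vector in an epoll instance, enables the
 * interrupt, sleeps, then disables it before polling again. It
 * assumes intr_conf.rxq was set when the port was configured.
 *
 *     struct rte_epoll_event ev;
 *     rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
 *             RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *     rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *     rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */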
4722
4723
4724 const struct rte_eth_rxtx_callback *
4725 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4726 rte_rx_callback_fn fn, void *user_param)
4727 {
4728 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4729 rte_errno = ENOTSUP;
4730 return NULL;
4731 #endif
4732 struct rte_eth_dev *dev;
4733
4734 /* check input parameters */
4735 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4736 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4737 rte_errno = EINVAL;
4738 return NULL;
4739 }
4740 dev = &rte_eth_devices[port_id];
4741 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4742 rte_errno = EINVAL;
4743 return NULL;
4744 }
4745 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4746
4747 if (cb == NULL) {
4748 rte_errno = ENOMEM;
4749 return NULL;
4750 }
4751
4752 cb->fn.rx = fn;
4753 cb->param = user_param;
4754
4755 rte_spinlock_lock(&eth_dev_rx_cb_lock);
4756 /* Add the callbacks in FIFO order. */
4757 struct rte_eth_rxtx_callback *tail =
4758 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4759
4760 if (!tail) {
4761 /* Stores to cb->fn and cb->param should complete before
4762 * cb is visible to data plane.
4763 */
4764 __atomic_store_n(
4765 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4766 cb, __ATOMIC_RELEASE);
4767
4768 } else {
4769 while (tail->next)
4770 tail = tail->next;
4771 /* Stores to cb->fn and cb->param should complete before
4772 * cb is visible to data plane.
4773 */
4774 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4775 }
4776 rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4777
4778 return cb;
4779 }
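
/*
 * Usage sketch (editorial addition): a post-Rx callback that counts
 * received packets; count_cb and counter are illustrative names.
 *
 *     static uint16_t
 *     count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *             uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *     {
 *         RTE_SET_USED(port);
 *         RTE_SET_USED(queue);
 *         RTE_SET_USED(pkts);
 *         RTE_SET_USED(max_pkts);
 *         *(uint64_t *)user_param += nb_pkts;
 *         return nb_pkts;
 *     }
 *
 *     const struct rte_eth_rxtx_callback *cb =
 *         rte_eth_add_rx_callback(port_id, queue_id, count_cb, &counter);
 */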
4780
4781 const struct rte_eth_rxtx_callback *
4782 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4783 rte_rx_callback_fn fn, void *user_param)
4784 {
4785 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4786 rte_errno = ENOTSUP;
4787 return NULL;
4788 #endif
4789 /* check input parameters */
4790 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4791 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4792 rte_errno = EINVAL;
4793 return NULL;
4794 }
4795
4796 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4797
4798 if (cb == NULL) {
4799 rte_errno = ENOMEM;
4800 return NULL;
4801 }
4802
4803 cb->fn.rx = fn;
4804 cb->param = user_param;
4805
4806 rte_spinlock_lock(&eth_dev_rx_cb_lock);
4807 /* Add the callback at the first position */
4808 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4809 /* Stores to cb->fn, cb->param and cb->next should complete before
4810 * cb is visible to data plane threads.
4811 */
4812 __atomic_store_n(
4813 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4814 cb, __ATOMIC_RELEASE);
4815 rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4816
4817 return cb;
4818 }
4819
4820 const struct rte_eth_rxtx_callback *
4821 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4822 rte_tx_callback_fn fn, void *user_param)
4823 {
4824 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4825 rte_errno = ENOTSUP;
4826 return NULL;
4827 #endif
4828 struct rte_eth_dev *dev;
4829
4830 /* check input parameters */
4831 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4832 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4833 rte_errno = EINVAL;
4834 return NULL;
4835 }
4836
4837 dev = &rte_eth_devices[port_id];
4838 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4839 rte_errno = EINVAL;
4840 return NULL;
4841 }
4842
4843 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4844
4845 if (cb == NULL) {
4846 rte_errno = ENOMEM;
4847 return NULL;
4848 }
4849
4850 cb->fn.tx = fn;
4851 cb->param = user_param;
4852
4853 rte_spinlock_lock(&eth_dev_tx_cb_lock);
4854 /* Add the callbacks in FIFO order. */
4855 struct rte_eth_rxtx_callback *tail =
4856 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4857
4858 if (!tail) {
4859 /* Stores to cb->fn and cb->param should complete before
4860 * cb is visible to data plane.
4861 */
4862 __atomic_store_n(
4863 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4864 cb, __ATOMIC_RELEASE);
4865
4866 } else {
4867 while (tail->next)
4868 tail = tail->next;
4869 /* Stores to cb->fn and cb->param should complete before
4870 * cb is visible to data plane.
4871 */
4872 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4873 }
4874 rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4875
4876 return cb;
4877 }
4878
4879 int
4880 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4881 const struct rte_eth_rxtx_callback *user_cb)
4882 {
4883 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4884 return -ENOTSUP;
4885 #endif
4886 /* Check input parameters. */
4887 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4888 if (user_cb == NULL ||
4889 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4890 return -EINVAL;
4891
4892 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4893 struct rte_eth_rxtx_callback *cb;
4894 struct rte_eth_rxtx_callback **prev_cb;
4895 int ret = -EINVAL;
4896
4897 rte_spinlock_lock(&eth_dev_rx_cb_lock);
4898 prev_cb = &dev->post_rx_burst_cbs[queue_id];
4899 for (; *prev_cb != NULL; prev_cb = &cb->next) {
4900 cb = *prev_cb;
4901 if (cb == user_cb) {
4902 /* Remove the user cb from the callback list. */
4903 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4904 ret = 0;
4905 break;
4906 }
4907 }
4908 rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4909
4910 return ret;
4911 }
4912
4913 int
4914 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4915 const struct rte_eth_rxtx_callback *user_cb)
4916 {
4917 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4918 return -ENOTSUP;
4919 #endif
4920 /* Check input parameters. */
4921 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4922 if (user_cb == NULL ||
4923 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4924 return -EINVAL;
4925
4926 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4927 int ret = -EINVAL;
4928 struct rte_eth_rxtx_callback *cb;
4929 struct rte_eth_rxtx_callback **prev_cb;
4930
4931 rte_spinlock_lock(&eth_dev_tx_cb_lock);
4932 prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4933 for (; *prev_cb != NULL; prev_cb = &cb->next) {
4934 cb = *prev_cb;
4935 if (cb == user_cb) {
4936 /* Remove the user cb from the callback list. */
4937 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4938 ret = 0;
4939 break;
4940 }
4941 }
4942 rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4943
4944 return ret;
4945 }
4946
4947 int
4948 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4949 struct rte_eth_rxq_info *qinfo)
4950 {
4951 struct rte_eth_dev *dev;
4952
4953 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4954 dev = &rte_eth_devices[port_id];
4955
4956 if (queue_id >= dev->data->nb_rx_queues) {
4957 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4958 return -EINVAL;
4959 }
4960
4961 if (qinfo == NULL) {
4962 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
4963 port_id, queue_id);
4964 return -EINVAL;
4965 }
4966
4967 if (dev->data->rx_queues == NULL ||
4968 dev->data->rx_queues[queue_id] == NULL) {
4969 RTE_ETHDEV_LOG(ERR,
4970 "Rx queue %"PRIu16" of device with port_id=%"
4971 PRIu16" has not been setup\n",
4972 queue_id, port_id);
4973 return -EINVAL;
4974 }
4975
4976 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4977 RTE_ETHDEV_LOG(INFO,
4978 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4979 queue_id, port_id);
4980 return -EINVAL;
4981 }
4982
4983 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4984
4985 memset(qinfo, 0, sizeof(*qinfo));
4986 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4987 qinfo->queue_state = dev->data->rx_queue_state[queue_id];
4988
4989 return 0;
4990 }
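
/*
 * Usage sketch (editorial addition): query a queue that has been set
 * up on a valid port.
 *
 *     struct rte_eth_rxq_info qinfo;
 *     if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
 *         printf("nb_desc=%u state=%u\n", qinfo.nb_desc,
 *             qinfo.queue_state);
 */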
4991
4992 int
4993 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4994 struct rte_eth_txq_info *qinfo)
4995 {
4996 struct rte_eth_dev *dev;
4997
4998 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4999 dev = &rte_eth_devices[port_id];
5000
5001 if (queue_id >= dev->data->nb_tx_queues) {
5002 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5003 return -EINVAL;
5004 }
5005
5006 if (qinfo == NULL) {
5007 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5008 port_id, queue_id);
5009 return -EINVAL;
5010 }
5011
5012 if (dev->data->tx_queues == NULL ||
5013 dev->data->tx_queues[queue_id] == NULL) {
5014 RTE_ETHDEV_LOG(ERR,
5015 "Tx queue %"PRIu16" of device with port_id=%"
5016 PRIu16" has not been setup\n",
5017 queue_id, port_id);
5018 return -EINVAL;
5019 }
5020
5021 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5022 RTE_ETHDEV_LOG(INFO,
5023 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5024 queue_id, port_id);
5025 return -EINVAL;
5026 }
5027
5028 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5029
5030 memset(qinfo, 0, sizeof(*qinfo));
5031 dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5032 qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5033
5034 return 0;
5035 }
5036
5037 int
5038 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5039 struct rte_eth_burst_mode *mode)
5040 {
5041 struct rte_eth_dev *dev;
5042
5043 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5044 dev = &rte_eth_devices[port_id];
5045
5046 if (queue_id >= dev->data->nb_rx_queues) {
5047 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5048 return -EINVAL;
5049 }
5050
5051 if (mode == NULL) {
5052 RTE_ETHDEV_LOG(ERR,
5053 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5054 port_id, queue_id);
5055 return -EINVAL;
5056 }
5057
5058 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5059 memset(mode, 0, sizeof(*mode));
5060 return eth_err(port_id,
5061 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5062 }
5063
5064 int
5065 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5066 struct rte_eth_burst_mode *mode)
5067 {
5068 struct rte_eth_dev *dev;
5069
5070 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5071 dev = &rte_eth_devices[port_id];
5072
5073 if (queue_id >= dev->data->nb_tx_queues) {
5074 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5075 return -EINVAL;
5076 }
5077
5078 if (mode == NULL) {
5079 RTE_ETHDEV_LOG(ERR,
5080 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5081 port_id, queue_id);
5082 return -EINVAL;
5083 }
5084
5085 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5086 memset(mode, 0, sizeof(*mode));
5087 return eth_err(port_id,
5088 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5089 }
5090
5091 int
5092 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5093 struct rte_power_monitor_cond *pmc)
5094 {
5095 struct rte_eth_dev *dev;
5096
5097 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5098 dev = &rte_eth_devices[port_id];
5099
5100 if (queue_id >= dev->data->nb_rx_queues) {
5101 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5102 return -EINVAL;
5103 }
5104
5105 if (pmc == NULL) {
5106 RTE_ETHDEV_LOG(ERR,
5107 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5108 port_id, queue_id);
5109 return -EINVAL;
5110 }
5111
5112 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
5113 return eth_err(port_id,
5114 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5115 }
5116
5117 int
5118 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5119 struct rte_ether_addr *mc_addr_set,
5120 uint32_t nb_mc_addr)
5121 {
5122 struct rte_eth_dev *dev;
5123
5124 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5125 dev = &rte_eth_devices[port_id];
5126
5127 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5128 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5129 mc_addr_set, nb_mc_addr));
5130 }
5131
5132 int
5133 rte_eth_timesync_enable(uint16_t port_id)
5134 {
5135 struct rte_eth_dev *dev;
5136
5137 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5138 dev = &rte_eth_devices[port_id];
5139
5140 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5141 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5142 }
5143
5144 int
5145 rte_eth_timesync_disable(uint16_t port_id)
5146 {
5147 struct rte_eth_dev *dev;
5148
5149 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5150 dev = &rte_eth_devices[port_id];
5151
5152 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5153 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5154 }
5155
5156 int
5157 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5158 uint32_t flags)
5159 {
5160 struct rte_eth_dev *dev;
5161
5162 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5163 dev = &rte_eth_devices[port_id];
5164
5165 if (timestamp == NULL) {
5166 RTE_ETHDEV_LOG(ERR,
5167 "Cannot read ethdev port %u Rx timestamp to NULL\n",
5168 port_id);
5169 return -EINVAL;
5170 }
5171
5172 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5173 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5174 (dev, timestamp, flags));
5175 }
5176
5177 int
5178 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5179 struct timespec *timestamp)
5180 {
5181 struct rte_eth_dev *dev;
5182
5183 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5184 dev = &rte_eth_devices[port_id];
5185
5186 if (timestamp == NULL) {
5187 RTE_ETHDEV_LOG(ERR,
5188 "Cannot read ethdev port %u Tx timestamp to NULL\n",
5189 port_id);
5190 return -EINVAL;
5191 }
5192
5193 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5194 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5195 (dev, timestamp));
5196 }
5197
5198 int
5199 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5200 {
5201 struct rte_eth_dev *dev;
5202
5203 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5204 dev = &rte_eth_devices[port_id];
5205
5206 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5207 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5208 }
5209
5210 int
5211 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5212 {
5213 struct rte_eth_dev *dev;
5214
5215 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5216 dev = &rte_eth_devices[port_id];
5217
5218 if (timestamp == NULL) {
5219 RTE_ETHDEV_LOG(ERR,
5220 "Cannot read ethdev port %u timesync time to NULL\n",
5221 port_id);
5222 return -EINVAL;
5223 }
5224
5225 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5226 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5227 timestamp));
5228 }
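
/*
 * Usage sketch (editorial addition): read the device clock once
 * timesync has been enabled on the port.
 *
 *     struct timespec ts;
 *     if (rte_eth_timesync_enable(port_id) == 0 &&
 *             rte_eth_timesync_read_time(port_id, &ts) == 0)
 *         printf("%jd.%09ld\n", (intmax_t)ts.tv_sec, ts.tv_nsec);
 */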
5229
5230 int
5231 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5232 {
5233 struct rte_eth_dev *dev;
5234
5235 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5236 dev = &rte_eth_devices[port_id];
5237
5238 if (timestamp == NULL) {
5239 RTE_ETHDEV_LOG(ERR,
5240 "Cannot write ethdev port %u timesync from NULL time\n",
5241 port_id);
5242 return -EINVAL;
5243 }
5244
5245 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5246 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5247 timestamp));
5248 }
5249
5250 int
5251 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5252 {
5253 struct rte_eth_dev *dev;
5254
5255 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5256 dev = &rte_eth_devices[port_id];
5257
5258 if (clock == NULL) {
5259 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5260 port_id);
5261 return -EINVAL;
5262 }
5263
5264 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5265 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5266 }
5267
5268 int
5269 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5270 {
5271 struct rte_eth_dev *dev;
5272
5273 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5274 dev = &rte_eth_devices[port_id];
5275
5276 if (info == NULL) {
5277 RTE_ETHDEV_LOG(ERR,
5278 "Cannot get ethdev port %u register info to NULL\n",
5279 port_id);
5280 return -EINVAL;
5281 }
5282
5283 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5284 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5285 }
5286
5287 int
5288 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5289 {
5290 struct rte_eth_dev *dev;
5291
5292 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5293 dev = &rte_eth_devices[port_id];
5294
5295 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5296 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5297 }
5298
5299 int
5300 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5301 {
5302 struct rte_eth_dev *dev;
5303
5304 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5305 dev = &rte_eth_devices[port_id];
5306
5307 if (info == NULL) {
5308 RTE_ETHDEV_LOG(ERR,
5309 "Cannot get ethdev port %u EEPROM info to NULL\n",
5310 port_id);
5311 return -EINVAL;
5312 }
5313
5314 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5315 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5316 }
5317
5318 int
5319 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5320 {
5321 struct rte_eth_dev *dev;
5322
5323 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5324 dev = &rte_eth_devices[port_id];
5325
5326 if (info == NULL) {
5327 RTE_ETHDEV_LOG(ERR,
5328 "Cannot set ethdev port %u EEPROM from NULL info\n",
5329 port_id);
5330 return -EINVAL;
5331 }
5332
5333 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5334 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5335 }
5336
5337 int
5338 rte_eth_dev_get_module_info(uint16_t port_id,
5339 struct rte_eth_dev_module_info *modinfo)
5340 {
5341 struct rte_eth_dev *dev;
5342
5343 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5344 dev = &rte_eth_devices[port_id];
5345
5346 if (modinfo == NULL) {
5347 RTE_ETHDEV_LOG(ERR,
5348 "Cannot get ethdev port %u EEPROM module info to NULL\n",
5349 port_id);
5350 return -EINVAL;
5351 }
5352
5353 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5354 return (*dev->dev_ops->get_module_info)(dev, modinfo);
5355 }
5356
5357 int
5358 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5359 struct rte_dev_eeprom_info *info)
5360 {
5361 struct rte_eth_dev *dev;
5362
5363 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5364 dev = &rte_eth_devices[port_id];
5365
5366 if (info == NULL) {
5367 RTE_ETHDEV_LOG(ERR,
5368 "Cannot get ethdev port %u module EEPROM info to NULL\n",
5369 port_id);
5370 return -EINVAL;
5371 }
5372
5373 if (info->data == NULL) {
5374 RTE_ETHDEV_LOG(ERR,
5375 "Cannot get ethdev port %u module EEPROM data to NULL\n",
5376 port_id);
5377 return -EINVAL;
5378 }
5379
5380 if (info->length == 0) {
5381 RTE_ETHDEV_LOG(ERR,
5382 "Cannot get ethdev port %u module EEPROM to data with zero size\n",
5383 port_id);
5384 return -EINVAL;
5385 }
5386
5387 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5388 return (*dev->dev_ops->get_module_eeprom)(dev, info);
5389 }
5390
5391 int
5392 rte_eth_dev_get_dcb_info(uint16_t port_id,
5393 struct rte_eth_dcb_info *dcb_info)
5394 {
5395 struct rte_eth_dev *dev;
5396
5397 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5398 dev = &rte_eth_devices[port_id];
5399
5400 if (dcb_info == NULL) {
5401 RTE_ETHDEV_LOG(ERR,
5402 "Cannot get ethdev port %u DCB info to NULL\n",
5403 port_id);
5404 return -EINVAL;
5405 }
5406
5407 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5408
5409 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5410 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5411 }
5412
5413 static void
5414 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5415 const struct rte_eth_desc_lim *desc_lim)
5416 {
5417 if (desc_lim->nb_align != 0)
5418 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5419
5420 if (desc_lim->nb_max != 0)
5421 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5422
5423 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5424 }
5425
5426 int
5427 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5428 uint16_t *nb_rx_desc,
5429 uint16_t *nb_tx_desc)
5430 {
5431 struct rte_eth_dev_info dev_info;
5432 int ret;
5433
5434 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5435
5436 ret = rte_eth_dev_info_get(port_id, &dev_info);
5437 if (ret != 0)
5438 return ret;
5439
5440 if (nb_rx_desc != NULL)
5441 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5442
5443 if (nb_tx_desc != NULL)
5444 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5445
5446 return 0;
5447 }
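
/*
 * Usage sketch (editorial addition): clamp requested ring sizes to the
 * device limits before queue setup; mb_pool is an assumed, previously
 * created mempool.
 *
 *     uint16_t nb_rxd = 1024, nb_txd = 1024;
 *     ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *     if (ret == 0)
 *         ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *             rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 */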
5448
5449 int
5450 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5451 struct rte_eth_hairpin_cap *cap)
5452 {
5453 struct rte_eth_dev *dev;
5454
5455 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5456 dev = &rte_eth_devices[port_id];
5457
5458 if (cap == NULL) {
5459 RTE_ETHDEV_LOG(ERR,
5460 "Cannot get ethdev port %u hairpin capability to NULL\n",
5461 port_id);
5462 return -EINVAL;
5463 }
5464
5465 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5466 memset(cap, 0, sizeof(*cap));
5467 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5468 }
5469
5470 int
5471 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5472 {
5473 struct rte_eth_dev *dev;
5474
5475 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5476 dev = &rte_eth_devices[port_id];
5477
5478 if (pool == NULL) {
5479 RTE_ETHDEV_LOG(ERR,
5480 "Cannot test ethdev port %u mempool operation from NULL pool\n",
5481 port_id);
5482 return -EINVAL;
5483 }
5484
5485 if (*dev->dev_ops->pool_ops_supported == NULL)
5486 return 1; /* all pools are supported */
5487
5488 return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5489 }
5490
5491 static int
5492 eth_dev_handle_port_list(const char *cmd __rte_unused,
5493 const char *params __rte_unused,
5494 struct rte_tel_data *d)
5495 {
5496 int port_id;
5497
5498 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5499 RTE_ETH_FOREACH_DEV(port_id)
5500 rte_tel_data_add_array_int(d, port_id);
5501 return 0;
5502 }
5503
5504 static void
5505 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5506 const char *stat_name)
5507 {
5508 int q;
5509 struct rte_tel_data *q_data = rte_tel_data_alloc();
5510 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5511 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5512 rte_tel_data_add_array_u64(q_data, q_stats[q]);
5513 rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5514 }
5515
#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

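/*
 * Handler for "/ethdev/stats": parses the port ID from the parameter
 * string and reports the basic rte_eth_stats counters, plus the
 * per-queue arrays, as one telemetry dictionary.
 */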
static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

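/*
 * Handler for "/ethdev/xstats": fetches the extended stats names and
 * values in a single allocation (names stored after the values) and
 * emits them as a name -> u64 dictionary.
 */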
static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}

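/*
 * Handler for "/ethdev/link_status": reports "DOWN", or "UP" together
 * with the negotiated speed and duplex, using the no-wait link query.
 */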
static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	static const char *status_str = "status";
	int ret, port_id;
	struct rte_eth_link link;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	if (!link.link_status) {
		rte_tel_data_add_dict_string(d, status_str, "DOWN");
		return 0;
	}
	rte_tel_data_add_dict_string(d, status_str, "UP");
	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
	rte_tel_data_add_dict_string(d, "duplex",
			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
				"full-duplex" : "half-duplex");
	return 0;
}

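/*
 * Handler for "/ethdev/info": dumps the main fields of the port's
 * shared data (name, state, queue counts, MTU, MAC address, offload
 * masks, ...) plus per-queue state arrays into one dictionary.
 */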
static int
eth_dev_handle_port_info(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_tel_data *rxq_state, *txq_state;
	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev *eth_dev;
	char *end_param;
	int port_id, i;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");

	if (!rte_eth_dev_is_valid_port(port_id))
		return -EINVAL;

	eth_dev = &rte_eth_devices[port_id];

	rxq_state = rte_tel_data_alloc();
	if (rxq_state == NULL)
		return -ENOMEM;

	txq_state = rte_tel_data_alloc();
	if (txq_state == NULL) {
		rte_tel_data_free(rxq_state);
		return -ENOMEM;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
	rte_tel_data_add_dict_int(d, "nb_rx_queues",
			eth_dev->data->nb_rx_queues);
	rte_tel_data_add_dict_int(d, "nb_tx_queues",
			eth_dev->data->nb_tx_queues);
	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
	rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
			eth_dev->data->min_rx_buf_size);
	rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
			eth_dev->data->rx_mbuf_alloc_failed);
	rte_ether_format_addr(mac_addr, sizeof(mac_addr),
			eth_dev->data->mac_addrs);
	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
	rte_tel_data_add_dict_int(d, "promiscuous",
			eth_dev->data->promiscuous);
	rte_tel_data_add_dict_int(d, "scattered_rx",
			eth_dev->data->scattered_rx);
	rte_tel_data_add_dict_int(d, "all_multicast",
			eth_dev->data->all_multicast);
	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
	rte_tel_data_add_dict_int(d, "dev_configured",
			eth_dev->data->dev_configured);

	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		rte_tel_data_add_array_int(rxq_state,
				eth_dev->data->rx_queue_state[i]);

	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		rte_tel_data_add_array_int(txq_state,
				eth_dev->data->tx_queue_state[i]);

	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
	rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
	rte_tel_data_add_dict_int(d, "rx_offloads",
			eth_dev->data->dev_conf.rxmode.offloads);
	rte_tel_data_add_dict_int(d, "tx_offloads",
			eth_dev->data->dev_conf.txmode.offloads);
	rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);

	return 0;
}

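/*
 * Retrieve representor port mapping information from the driver. Note
 * there is deliberately no NULL check on "info": drivers accept a NULL
 * pointer to report only the number of representor ranges.
 */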
int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

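/*
 * Negotiate which kinds of Rx metadata the application wants delivered
 * (user flag, user mark, tunnel ID). This must happen before the first
 * device configuration, hence the -EBUSY check below.
 */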
int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}

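/*
 * IP reassembly offload. The capability query requires a configured
 * (but not necessarily started) device; the output structure is zeroed
 * so unfilled fields stay deterministic.
 */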
int
rte_eth_ip_reassembly_capability_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly capability\n",
			port_id);
		return -EINVAL;
	}

	if (reassembly_capa == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_capability_get,
				-ENOTSUP);
	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));

	return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
			(dev, reassembly_capa));
}

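/*
 * Read back the currently applied IP reassembly configuration. Like the
 * capability query, this is only valid once the device is configured.
 */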
int
rte_eth_ip_reassembly_conf_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_get,
				-ENOTSUP);
	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
	return eth_err(port_id,
		       (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
}

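/*
 * Apply an IP reassembly configuration. The window for this call is
 * strictly between configure and start: an unconfigured port rejects
 * it, and so does a started one.
 */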
int
rte_eth_ip_reassembly_conf_set(uint16_t port_id,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot set IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u started,\n"
			"cannot configure IP reassembly params.\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
				"Invalid IP reassembly configuration (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_set,
				-ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
}

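/*
 * Dump driver-private state for debugging into the given stream. The
 * output format is entirely driver defined.
 */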
int
rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_dev_priv_dump, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

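/*
 * Register the ethdev telemetry endpoints at init time. As an
 * illustrative usage note: with a DPDK application running, these can
 * be queried interactively from the bundled telemetry client
 * (usertools/dpdk-telemetry.py), e.g. by typing "/ethdev/stats,0".
 */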
RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
}