/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG	"nodeaction"
#define ETH_RING_ACTION_CREATE		"CREATE"
#define ETH_RING_ACTION_ATTACH		"ATTACH"
#define ETH_RING_INTERNAL_ARG		"internal"
#define ETH_RING_INTERNAL_ARG_MAX_LEN	19 /* "0x..16chars..\0" */

static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	ETH_RING_INTERNAL_ARG,
	NULL
};

struct ring_internal_args {
	struct rte_ring * const *rx_queues;
	const unsigned int nb_rx_queues;
	struct rte_ring * const *tx_queues;
	const unsigned int nb_tx_queues;
	const unsigned int numa_node;
	void *addr; /* self addr for sanity check */
};
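/*
 * The "internal" devarg carries a pointer to this struct:
 * rte_eth_from_rings() formats its address with "%p" into the argument
 * string, and parse_internal_args() scans it back, using the addr field
 * to verify the round-trip.
 */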

enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};

struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_internals {
	unsigned int max_rx_queues;
	unsigned int max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct rte_ether_addr address;
	enum dev_action action;
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

RTE_LOG_REGISTER(eth_ring_logtype, pmd.net.ring, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

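/*
 * Burst receive: dequeue mbuf pointers directly from the backing ring.
 * When the ring was created single-consumer (RING_F_SC_DEQ), the packet
 * counter can be bumped without an atomic; eth_ring_tx() below applies
 * the same logic on the single-producer (RING_F_SP_ENQ) side.
 */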
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;
	else
		rte_atomic64_add(&(r->rx_pkts), nb_rx);
	return nb_rx;
}

static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SP_ENQ)
		r->tx_pkts.cnt += nb_tx;
	else
		rte_atomic64_add(&(r->tx_pkts), nb_tx);
	return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_started = 0;
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
	return 0;
}


static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
	dev_info->min_rx_bufsize = 0;

	return 0;
}

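/*
 * Per-queue counters are reported only for the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues; the aggregate ipackets/opackets
 * totals are summed over that same capped subset.
 */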
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
		tx_total += stats->q_opackets[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		internal->tx_ring_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}

static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
		struct rte_ether_addr *mac_addr __rte_unused,
		uint32_t index __rte_unused,
		uint32_t vmdq __rte_unused)
{
	return 0;
}

static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = eth_dev_stop(dev);

	internals = dev->data->dev_private;
	if (internals->action == DEV_CREATE) {
		/*
		 * It is only necessary to delete the rings in rx_queues
		 * because they are the same ones used in tx_queues.
		 */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			r = dev->data->rx_queues[i];
			rte_ring_free(r->rng);
		}
	}

	/* mac_addrs must not be freed alone because it is part of dev_private */
	dev->data->mac_addrs = NULL;

	return ret;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};

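/*
 * Common back end for both the devargs and the API paths: allocate the
 * queue arrays and pmd_internals on the requested socket, reserve an
 * ethdev entry, and wire the supplied rings into its rx/tx queues.
 * Returns the new port id on success, or -1 with rte_errno set.
 */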
static int
do_eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_ring * const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring * const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node, enum dev_action action,
		struct rte_eth_dev **eth_dev_p)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	void **rx_queues_local = NULL;
	void **tx_queues_local = NULL;
	unsigned int i;

	PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
			numa_node);

	rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
			sizeof(void *), 0, numa_node);
	if (rx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
			sizeof(void *), 0, numa_node);
	if (tx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store EAL device in eth_dev,
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	eth_dev->device = &vdev->device;

	data = eth_dev->data;
	data->rx_queues = rx_queues_local;
	data->tx_queues = tx_queues_local;

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	rte_eth_dev_probing_finish(eth_dev);
	*eth_dev_p = eth_dev;

	return data->port_id;

error:
	rte_free(rx_queues_local);
	rte_free(tx_queues_local);
	rte_free(internals);

	return -1;
}

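/*
 * Illustrative usage sketch (the ring name, device name, and ring size
 * below are arbitrary examples):
 *
 *	struct rte_ring *r = rte_ring_create("R0", 1024, rte_socket_id(),
 *			RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	int port = rte_eth_from_rings("r0_eth", &r, 1, &r, 1,
 *			rte_socket_id());
 *
 * On success the returned port id can be used like any other ethdev port.
 */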
int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node)
{
	struct ring_internal_args args = {
		.rx_queues = rx_queues,
		.nb_rx_queues = nb_rx_queues,
		.tx_queues = tx_queues,
		.nb_tx_queues = nb_tx_queues,
		.numa_node = numa_node,
		.addr = &args,
	};
	char args_str[32];
	char ring_name[RTE_RING_NAMESIZE];
	uint16_t port_id = RTE_MAX_ETHPORTS;
	int ret;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (tx_queues == NULL && nb_tx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}

	snprintf(args_str, sizeof(args_str), "%s=%p",
		ETH_RING_INTERNAL_ARG, &args);

	ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
	if (ret >= (int)sizeof(ring_name)) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}

	ret = rte_vdev_init(ring_name, args_str);
	if (ret) {
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
	if (ret) {
		rte_errno = ENODEV;
		return -1;
	}

	return port_id;
}

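/* Convenience wrapper: expose a single ring as both the rx and tx queue
 * of a new ethdev, placed on the ring's own NUMA socket when known.
 */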
int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}

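/*
 * Devargs path: create (or, on DEV_ATTACH, look up) the "ETH_RXTX<i>_<name>"
 * rings and register an ethdev that uses the same set of rings for both
 * rx and tx.
 */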
static int
eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		const unsigned int numa_node,
		enum dev_action action, struct rte_eth_dev **eth_dev)
{
	/* rx and tx are so-called from the point of view of the first port.
	 * They are inverted from the point of view of the second port.
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned int i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		int cc;

		cc = snprintf(rng_name, sizeof(rng_name),
				"ETH_RXTX%u_%s", i, name);
		if (cc >= (int)sizeof(rng_name)) {
			rte_errno = ENAMETOOLONG;
			return -1;
		}

		rxtx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (do_eth_dev_ring_create(name, vdev, rxtx, num_rings, rxtx, num_rings,
			numa_node, action, eth_dev) < 0)
		return -1;

	return 0;
}

struct node_action_pair {
	char name[PATH_MAX];
	unsigned int node;
	enum dev_action action;
};

struct node_action_list {
	unsigned int total;
	unsigned int count;
	struct node_action_pair *list;
};

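/*
 * Parse one "name:node:action" tuple from the nodeaction devarg,
 * e.g. "ring0:0:CREATE" (an example value), into the next free slot
 * of the node_action_list.
 */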
static int parse_kvlist(const char *key __rte_unused,
			const char *value, void *data)
{
	struct node_action_list *info = data;
	int ret;
	char *name;
	char *action;
	char *node;
	char *end;

	name = strdup(value);

	ret = -EINVAL;

	if (!name) {
		PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
		goto out;
	}

	node = strchr(name, ':');
	if (!node) {
		PMD_LOG(WARNING, "could not parse node value from %s",
			name);
		goto out;
	}

	*node = '\0';
	node++;

	action = strchr(node, ':');
	if (!action) {
		PMD_LOG(WARNING, "could not parse action value from %s",
			node);
		goto out;
	}

	*action = '\0';
	action++;

	/*
	 * Need to do some sanity checking here
	 */

	if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
		info->list[info->count].action = DEV_ATTACH;
	else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
		info->list[info->count].action = DEV_CREATE;
	else
		goto out;

	errno = 0;
	info->list[info->count].node = strtol(node, &end, 10);

	if ((errno != 0) || (*end != '\0')) {
		PMD_LOG(WARNING,
			"node value %s is unparseable as a number", node);
		goto out;
	}

	strlcpy(info->list[info->count].name, name,
		sizeof(info->list[info->count].name));

	info->count++;

	ret = 0;
out:
	free(name);
	return ret;
}

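/*
 * Recover the ring_internal_args pointer that rte_eth_from_rings() encoded
 * with "%p"; reject the value if it does not scan cleanly as a pointer or
 * if the struct's addr field does not point back at itself.
 */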
static int
parse_internal_args(const char *key __rte_unused, const char *value,
		void *data)
{
	struct ring_internal_args **internal_args = data;
	void *args;
	int ret, n;

	/* make sure 'value' is valid pointer length */
	if (strnlen(value, ETH_RING_INTERNAL_ARG_MAX_LEN) >=
			ETH_RING_INTERNAL_ARG_MAX_LEN) {
		PMD_LOG(ERR, "Error parsing internal args, argument is too long");
		return -1;
	}

	ret = sscanf(value, "%p%n", &args, &n);
	if (ret == 0 || (size_t)n != strlen(value)) {
		PMD_LOG(ERR, "Error parsing internal args");

		return -1;
	}

	*internal_args = args;

	if ((*internal_args)->addr != args)
		return -1;

	return 0;
}

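/*
 * Probe paths, in order of precedence:
 *  - no devargs: create rings on the caller's socket, falling back to
 *    attaching if creation fails (e.g. the rings already exist);
 *  - "internal" arg: attach to rings supplied via rte_eth_from_rings();
 *  - "nodeaction" arg: create or attach one device per name:node:action
 *    tuple.
 */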
static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ring_internal_args *internal_args;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	PMD_LOG(INFO, "Initializing pmd_ring for %s", name);

	if (params == NULL || params[0] == '\0') {
		ret = eth_dev_ring_create(name, dev, rte_socket_id(), DEV_CREATE,
				&eth_dev);
		if (ret == -1) {
			PMD_LOG(INFO,
				"Attach to pmd_ring for %s", name);
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
						  DEV_ATTACH, &eth_dev);
		}
	} else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			PMD_LOG(INFO,
				"Ignoring unsupported parameters when creating rings-backed ethernet device");
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
						  DEV_CREATE, &eth_dev);
			if (ret == -1) {
				PMD_LOG(INFO,
					"Attach to pmd_ring for %s",
					name);
				ret = eth_dev_ring_create(name, dev, rte_socket_id(),
							  DEV_ATTACH, &eth_dev);
			}

			return ret;
		}

		if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
			ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
						 parse_internal_args,
						 &internal_args);
			if (ret < 0)
				goto out_free;

			ret = do_eth_dev_ring_create(name, dev,
				internal_args->rx_queues,
				internal_args->nb_rx_queues,
				internal_args->tx_queues,
				internal_args->nb_tx_queues,
				internal_args->numa_node,
				DEV_ATTACH,
				&eth_dev);
			if (ret >= 0)
				ret = 0;
		} else {
			ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
			info = rte_zmalloc("struct node_action_list",
					   sizeof(struct node_action_list) +
					   (sizeof(struct node_action_pair) * ret),
					   0);
			if (!info)
				goto out_free;

			info->total = ret;
			info->list = (struct node_action_pair *)(info + 1);

			ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
						 parse_kvlist, info);

			if (ret < 0)
				goto out_free;

			for (info->count = 0; info->count < info->total; info->count++) {
				ret = eth_dev_ring_create(info->list[info->count].name,
							  dev,
							  info->list[info->count].node,
							  info->list[info->count].action,
							  &eth_dev);
				if ((ret == -1) &&
				    (info->list[info->count].action == DEV_CREATE)) {
					PMD_LOG(INFO,
						"Attach to pmd_ring for %s",
						name);
					ret = eth_dev_ring_create(name, dev,
							info->list[info->count].node,
							DEV_ATTACH,
							&eth_dev);
				}
			}
		}
	}

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}

static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev *eth_dev = NULL;

	PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);
	return 0;
}

static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");