/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"

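/*
 * virtual_pmd: a minimal in-memory ethdev used as a test harness. RX and TX
 * are backed by rte_rings so a test can inject mbufs into the RX path and
 * drain whatever the device "transmitted", while most dev_ops and the burst
 * callbacks can be swapped at runtime to force success or failure paths.
 */
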
#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

struct virtual_ethdev_private {
	struct eth_dev_ops dev_ops;	/* per-device copy so tests can patch ops */
	struct rte_eth_stats eth_stats;

	struct rte_ring *rx_queue;	/* mbufs waiting to be "received" */
	struct rte_ring *tx_queue;	/* mbufs the device has "transmitted" */

	int tx_burst_fail_count;	/* packets to reject per failing TX burst */
};

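/* Per-queue context stored in dev->data->{rx,tx}_queues[], letting the burst
 * callbacks recover the owning port from the queue pointer alone.
 */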
struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};

static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 0;

	return -1;
}

static int
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	void *pkt = NULL;
	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	/* drain both rings so no mbufs leak across a stop/start cycle */
	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	return 0;
}

static int
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonded_eth_dev->data->dev_started)
		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));

	return 0;
}

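/*
 * Stats are kept in the private struct and served straight from there;
 * resetting them also empties the TX ring so counters and queued mbufs
 * stay consistent between test cases.
 */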
static int
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	void *pkt = NULL;

	/* drain the TX ring; rte_ring_dequeue() returns -ENOENT once empty */
	while (rte_ring_dequeue(dev_private->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));

	return 0;
}

static int
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.rx_queue_release = virtual_ethdev_rx_queue_release,
	.tx_queue_release = virtual_ethdev_tx_queue_release,
	.link_update = virtual_ethdev_link_update_success,
	.mac_addr_set = virtual_ethdev_mac_address_set,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};

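/*
 * Test hooks: each helper below re-points one callback in the device's
 * private copy of eth_dev_ops, so a test can flip an operation between its
 * success and failure variant at runtime. A hypothetical test might do:
 *
 *	virtual_ethdev_configure_fn_set_success(port_id, 0);
 *	TEST_ASSERT_FAIL(rte_eth_dev_configure(port_id, 1, 1, &conf),
 *			"configure should fail");
 *	virtual_ethdev_configure_fn_set_success(port_id, 1);
 */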
void
virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_start = virtual_ethdev_start_success;
	else
		dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_configure = virtual_ethdev_configure_success;
	else
		dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_success;
	else
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_success;
	else
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->link_update = virtual_ethdev_link_update_success;
	else
		dev_ops->link_update = virtual_ethdev_link_update_fail;
}

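/*
 * Burst callbacks: RX pulls mbufs off the private rx_queue ring and TX
 * pushes them onto tx_queue (or drops everything while the link is down),
 * updating packet/byte counters to mimic a real driver.
 */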
static uint16_t
virtual_ethdev_rx_burst_success(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int rx_count, i;

	pq_map = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
			nb_pkts, NULL);

	/* increments ipackets count */
	dev_private->eth_stats.ipackets += rx_count;

	/* increments ibytes count */
	for (i = 0; i < rx_count; i++)
		dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct virtual_ethdev_queue *tx_q = queue;

	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_private *dev_private;

	int i;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (!vrtl_eth_dev->data->dev_link.link_status)
		nb_pkts = 0;
	else
		nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue,
				(void **)bufs, nb_pkts, NULL);

	/* increment opacket count */
	dev_private->eth_stats.opackets += nb_pkts;

	/* increment obytes count */
	for (i = 0; i < nb_pkts; i++)
		dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

	return nb_pkts;
}

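/*
 * Failing TX variant: the first (nb_pkts - tx_burst_fail_count) mbufs are
 * treated as sent (counted and freed) and the rest are rejected. For
 * example, with a fail count of 2, a burst of 5 reports 3 transmitted,
 * while a burst of 2 or fewer reports 0.
 */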
static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev = NULL;
	struct virtual_ethdev_queue *tx_q = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	int i;

	tx_q = queue;
	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->tx_burst_fail_count < nb_pkts) {
		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

		/* increment opacket count */
		dev_private->eth_stats.opackets += successfully_txd;

		/* free packets in burst */
		for (i = 0; i < successfully_txd; i++) {
			if (bufs[i] != NULL)
				rte_pktmbuf_free(bufs[i]);

			bufs[i] = NULL;
		}

		return successfully_txd;
	}

	return 0;
}

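/*
 * Unlike the dev_ops hooks above, the burst paths are swapped directly on
 * the rte_eth_dev, since rx_pkt_burst/tx_pkt_burst live outside eth_dev_ops.
 */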
void
virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

	dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
		uint8_t packet_fail_count)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	dev_private->tx_burst_fail_count = packet_fail_count;
}

void
virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;
}

void
virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC,
			NULL);
}

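/*
 * Test-side I/O: a test feeds mbufs to the device with
 * virtual_ethdev_add_mbufs_to_rx_queue() and collects what the device
 * "sent" with virtual_ethdev_get_mbufs_from_tx_queue(). A hypothetical
 * round trip:
 *
 *	virtual_ethdev_add_mbufs_to_rx_queue(port_id, burst, 4);
 *	rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKT_BURST);
 */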
int
virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private =
			vrtl_eth_dev->data->dev_private;

	return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
			burst_length, NULL);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
			burst_length, NULL);
}

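/*
 * Create the virtual device and register it with the ethdev layer. A
 * hypothetical caller (names and MAC value are illustrative):
 *
 *	struct rte_ether_addr mac = { .addr_bytes = {0x00, 0xFF, 0x00,
 *			0xFF, 0x00, 0xFF} };
 *	int port_id = virtual_ethdev_create("net_virt_0", &mac,
 *			rte_socket_id(), 1);
 *
 * Returns the new port id on success, -1 on any allocation failure.
 */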
int
virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
		uint8_t socket_id, uint8_t isr_support)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;
	char name_buf[RTE_RING_NAMESIZE];

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;
	id_table->device_id = 0xBEEF;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
	if (dev_private == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
	dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->rx_queue == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
	dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->tx_queue == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->device.numa_node = socket_id;
	pci_dev->device.name = eth_dev->data->name;
	pci_drv->driver.name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;

	if (isr_support)
		pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	pci_dev->device.driver = &pci_drv->driver;
	eth_dev->device = &pci_dev->device;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->data->dev_private = dev_private;

	/* Copy default device operation functions */
	dev_private->dev_ops = virtual_ethdev_default_dev_ops;
	eth_dev->dev_ops = &dev_private->dev_ops;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	rte_eth_dev_probing_finish(eth_dev);

	return eth_dev->data->port_id;

err:
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(id_table);
	if (dev_private != NULL) {
		/* rte_ring_free() tolerates NULL, so partial setup is fine */
		rte_ring_free(dev_private->rx_queue);
		rte_ring_free(dev_private->tx_queue);
	}
	rte_free(dev_private);

	return -1;
}