1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
3 */
4
5 #include <rte_atomic.h>
6 #include <rte_branch_prediction.h>
7 #include <rte_byteorder.h>
8 #include <rte_common.h>
9 #include <rte_mbuf.h>
10 #include <ethdev_driver.h>
11 #include <ethdev_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_bus_vdev.h>
14 #include <rte_kvargs.h>
15 #include <rte_net.h>
16 #include <rte_debug.h>
17 #include <rte_ip.h>
18 #include <rte_string_fns.h>
19 #include <rte_ethdev.h>
20 #include <rte_errno.h>
21 #include <rte_cycles.h>
22
23 #include <sys/types.h>
24 #include <sys/stat.h>
25 #include <sys/socket.h>
26 #include <sys/ioctl.h>
27 #include <sys/utsname.h>
28 #include <sys/mman.h>
29 #include <errno.h>
30 #include <signal.h>
31 #include <stdbool.h>
32 #include <stdint.h>
33 #include <sys/uio.h>
34 #include <unistd.h>
35 #include <arpa/inet.h>
36 #include <net/if.h>
37 #include <linux/if_tun.h>
38 #include <linux/if_ether.h>
39 #include <fcntl.h>
40 #include <ctype.h>
41
42 #include <tap_rss.h>
43 #include <rte_eth_tap.h>
44 #include <tap_flow.h>
45 #include <tap_netlink.h>
46 #include <tap_tcmsgs.h>
47
48 /* Linux based path to the TUN device */
49 #define TUN_TAP_DEV_PATH "/dev/net/tun"
50 #define DEFAULT_TAP_NAME "dtap"
51 #define DEFAULT_TUN_NAME "dtun"
52
53 #define ETH_TAP_IFACE_ARG "iface"
54 #define ETH_TAP_REMOTE_ARG "remote"
55 #define ETH_TAP_MAC_ARG "mac"
56 #define ETH_TAP_MAC_FIXED "fixed"
57
58 #define ETH_TAP_USR_MAC_FMT "xx:xx:xx:xx:xx:xx"
59 #define ETH_TAP_CMP_MAC_FMT "0123456789ABCDEFabcdef"
60 #define ETH_TAP_MAC_ARG_FMT ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT
61
62 #define TAP_GSO_MBUFS_PER_CORE 128
63 #define TAP_GSO_MBUF_SEG_SIZE 128
64 #define TAP_GSO_MBUF_CACHE_SIZE 4
65 #define TAP_GSO_MBUFS_NUM \
66 (TAP_GSO_MBUFS_PER_CORE * TAP_GSO_MBUF_CACHE_SIZE)
67
68 /* IPC key for queue fds sync */
69 #define TAP_MP_KEY "tap_mp_sync_queues"
70 #define TAP_MP_REQ_START_RXTX "tap_mp_req_start_rxtx"
71
72 #define TAP_IOV_DEFAULT_MAX 1024
73
74 #define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER | \
75 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
76 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
77 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
78
79 #define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
80 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
81 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
82 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
83 RTE_ETH_TX_OFFLOAD_TCP_TSO)
84
85 static int tap_devices_count;
86
87 static const char *tuntap_types[ETH_TUNTAP_TYPE_MAX] = {
88 "UNKNOWN", "TUN", "TAP"
89 };
90
91 static const char *valid_arguments[] = {
92 ETH_TAP_IFACE_ARG,
93 ETH_TAP_REMOTE_ARG,
94 ETH_TAP_MAC_ARG,
95 NULL
96 };
97
98 static volatile uint32_t tap_trigger; /* Rx trigger */
99
100 static struct rte_eth_link pmd_link = {
101 .link_speed = RTE_ETH_SPEED_NUM_10G,
102 .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
103 .link_status = RTE_ETH_LINK_DOWN,
104 .link_autoneg = RTE_ETH_LINK_FIXED,
105 };
106
107 static void
108 tap_trigger_cb(int sig __rte_unused)
109 {
110 /* Valid trigger values are nonzero */
111 tap_trigger = (tap_trigger + 1) | 0x80000000;
112 }
113
114 /* Specifies on what netdevices the ioctl should be applied */
115 enum ioctl_mode {
116 LOCAL_AND_REMOTE,
117 LOCAL_ONLY,
118 REMOTE_ONLY,
119 };
120
121 /* Message header to synchronize queues via IPC */
122 struct ipc_queues {
123 char port_name[RTE_DEV_NAME_MAX_LEN];
124 int rxq_count;
125 int txq_count;
126 /*
127 * The file descriptors are in the dedicated part
128 * of the Unix message to be translated by the kernel.
129 */
130 };
131
132 static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
133
134 /**
135 * Tun/Tap allocation routine
136 *
137 * @param[in] pmd
138 * Pointer to private structure.
139 *
140 * @param[in] is_keepalive
141 * Keepalive flag
142 *
143 * @return
144 * -1 on failure, fd on success
145 */
146 static int
147 tun_alloc(struct pmd_internals *pmd, int is_keepalive)
148 {
149 struct ifreq ifr;
150 #ifdef IFF_MULTI_QUEUE
151 unsigned int features;
152 #endif
153 int fd, signo, flags;
154
155 memset(&ifr, 0, sizeof(struct ifreq));
156
157 /*
158 * Do not set IFF_NO_PI as packet information header will be needed
159 * to check if a received packet has been truncated.
160 */
161 ifr.ifr_flags = (pmd->type == ETH_TUNTAP_TYPE_TAP) ?
162 IFF_TAP : IFF_TUN | IFF_POINTOPOINT;
163 strlcpy(ifr.ifr_name, pmd->name, IFNAMSIZ);
164
165 fd = open(TUN_TAP_DEV_PATH, O_RDWR);
166 if (fd < 0) {
167 TAP_LOG(ERR, "Unable to open %s interface", TUN_TAP_DEV_PATH);
168 goto error;
169 }
170
171 #ifdef IFF_MULTI_QUEUE
172 /* Grab the TUN features to verify we can work multi-queue */
173 if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
174 TAP_LOG(ERR, "unable to get TUN/TAP features");
175 goto error;
176 }
177 TAP_LOG(DEBUG, "%s Features %08x", TUN_TAP_DEV_PATH, features);
178
179 if (features & IFF_MULTI_QUEUE) {
180 TAP_LOG(DEBUG, " Multi-queue support for %d queues",
181 RTE_PMD_TAP_MAX_QUEUES);
182 ifr.ifr_flags |= IFF_MULTI_QUEUE;
183 } else
184 #endif
185 {
186 ifr.ifr_flags |= IFF_ONE_QUEUE;
187 TAP_LOG(DEBUG, " Single queue only support");
188 }
189
190 /* Set the TUN/TAP configuration and set the name if needed */
191 if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
192 TAP_LOG(WARNING, "Unable to set TUNSETIFF for %s: %s",
193 ifr.ifr_name, strerror(errno));
194 goto error;
195 }
196
197 /*
198 * The name passed to the kernel might be a wildcard like
199 * dtun%d, so read back the resulting device name.
200 */
201 TAP_LOG(DEBUG, "Device name is '%s'", ifr.ifr_name);
202 strlcpy(pmd->name, ifr.ifr_name, RTE_ETH_NAME_MAX_LEN);
203
204 if (is_keepalive) {
205 /*
206 * Detach the TUN/TAP keep-alive queue
207 * to avoid traffic through it
208 */
209 ifr.ifr_flags = IFF_DETACH_QUEUE;
210 if (ioctl(fd, TUNSETQUEUE, (void *)&ifr) < 0) {
211 TAP_LOG(WARNING,
212 "Unable to detach keep-alive queue for %s: %s",
213 ifr.ifr_name, strerror(errno));
214 goto error;
215 }
216 }
217
218 flags = fcntl(fd, F_GETFL);
219 if (flags == -1) {
220 TAP_LOG(WARNING,
221 "Unable to get %s current flags\n",
222 ifr.ifr_name);
223 goto error;
224 }
225
226 /* Always set the file descriptor to non-blocking */
227 flags |= O_NONBLOCK;
228 if (fcntl(fd, F_SETFL, flags) < 0) {
229 TAP_LOG(WARNING,
230 "Unable to set %s to nonblocking: %s",
231 ifr.ifr_name, strerror(errno));
232 goto error;
233 }
234
235 /* Find a free realtime signal */
236 for (signo = SIGRTMIN + 1; signo < SIGRTMAX; signo++) {
237 struct sigaction sa;
238
239 if (sigaction(signo, NULL, &sa) == -1) {
240 TAP_LOG(WARNING,
241 "Unable to get current rt-signal %d handler",
242 signo);
243 goto error;
244 }
245
246 /* Already have the handler we want on this signal */
247 if (sa.sa_handler == tap_trigger_cb)
248 break;
249
250 /* Is handler in use by application */
251 if (sa.sa_handler != SIG_DFL) {
252 TAP_LOG(DEBUG,
253 "Skipping used rt-signal %d", signo);
254 continue;
255 }
256
257 sa = (struct sigaction) {
258 .sa_flags = SA_RESTART,
259 .sa_handler = tap_trigger_cb,
260 };
261
262 if (sigaction(signo, &sa, NULL) == -1) {
263 TAP_LOG(WARNING,
264 "Unable to set rt-signal %d handler\n", signo);
265 goto error;
266 }
267
268 /* Found a good signal to use */
269 TAP_LOG(DEBUG,
270 "Using rt-signal %d", signo);
271 break;
272 }
273
274 if (signo == SIGRTMAX) {
275 TAP_LOG(WARNING, "All rt-signals are in use\n");
276
277 /* Disable trigger globally in case of error */
278 tap_trigger = 0;
279 TAP_LOG(NOTICE, "No Rx trigger signal available\n");
280 } else {
281 /* Enable signal on file descriptor */
282 if (fcntl(fd, F_SETSIG, signo) < 0) {
283 TAP_LOG(WARNING, "Unable to set signo %d for fd %d: %s",
284 signo, fd, strerror(errno));
285 goto error;
286 }
287 if (fcntl(fd, F_SETFL, flags | O_ASYNC) < 0) {
288 TAP_LOG(WARNING, "Unable to set fcntl flags: %s",
289 strerror(errno));
290 goto error;
291 }
292
293 if (fcntl(fd, F_SETOWN, getpid()) < 0) {
294 TAP_LOG(WARNING, "Unable to set fcntl owner: %s",
295 strerror(errno));
296 goto error;
297 }
298 }
299 return fd;
300
301 error:
302 if (fd >= 0)
303 close(fd);
304 return -1;
305 }
306
307 static void
308 tap_verify_csum(struct rte_mbuf *mbuf)
309 {
310 uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
311 uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
312 uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
313 unsigned int l2_len = sizeof(struct rte_ether_hdr);
314 unsigned int l3_len;
315 uint16_t cksum = 0;
316 void *l3_hdr;
317 void *l4_hdr;
318 struct rte_udp_hdr *udp_hdr;
319
320 if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
321 l2_len += 4;
322 else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
323 l2_len += 8;
324 /* Don't verify checksum for packets with discontinuous L2 header */
325 if (unlikely(l2_len + sizeof(struct rte_ipv4_hdr) >
326 rte_pktmbuf_data_len(mbuf)))
327 return;
328 l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
329 if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
330 struct rte_ipv4_hdr *iph = l3_hdr;
331
332 l3_len = rte_ipv4_hdr_len(iph);
333 if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
334 return;
335 /* check that the total length reported by header is not
336 * greater than the total received size
337 */
338 if (l2_len + rte_be_to_cpu_16(iph->total_length) >
339 rte_pktmbuf_data_len(mbuf))
340 return;
341
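/*
 * rte_raw_cksum() over a correct IPv4 header (including its checksum
 * field) yields 0xffff, so the complement below is zero only when the
 * header checksum is valid.
 */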
342 cksum = ~rte_raw_cksum(iph, l3_len);
343 mbuf->ol_flags |= cksum ?
344 RTE_MBUF_F_RX_IP_CKSUM_BAD :
345 RTE_MBUF_F_RX_IP_CKSUM_GOOD;
346 } else if (l3 == RTE_PTYPE_L3_IPV6) {
347 struct rte_ipv6_hdr *iph = l3_hdr;
348
349 l3_len = sizeof(struct rte_ipv6_hdr);
350 /* check that the total length reported by header is not
351 * greater than the total received size
352 */
353 if (l2_len + l3_len + rte_be_to_cpu_16(iph->payload_len) >
354 rte_pktmbuf_data_len(mbuf))
355 return;
356 } else {
357 /* - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN cannot happen because
358 * mbuf->packet_type is filled by rte_net_get_ptype() which
359 * never returns this value.
360 * - IPv6 extensions are not supported.
361 */
362 return;
363 }
364 if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
365 int cksum_ok;
366
367 l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
368 /* Don't verify checksum for multi-segment packets. */
369 if (mbuf->nb_segs > 1)
370 return;
371 if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
372 if (l4 == RTE_PTYPE_L4_UDP) {
373 udp_hdr = (struct rte_udp_hdr *)l4_hdr;
374 if (udp_hdr->dgram_cksum == 0) {
375 /*
376 * For IPv4, a zero UDP checksum
377 * indicates that the sender did not
378 * generate one [RFC 768].
379 */
380 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
381 return;
382 }
383 }
384 cksum_ok = !rte_ipv4_udptcp_cksum_verify(l3_hdr,
385 l4_hdr);
386 } else { /* l3 == RTE_PTYPE_L3_IPV6, checked above */
387 cksum_ok = !rte_ipv6_udptcp_cksum_verify(l3_hdr,
388 l4_hdr);
389 }
390 mbuf->ol_flags |= cksum_ok ?
391 RTE_MBUF_F_RX_L4_CKSUM_GOOD : RTE_MBUF_F_RX_L4_CKSUM_BAD;
392 }
393 }
394
395 static void
396 tap_rxq_pool_free(struct rte_mbuf *pool)
397 {
398 struct rte_mbuf *mbuf = pool;
399 uint16_t nb_segs = 1;
400
401 if (mbuf == NULL)
402 return;
403
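/*
 * The chain below was built for readv() and the head's nb_segs may not
 * reflect it, so recount the segments before handing the chain to
 * rte_pktmbuf_free().
 */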
404 while (mbuf->next) {
405 mbuf = mbuf->next;
406 nb_segs++;
407 }
408 pool->nb_segs = nb_segs;
409 rte_pktmbuf_free(pool);
410 }
411
412 /* Callback to handle the rx burst of packets to the correct interface and
413 * file descriptor(s) in a multi-queue setup.
414 */
415 static uint16_t
416 pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
417 {
418 struct rx_queue *rxq = queue;
419 struct pmd_process_private *process_private;
420 uint16_t num_rx;
421 unsigned long num_rx_bytes = 0;
422 uint32_t trigger = tap_trigger;
423
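/*
 * Rx trigger heuristic: if no trigger signal has fired since this queue
 * last returned fewer packets than requested, skip the readv() syscall.
 * When the trigger is disabled (tap_trigger == 0) the queue is always
 * polled.
 */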
424 if (trigger == rxq->trigger_seen)
425 return 0;
426
427 process_private = rte_eth_devices[rxq->in_port].process_private;
428 for (num_rx = 0; num_rx < nb_pkts; ) {
429 struct rte_mbuf *mbuf = rxq->pool;
430 struct rte_mbuf *seg = NULL;
431 struct rte_mbuf *new_tail = NULL;
432 uint16_t data_off = rte_pktmbuf_headroom(mbuf);
433 int len;
434
435 len = readv(process_private->rxq_fds[rxq->queue_id],
436 *rxq->iovecs,
437 1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
438 rxq->nb_rx_desc : 1));
439 if (len < (int)sizeof(struct tun_pi))
440 break;
441
442 /* Packet couldn't fit in the provided mbuf */
443 if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
444 rxq->stats.ierrors++;
445 continue;
446 }
447
448 len -= sizeof(struct tun_pi);
449
450 mbuf->pkt_len = len;
451 mbuf->port = rxq->in_port;
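/*
 * Walk the chain of mbufs pre-posted in the iovecs and consume as many
 * segments as this frame needs, replacing each consumed segment with a
 * freshly allocated mbuf so the iovecs stay primed for the next readv().
 */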
452 while (1) {
453 struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
454
455 if (unlikely(!buf)) {
456 rxq->stats.rx_nombuf++;
457 /* No new buf has been allocated: do nothing */
458 if (!new_tail || !seg)
459 goto end;
460
461 seg->next = NULL;
462 tap_rxq_pool_free(mbuf);
463
464 goto end;
465 }
466 seg = seg ? seg->next : mbuf;
467 if (rxq->pool == mbuf)
468 rxq->pool = buf;
469 if (new_tail)
470 new_tail->next = buf;
471 new_tail = buf;
472 new_tail->next = seg->next;
473
474 /* iovecs[0] is reserved for packet info (pi) */
475 (*rxq->iovecs)[mbuf->nb_segs].iov_len =
476 buf->buf_len - data_off;
477 (*rxq->iovecs)[mbuf->nb_segs].iov_base =
478 (char *)buf->buf_addr + data_off;
479
480 seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
481 seg->data_off = data_off;
482
483 len -= seg->data_len;
484 if (len <= 0)
485 break;
486 mbuf->nb_segs++;
487 /* First segment has headroom, not the others */
488 data_off = 0;
489 }
490 seg->next = NULL;
491 mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
492 RTE_PTYPE_ALL_MASK);
493 if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
494 tap_verify_csum(mbuf);
495
496 /* account for the receive frame */
497 bufs[num_rx++] = mbuf;
498 num_rx_bytes += mbuf->pkt_len;
499 }
500 end:
501 rxq->stats.ipackets += num_rx;
502 rxq->stats.ibytes += num_rx_bytes;
503
504 if (trigger && num_rx < nb_pkts)
505 rxq->trigger_seen = trigger;
506
507 return num_rx;
508 }
509
510 /* Finalize l4 checksum calculation */
511 static void
512 tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
513 uint32_t l4_raw_cksum)
514 {
515 if (l4_cksum) {
516 uint32_t cksum;
517
518 cksum = __rte_raw_cksum_reduce(l4_raw_cksum);
519 cksum += l4_phdr_cksum;
520
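/*
 * Fold the 32-bit accumulator down to 16 bits and take the one's
 * complement. A result of 0 is replaced with its equivalent encoding
 * 0xffff so that a zero ('no checksum') UDP value is never transmitted.
 */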
521 cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
522 cksum = (~cksum) & 0xffff;
523 if (cksum == 0)
524 cksum = 0xffff;
525 *l4_cksum = cksum;
526 }
527 }
528
529 /* Accumulate L4 raw checksums */
530 static void
531 tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
532 uint32_t *l4_raw_cksum)
533 {
534 if (l4_cksum == NULL)
535 return;
536
537 *l4_raw_cksum = __rte_raw_cksum(l4_data, l4_len, *l4_raw_cksum);
538 }
539
540 /* L3 and L4 pseudo headers checksum offloads */
541 static void
542 tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
543 unsigned int l3_len, unsigned int l4_len, uint16_t **l4_cksum,
544 uint16_t *l4_phdr_cksum, uint32_t *l4_raw_cksum)
545 {
546 void *l3_hdr = packet + l2_len;
547
548 if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
549 struct rte_ipv4_hdr *iph = l3_hdr;
550 uint16_t cksum;
551
552 iph->hdr_checksum = 0;
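/*
 * Recompute the IPv4 header checksum over the copied header; a raw sum
 * of 0xffff is kept as-is, since complementing it would store a zero
 * checksum field.
 */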
553 cksum = rte_raw_cksum(iph, l3_len);
554 iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
555 }
556 if (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
557 void *l4_hdr;
558
559 l4_hdr = packet + l2_len + l3_len;
560 if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
561 *l4_cksum = &((struct rte_udp_hdr *)l4_hdr)->dgram_cksum;
562 else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM)
563 *l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum;
564 else
565 return;
566 **l4_cksum = 0;
567 if (ol_flags & RTE_MBUF_F_TX_IPV4)
568 *l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
569 else
570 *l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
571 *l4_raw_cksum = __rte_raw_cksum(l4_hdr, l4_len, 0);
572 }
573 }
574
575 static inline int
576 tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
577 struct rte_mbuf **pmbufs,
578 uint16_t *num_packets, unsigned long *num_tx_bytes)
579 {
580 int i;
581 uint16_t l234_hlen;
582 struct pmd_process_private *process_private;
583
584 process_private = rte_eth_devices[txq->out_port].process_private;
585
586 for (i = 0; i < num_mbufs; i++) {
587 struct rte_mbuf *mbuf = pmbufs[i];
588 struct iovec iovecs[mbuf->nb_segs + 2];
589 struct tun_pi pi = { .flags = 0, .proto = 0x00 };
590 struct rte_mbuf *seg = mbuf;
591 char m_copy[mbuf->data_len];
592 int proto;
593 int n;
594 int j;
595 int k; /* current index in iovecs for copying segments */
596 uint16_t seg_len; /* length of first segment */
597 uint16_t nb_segs;
598 uint16_t *l4_cksum; /* l4 checksum (pseudo header + payload) */
599 uint32_t l4_raw_cksum = 0; /* TCP/UDP payload raw checksum */
600 uint16_t l4_phdr_cksum = 0; /* TCP/UDP pseudo header checksum */
601 uint16_t is_cksum = 0; /* in case cksum should be offloaded */
602
603 l4_cksum = NULL;
604 if (txq->type == ETH_TUNTAP_TYPE_TUN) {
605 /*
606 * TUN and TAP are created with IFF_NO_PI disabled.
607 * For the TUN PMD this is mandatory, as these fields are
608 * used by the kernel tun.c to determine whether a packet
609 * carries IP or non-IP data.
610 *
611 * The logic fetches the first byte of data from the mbuf
612 * and checks its high nibble: if it is 4 or 6, the
613 * protocol field is set to IPv4 or IPv6 respectively.
614 */
615 char *buff_data = rte_pktmbuf_mtod(seg, void *);
616 proto = (*buff_data & 0xf0);
617 pi.proto = (proto == 0x40) ?
618 rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
619 ((proto == 0x60) ?
620 rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) :
621 0x00);
622 }
623
624 k = 0;
625 iovecs[k].iov_base = &pi;
626 iovecs[k].iov_len = sizeof(pi);
627 k++;
628
629 nb_segs = mbuf->nb_segs;
630 if (txq->csum &&
631 ((mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4) ||
632 (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM ||
633 (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM))) {
634 is_cksum = 1;
635
636 /* Support only packets with at least layer 4
637 * header included in the first segment
638 */
639 seg_len = rte_pktmbuf_data_len(mbuf);
640 l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
641 if (seg_len < l234_hlen)
642 return -1;
643
644 /* To change checksums, work on a copy of l2, l3
645 * headers + l4 pseudo header
646 */
647 rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
648 l234_hlen);
649 tap_tx_l3_cksum(m_copy, mbuf->ol_flags,
650 mbuf->l2_len, mbuf->l3_len, mbuf->l4_len,
651 &l4_cksum, &l4_phdr_cksum,
652 &l4_raw_cksum);
653 iovecs[k].iov_base = m_copy;
654 iovecs[k].iov_len = l234_hlen;
655 k++;
656
657 /* Update next iovecs[] beyond l2, l3, l4 headers */
658 if (seg_len > l234_hlen) {
659 iovecs[k].iov_len = seg_len - l234_hlen;
660 iovecs[k].iov_base =
661 rte_pktmbuf_mtod(seg, char *) +
662 l234_hlen;
663 tap_tx_l4_add_rcksum(iovecs[k].iov_base,
664 iovecs[k].iov_len, l4_cksum,
665 &l4_raw_cksum);
666 k++;
667 nb_segs++;
668 }
669 seg = seg->next;
670 }
671
672 for (j = k; j <= nb_segs; j++) {
673 iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
674 iovecs[j].iov_base = rte_pktmbuf_mtod(seg, void *);
675 if (is_cksum)
676 tap_tx_l4_add_rcksum(iovecs[j].iov_base,
677 iovecs[j].iov_len, l4_cksum,
678 &l4_raw_cksum);
679 seg = seg->next;
680 }
681
682 if (is_cksum)
683 tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);
684
685 /* copy the tx frame data */
686 n = writev(process_private->txq_fds[txq->queue_id], iovecs, j);
687 if (n <= 0)
688 return -1;
689
690 (*num_packets)++;
691 (*num_tx_bytes) += rte_pktmbuf_pkt_len(mbuf);
692 }
693 return 0;
694 }
695
696 /* Callback to handle sending packets from the tap interface
697 */
698 static uint16_t
699 pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
700 {
701 struct tx_queue *txq = queue;
702 uint16_t num_tx = 0;
703 uint16_t num_packets = 0;
704 unsigned long num_tx_bytes = 0;
705 uint32_t max_size;
706 int i;
707
708 if (unlikely(nb_pkts == 0))
709 return 0;
710
711 struct rte_mbuf *gso_mbufs[MAX_GSO_MBUFS];
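/*
 * Largest frame the kernel side will accept: MTU plus Ethernet header,
 * CRC and a possible VLAN tag (4 bytes).
 */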
712 max_size = *txq->mtu + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4);
713 for (i = 0; i < nb_pkts; i++) {
714 struct rte_mbuf *mbuf_in = bufs[num_tx];
715 struct rte_mbuf **mbuf;
716 uint16_t num_mbufs = 0;
717 uint16_t tso_segsz = 0;
718 int ret;
719 int num_tso_mbufs;
720 uint16_t hdrs_len;
721 uint64_t tso;
722
723 tso = mbuf_in->ol_flags & RTE_MBUF_F_TX_TCP_SEG;
724 if (tso) {
725 struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
726
727 /* TCP segmentation implies TCP checksum offload */
728 mbuf_in->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
729
730 /* gso size is calculated without RTE_ETHER_CRC_LEN */
731 hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
732 mbuf_in->l4_len;
733 tso_segsz = mbuf_in->tso_segsz + hdrs_len;
734 if (unlikely(tso_segsz == hdrs_len) ||
735 tso_segsz > *txq->mtu) {
736 txq->stats.errs++;
737 break;
738 }
739 gso_ctx->gso_size = tso_segsz;
740 /* 'mbuf_in' packet to segment */
741 num_tso_mbufs = rte_gso_segment(mbuf_in,
742 gso_ctx, /* gso control block */
743 (struct rte_mbuf **)&gso_mbufs, /* out mbufs */
744 RTE_DIM(gso_mbufs)); /* max tso mbufs */
745
746 /* num_tso_mbufs contains the number of newly created mbufs */
747 if (num_tso_mbufs < 0)
748 break;
749
750 if (num_tso_mbufs >= 1) {
751 mbuf = gso_mbufs;
752 num_mbufs = num_tso_mbufs;
753 } else {
754 /* 0 means it can be transmitted directly
755 * without gso.
756 */
757 mbuf = &mbuf_in;
758 num_mbufs = 1;
759 }
760 } else {
761 /* stats.errs will be incremented */
762 if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
763 break;
764
765 /* num_tso_mbufs == 0 indicates no new mbufs were created */
766 num_tso_mbufs = 0;
767 mbuf = &mbuf_in;
768 num_mbufs = 1;
769 }
770
771 ret = tap_write_mbufs(txq, num_mbufs, mbuf,
772 &num_packets, &num_tx_bytes);
773 if (ret == -1) {
774 txq->stats.errs++;
775 /* free tso mbufs */
776 if (num_tso_mbufs > 0)
777 rte_pktmbuf_free_bulk(mbuf, num_tso_mbufs);
778 break;
779 }
780 num_tx++;
781 /* free original mbuf */
782 rte_pktmbuf_free(mbuf_in);
783 /* free tso mbufs */
784 if (num_tso_mbufs > 0)
785 rte_pktmbuf_free_bulk(mbuf, num_tso_mbufs);
786 }
787
788 txq->stats.opackets += num_packets;
789 txq->stats.errs += nb_pkts - num_tx;
790 txq->stats.obytes += num_tx_bytes;
791
792 return num_tx;
793 }
794
795 static const char *
796 tap_ioctl_req2str(unsigned long request)
797 {
798 switch (request) {
799 case SIOCSIFFLAGS:
800 return "SIOCSIFFLAGS";
801 case SIOCGIFFLAGS:
802 return "SIOCGIFFLAGS";
803 case SIOCGIFHWADDR:
804 return "SIOCGIFHWADDR";
805 case SIOCSIFHWADDR:
806 return "SIOCSIFHWADDR";
807 case SIOCSIFMTU:
808 return "SIOCSIFMTU";
809 }
810 return "UNKNOWN";
811 }
812
813 static int
814 tap_ioctl(struct pmd_internals *pmd, unsigned long request,
815 struct ifreq *ifr, int set, enum ioctl_mode mode)
816 {
817 short req_flags = ifr->ifr_flags;
818 int remote = pmd->remote_if_index &&
819 (mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);
820
821 if (!pmd->remote_if_index && mode == REMOTE_ONLY)
822 return 0;
823 /*
824 * If there is a remote netdevice, apply ioctl on it, then apply it on
825 * the tap netdevice.
826 */
827 apply:
828 if (remote)
829 strlcpy(ifr->ifr_name, pmd->remote_iface, IFNAMSIZ);
830 else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
831 strlcpy(ifr->ifr_name, pmd->name, IFNAMSIZ);
832 switch (request) {
833 case SIOCSIFFLAGS:
834 /* fetch current flags to leave other flags untouched */
835 if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
836 goto error;
837 if (set)
838 ifr->ifr_flags |= req_flags;
839 else
840 ifr->ifr_flags &= ~req_flags;
841 break;
842 case SIOCGIFFLAGS:
843 case SIOCGIFHWADDR:
844 case SIOCSIFHWADDR:
845 case SIOCSIFMTU:
846 break;
847 default:
848 TAP_LOG(WARNING, "%s: ioctl() called with wrong arg",
849 pmd->name);
850 return -EINVAL;
851 }
852 if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
853 goto error;
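/*
 * 'remote' is cleared by the post-decrement below, so in
 * LOCAL_AND_REMOTE mode the loop runs exactly twice: first on the
 * remote netdevice, then on the local tap.
 */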
854 if (remote-- && mode == LOCAL_AND_REMOTE)
855 goto apply;
856 return 0;
857
858 error:
859 TAP_LOG(DEBUG, "%s(%s) failed: %s(%d)", ifr->ifr_name,
860 tap_ioctl_req2str(request), strerror(errno), errno);
861 return -errno;
862 }
863
864 static int
865 tap_link_set_down(struct rte_eth_dev *dev)
866 {
867 struct pmd_internals *pmd = dev->data->dev_private;
868 struct ifreq ifr = { .ifr_flags = IFF_UP };
869
870 dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
871 return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
872 }
873
874 static int
875 tap_link_set_up(struct rte_eth_dev *dev)
876 {
877 struct pmd_internals *pmd = dev->data->dev_private;
878 struct ifreq ifr = { .ifr_flags = IFF_UP };
879
880 dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
881 return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
882 }
883
884 static int
885 tap_mp_req_on_rxtx(struct rte_eth_dev *dev)
886 {
887 struct rte_mp_msg msg;
888 struct ipc_queues *request_param = (struct ipc_queues *)msg.param;
889 int err;
890 int fd_iterator = 0;
891 struct pmd_process_private *process_private = dev->process_private;
892 int i;
893
894 memset(&msg, 0, sizeof(msg));
895 strlcpy(msg.name, TAP_MP_REQ_START_RXTX, sizeof(msg.name));
896 strlcpy(request_param->port_name, dev->data->name, sizeof(request_param->port_name));
897 msg.len_param = sizeof(*request_param);
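/*
 * The queue file descriptors are carried in msg.fds and delivered as
 * SCM_RIGHTS ancillary data over the IPC Unix socket, so the kernel
 * translates them into valid descriptors in the secondary process.
 */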
898 for (i = 0; i < dev->data->nb_tx_queues; i++) {
899 msg.fds[fd_iterator++] = process_private->txq_fds[i];
900 msg.num_fds++;
901 request_param->txq_count++;
902 }
903 for (i = 0; i < dev->data->nb_rx_queues; i++) {
904 msg.fds[fd_iterator++] = process_private->rxq_fds[i];
905 msg.num_fds++;
906 request_param->rxq_count++;
907 }
908
909 err = rte_mp_sendmsg(&msg);
910 if (err < 0) {
911 TAP_LOG(ERR, "Failed to send start req to secondary %d",
912 rte_errno);
913 return -1;
914 }
915
916 return 0;
917 }
918
919 static int
920 tap_dev_start(struct rte_eth_dev *dev)
921 {
922 int err, i;
923
924 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
925 tap_mp_req_on_rxtx(dev);
926
927 err = tap_intr_handle_set(dev, 1);
928 if (err)
929 return err;
930
931 err = tap_link_set_up(dev);
932 if (err)
933 return err;
934
935 for (i = 0; i < dev->data->nb_tx_queues; i++)
936 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
937 for (i = 0; i < dev->data->nb_rx_queues; i++)
938 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
939
940 return err;
941 }
942
943 static int
944 tap_mp_req_start_rxtx(const struct rte_mp_msg *request, __rte_unused const void *peer)
945 {
946 struct rte_eth_dev *dev;
947 const struct ipc_queues *request_param =
948 (const struct ipc_queues *)request->param;
949 int fd_iterator;
950 int queue;
951 struct pmd_process_private *process_private;
952
953 dev = rte_eth_dev_get_by_name(request_param->port_name);
954 if (!dev) {
955 TAP_LOG(ERR, "Failed to get dev for %s",
956 request_param->port_name);
957 return -1;
958 }
959 process_private = dev->process_private;
960 fd_iterator = 0;
961 TAP_LOG(DEBUG, "tap_attach rx_q:%d tx_q:%d\n", request_param->rxq_count,
962 request_param->txq_count);
963 for (queue = 0; queue < request_param->txq_count; queue++)
964 process_private->txq_fds[queue] = request->fds[fd_iterator++];
965 for (queue = 0; queue < request_param->rxq_count; queue++)
966 process_private->rxq_fds[queue] = request->fds[fd_iterator++];
967
968 return 0;
969 }
970
971 /* This function gets called when the current port gets stopped.
972 */
973 static int
974 tap_dev_stop(struct rte_eth_dev *dev)
975 {
976 int i;
977
978 for (i = 0; i < dev->data->nb_tx_queues; i++)
979 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
980 for (i = 0; i < dev->data->nb_rx_queues; i++)
981 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
982
983 tap_intr_handle_set(dev, 0);
984 tap_link_set_down(dev);
985
986 return 0;
987 }
988
989 static int
990 tap_dev_configure(struct rte_eth_dev *dev)
991 {
992 struct pmd_internals *pmd = dev->data->dev_private;
993
994 if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
995 TAP_LOG(ERR,
996 "%s: number of rx queues %d exceeds max num of queues %d",
997 dev->device->name,
998 dev->data->nb_rx_queues,
999 RTE_PMD_TAP_MAX_QUEUES);
1000 return -1;
1001 }
1002 if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
1003 TAP_LOG(ERR,
1004 "%s: number of tx queues %d exceeds max num of queues %d",
1005 dev->device->name,
1006 dev->data->nb_tx_queues,
1007 RTE_PMD_TAP_MAX_QUEUES);
1008 return -1;
1009 }
1010 if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
1011 TAP_LOG(ERR,
1012 "%s: number of rx queues %d must be equal to number of tx queues %d",
1013 dev->device->name,
1014 dev->data->nb_rx_queues,
1015 dev->data->nb_tx_queues);
1016 return -1;
1017 }
1018
1019 TAP_LOG(INFO, "%s: %s: TX configured queues number: %u",
1020 dev->device->name, pmd->name, dev->data->nb_tx_queues);
1021
1022 TAP_LOG(INFO, "%s: %s: RX configured queues number: %u",
1023 dev->device->name, pmd->name, dev->data->nb_rx_queues);
1024
1025 return 0;
1026 }
1027
1028 static uint32_t
1029 tap_dev_speed_capa(void)
1030 {
1031 uint32_t speed = pmd_link.link_speed;
1032 uint32_t capa = 0;
1033
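/* Advertise every speed up to and including the configured link speed. */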
1034 if (speed >= RTE_ETH_SPEED_NUM_10M)
1035 capa |= RTE_ETH_LINK_SPEED_10M;
1036 if (speed >= RTE_ETH_SPEED_NUM_100M)
1037 capa |= RTE_ETH_LINK_SPEED_100M;
1038 if (speed >= RTE_ETH_SPEED_NUM_1G)
1039 capa |= RTE_ETH_LINK_SPEED_1G;
1040 if (speed >= RTE_ETH_SPEED_NUM_2_5G)
1041 capa |= RTE_ETH_LINK_SPEED_2_5G;
1042 if (speed >= RTE_ETH_SPEED_NUM_5G)
1043 capa |= RTE_ETH_LINK_SPEED_5G;
1044 if (speed >= RTE_ETH_SPEED_NUM_10G)
1045 capa |= RTE_ETH_LINK_SPEED_10G;
1046 if (speed >= RTE_ETH_SPEED_NUM_20G)
1047 capa |= RTE_ETH_LINK_SPEED_20G;
1048 if (speed >= RTE_ETH_SPEED_NUM_25G)
1049 capa |= RTE_ETH_LINK_SPEED_25G;
1050 if (speed >= RTE_ETH_SPEED_NUM_40G)
1051 capa |= RTE_ETH_LINK_SPEED_40G;
1052 if (speed >= RTE_ETH_SPEED_NUM_50G)
1053 capa |= RTE_ETH_LINK_SPEED_50G;
1054 if (speed >= RTE_ETH_SPEED_NUM_56G)
1055 capa |= RTE_ETH_LINK_SPEED_56G;
1056 if (speed >= RTE_ETH_SPEED_NUM_100G)
1057 capa |= RTE_ETH_LINK_SPEED_100G;
1058
1059 return capa;
1060 }
1061
1062 static int
1063 tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1064 {
1065 struct pmd_internals *internals = dev->data->dev_private;
1066
1067 dev_info->if_index = internals->if_index;
1068 dev_info->max_mac_addrs = 1;
1069 dev_info->max_rx_pktlen = (uint32_t)RTE_ETHER_MAX_VLAN_FRAME_LEN;
1070 dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
1071 dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
1072 dev_info->min_rx_bufsize = 0;
1073 dev_info->speed_capa = tap_dev_speed_capa();
1074 dev_info->rx_queue_offload_capa = TAP_RX_OFFLOAD;
1075 dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa;
1076 dev_info->tx_queue_offload_capa = TAP_TX_OFFLOAD;
1077 dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa;
1078 dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
1079 /*
1080 * limitation: TAP supports all of IP, UDP and TCP hash
1081 * functions together and not in partial combinations
1082 */
1083 dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
1084 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
1085
1086 return 0;
1087 }
1088
1089 static int
1090 tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
1091 {
1092 unsigned int i, imax;
1093 unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
1094 unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
1095 unsigned long rx_nombuf = 0, ierrors = 0;
1096 const struct pmd_internals *pmd = dev->data->dev_private;
1097
1098 /* rx queue statistics */
1099 imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1100 dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1101 for (i = 0; i < imax; i++) {
1102 tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
1103 tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
1104 rx_total += tap_stats->q_ipackets[i];
1105 rx_bytes_total += tap_stats->q_ibytes[i];
1106 rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
1107 ierrors += pmd->rxq[i].stats.ierrors;
1108 }
1109
1110 /* tx queue statistics */
1111 imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1112 dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1113
1114 for (i = 0; i < imax; i++) {
1115 tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
1116 tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
1117 tx_total += tap_stats->q_opackets[i];
1118 tx_err_total += pmd->txq[i].stats.errs;
1119 tx_bytes_total += tap_stats->q_obytes[i];
1120 }
1121
1122 tap_stats->ipackets = rx_total;
1123 tap_stats->ibytes = rx_bytes_total;
1124 tap_stats->ierrors = ierrors;
1125 tap_stats->rx_nombuf = rx_nombuf;
1126 tap_stats->opackets = tx_total;
1127 tap_stats->oerrors = tx_err_total;
1128 tap_stats->obytes = tx_bytes_total;
1129 return 0;
1130 }
1131
1132 static int
1133 tap_stats_reset(struct rte_eth_dev *dev)
1134 {
1135 int i;
1136 struct pmd_internals *pmd = dev->data->dev_private;
1137
1138 for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
1139 pmd->rxq[i].stats.ipackets = 0;
1140 pmd->rxq[i].stats.ibytes = 0;
1141 pmd->rxq[i].stats.ierrors = 0;
1142 pmd->rxq[i].stats.rx_nombuf = 0;
1143
1144 pmd->txq[i].stats.opackets = 0;
1145 pmd->txq[i].stats.errs = 0;
1146 pmd->txq[i].stats.obytes = 0;
1147 }
1148
1149 return 0;
1150 }
1151
1152 static int
1153 tap_dev_close(struct rte_eth_dev *dev)
1154 {
1155 int i;
1156 struct pmd_internals *internals = dev->data->dev_private;
1157 struct pmd_process_private *process_private = dev->process_private;
1158 struct rx_queue *rxq;
1159
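/*
 * A secondary process only frees its per-process data and unregisters
 * the IPC action; the full device teardown happens in the primary
 * process below.
 */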
1160 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1161 rte_free(dev->process_private);
1162 if (tap_devices_count == 1)
1163 rte_mp_action_unregister(TAP_MP_REQ_START_RXTX);
1164 tap_devices_count--;
1165 return 0;
1166 }
1167
1168 tap_link_set_down(dev);
1169 if (internals->nlsk_fd != -1) {
1170 tap_flow_flush(dev, NULL);
1171 tap_flow_implicit_flush(internals, NULL);
1172 tap_nl_final(internals->nlsk_fd);
1173 internals->nlsk_fd = -1;
1174 }
1175
1176 for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
1177 if (process_private->rxq_fds[i] != -1) {
1178 rxq = &internals->rxq[i];
1179 close(process_private->rxq_fds[i]);
1180 process_private->rxq_fds[i] = -1;
1181 tap_rxq_pool_free(rxq->pool);
1182 rte_free(rxq->iovecs);
1183 rxq->pool = NULL;
1184 rxq->iovecs = NULL;
1185 }
1186 if (process_private->txq_fds[i] != -1) {
1187 close(process_private->txq_fds[i]);
1188 process_private->txq_fds[i] = -1;
1189 }
1190 }
1191
1192 if (internals->remote_if_index) {
1193 /* Restore initial remote state */
1194 int ret = ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
1195 &internals->remote_initial_flags);
1196 if (ret)
1197 TAP_LOG(ERR, "restore remote state failed: %d", ret);
1198
1199 }
1200
1201 rte_mempool_free(internals->gso_ctx_mp);
1202 internals->gso_ctx_mp = NULL;
1203
1204 if (internals->ka_fd != -1) {
1205 close(internals->ka_fd);
1206 internals->ka_fd = -1;
1207 }
1208
1209 /* mac_addrs must not be freed alone because part of dev_private */
1210 dev->data->mac_addrs = NULL;
1211
1212 internals = dev->data->dev_private;
1213 TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
1214 tuntap_types[internals->type], rte_socket_id());
1215
1216 rte_intr_instance_free(internals->intr_handle);
1217
1218 if (internals->ioctl_sock != -1) {
1219 close(internals->ioctl_sock);
1220 internals->ioctl_sock = -1;
1221 }
1222 rte_free(dev->process_private);
1223 if (tap_devices_count == 1)
1224 rte_mp_action_unregister(TAP_MP_KEY);
1225 tap_devices_count--;
1226 /*
1227 * Since the TUN device no longer has any open file
1228 * descriptors, the kernel will remove it.
1229 */
1230
1231 return 0;
1232 }
1233
1234 static void
1235 tap_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1236 {
1237 struct rx_queue *rxq = dev->data->rx_queues[qid];
1238 struct pmd_process_private *process_private;
1239
1240 if (!rxq)
1241 return;
1242 process_private = rte_eth_devices[rxq->in_port].process_private;
1243 if (process_private->rxq_fds[rxq->queue_id] != -1) {
1244 close(process_private->rxq_fds[rxq->queue_id]);
1245 process_private->rxq_fds[rxq->queue_id] = -1;
1246 tap_rxq_pool_free(rxq->pool);
1247 rte_free(rxq->iovecs);
1248 rxq->pool = NULL;
1249 rxq->iovecs = NULL;
1250 }
1251 }
1252
1253 static void
1254 tap_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1255 {
1256 struct tx_queue *txq = dev->data->tx_queues[qid];
1257 struct pmd_process_private *process_private;
1258
1259 if (!txq)
1260 return;
1261 process_private = rte_eth_devices[txq->out_port].process_private;
1262
1263 if (process_private->txq_fds[txq->queue_id] != -1) {
1264 close(process_private->txq_fds[txq->queue_id]);
1265 process_private->txq_fds[txq->queue_id] = -1;
1266 }
1267 }
1268
1269 static int
1270 tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
1271 {
1272 struct rte_eth_link *dev_link = &dev->data->dev_link;
1273 struct pmd_internals *pmd = dev->data->dev_private;
1274 struct ifreq ifr = { .ifr_flags = 0 };
1275
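/*
 * With a remote netdevice configured, the port is only reported up if
 * both the remote and the local tap interface are up and running.
 */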
1276 if (pmd->remote_if_index) {
1277 tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
1278 if (!(ifr.ifr_flags & IFF_UP) ||
1279 !(ifr.ifr_flags & IFF_RUNNING)) {
1280 dev_link->link_status = RTE_ETH_LINK_DOWN;
1281 return 0;
1282 }
1283 }
1284 tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
1285 dev_link->link_status =
1286 ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
1287 RTE_ETH_LINK_UP :
1288 RTE_ETH_LINK_DOWN);
1289 return 0;
1290 }
1291
1292 static int
1293 tap_promisc_enable(struct rte_eth_dev *dev)
1294 {
1295 struct pmd_internals *pmd = dev->data->dev_private;
1296 struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
1297 int ret;
1298
1299 ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1300 if (ret != 0)
1301 return ret;
1302
1303 if (pmd->remote_if_index && !pmd->flow_isolate) {
1304 dev->data->promiscuous = 1;
1305 ret = tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
1306 if (ret != 0) {
1307 /* Rollback promisc flag */
1308 tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1309 /*
1310 * rte_eth_dev_promiscuous_enable() rolls back
1311 * dev->data->promiscuous in case of failure.
1312 */
1313 return ret;
1314 }
1315 }
1316
1317 return 0;
1318 }
1319
1320 static int
1321 tap_promisc_disable(struct rte_eth_dev *dev)
1322 {
1323 struct pmd_internals *pmd = dev->data->dev_private;
1324 struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
1325 int ret;
1326
1327 ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1328 if (ret != 0)
1329 return ret;
1330
1331 if (pmd->remote_if_index && !pmd->flow_isolate) {
1332 dev->data->promiscuous = 0;
1333 ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
1334 if (ret != 0) {
1335 /* Rollback promisc flag */
1336 tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1337 /*
1338 * rte_eth_dev_promiscuous_disable() rolls back
1339 * dev->data->promiscuous in case of failure.
1340 */
1341 return ret;
1342 }
1343 }
1344
1345 return 0;
1346 }
1347
1348 static int
1349 tap_allmulti_enable(struct rte_eth_dev *dev)
1350 {
1351 struct pmd_internals *pmd = dev->data->dev_private;
1352 struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
1353 int ret;
1354
1355 ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1356 if (ret != 0)
1357 return ret;
1358
1359 if (pmd->remote_if_index && !pmd->flow_isolate) {
1360 dev->data->all_multicast = 1;
1361 ret = tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
1362 if (ret != 0) {
1363 /* Rollback allmulti flag */
1364 tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1365 /*
1366 * rte_eth_dev_allmulticast_enable() rolls back
1367 * dev->data->all_multicast in case of failure.
1368 */
1369 return ret;
1370 }
1371 }
1372
1373 return 0;
1374 }
1375
1376 static int
1377 tap_allmulti_disable(struct rte_eth_dev *dev)
1378 {
1379 struct pmd_internals *pmd = dev->data->dev_private;
1380 struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
1381 int ret;
1382
1383 ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1384 if (ret != 0)
1385 return ret;
1386
1387 if (pmd->remote_if_index && !pmd->flow_isolate) {
1388 dev->data->all_multicast = 0;
1389 ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
1390 if (ret != 0) {
1391 /* Rollback allmulti flag */
1392 tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1393 /*
1394 * rte_eth_dev_allmulticast_disable() rolls back
1395 * dev->data->all_multicast in case of failure.
1396 */
1397 return ret;
1398 }
1399 }
1400
1401 return 0;
1402 }
1403
1404 static int
1405 tap_mac_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1406 {
1407 struct pmd_internals *pmd = dev->data->dev_private;
1408 enum ioctl_mode mode = LOCAL_ONLY;
1409 struct ifreq ifr;
1410 int ret;
1411
1412 if (pmd->type == ETH_TUNTAP_TYPE_TUN) {
1413 TAP_LOG(ERR, "%s: can't MAC address for TUN",
1414 dev->device->name);
1415 return -ENOTSUP;
1416 }
1417
1418 if (rte_is_zero_ether_addr(mac_addr)) {
1419 TAP_LOG(ERR, "%s: can't set an empty MAC address",
1420 dev->device->name);
1421 return -EINVAL;
1422 }
1423 /* Check the actual current MAC address on the tap netdevice */
1424 ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
1425 if (ret < 0)
1426 return ret;
1427 if (rte_is_same_ether_addr(
1428 (struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data,
1429 mac_addr))
1430 return 0;
1431 /* Check the current MAC address on the remote */
1432 ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
1433 if (ret < 0)
1434 return ret;
1435 if (!rte_is_same_ether_addr(
1436 (struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data,
1437 mac_addr))
1438 mode = LOCAL_AND_REMOTE;
1439 ifr.ifr_hwaddr.sa_family = AF_LOCAL;
1440 rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, RTE_ETHER_ADDR_LEN);
1441 ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
1442 if (ret < 0)
1443 return ret;
1444 rte_memcpy(&pmd->eth_addr, mac_addr, RTE_ETHER_ADDR_LEN);
1445 if (pmd->remote_if_index && !pmd->flow_isolate) {
1446 /* Replace MAC redirection rule after a MAC change */
1447 ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
1448 if (ret < 0) {
1449 TAP_LOG(ERR,
1450 "%s: Couldn't delete MAC redirection rule",
1451 dev->device->name);
1452 return ret;
1453 }
1454 ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
1455 if (ret < 0) {
1456 TAP_LOG(ERR,
1457 "%s: Couldn't add MAC redirection rule",
1458 dev->device->name);
1459 return ret;
1460 }
1461 }
1462
1463 return 0;
1464 }
1465
1466 static int
1467 tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
1468 {
1469 uint32_t gso_types;
1470 char pool_name[64];
1471 struct pmd_internals *pmd = dev->data->dev_private;
1472 int ret;
1473
1474 /* initialize GSO context */
1475 gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
1476 if (!pmd->gso_ctx_mp) {
1477 /*
1478 * Create a private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
1479 * bytes per mbuf; use this pool for both direct and
1480 * indirect mbufs.
1481 */
1482 ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
1483 dev->device->name);
1484 if (ret < 0 || ret >= (int)sizeof(pool_name)) {
1485 TAP_LOG(ERR,
1486 "%s: failed to create mbuf pool name for device %s,"
1487 "device name too long or output error, ret: %d\n",
1488 pmd->name, dev->device->name, ret);
1489 return -ENAMETOOLONG;
1490 }
1491 pmd->gso_ctx_mp = rte_pktmbuf_pool_create(pool_name,
1492 TAP_GSO_MBUFS_NUM, TAP_GSO_MBUF_CACHE_SIZE, 0,
1493 RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
1494 SOCKET_ID_ANY);
1495 if (!pmd->gso_ctx_mp) {
1496 TAP_LOG(ERR,
1497 "%s: failed to create mbuf pool for device %s\n",
1498 pmd->name, dev->device->name);
1499 return -1;
1500 }
1501 }
1502
1503 gso_ctx->direct_pool = pmd->gso_ctx_mp;
1504 gso_ctx->indirect_pool = pmd->gso_ctx_mp;
1505 gso_ctx->gso_types = gso_types;
1506 gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
1507 gso_ctx->flag = 0;
1508
1509 return 0;
1510 }
1511
1512 static int
1513 tap_setup_queue(struct rte_eth_dev *dev,
1514 struct pmd_internals *internals,
1515 uint16_t qid,
1516 int is_rx)
1517 {
1518 int ret;
1519 int *fd;
1520 int *other_fd;
1521 const char *dir;
1522 struct pmd_internals *pmd = dev->data->dev_private;
1523 struct pmd_process_private *process_private = dev->process_private;
1524 struct rx_queue *rx = &internals->rxq[qid];
1525 struct tx_queue *tx = &internals->txq[qid];
1526 struct rte_gso_ctx *gso_ctx;
1527
1528 if (is_rx) {
1529 fd = &process_private->rxq_fds[qid];
1530 other_fd = &process_private->txq_fds[qid];
1531 dir = "rx";
1532 gso_ctx = NULL;
1533 } else {
1534 fd = &process_private->txq_fds[qid];
1535 other_fd = &process_private->rxq_fds[qid];
1536 dir = "tx";
1537 gso_ctx = &tx->gso_ctx;
1538 }
1539 if (*fd != -1) {
1540 /* fd for this queue already exists */
1541 TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
1542 pmd->name, *fd, dir, qid);
1543 gso_ctx = NULL;
1544 } else if (*other_fd != -1) {
1545 /* Only other_fd exists. dup it */
1546 *fd = dup(*other_fd);
1547 if (*fd < 0) {
1548 *fd = -1;
1549 TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
1550 return -1;
1551 }
1552 TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
1553 pmd->name, *other_fd, dir, qid, *fd);
1554 } else {
1555 /* Both RX and TX fds do not exist (equal -1). Create fd */
1556 *fd = tun_alloc(pmd, 0);
1557 if (*fd < 0) {
1558 *fd = -1; /* restore original value */
1559 TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name);
1560 return -1;
1561 }
1562 TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d",
1563 pmd->name, dir, qid, *fd);
1564 }
1565
1566 tx->mtu = &dev->data->mtu;
1567 rx->rxmode = &dev->data->dev_conf.rxmode;
1568 if (gso_ctx) {
1569 ret = tap_gso_ctx_setup(gso_ctx, dev);
1570 if (ret)
1571 return -1;
1572 }
1573
1574 tx->type = pmd->type;
1575
1576 return *fd;
1577 }
1578
1579 static int
1580 tap_rx_queue_setup(struct rte_eth_dev *dev,
1581 uint16_t rx_queue_id,
1582 uint16_t nb_rx_desc,
1583 unsigned int socket_id,
1584 const struct rte_eth_rxconf *rx_conf __rte_unused,
1585 struct rte_mempool *mp)
1586 {
1587 struct pmd_internals *internals = dev->data->dev_private;
1588 struct pmd_process_private *process_private = dev->process_private;
1589 struct rx_queue *rxq = &internals->rxq[rx_queue_id];
1590 struct rte_mbuf **tmp = &rxq->pool;
1591 long iov_max = sysconf(_SC_IOV_MAX);
1592
1593 if (iov_max <= 0) {
1594 TAP_LOG(WARNING,
1595 "_SC_IOV_MAX is not defined. Using %d as default",
1596 TAP_IOV_DEFAULT_MAX);
1597 iov_max = TAP_IOV_DEFAULT_MAX;
1598 }
1599 uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
1600 struct iovec (*iovecs)[nb_desc + 1];
1601 int data_off = RTE_PKTMBUF_HEADROOM;
1602 int ret = 0;
1603 int fd;
1604 int i;
1605
1606 if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
1607 TAP_LOG(WARNING,
1608 "nb_rx_queues %d too small or mempool NULL",
1609 dev->data->nb_rx_queues);
1610 return -1;
1611 }
1612
1613 rxq->mp = mp;
1614 rxq->trigger_seen = 1; /* force initial burst */
1615 rxq->in_port = dev->data->port_id;
1616 rxq->queue_id = rx_queue_id;
1617 rxq->nb_rx_desc = nb_desc;
1618 iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
1619 socket_id);
1620 if (!iovecs) {
1621 TAP_LOG(WARNING,
1622 "%s: Couldn't allocate %d RX descriptors",
1623 dev->device->name, nb_desc);
1624 return -ENOMEM;
1625 }
1626 rxq->iovecs = iovecs;
1627
1628 dev->data->rx_queues[rx_queue_id] = rxq;
1629 fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
1630 if (fd == -1) {
1631 ret = fd;
1632 goto error;
1633 }
1634
1635 (*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
1636 (*rxq->iovecs)[0].iov_base = &rxq->pi;
1637
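/*
 * Pre-allocate one mbuf per descriptor and point the remaining iovec
 * entries at their data areas; only the first mbuf keeps headroom
 * (data_off is zeroed after the first iteration).
 */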
1638 for (i = 1; i <= nb_desc; i++) {
1639 *tmp = rte_pktmbuf_alloc(rxq->mp);
1640 if (!*tmp) {
1641 TAP_LOG(WARNING,
1642 "%s: couldn't allocate memory for queue %d",
1643 dev->device->name, rx_queue_id);
1644 ret = -ENOMEM;
1645 goto error;
1646 }
1647 (*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
1648 (*rxq->iovecs)[i].iov_base =
1649 (char *)(*tmp)->buf_addr + data_off;
1650 data_off = 0;
1651 tmp = &(*tmp)->next;
1652 }
1653
1654 TAP_LOG(DEBUG, " RX TUNTAP device name %s, qid %d on fd %d",
1655 internals->name, rx_queue_id,
1656 process_private->rxq_fds[rx_queue_id]);
1657
1658 return 0;
1659
1660 error:
1661 tap_rxq_pool_free(rxq->pool);
1662 rxq->pool = NULL;
1663 rte_free(rxq->iovecs);
1664 rxq->iovecs = NULL;
1665 return ret;
1666 }
1667
1668 static int
1669 tap_tx_queue_setup(struct rte_eth_dev *dev,
1670 uint16_t tx_queue_id,
1671 uint16_t nb_tx_desc __rte_unused,
1672 unsigned int socket_id __rte_unused,
1673 const struct rte_eth_txconf *tx_conf)
1674 {
1675 struct pmd_internals *internals = dev->data->dev_private;
1676 struct pmd_process_private *process_private = dev->process_private;
1677 struct tx_queue *txq;
1678 int ret;
1679 uint64_t offloads;
1680
1681 if (tx_queue_id >= dev->data->nb_tx_queues)
1682 return -1;
1683 dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
1684 txq = dev->data->tx_queues[tx_queue_id];
1685 txq->out_port = dev->data->port_id;
1686 txq->queue_id = tx_queue_id;
1687
1688 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1689 txq->csum = !!(offloads &
1690 (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1691 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1692 RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
1693
1694 ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
1695 if (ret == -1)
1696 return -1;
1697 TAP_LOG(DEBUG,
1698 " TX TUNTAP device name %s, qid %d on fd %d csum %s",
1699 internals->name, tx_queue_id,
1700 process_private->txq_fds[tx_queue_id],
1701 txq->csum ? "on" : "off");
1702
1703 return 0;
1704 }
1705
1706 static int
1707 tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1708 {
1709 struct pmd_internals *pmd = dev->data->dev_private;
1710 struct ifreq ifr = { .ifr_mtu = mtu };
1711
1712 return tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
1713 }
1714
1715 static int
1716 tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
1717 struct rte_ether_addr *mc_addr_set __rte_unused,
1718 uint32_t nb_mc_addr __rte_unused)
1719 {
1720 /*
1721 * Nothing to do actually: the tap has no filtering whatsoever, every
1722 * packet is received.
1723 */
1724 return 0;
1725 }
1726
1727 static int
1728 tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
1729 {
1730 struct rte_eth_dev *dev = arg;
1731 struct pmd_internals *pmd = dev->data->dev_private;
1732 struct ifinfomsg *info = NLMSG_DATA(nh);
1733
1734 if (nh->nlmsg_type != RTM_NEWLINK ||
1735 (info->ifi_index != pmd->if_index &&
1736 info->ifi_index != pmd->remote_if_index))
1737 return 0;
1738 return tap_link_update(dev, 0);
1739 }
1740
1741 static void
1742 tap_dev_intr_handler(void *cb_arg)
1743 {
1744 struct rte_eth_dev *dev = cb_arg;
1745 struct pmd_internals *pmd = dev->data->dev_private;
1746
1747 if (rte_intr_fd_get(pmd->intr_handle) >= 0)
1748 tap_nl_recv(rte_intr_fd_get(pmd->intr_handle),
1749 tap_nl_msg_handler, dev);
1750 }
1751
1752 static int
1753 tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
1754 {
1755 struct pmd_internals *pmd = dev->data->dev_private;
1756 int ret;
1757
1758 /* In any case, disable interrupt if the conf is no longer there. */
1759 if (!dev->data->dev_conf.intr_conf.lsc) {
1760 if (rte_intr_fd_get(pmd->intr_handle) != -1)
1761 goto clean;
1762
1763 return 0;
1764 }
1765 if (set) {
1766 rte_intr_fd_set(pmd->intr_handle, tap_nl_init(RTMGRP_LINK));
1767 if (unlikely(rte_intr_fd_get(pmd->intr_handle) == -1))
1768 return -EBADF;
1769 return rte_intr_callback_register(
1770 pmd->intr_handle, tap_dev_intr_handler, dev);
1771 }
1772
1773 clean:
1774 do {
1775 ret = rte_intr_callback_unregister(pmd->intr_handle,
1776 tap_dev_intr_handler, dev);
1777 if (ret >= 0) {
1778 break;
1779 } else if (ret == -EAGAIN) {
1780 rte_delay_ms(100);
1781 } else {
1782 TAP_LOG(ERR, "intr callback unregister failed: %d",
1783 ret);
1784 break;
1785 }
1786 } while (true);
1787
1788 if (rte_intr_fd_get(pmd->intr_handle) >= 0) {
1789 tap_nl_final(rte_intr_fd_get(pmd->intr_handle));
1790 rte_intr_fd_set(pmd->intr_handle, -1);
1791 }
1792
1793 return 0;
1794 }
1795
1796 static int
1797 tap_intr_handle_set(struct rte_eth_dev *dev, int set)
1798 {
1799 int err;
1800
1801 err = tap_lsc_intr_handle_set(dev, set);
1802 if (err < 0) {
1803 if (!set)
1804 tap_rx_intr_vec_set(dev, 0);
1805 return err;
1806 }
1807 err = tap_rx_intr_vec_set(dev, set);
1808 if (err && set)
1809 tap_lsc_intr_handle_set(dev, 0);
1810 return err;
1811 }
1812
1813 static const uint32_t*
1814 tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1815 {
1816 static const uint32_t ptypes[] = {
1817 RTE_PTYPE_INNER_L2_ETHER,
1818 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1819 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1820 RTE_PTYPE_INNER_L3_IPV4,
1821 RTE_PTYPE_INNER_L3_IPV4_EXT,
1822 RTE_PTYPE_INNER_L3_IPV6,
1823 RTE_PTYPE_INNER_L3_IPV6_EXT,
1824 RTE_PTYPE_INNER_L4_FRAG,
1825 RTE_PTYPE_INNER_L4_UDP,
1826 RTE_PTYPE_INNER_L4_TCP,
1827 RTE_PTYPE_INNER_L4_SCTP,
1828 RTE_PTYPE_L2_ETHER,
1829 RTE_PTYPE_L2_ETHER_VLAN,
1830 RTE_PTYPE_L2_ETHER_QINQ,
1831 RTE_PTYPE_L3_IPV4,
1832 RTE_PTYPE_L3_IPV4_EXT,
1833 RTE_PTYPE_L3_IPV6_EXT,
1834 RTE_PTYPE_L3_IPV6,
1835 RTE_PTYPE_L4_FRAG,
1836 RTE_PTYPE_L4_UDP,
1837 RTE_PTYPE_L4_TCP,
1838 RTE_PTYPE_L4_SCTP,
1839 };
1840
1841 return ptypes;
1842 }
1843
1844 static int
1845 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
1846 struct rte_eth_fc_conf *fc_conf)
1847 {
1848 fc_conf->mode = RTE_ETH_FC_NONE;
1849 return 0;
1850 }
1851
1852 static int
1853 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
1854 struct rte_eth_fc_conf *fc_conf)
1855 {
1856 if (fc_conf->mode != RTE_ETH_FC_NONE)
1857 return -ENOTSUP;
1858 return 0;
1859 }
1860
1861 /**
1862 * DPDK callback to update the RSS hash configuration.
1863 *
1864 * @param dev
1865 * Pointer to Ethernet device structure.
1866 * @param[in] rss_conf
1867 * RSS configuration data.
1868 *
1869 * @return
1870 * 0 on success, a negative errno value otherwise and rte_errno is set.
1871 */
1872 static int
1873 tap_rss_hash_update(struct rte_eth_dev *dev,
1874 struct rte_eth_rss_conf *rss_conf)
1875 {
1876 if (rss_conf->rss_hf & TAP_RSS_HF_MASK) {
1877 rte_errno = EINVAL;
1878 return -rte_errno;
1879 }
1880 if (rss_conf->rss_key && rss_conf->rss_key_len) {
1881 /*
1882 * Currently TAP RSS key is hard coded
1883 * and cannot be updated
1884 */
1885 TAP_LOG(ERR,
1886 "port %u RSS key cannot be updated",
1887 dev->data->port_id);
1888 rte_errno = EINVAL;
1889 return -rte_errno;
1890 }
1891 return 0;
1892 }
1893
1894 static int
1895 tap_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1896 {
1897 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1898
1899 return 0;
1900 }
1901
1902 static int
1903 tap_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1904 {
1905 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1906
1907 return 0;
1908 }
1909
1910 static int
1911 tap_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1912 {
1913 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1914
1915 return 0;
1916 }
1917
1918 static int
1919 tap_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1920 {
1921 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1922
1923 return 0;
1924 }
1925 static const struct eth_dev_ops ops = {
1926 .dev_start = tap_dev_start,
1927 .dev_stop = tap_dev_stop,
1928 .dev_close = tap_dev_close,
1929 .dev_configure = tap_dev_configure,
1930 .dev_infos_get = tap_dev_info,
1931 .rx_queue_setup = tap_rx_queue_setup,
1932 .tx_queue_setup = tap_tx_queue_setup,
1933 .rx_queue_start = tap_rx_queue_start,
1934 .tx_queue_start = tap_tx_queue_start,
1935 .rx_queue_stop = tap_rx_queue_stop,
1936 .tx_queue_stop = tap_tx_queue_stop,
1937 .rx_queue_release = tap_rx_queue_release,
1938 .tx_queue_release = tap_tx_queue_release,
1939 .flow_ctrl_get = tap_flow_ctrl_get,
1940 .flow_ctrl_set = tap_flow_ctrl_set,
1941 .link_update = tap_link_update,
1942 .dev_set_link_up = tap_link_set_up,
1943 .dev_set_link_down = tap_link_set_down,
1944 .promiscuous_enable = tap_promisc_enable,
1945 .promiscuous_disable = tap_promisc_disable,
1946 .allmulticast_enable = tap_allmulti_enable,
1947 .allmulticast_disable = tap_allmulti_disable,
1948 .mac_addr_set = tap_mac_set,
1949 .mtu_set = tap_mtu_set,
1950 .set_mc_addr_list = tap_set_mc_addr_list,
1951 .stats_get = tap_stats_get,
1952 .stats_reset = tap_stats_reset,
1953 .dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
1954 .rss_hash_update = tap_rss_hash_update,
1955 .flow_ops_get = tap_dev_flow_ops_get,
1956 };
1957
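/*
 * Create a TUN or TAP ethdev:
 *  - allocate the ethdev and the per-process private data,
 *  - open the management socket and the keep-alive fd (which actually
 *    creates the kernel interface),
 *  - program the MTU and, for TAP, the MAC address,
 *  - open the netlink socket and install the multiq/ingress qdiscs used by
 *    rte_flow, plus the implicit rules when a remote netdevice is given.
 * Returns 0 on success and a negative value (after cleanup) on failure.
 */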
1958 static int
1959 eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
1960 char *remote_iface, struct rte_ether_addr *mac_addr,
1961 enum rte_tuntap_type type)
1962 {
1963 int numa_node = rte_socket_id();
1964 struct rte_eth_dev *dev;
1965 struct pmd_internals *pmd;
1966 struct pmd_process_private *process_private;
1967 const char *tuntap_name = tuntap_types[type];
1968 struct rte_eth_dev_data *data;
1969 struct ifreq ifr;
1970 int i;
1971
1972 TAP_LOG(DEBUG, "%s device on numa %u", tuntap_name, rte_socket_id());
1973
1974 dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
1975 if (!dev) {
1976 TAP_LOG(ERR, "%s Unable to allocate device struct",
1977 tuntap_name);
1978 goto error_exit_nodev;
1979 }
1980
1981 process_private = (struct pmd_process_private *)
1982 rte_zmalloc_socket(tap_name, sizeof(struct pmd_process_private),
1983 RTE_CACHE_LINE_SIZE, dev->device->numa_node);
1984
1985 if (process_private == NULL) {
1986 TAP_LOG(ERR, "Failed to alloc memory for process private");
1987 return -1;
1988 }
1989 pmd = dev->data->dev_private;
1990 dev->process_private = process_private;
1991 pmd->dev = dev;
1992 strlcpy(pmd->name, tap_name, sizeof(pmd->name));
1993 pmd->type = type;
1994 pmd->ka_fd = -1;
1995 pmd->nlsk_fd = -1;
1996 pmd->gso_ctx_mp = NULL;
1997
1998 pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
1999 if (pmd->ioctl_sock == -1) {
2000 TAP_LOG(ERR,
2001 "%s Unable to get a socket for management: %s",
2002 tuntap_name, strerror(errno));
2003 goto error_exit;
2004 }
2005
2006 /* Allocate interrupt instance */
2007 pmd->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
2008 if (pmd->intr_handle == NULL) {
2009 TAP_LOG(ERR, "Failed to allocate intr handle");
2010 goto error_exit;
2011 }
2012
2013 /* Setup some default values */
2014 data = dev->data;
2015 data->dev_private = pmd;
2016 data->dev_flags = RTE_ETH_DEV_INTR_LSC |
2017 RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2018 data->numa_node = numa_node;
2019
2020 data->dev_link = pmd_link;
2021 data->mac_addrs = &pmd->eth_addr;
2022 /* Set the number of RX and TX queues */
2023 data->nb_rx_queues = 0;
2024 data->nb_tx_queues = 0;
2025
2026 dev->dev_ops = &ops;
2027 dev->rx_pkt_burst = pmd_rx_burst;
2028 dev->tx_pkt_burst = pmd_tx_burst;
2029
2030 rte_intr_type_set(pmd->intr_handle, RTE_INTR_HANDLE_EXT);
2031 rte_intr_fd_set(pmd->intr_handle, -1);
2032 dev->intr_handle = pmd->intr_handle;
2033
2034 /* Pre-set all queue fds to -1 so they are treated as invalid */
2035 for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
2036 process_private->rxq_fds[i] = -1;
2037 process_private->txq_fds[i] = -1;
2038 }
2039
2040 if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
2041 if (rte_is_zero_ether_addr(mac_addr))
2042 rte_eth_random_addr((uint8_t *)&pmd->eth_addr);
2043 else
2044 rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
2045 }
2046
2047 /*
2048 * Allocate a TUN device keep-alive file descriptor that will only be
2049 * closed when the TUN device itself is closed or removed.
2050 * This keep-alive file descriptor will guarantee that the TUN device
2051 * exists even when all of its queues are closed
2052 */
2053 pmd->ka_fd = tun_alloc(pmd, 1);
2054 if (pmd->ka_fd == -1) {
2055 TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
2056 goto error_exit;
2057 }
2058 TAP_LOG(DEBUG, "allocated %s", pmd->name);
2059
2060 ifr.ifr_mtu = dev->data->mtu;
2061 if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
2062 goto error_exit;
2063
2064 if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
2065 memset(&ifr, 0, sizeof(struct ifreq));
2066 ifr.ifr_hwaddr.sa_family = AF_LOCAL;
2067 rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
2068 RTE_ETHER_ADDR_LEN);
2069 if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
2070 goto error_exit;
2071 }
2072
2073 /*
2074 * Set up everything related to rte_flow:
2075 * - netlink socket
2076 * - tap / remote if_index
2077 * - mandatory QDISCs
2078 * - rte_flow actual/implicit lists
2079 * - implicit rules
2080 */
2081 pmd->nlsk_fd = tap_nl_init(0);
2082 if (pmd->nlsk_fd == -1) {
2083 TAP_LOG(WARNING, "%s: failed to create netlink socket.",
2084 pmd->name);
2085 goto disable_rte_flow;
2086 }
2087 pmd->if_index = if_nametoindex(pmd->name);
2088 if (!pmd->if_index) {
2089 TAP_LOG(ERR, "%s: failed to get if_index.", pmd->name);
2090 goto disable_rte_flow;
2091 }
2092 if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
2093 TAP_LOG(ERR, "%s: failed to create multiq qdisc.",
2094 pmd->name);
2095 goto disable_rte_flow;
2096 }
2097 if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
2098 TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
2099 pmd->name);
2100 goto disable_rte_flow;
2101 }
2102 LIST_INIT(&pmd->flows);
2103
2104 if (strlen(remote_iface)) {
2105 pmd->remote_if_index = if_nametoindex(remote_iface);
2106 if (!pmd->remote_if_index) {
2107 TAP_LOG(ERR, "%s: failed to get %s if_index.",
2108 pmd->name, remote_iface);
2109 goto error_remote;
2110 }
2111 strlcpy(pmd->remote_iface, remote_iface, RTE_ETH_NAME_MAX_LEN);
2112
2113 /* Save state of remote device */
2114 tap_ioctl(pmd, SIOCGIFFLAGS, &pmd->remote_initial_flags, 0, REMOTE_ONLY);
2115
2116 /* Replicate remote MAC address */
2117 if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
2118 TAP_LOG(ERR, "%s: failed to get %s MAC address.",
2119 pmd->name, pmd->remote_iface);
2120 goto error_remote;
2121 }
2122 rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
2123 RTE_ETHER_ADDR_LEN);
2124 /* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
2125 if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
2126 TAP_LOG(ERR, "%s: failed to set MAC address (copied from %s).",
2127 pmd->name, remote_iface);
2128 goto error_remote;
2129 }
2130
2131 /*
2132 * Flush usually returns negative value because it tries to
2133 * delete every QDISC (and on a running device, one QDISC at
2134 * least is needed). Ignore negative return value.
2135 */
2136 qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
2137 if (qdisc_create_ingress(pmd->nlsk_fd,
2138 pmd->remote_if_index) < 0) {
2139 TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
2140 pmd->remote_iface);
2141 goto error_remote;
2142 }
2143 LIST_INIT(&pmd->implicit_flows);
2144 if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
2145 tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
2146 tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
2147 tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
2148 TAP_LOG(ERR,
2149 "%s: failed to create implicit rules.",
2150 pmd->name);
2151 goto error_remote;
2152 }
2153 }
2154
2155 rte_eth_dev_probing_finish(dev);
2156 return 0;
2157
2158 disable_rte_flow:
2159 TAP_LOG(ERR, " Disabling rte flow support: %s(%d)",
2160 strerror(errno), errno);
2161 if (strlen(remote_iface)) {
2162 TAP_LOG(ERR, "Remote feature requires flow support.");
2163 goto error_exit;
2164 }
2165 rte_eth_dev_probing_finish(dev);
2166 return 0;
2167
2168 error_remote:
2169 TAP_LOG(ERR, " Can't set up remote feature: %s(%d)",
2170 strerror(errno), errno);
2171 tap_flow_implicit_flush(pmd, NULL);
2172
2173 error_exit:
2174 if (pmd->nlsk_fd != -1)
2175 close(pmd->nlsk_fd);
2176 if (pmd->ka_fd != -1)
2177 close(pmd->ka_fd);
2178 if (pmd->ioctl_sock != -1)
2179 close(pmd->ioctl_sock);
2180 /* mac_addrs must not be freed alone because part of dev_private */
2181 dev->data->mac_addrs = NULL;
/* free the interrupt instance before releasing the port: the release
 * frees dev_private, which is where pmd (and its intr_handle) lives */
2182 rte_intr_instance_free(pmd->intr_handle);
2183 rte_eth_dev_release_port(dev);
2184
2185 error_exit_nodev:
2186 TAP_LOG(ERR, "%s Unable to initialize %s",
2187 tuntap_name, rte_vdev_device_name(vdev));
2188
2189 return -EINVAL;
2190 }
2191
2192 /* make sure name is a possible Linux network device name */
2193 static bool
2194 is_valid_iface(const char *name)
2195 {
2196 if (*name == '\0')
2197 return false;
2198
2199 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
2200 return false;
2201
2202 while (*name) {
2203 if (*name == '/' || *name == ':' || isspace(*name))
2204 return false;
2205 name++;
2206 }
2207 return true;
2208 }
2209
2210 static int
2211 set_interface_name(const char *key __rte_unused,
2212 const char *value,
2213 void *extra_args)
2214 {
2215 char *name = (char *)extra_args;
2216
2217 if (value) {
2218 if (!is_valid_iface(value)) {
2219 TAP_LOG(ERR, "TAP invalid interface name (%s)",
2220 value);
2221 return -1;
2222 }
2223 strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
2224 } else {
2225 /* use tap%d which causes kernel to choose next available */
2226 strlcpy(name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
2227 }
2228 return 0;
2229 }
2230
2231 static int
2232 set_remote_iface(const char *key __rte_unused,
2233 const char *value,
2234 void *extra_args)
2235 {
2236 char *name = (char *)extra_args;
2237
2238 if (value) {
2239 if (!is_valid_iface(value)) {
2240 TAP_LOG(ERR, "TAP invalid remote interface name (%s)",
2241 value);
2242 return -1;
2243 }
2244 strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
2245 }
2246
2247 return 0;
2248 }
2249
2250 static int parse_user_mac(struct rte_ether_addr *user_mac,
2251 const char *value)
2252 {
2253 unsigned int index = 0;
2254 char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;
2255
2256 if (user_mac == NULL || value == NULL)
2257 return 0;
2258
2259 strlcpy(mac_temp, value, sizeof(mac_temp));
2260 mac_byte = strtok(mac_temp, ":");
2261
2262 while ((mac_byte != NULL) &&
(index < RTE_ETHER_ADDR_LEN) && /* never write past addr_bytes[] */
2263 (strlen(mac_byte) <= 2) &&
2264 (strlen(mac_byte) == strspn(mac_byte,
2265 ETH_TAP_CMP_MAC_FMT))) {
2266 user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16);
2267 mac_byte = strtok(NULL, ":");
2268 }
2269
2270 return index;
2271 }
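/*
 * Example: parse_user_mac(&mac, "00:64:74:61:70:30") fills all six bytes and
 * returns 6; a malformed byte such as "7x" stops the parsing early, so
 * set_mac_type() below treats any return value other than 6 as an error.
 */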
2272
2273 static int
2274 set_mac_type(const char *key __rte_unused,
2275 const char *value,
2276 void *extra_args)
2277 {
2278 struct rte_ether_addr *user_mac = extra_args;
2279
2280 if (!value)
2281 return 0;
2282
2283 if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
2284 static int iface_idx;
2285
2286 /* fixed mac = 00:64:74:61:70:<iface_idx> */
2287 memcpy((char *)user_mac->addr_bytes, "\0dtap",
2288 RTE_ETHER_ADDR_LEN);
2289 user_mac->addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
2290 iface_idx++ + '0';
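		/*
		 * e.g. the first fixed-MAC device gets 00:64:74:61:70:30
		 * ("\0dtap0" in ASCII); the last byte is ASCII '0' plus the
		 * device index.
		 */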
2291 goto success;
2292 }
2293
2294 if (parse_user_mac(user_mac, value) != 6)
2295 goto error;
2296 success:
2297 TAP_LOG(DEBUG, "TAP user MAC param (%s)", value);
2298 return 0;
2299
2300 error:
2301 TAP_LOG(ERR, "TAP user MAC (%s) is not in format (%s|%s)",
2302 value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
2303 return -1;
2304 }
2305
2306 /*
2307  * Open a TUN interface device. The TUN PMD
2308  * 1) creates the device with type ETH_TUNTAP_TYPE_TUN (no Ethernet header),
2309  * 2) takes the interface name from the optional "iface" argument,
2310  * 3) reports a fixed 10G link speed, as the interface is purely virtual.
2311  */
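/*
 * Illustrative devargs (the interface name is an example only):
 *   --vdev=net_tun0,iface=tun0
 * If "iface" is omitted, the kernel picks the next free "dtun%d" name.
 */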
2312 static int
2313 rte_pmd_tun_probe(struct rte_vdev_device *dev)
2314 {
2315 const char *name, *params;
2316 int ret;
2317 struct rte_kvargs *kvlist = NULL;
2318 char tun_name[RTE_ETH_NAME_MAX_LEN];
2319 char remote_iface[RTE_ETH_NAME_MAX_LEN];
2320 struct rte_eth_dev *eth_dev;
2321
2322 name = rte_vdev_device_name(dev);
2323 params = rte_vdev_device_args(dev);
2324 memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
2325
2326 if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
2327 strlen(params) == 0) {
2328 eth_dev = rte_eth_dev_attach_secondary(name);
2329 if (!eth_dev) {
2330 TAP_LOG(ERR, "Failed to probe %s", name);
2331 return -1;
2332 }
2333 eth_dev->dev_ops = &ops;
2334 eth_dev->device = &dev->device;
2335 rte_eth_dev_probing_finish(eth_dev);
2336 return 0;
2337 }
2338
2339 /* use tun%d which causes kernel to choose next available */
2340 strlcpy(tun_name, DEFAULT_TUN_NAME "%d", RTE_ETH_NAME_MAX_LEN);
2341
2342 if (params && (params[0] != '\0')) {
2343 TAP_LOG(DEBUG, "parameters (%s)", params);
2344
2345 kvlist = rte_kvargs_parse(params, valid_arguments);
2346 if (kvlist) {
2347 if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
2348 ret = rte_kvargs_process(kvlist,
2349 ETH_TAP_IFACE_ARG,
2350 &set_interface_name,
2351 tun_name);
2352
2353 if (ret == -1)
2354 goto leave;
2355 }
2356 }
2357 }
2358 pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
2359
2360 TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
2361
2362 ret = eth_dev_tap_create(dev, tun_name, remote_iface, NULL,
2363 ETH_TUNTAP_TYPE_TUN);
2364
2365 leave:
2366 if (ret == -1) {
2367 TAP_LOG(ERR, "Failed to create pmd for %s as %s",
2368 name, tun_name);
2369 }
2370 rte_kvargs_free(kvlist);
2371
2372 return ret;
2373 }
2374
2375 /* Request queue file descriptors from secondary to primary. */
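/*
 * Rough flow: the secondary sends a TAP_MP_KEY request carrying the port
 * name; the primary answers with rxq_count/txq_count in the parameter area
 * and the queue fds attached to the reply, which the IPC layer carries over
 * its Unix socket so they arrive here as valid descriptors.
 */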
2376 static int
2377 tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
2378 {
2379 int ret;
2380 struct timespec timeout = {.tv_sec = 1, .tv_nsec = 0};
2381 struct rte_mp_msg request, *reply;
2382 struct rte_mp_reply replies;
2383 struct ipc_queues *request_param = (struct ipc_queues *)request.param;
2384 struct ipc_queues *reply_param;
2385 struct pmd_process_private *process_private = dev->process_private;
2386 int queue, fd_iterator;
2387
2388 /* Prepare the request */
2389 memset(&request, 0, sizeof(request));
2390 strlcpy(request.name, TAP_MP_KEY, sizeof(request.name));
2391 strlcpy(request_param->port_name, port_name,
2392 sizeof(request_param->port_name));
2393 request.len_param = sizeof(*request_param);
2394 /* Send request and receive reply */
2395 ret = rte_mp_request_sync(&request, &replies, &timeout);
2396 if (ret < 0 || replies.nb_received != 1) {
2397 TAP_LOG(ERR, "Failed to request queues from primary: %d",
2398 rte_errno);
2399 return -1;
2400 }
2401 reply = &replies.msgs[0];
2402 reply_param = (struct ipc_queues *)reply->param;
2403 TAP_LOG(DEBUG, "Received IPC reply for %s", reply_param->port_name);
2404
2405 /* Attach the queues from received file descriptors */
2406 if (reply_param->rxq_count + reply_param->txq_count != reply->num_fds) {
2407 TAP_LOG(ERR, "Unexpected number of fds received");
/* the reply messages were allocated by rte_mp_request_sync() */
free(reply);
2408 return -1;
2409 }
2410
2411 dev->data->nb_rx_queues = reply_param->rxq_count;
2412 dev->data->nb_tx_queues = reply_param->txq_count;
2413 fd_iterator = 0;
2414 for (queue = 0; queue < reply_param->rxq_count; queue++)
2415 process_private->rxq_fds[queue] = reply->fds[fd_iterator++];
2416 for (queue = 0; queue < reply_param->txq_count; queue++)
2417 process_private->txq_fds[queue] = reply->fds[fd_iterator++];
2418 free(reply);
2419 return 0;
2420 }
2421
2422 /* Send the queue file descriptors from the primary process to a secondary. */
2423 static int
2424 tap_mp_sync_queues(const struct rte_mp_msg *request, const void *peer)
2425 {
2426 struct rte_eth_dev *dev;
2427 struct pmd_process_private *process_private;
2428 struct rte_mp_msg reply;
2429 const struct ipc_queues *request_param =
2430 (const struct ipc_queues *)request->param;
2431 struct ipc_queues *reply_param =
2432 (struct ipc_queues *)reply.param;
2433 int queue;
2434
2435 /* Get requested port */
2436 TAP_LOG(DEBUG, "Received IPC request for %s", request_param->port_name);
2437 dev = rte_eth_dev_get_by_name(request_param->port_name);
2438 if (!dev) {
2439 TAP_LOG(ERR, "Failed to get port id for %s",
2440 request_param->port_name);
2441 return -1;
2442 }
2443 process_private = dev->process_private;
2444
2445 /* Fill file descriptors for all queues */
2446 reply.num_fds = 0;
2447 reply_param->rxq_count = 0;
2448 if (dev->data->nb_rx_queues + dev->data->nb_tx_queues >
2449 RTE_MP_MAX_FD_NUM) {
2450 TAP_LOG(ERR, "Number of rx/tx queues exceeds max number of fds");
2451 return -1;
2452 }
2453
2454 for (queue = 0; queue < dev->data->nb_rx_queues; queue++) {
2455 reply.fds[reply.num_fds++] = process_private->rxq_fds[queue];
2456 reply_param->rxq_count++;
2457 }
2458 RTE_ASSERT(reply_param->rxq_count == dev->data->nb_rx_queues);
2459
2460 reply_param->txq_count = 0;
2461 for (queue = 0; queue < dev->data->nb_tx_queues; queue++) {
2462 reply.fds[reply.num_fds++] = process_private->txq_fds[queue];
2463 reply_param->txq_count++;
2464 }
2465 RTE_ASSERT(reply_param->txq_count == dev->data->nb_tx_queues);
2466
2467 /* Send reply */
2468 strlcpy(reply.name, request->name, sizeof(reply.name));
2469 strlcpy(reply_param->port_name, request_param->port_name,
2470 sizeof(reply_param->port_name));
2471 reply.len_param = sizeof(*reply_param);
2472 if (rte_mp_reply(&reply, peer) < 0) {
2473 TAP_LOG(ERR, "Failed to reply an IPC request to sync queues");
2474 return -1;
2475 }
2476 return 0;
2477 }
2478
2479 /* Open a TAP interface device.
2480 */
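/*
 * Illustrative devargs (interface and MAC values are examples only):
 *   --vdev=net_tap0,iface=tap0,mac=fixed
 *   --vdev=net_tap1,iface=tap1,remote=eth0,mac=00:64:74:61:70:11
 * "remote" mirrors an existing kernel netdevice through the TAP port.
 */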
2481 static int
2482 rte_pmd_tap_probe(struct rte_vdev_device *dev)
2483 {
2484 const char *name, *params;
2485 int ret;
2486 struct rte_kvargs *kvlist = NULL;
2487 int speed;
2488 char tap_name[RTE_ETH_NAME_MAX_LEN];
2489 char remote_iface[RTE_ETH_NAME_MAX_LEN];
2490 struct rte_ether_addr user_mac = { .addr_bytes = {0} };
2491 struct rte_eth_dev *eth_dev;
2492 int tap_devices_count_increased = 0;
2493
2494 name = rte_vdev_device_name(dev);
2495 params = rte_vdev_device_args(dev);
2496
2497 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
2498 eth_dev = rte_eth_dev_attach_secondary(name);
2499 if (!eth_dev) {
2500 TAP_LOG(ERR, "Failed to probe %s", name);
2501 return -1;
2502 }
2503 eth_dev->dev_ops = &ops;
2504 eth_dev->device = &dev->device;
2505 eth_dev->rx_pkt_burst = pmd_rx_burst;
2506 eth_dev->tx_pkt_burst = pmd_tx_burst;
2507 if (!rte_eal_primary_proc_alive(NULL)) {
2508 TAP_LOG(ERR, "Primary process is missing");
2509 return -1;
2510 }
2511 eth_dev->process_private = (struct pmd_process_private *)
2512 rte_zmalloc_socket(name,
2513 sizeof(struct pmd_process_private),
2514 RTE_CACHE_LINE_SIZE,
2515 eth_dev->device->numa_node);
2516 if (eth_dev->process_private == NULL) {
2517 TAP_LOG(ERR,
2518 "Failed to alloc memory for process private");
2519 return -1;
2520 }
2521
2522 ret = tap_mp_attach_queues(name, eth_dev);
2523 if (ret != 0)
2524 return -1;
2525
2526 if (!tap_devices_count) {
2527 ret = rte_mp_action_register(TAP_MP_REQ_START_RXTX, tap_mp_req_start_rxtx);
2528 if (ret < 0 && rte_errno != ENOTSUP) {
2529 TAP_LOG(ERR, "tap: Failed to register IPC callback: %s",
2530 strerror(rte_errno));
2531 return -1;
2532 }
2533 }
2534 tap_devices_count++;
2535 rte_eth_dev_probing_finish(eth_dev);
2536 return 0;
2537 }
2538
2539 speed = RTE_ETH_SPEED_NUM_10G;
2540
2541 /* use tap%d which causes kernel to choose next available */
2542 strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
2543 memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
2544
2545 if (params && (params[0] != '\0')) {
2546 TAP_LOG(DEBUG, "parameters (%s)", params);
2547
2548 kvlist = rte_kvargs_parse(params, valid_arguments);
2549 if (kvlist) {
2550 if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
2551 ret = rte_kvargs_process(kvlist,
2552 ETH_TAP_IFACE_ARG,
2553 &set_interface_name,
2554 tap_name);
2555 if (ret == -1)
2556 goto leave;
2557 }
2558
2559 if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
2560 ret = rte_kvargs_process(kvlist,
2561 ETH_TAP_REMOTE_ARG,
2562 &set_remote_iface,
2563 remote_iface);
2564 if (ret == -1)
2565 goto leave;
2566 }
2567
2568 if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
2569 ret = rte_kvargs_process(kvlist,
2570 ETH_TAP_MAC_ARG,
2571 &set_mac_type,
2572 &user_mac);
2573 if (ret == -1)
2574 goto leave;
2575 }
2576 }
2577 }
2578 pmd_link.link_speed = speed;
2579
2580 TAP_LOG(DEBUG, "Initializing pmd_tap for %s", name);
2581
2582 /* Register IPC feed callback */
2583 if (!tap_devices_count) {
2584 ret = rte_mp_action_register(TAP_MP_KEY, tap_mp_sync_queues);
2585 if (ret < 0 && rte_errno != ENOTSUP) {
2586 TAP_LOG(ERR, "tap: Failed to register IPC callback: %s",
2587 strerror(rte_errno));
2588 goto leave;
2589 }
2590 }
2591 tap_devices_count++;
2592 tap_devices_count_increased = 1;
2593 ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac,
2594 ETH_TUNTAP_TYPE_TAP);
2595
2596 leave:
2597 if (ret == -1) {
2598 TAP_LOG(ERR, "Failed to create pmd for %s as %s",
2599 name, tap_name);
2600 if (tap_devices_count_increased == 1) {
2601 if (tap_devices_count == 1)
2602 rte_mp_action_unregister(TAP_MP_KEY);
2603 tap_devices_count--;
2604 }
2605 }
2606 rte_kvargs_free(kvlist);
2607
2608 return ret;
2609 }
2610
2611 /* Detach a TUN/TAP device.
2612 */
2613 static int
2614 rte_pmd_tap_remove(struct rte_vdev_device *dev)
2615 {
2616 struct rte_eth_dev *eth_dev = NULL;
2617
2618 /* find the ethdev entry */
2619 eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
2620 if (!eth_dev)
2621 return 0;
2622
2623 tap_dev_close(eth_dev);
2624 rte_eth_dev_release_port(eth_dev);
2625
2626 return 0;
2627 }
2628
2629 static struct rte_vdev_driver pmd_tun_drv = {
2630 .probe = rte_pmd_tun_probe,
2631 .remove = rte_pmd_tap_remove,
2632 };
2633
2634 static struct rte_vdev_driver pmd_tap_drv = {
2635 .probe = rte_pmd_tap_probe,
2636 .remove = rte_pmd_tap_remove,
2637 };
2638
2639 RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
2640 RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv);
2641 RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
2642 RTE_PMD_REGISTER_PARAM_STRING(net_tun,
2643 ETH_TAP_IFACE_ARG "=<string> ");
2644 RTE_PMD_REGISTER_PARAM_STRING(net_tap,
2645 ETH_TAP_IFACE_ARG "=<string> "
2646 ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
2647 ETH_TAP_REMOTE_ARG "=<string>");
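/*
 * Illustrative invocation (core list and names are examples only):
 *   dpdk-testpmd -l 0-1 --vdev=net_tap0,iface=dtap0,mac=fixed -- -i
 */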
2648 RTE_LOG_REGISTER_DEFAULT(tap_logtype, NOTICE);
2649