1 /* 2 * Copyright (C) 2011-2012 Matteo Landi, Luigi Rizzo. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 */ 25 26 #define NM_BRIDGE 27 28 /* 29 * This module supports memory mapped access to network devices, 30 * see netmap(4). 31 * 32 * The module uses a large, memory pool allocated by the kernel 33 * and accessible as mmapped memory by multiple userspace threads/processes. 34 * The memory pool contains packet buffers and "netmap rings", 35 * i.e. user-accessible copies of the interface's queues. 36 * 37 * Access to the network card works like this: 38 * 1. a process/thread issues one or more open() on /dev/netmap, to create 39 * select()able file descriptor on which events are reported. 40 * 2. on each descriptor, the process issues an ioctl() to identify 41 * the interface that should report events to the file descriptor. 42 * 3. on each descriptor, the process issues an mmap() request to 43 * map the shared memory region within the process' address space. 44 * The list of interesting queues is indicated by a location in 45 * the shared memory region. 46 * 4. using the functions in the netmap(4) userspace API, a process 47 * can look up the occupation state of a queue, access memory buffers, 48 * and retrieve received packets or enqueue packets to transmit. 49 * 5. using some ioctl()s the process can synchronize the userspace view 50 * of the queue with the actual status in the kernel. This includes both 51 * receiving the notification of new packets, and transmitting new 52 * packets on the output interface. 53 * 6. select() or poll() can be used to wait for events on individual 54 * transmit or receive queues (or all queues for a given interface). 55 */ 56 57 #ifdef linux 58 #include "bsd_glue.h" 59 static netdev_tx_t linux_netmap_start(struct sk_buff *skb, struct net_device *dev); 60 #endif /* linux */ 61 62 #ifdef __APPLE__ 63 #include "osx_glue.h" 64 #endif /* __APPLE__ */ 65 66 #ifdef __FreeBSD__ 67 #include <sys/cdefs.h> /* prerequisite */ 68 __FBSDID("$FreeBSD$"); 69 70 #include <sys/types.h> 71 #include <sys/module.h> 72 #include <sys/errno.h> 73 #include <sys/param.h> /* defines used in kernel.h */ 74 #include <sys/jail.h> 75 #include <sys/kernel.h> /* types used in module initialization */ 76 #include <sys/conf.h> /* cdevsw struct */ 77 #include <sys/uio.h> /* uio struct */ 78 #include <sys/sockio.h> 79 #include <sys/socketvar.h> /* struct socket */ 80 #include <sys/malloc.h> 81 #include <sys/mman.h> /* PROT_EXEC */ 82 #include <sys/poll.h> 83 #include <sys/proc.h> 84 #include <vm/vm.h> /* vtophys */ 85 #include <vm/pmap.h> /* vtophys */ 86 #include <sys/socket.h> /* sockaddrs */ 87 #include <machine/bus.h> 88 #include <sys/selinfo.h> 89 #include <sys/sysctl.h> 90 #include <net/if.h> 91 #include <net/bpf.h> /* BIOCIMMEDIATE */ 92 #include <net/vnet.h> 93 #include <machine/bus.h> /* bus_dmamap_* */ 94 95 MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map"); 96 #endif /* __FreeBSD__ */ 97 98 #include <net/netmap.h> 99 #include <dev/netmap/netmap_kern.h> 100 101 /* 102 * lock and unlock for the netmap memory allocator 103 */ 104 #define NMA_LOCK() mtx_lock(&nm_mem->nm_mtx); 105 #define NMA_UNLOCK() mtx_unlock(&nm_mem->nm_mtx); 106 struct netmap_mem_d; 107 static struct netmap_mem_d *nm_mem; /* Our memory allocator. */ 108 109 u_int netmap_total_buffers; 110 char *netmap_buffer_base; /* address of an invalid buffer */ 111 112 /* user-controlled variables */ 113 int netmap_verbose; 114 115 static int netmap_no_timestamp; /* don't timestamp on rxsync */ 116 117 SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args"); 118 SYSCTL_INT(_dev_netmap, OID_AUTO, verbose, 119 CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode"); 120 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp, 121 CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp"); 122 u_int netmap_buf_size = 2048; 123 TUNABLE_INT("hw.netmap.buf_size", (u_int *)&netmap_buf_size); 124 SYSCTL_INT(_dev_netmap, OID_AUTO, buf_size, 125 CTLFLAG_RD, &netmap_buf_size, 0, "Size of packet buffers"); 126 int netmap_mitigate = 1; 127 SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, ""); 128 int netmap_no_pendintr = 1; 129 SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, 130 CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets."); 131 132 int netmap_drop = 0; /* debugging */ 133 int netmap_flags = 0; /* debug flags */ 134 int netmap_copy = 0; /* debugging, copy content */ 135 136 SYSCTL_INT(_dev_netmap, OID_AUTO, drop, CTLFLAG_RW, &netmap_drop, 0 , ""); 137 SYSCTL_INT(_dev_netmap, OID_AUTO, flags, CTLFLAG_RW, &netmap_flags, 0 , ""); 138 SYSCTL_INT(_dev_netmap, OID_AUTO, copy, CTLFLAG_RW, &netmap_copy, 0 , ""); 139 140 #ifdef NM_BRIDGE /* support for netmap bridge */ 141 142 /* 143 * system parameters. 144 * 145 * All switched ports have prefix NM_NAME. 146 * The switch has a max of NM_BDG_MAXPORTS ports (often stored in a bitmap, 147 * so a practical upper bound is 64). 148 * Each tx ring is read-write, whereas rx rings are readonly (XXX not done yet). 149 * The virtual interfaces use per-queue lock instead of core lock. 150 * In the tx loop, we aggregate traffic in batches to make all operations 151 * faster. The batch size is NM_BDG_BATCH 152 */ 153 #define NM_NAME "vale" /* prefix for the interface */ 154 #define NM_BDG_MAXPORTS 16 /* up to 64 ? */ 155 #define NM_BRIDGE_RINGSIZE 1024 /* in the device */ 156 #define NM_BDG_HASH 1024 /* forwarding table entries */ 157 #define NM_BDG_BATCH 1024 /* entries in the forwarding buffer */ 158 #define NM_BRIDGES 4 /* number of bridges */ 159 int netmap_bridge = NM_BDG_BATCH; /* bridge batch size */ 160 SYSCTL_INT(_dev_netmap, OID_AUTO, bridge, CTLFLAG_RW, &netmap_bridge, 0 , ""); 161 162 #ifdef linux 163 #define ADD_BDG_REF(ifp) (NA(ifp)->if_refcount++) 164 #define DROP_BDG_REF(ifp) (NA(ifp)->if_refcount-- <= 1) 165 #else /* !linux */ 166 #define ADD_BDG_REF(ifp) (ifp)->if_refcount++ 167 #define DROP_BDG_REF(ifp) refcount_release(&(ifp)->if_refcount) 168 #ifdef __FreeBSD__ 169 #include <sys/endian.h> 170 #include <sys/refcount.h> 171 #endif /* __FreeBSD__ */ 172 #define prefetch(x) __builtin_prefetch(x) 173 #endif /* !linux */ 174 175 static void bdg_netmap_attach(struct ifnet *ifp); 176 static int bdg_netmap_reg(struct ifnet *ifp, int onoff); 177 /* per-tx-queue entry */ 178 struct nm_bdg_fwd { /* forwarding entry for a bridge */ 179 void *buf; 180 uint64_t dst; /* dst mask */ 181 uint32_t src; /* src index ? */ 182 uint16_t len; /* src len */ 183 }; 184 185 struct nm_hash_ent { 186 uint64_t mac; /* the top 2 bytes are the epoch */ 187 uint64_t ports; 188 }; 189 190 /* 191 * Interfaces for a bridge are all in ports[]. 192 * The array has fixed size, an empty entry does not terminate 193 * the search. 194 */ 195 struct nm_bridge { 196 struct ifnet *bdg_ports[NM_BDG_MAXPORTS]; 197 int n_ports; 198 uint64_t act_ports; 199 int freelist; /* first buffer index */ 200 NM_SELINFO_T si; /* poll/select wait queue */ 201 NM_LOCK_T bdg_lock; /* protect the selinfo ? */ 202 203 /* the forwarding table, MAC+ports */ 204 struct nm_hash_ent ht[NM_BDG_HASH]; 205 206 int namelen; /* 0 means free */ 207 char basename[IFNAMSIZ]; 208 }; 209 210 struct nm_bridge nm_bridges[NM_BRIDGES]; 211 212 #define BDG_LOCK(b) mtx_lock(&(b)->bdg_lock) 213 #define BDG_UNLOCK(b) mtx_unlock(&(b)->bdg_lock) 214 215 /* 216 * NA(ifp)->bdg_port port index 217 */ 218 219 // XXX only for multiples of 64 bytes, non overlapped. 220 static inline void 221 pkt_copy(void *_src, void *_dst, int l) 222 { 223 uint64_t *src = _src; 224 uint64_t *dst = _dst; 225 if (unlikely(l >= 1024)) { 226 bcopy(src, dst, l); 227 return; 228 } 229 for (; likely(l > 0); l-=64) { 230 *dst++ = *src++; 231 *dst++ = *src++; 232 *dst++ = *src++; 233 *dst++ = *src++; 234 *dst++ = *src++; 235 *dst++ = *src++; 236 *dst++ = *src++; 237 *dst++ = *src++; 238 } 239 } 240 241 /* 242 * locate a bridge among the existing ones. 243 * a ':' in the name terminates the bridge name. Otherwise, just NM_NAME. 244 * We assume that this is called with a name of at least NM_NAME chars. 245 */ 246 static struct nm_bridge * 247 nm_find_bridge(const char *name) 248 { 249 int i, l, namelen, e; 250 struct nm_bridge *b = NULL; 251 252 namelen = strlen(NM_NAME); /* base length */ 253 l = strlen(name); /* actual length */ 254 for (i = namelen + 1; i < l; i++) { 255 if (name[i] == ':') { 256 namelen = i; 257 break; 258 } 259 } 260 if (namelen >= IFNAMSIZ) 261 namelen = IFNAMSIZ; 262 ND("--- prefix is '%.*s' ---", namelen, name); 263 264 /* use the first entry for locking */ 265 BDG_LOCK(nm_bridges); // XXX do better 266 for (e = -1, i = 1; i < NM_BRIDGES; i++) { 267 b = nm_bridges + i; 268 if (b->namelen == 0) 269 e = i; /* record empty slot */ 270 else if (strncmp(name, b->basename, namelen) == 0) { 271 ND("found '%.*s' at %d", namelen, name, i); 272 break; 273 } 274 } 275 if (i == NM_BRIDGES) { /* all full */ 276 if (e == -1) { /* no empty slot */ 277 b = NULL; 278 } else { 279 b = nm_bridges + e; 280 strncpy(b->basename, name, namelen); 281 b->namelen = namelen; 282 } 283 } 284 BDG_UNLOCK(nm_bridges); 285 return b; 286 } 287 #endif /* NM_BRIDGE */ 288 289 /*------------- memory allocator -----------------*/ 290 #ifdef NETMAP_MEM2 291 #include "netmap_mem2.c" 292 #else /* !NETMAP_MEM2 */ 293 #include "netmap_mem1.c" 294 #endif /* !NETMAP_MEM2 */ 295 /*------------ end of memory allocator ----------*/ 296 297 /* Structure associated to each thread which registered an interface. */ 298 struct netmap_priv_d { 299 struct netmap_if *np_nifp; /* netmap interface descriptor. */ 300 301 struct ifnet *np_ifp; /* device for which we hold a reference */ 302 int np_ringid; /* from the ioctl */ 303 u_int np_qfirst, np_qlast; /* range of rings to scan */ 304 uint16_t np_txpoll; 305 }; 306 307 308 /* 309 * File descriptor's private data destructor. 310 * 311 * Call nm_register(ifp,0) to stop netmap mode on the interface and 312 * revert to normal operation. We expect that np_ifp has not gone. 313 */ 314 static void 315 netmap_dtor_locked(void *data) 316 { 317 struct netmap_priv_d *priv = data; 318 struct ifnet *ifp = priv->np_ifp; 319 struct netmap_adapter *na = NA(ifp); 320 struct netmap_if *nifp = priv->np_nifp; 321 322 na->refcount--; 323 if (na->refcount <= 0) { /* last instance */ 324 u_int i, j, lim; 325 326 D("deleting last netmap instance for %s", ifp->if_xname); 327 /* 328 * there is a race here with *_netmap_task() and 329 * netmap_poll(), which don't run under NETMAP_REG_LOCK. 330 * na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP 331 * (aka NETMAP_DELETING(na)) are a unique marker that the 332 * device is dying. 333 * Before destroying stuff we sleep a bit, and then complete 334 * the job. NIOCREG should realize the condition and 335 * loop until they can continue; the other routines 336 * should check the condition at entry and quit if 337 * they cannot run. 338 */ 339 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); 340 tsleep(na, 0, "NIOCUNREG", 4); 341 na->nm_lock(ifp, NETMAP_REG_LOCK, 0); 342 na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */ 343 /* Wake up any sleeping threads. netmap_poll will 344 * then return POLLERR 345 */ 346 for (i = 0; i < na->num_tx_rings + 1; i++) 347 selwakeuppri(&na->tx_rings[i].si, PI_NET); 348 for (i = 0; i < na->num_rx_rings + 1; i++) 349 selwakeuppri(&na->rx_rings[i].si, PI_NET); 350 selwakeuppri(&na->tx_si, PI_NET); 351 selwakeuppri(&na->rx_si, PI_NET); 352 /* release all buffers */ 353 NMA_LOCK(); 354 for (i = 0; i < na->num_tx_rings + 1; i++) { 355 struct netmap_ring *ring = na->tx_rings[i].ring; 356 lim = na->tx_rings[i].nkr_num_slots; 357 for (j = 0; j < lim; j++) 358 netmap_free_buf(nifp, ring->slot[j].buf_idx); 359 } 360 for (i = 0; i < na->num_rx_rings + 1; i++) { 361 struct netmap_ring *ring = na->rx_rings[i].ring; 362 lim = na->rx_rings[i].nkr_num_slots; 363 for (j = 0; j < lim; j++) 364 netmap_free_buf(nifp, ring->slot[j].buf_idx); 365 } 366 NMA_UNLOCK(); 367 netmap_free_rings(na); 368 wakeup(na); 369 } 370 netmap_if_free(nifp); 371 } 372 373 static void 374 nm_if_rele(struct ifnet *ifp) 375 { 376 #ifndef NM_BRIDGE 377 if_rele(ifp); 378 #else /* NM_BRIDGE */ 379 int i, full; 380 struct nm_bridge *b; 381 382 if (strncmp(ifp->if_xname, NM_NAME, sizeof(NM_NAME) - 1)) { 383 if_rele(ifp); 384 return; 385 } 386 if (!DROP_BDG_REF(ifp)) 387 return; 388 b = ifp->if_bridge; 389 BDG_LOCK(nm_bridges); 390 BDG_LOCK(b); 391 ND("want to disconnect %s from the bridge", ifp->if_xname); 392 full = 0; 393 for (i = 0; i < NM_BDG_MAXPORTS; i++) { 394 if (b->bdg_ports[i] == ifp) { 395 b->bdg_ports[i] = NULL; 396 bzero(ifp, sizeof(*ifp)); 397 free(ifp, M_DEVBUF); 398 break; 399 } 400 else if (b->bdg_ports[i] != NULL) 401 full = 1; 402 } 403 BDG_UNLOCK(b); 404 if (full == 0) { 405 ND("freeing bridge %d", b - nm_bridges); 406 b->namelen = 0; 407 } 408 BDG_UNLOCK(nm_bridges); 409 if (i == NM_BDG_MAXPORTS) 410 D("ouch, cannot find ifp to remove"); 411 #endif /* NM_BRIDGE */ 412 } 413 414 static void 415 netmap_dtor(void *data) 416 { 417 struct netmap_priv_d *priv = data; 418 struct ifnet *ifp = priv->np_ifp; 419 struct netmap_adapter *na = NA(ifp); 420 421 na->nm_lock(ifp, NETMAP_REG_LOCK, 0); 422 netmap_dtor_locked(data); 423 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); 424 425 nm_if_rele(ifp); 426 bzero(priv, sizeof(*priv)); /* XXX for safety */ 427 free(priv, M_DEVBUF); 428 } 429 430 431 /* 432 * mmap(2) support for the "netmap" device. 433 * 434 * Expose all the memory previously allocated by our custom memory 435 * allocator: this way the user has only to issue a single mmap(2), and 436 * can work on all the data structures flawlessly. 437 * 438 * Return 0 on success, -1 otherwise. 439 */ 440 441 #ifdef __FreeBSD__ 442 static int 443 netmap_mmap(__unused struct cdev *dev, 444 #if __FreeBSD_version < 900000 445 vm_offset_t offset, vm_paddr_t *paddr, int nprot 446 #else 447 vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, 448 __unused vm_memattr_t *memattr 449 #endif 450 ) 451 { 452 if (nprot & PROT_EXEC) 453 return (-1); // XXX -1 or EINVAL ? 454 455 ND("request for offset 0x%x", (uint32_t)offset); 456 *paddr = netmap_ofstophys(offset); 457 458 return (0); 459 } 460 #endif /* __FreeBSD__ */ 461 462 463 /* 464 * Handlers for synchronization of the queues from/to the host. 465 * 466 * netmap_sync_to_host() passes packets up. We are called from a 467 * system call in user process context, and the only contention 468 * can be among multiple user threads erroneously calling 469 * this routine concurrently. In principle we should not even 470 * need to lock. 471 */ 472 static void 473 netmap_sync_to_host(struct netmap_adapter *na) 474 { 475 struct netmap_kring *kring = &na->tx_rings[na->num_tx_rings]; 476 struct netmap_ring *ring = kring->ring; 477 struct mbuf *head = NULL, *tail = NULL, *m; 478 u_int k, n, lim = kring->nkr_num_slots - 1; 479 480 k = ring->cur; 481 if (k > lim) { 482 netmap_ring_reinit(kring); 483 return; 484 } 485 // na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0); 486 487 /* Take packets from hwcur to cur and pass them up. 488 * In case of no buffers we give up. At the end of the loop, 489 * the queue is drained in all cases. 490 */ 491 for (n = kring->nr_hwcur; n != k;) { 492 struct netmap_slot *slot = &ring->slot[n]; 493 494 n = (n == lim) ? 0 : n + 1; 495 if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) { 496 D("bad pkt at %d len %d", n, slot->len); 497 continue; 498 } 499 m = m_devget(NMB(slot), slot->len, 0, na->ifp, NULL); 500 501 if (m == NULL) 502 break; 503 if (tail) 504 tail->m_nextpkt = m; 505 else 506 head = m; 507 tail = m; 508 m->m_nextpkt = NULL; 509 } 510 kring->nr_hwcur = k; 511 kring->nr_hwavail = ring->avail = lim; 512 // na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0); 513 514 /* send packets up, outside the lock */ 515 while ((m = head) != NULL) { 516 head = head->m_nextpkt; 517 m->m_nextpkt = NULL; 518 if (netmap_verbose & NM_VERB_HOST) 519 D("sending up pkt %p size %d", m, MBUF_LEN(m)); 520 NM_SEND_UP(na->ifp, m); 521 } 522 } 523 524 /* 525 * rxsync backend for packets coming from the host stack. 526 * They have been put in the queue by netmap_start() so we 527 * need to protect access to the kring using a lock. 528 * 529 * This routine also does the selrecord if called from the poll handler 530 * (we know because td != NULL). 531 * 532 * NOTE: on linux, selrecord() is defined as a macro and uses pwait 533 * as an additional hidden argument. 534 */ 535 static void 536 netmap_sync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait) 537 { 538 struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings]; 539 struct netmap_ring *ring = kring->ring; 540 u_int j, n, lim = kring->nkr_num_slots; 541 u_int k = ring->cur, resvd = ring->reserved; 542 543 (void)pwait; /* disable unused warnings */ 544 na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0); 545 if (k >= lim) { 546 netmap_ring_reinit(kring); 547 return; 548 } 549 /* new packets are already set in nr_hwavail */ 550 /* skip past packets that userspace has released */ 551 j = kring->nr_hwcur; 552 if (resvd > 0) { 553 if (resvd + ring->avail >= lim + 1) { 554 D("XXX invalid reserve/avail %d %d", resvd, ring->avail); 555 ring->reserved = resvd = 0; // XXX panic... 556 } 557 k = (k >= resvd) ? k - resvd : k + lim - resvd; 558 } 559 if (j != k) { 560 n = k >= j ? k - j : k + lim - j; 561 kring->nr_hwavail -= n; 562 kring->nr_hwcur = k; 563 } 564 k = ring->avail = kring->nr_hwavail - resvd; 565 if (k == 0 && td) 566 selrecord(td, &kring->si); 567 if (k && (netmap_verbose & NM_VERB_HOST)) 568 D("%d pkts from stack", k); 569 na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0); 570 } 571 572 573 /* 574 * get a refcounted reference to an interface. 575 * Return ENXIO if the interface does not exist, EINVAL if netmap 576 * is not supported by the interface. 577 * If successful, hold a reference. 578 */ 579 static int 580 get_ifp(const char *name, struct ifnet **ifp) 581 { 582 #ifdef NM_BRIDGE 583 struct ifnet *iter = NULL; 584 585 do { 586 struct nm_bridge *b; 587 int i, l, cand = -1; 588 589 if (strncmp(name, NM_NAME, sizeof(NM_NAME) - 1)) 590 break; 591 b = nm_find_bridge(name); 592 if (b == NULL) { 593 D("no bridges available for '%s'", name); 594 return (ENXIO); 595 } 596 /* XXX locking */ 597 BDG_LOCK(b); 598 /* lookup in the local list of ports */ 599 for (i = 0; i < NM_BDG_MAXPORTS; i++) { 600 iter = b->bdg_ports[i]; 601 if (iter == NULL) { 602 if (cand == -1) 603 cand = i; /* potential insert point */ 604 continue; 605 } 606 if (!strcmp(iter->if_xname, name)) { 607 ADD_BDG_REF(iter); 608 ND("found existing interface"); 609 BDG_UNLOCK(b); 610 break; 611 } 612 } 613 if (i < NM_BDG_MAXPORTS) /* already unlocked */ 614 break; 615 if (cand == -1) { 616 D("bridge full, cannot create new port"); 617 no_port: 618 BDG_UNLOCK(b); 619 *ifp = NULL; 620 return EINVAL; 621 } 622 ND("create new bridge port %s", name); 623 /* space for forwarding list after the ifnet */ 624 l = sizeof(*iter) + 625 sizeof(struct nm_bdg_fwd)*NM_BDG_BATCH ; 626 iter = malloc(l, M_DEVBUF, M_NOWAIT | M_ZERO); 627 if (!iter) 628 goto no_port; 629 strcpy(iter->if_xname, name); 630 bdg_netmap_attach(iter); 631 b->bdg_ports[cand] = iter; 632 iter->if_bridge = b; 633 ADD_BDG_REF(iter); 634 BDG_UNLOCK(b); 635 ND("attaching virtual bridge %p", b); 636 } while (0); 637 *ifp = iter; 638 if (! *ifp) 639 #endif /* NM_BRIDGE */ 640 *ifp = ifunit_ref(name); 641 if (*ifp == NULL) 642 return (ENXIO); 643 /* can do this if the capability exists and if_pspare[0] 644 * points to the netmap descriptor. 645 */ 646 if ((*ifp)->if_capabilities & IFCAP_NETMAP && NA(*ifp)) 647 return 0; /* valid pointer, we hold the refcount */ 648 nm_if_rele(*ifp); 649 return EINVAL; // not NETMAP capable 650 } 651 652 653 /* 654 * Error routine called when txsync/rxsync detects an error. 655 * Can't do much more than resetting cur = hwcur, avail = hwavail. 656 * Return 1 on reinit. 657 * 658 * This routine is only called by the upper half of the kernel. 659 * It only reads hwcur (which is changed only by the upper half, too) 660 * and hwavail (which may be changed by the lower half, but only on 661 * a tx ring and only to increase it, so any error will be recovered 662 * on the next call). For the above, we don't strictly need to call 663 * it under lock. 664 */ 665 int 666 netmap_ring_reinit(struct netmap_kring *kring) 667 { 668 struct netmap_ring *ring = kring->ring; 669 u_int i, lim = kring->nkr_num_slots - 1; 670 int errors = 0; 671 672 D("called for %s", kring->na->ifp->if_xname); 673 if (ring->cur > lim) 674 errors++; 675 for (i = 0; i <= lim; i++) { 676 u_int idx = ring->slot[i].buf_idx; 677 u_int len = ring->slot[i].len; 678 if (idx < 2 || idx >= netmap_total_buffers) { 679 if (!errors++) 680 D("bad buffer at slot %d idx %d len %d ", i, idx, len); 681 ring->slot[i].buf_idx = 0; 682 ring->slot[i].len = 0; 683 } else if (len > NETMAP_BUF_SIZE) { 684 ring->slot[i].len = 0; 685 if (!errors++) 686 D("bad len %d at slot %d idx %d", 687 len, i, idx); 688 } 689 } 690 if (errors) { 691 int pos = kring - kring->na->tx_rings; 692 int n = kring->na->num_tx_rings + 1; 693 694 D("total %d errors", errors); 695 errors++; 696 D("%s %s[%d] reinit, cur %d -> %d avail %d -> %d", 697 kring->na->ifp->if_xname, 698 pos < n ? "TX" : "RX", pos < n ? pos : pos - n, 699 ring->cur, kring->nr_hwcur, 700 ring->avail, kring->nr_hwavail); 701 ring->cur = kring->nr_hwcur; 702 ring->avail = kring->nr_hwavail; 703 } 704 return (errors ? 1 : 0); 705 } 706 707 708 /* 709 * Set the ring ID. For devices with a single queue, a request 710 * for all rings is the same as a single ring. 711 */ 712 static int 713 netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid) 714 { 715 struct ifnet *ifp = priv->np_ifp; 716 struct netmap_adapter *na = NA(ifp); 717 u_int i = ringid & NETMAP_RING_MASK; 718 /* initially (np_qfirst == np_qlast) we don't want to lock */ 719 int need_lock = (priv->np_qfirst != priv->np_qlast); 720 int lim = na->num_rx_rings; 721 722 if (na->num_tx_rings > lim) 723 lim = na->num_tx_rings; 724 if ( (ringid & NETMAP_HW_RING) && i >= lim) { 725 D("invalid ring id %d", i); 726 return (EINVAL); 727 } 728 if (need_lock) 729 na->nm_lock(ifp, NETMAP_CORE_LOCK, 0); 730 priv->np_ringid = ringid; 731 if (ringid & NETMAP_SW_RING) { 732 priv->np_qfirst = NETMAP_SW_RING; 733 priv->np_qlast = 0; 734 } else if (ringid & NETMAP_HW_RING) { 735 priv->np_qfirst = i; 736 priv->np_qlast = i + 1; 737 } else { 738 priv->np_qfirst = 0; 739 priv->np_qlast = NETMAP_HW_RING ; 740 } 741 priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1; 742 if (need_lock) 743 na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0); 744 if (ringid & NETMAP_SW_RING) 745 D("ringid %s set to SW RING", ifp->if_xname); 746 else if (ringid & NETMAP_HW_RING) 747 D("ringid %s set to HW RING %d", ifp->if_xname, 748 priv->np_qfirst); 749 else 750 D("ringid %s set to all %d HW RINGS", ifp->if_xname, lim); 751 return 0; 752 } 753 754 /* 755 * ioctl(2) support for the "netmap" device. 756 * 757 * Following a list of accepted commands: 758 * - NIOCGINFO 759 * - SIOCGIFADDR just for convenience 760 * - NIOCREGIF 761 * - NIOCUNREGIF 762 * - NIOCTXSYNC 763 * - NIOCRXSYNC 764 * 765 * Return 0 on success, errno otherwise. 766 */ 767 static int 768 netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data, 769 int fflag, struct thread *td) 770 { 771 struct netmap_priv_d *priv = NULL; 772 struct ifnet *ifp; 773 struct nmreq *nmr = (struct nmreq *) data; 774 struct netmap_adapter *na; 775 int error; 776 u_int i, lim; 777 struct netmap_if *nifp; 778 779 (void)dev; /* UNUSED */ 780 (void)fflag; /* UNUSED */ 781 #ifdef linux 782 #define devfs_get_cdevpriv(pp) \ 783 ({ *(struct netmap_priv_d **)pp = ((struct file *)td)->private_data; \ 784 (*pp ? 0 : ENOENT); }) 785 786 /* devfs_set_cdevpriv cannot fail on linux */ 787 #define devfs_set_cdevpriv(p, fn) \ 788 ({ ((struct file *)td)->private_data = p; (p ? 0 : EINVAL); }) 789 790 791 #define devfs_clear_cdevpriv() do { \ 792 netmap_dtor(priv); ((struct file *)td)->private_data = 0; \ 793 } while (0) 794 #endif /* linux */ 795 796 CURVNET_SET(TD_TO_VNET(td)); 797 798 error = devfs_get_cdevpriv((void **)&priv); 799 if (error != ENOENT && error != 0) { 800 CURVNET_RESTORE(); 801 return (error); 802 } 803 804 error = 0; /* Could be ENOENT */ 805 nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0'; /* truncate name */ 806 switch (cmd) { 807 case NIOCGINFO: /* return capabilities etc */ 808 /* memsize is always valid */ 809 nmr->nr_memsize = nm_mem->nm_totalsize; 810 nmr->nr_offset = 0; 811 nmr->nr_rx_rings = nmr->nr_tx_rings = 0; 812 nmr->nr_rx_slots = nmr->nr_tx_slots = 0; 813 if (nmr->nr_version != NETMAP_API) { 814 D("API mismatch got %d have %d", 815 nmr->nr_version, NETMAP_API); 816 nmr->nr_version = NETMAP_API; 817 error = EINVAL; 818 break; 819 } 820 if (nmr->nr_name[0] == '\0') /* just get memory info */ 821 break; 822 error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */ 823 if (error) 824 break; 825 na = NA(ifp); /* retrieve netmap_adapter */ 826 nmr->nr_rx_rings = na->num_rx_rings; 827 nmr->nr_tx_rings = na->num_tx_rings; 828 nmr->nr_rx_slots = na->num_rx_desc; 829 nmr->nr_tx_slots = na->num_tx_desc; 830 nm_if_rele(ifp); /* return the refcount */ 831 break; 832 833 case NIOCREGIF: 834 if (nmr->nr_version != NETMAP_API) { 835 nmr->nr_version = NETMAP_API; 836 error = EINVAL; 837 break; 838 } 839 if (priv != NULL) { /* thread already registered */ 840 error = netmap_set_ringid(priv, nmr->nr_ringid); 841 break; 842 } 843 /* find the interface and a reference */ 844 error = get_ifp(nmr->nr_name, &ifp); /* keep reference */ 845 if (error) 846 break; 847 na = NA(ifp); /* retrieve netmap adapter */ 848 /* 849 * Allocate the private per-thread structure. 850 * XXX perhaps we can use a blocking malloc ? 851 */ 852 priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF, 853 M_NOWAIT | M_ZERO); 854 if (priv == NULL) { 855 error = ENOMEM; 856 nm_if_rele(ifp); /* return the refcount */ 857 break; 858 } 859 860 for (i = 10; i > 0; i--) { 861 na->nm_lock(ifp, NETMAP_REG_LOCK, 0); 862 if (!NETMAP_DELETING(na)) 863 break; 864 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); 865 tsleep(na, 0, "NIOCREGIF", hz/10); 866 } 867 if (i == 0) { 868 D("too many NIOCREGIF attempts, give up"); 869 error = EINVAL; 870 free(priv, M_DEVBUF); 871 nm_if_rele(ifp); /* return the refcount */ 872 break; 873 } 874 875 priv->np_ifp = ifp; /* store the reference */ 876 error = netmap_set_ringid(priv, nmr->nr_ringid); 877 if (error) 878 goto error; 879 priv->np_nifp = nifp = netmap_if_new(nmr->nr_name, na); 880 if (nifp == NULL) { /* allocation failed */ 881 error = ENOMEM; 882 } else if (ifp->if_capenable & IFCAP_NETMAP) { 883 /* was already set */ 884 } else { 885 /* Otherwise set the card in netmap mode 886 * and make it use the shared buffers. 887 */ 888 for (i = 0 ; i < na->num_tx_rings + 1; i++) 889 mtx_init(&na->tx_rings[i].q_lock, "nm_txq_lock", MTX_NETWORK_LOCK, MTX_DEF); 890 for (i = 0 ; i < na->num_rx_rings + 1; i++) { 891 mtx_init(&na->rx_rings[i].q_lock, "nm_rxq_lock", MTX_NETWORK_LOCK, MTX_DEF); 892 } 893 error = na->nm_register(ifp, 1); /* mode on */ 894 if (error) 895 netmap_dtor_locked(priv); 896 } 897 898 if (error) { /* reg. failed, release priv and ref */ 899 error: 900 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); 901 nm_if_rele(ifp); /* return the refcount */ 902 bzero(priv, sizeof(*priv)); 903 free(priv, M_DEVBUF); 904 break; 905 } 906 907 na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0); 908 error = devfs_set_cdevpriv(priv, netmap_dtor); 909 910 if (error != 0) { 911 /* could not assign the private storage for the 912 * thread, call the destructor explicitly. 913 */ 914 netmap_dtor(priv); 915 break; 916 } 917 918 /* return the offset of the netmap_if object */ 919 nmr->nr_rx_rings = na->num_rx_rings; 920 nmr->nr_tx_rings = na->num_tx_rings; 921 nmr->nr_rx_slots = na->num_rx_desc; 922 nmr->nr_tx_slots = na->num_tx_desc; 923 nmr->nr_memsize = nm_mem->nm_totalsize; 924 nmr->nr_offset = netmap_if_offset(nifp); 925 break; 926 927 case NIOCUNREGIF: 928 if (priv == NULL) { 929 error = ENXIO; 930 break; 931 } 932 933 /* the interface is unregistered inside the 934 destructor of the private data. */ 935 devfs_clear_cdevpriv(); 936 break; 937 938 case NIOCTXSYNC: 939 case NIOCRXSYNC: 940 if (priv == NULL) { 941 error = ENXIO; 942 break; 943 } 944 ifp = priv->np_ifp; /* we have a reference */ 945 na = NA(ifp); /* retrieve netmap adapter */ 946 if (priv->np_qfirst == NETMAP_SW_RING) { /* host rings */ 947 if (cmd == NIOCTXSYNC) 948 netmap_sync_to_host(na); 949 else 950 netmap_sync_from_host(na, NULL, NULL); 951 break; 952 } 953 /* find the last ring to scan */ 954 lim = priv->np_qlast; 955 if (lim == NETMAP_HW_RING) 956 lim = (cmd == NIOCTXSYNC) ? 957 na->num_tx_rings : na->num_rx_rings; 958 959 for (i = priv->np_qfirst; i < lim; i++) { 960 if (cmd == NIOCTXSYNC) { 961 struct netmap_kring *kring = &na->tx_rings[i]; 962 if (netmap_verbose & NM_VERB_TXSYNC) 963 D("pre txsync ring %d cur %d hwcur %d", 964 i, kring->ring->cur, 965 kring->nr_hwcur); 966 na->nm_txsync(ifp, i, 1 /* do lock */); 967 if (netmap_verbose & NM_VERB_TXSYNC) 968 D("post txsync ring %d cur %d hwcur %d", 969 i, kring->ring->cur, 970 kring->nr_hwcur); 971 } else { 972 na->nm_rxsync(ifp, i, 1 /* do lock */); 973 microtime(&na->rx_rings[i].ring->ts); 974 } 975 } 976 977 break; 978 979 #ifdef __FreeBSD__ 980 case BIOCIMMEDIATE: 981 case BIOCGHDRCMPLT: 982 case BIOCSHDRCMPLT: 983 case BIOCSSEESENT: 984 D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT"); 985 break; 986 987 default: /* allow device-specific ioctls */ 988 { 989 struct socket so; 990 bzero(&so, sizeof(so)); 991 error = get_ifp(nmr->nr_name, &ifp); /* keep reference */ 992 if (error) 993 break; 994 so.so_vnet = ifp->if_vnet; 995 // so->so_proto not null. 996 error = ifioctl(&so, cmd, data, td); 997 nm_if_rele(ifp); 998 break; 999 } 1000 1001 #else /* linux */ 1002 default: 1003 error = EOPNOTSUPP; 1004 #endif /* linux */ 1005 } 1006 1007 CURVNET_RESTORE(); 1008 return (error); 1009 } 1010 1011 1012 /* 1013 * select(2) and poll(2) handlers for the "netmap" device. 1014 * 1015 * Can be called for one or more queues. 1016 * Return true the event mask corresponding to ready events. 1017 * If there are no ready events, do a selrecord on either individual 1018 * selfd or on the global one. 1019 * Device-dependent parts (locking and sync of tx/rx rings) 1020 * are done through callbacks. 1021 * 1022 * On linux, arguments are really pwait, the poll table, and 'td' is struct file * 1023 * The first one is remapped to pwait as selrecord() uses the name as an 1024 * hidden argument. 1025 */ 1026 static int 1027 netmap_poll(struct cdev *dev, int events, struct thread *td) 1028 { 1029 struct netmap_priv_d *priv = NULL; 1030 struct netmap_adapter *na; 1031 struct ifnet *ifp; 1032 struct netmap_kring *kring; 1033 u_int core_lock, i, check_all, want_tx, want_rx, revents = 0; 1034 u_int lim_tx, lim_rx; 1035 enum {NO_CL, NEED_CL, LOCKED_CL }; /* see below */ 1036 void *pwait = dev; /* linux compatibility */ 1037 1038 (void)pwait; 1039 1040 if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL) 1041 return POLLERR; 1042 1043 ifp = priv->np_ifp; 1044 // XXX check for deleting() ? 1045 if ( (ifp->if_capenable & IFCAP_NETMAP) == 0) 1046 return POLLERR; 1047 1048 if (netmap_verbose & 0x8000) 1049 D("device %s events 0x%x", ifp->if_xname, events); 1050 want_tx = events & (POLLOUT | POLLWRNORM); 1051 want_rx = events & (POLLIN | POLLRDNORM); 1052 1053 na = NA(ifp); /* retrieve netmap adapter */ 1054 1055 lim_tx = na->num_tx_rings; 1056 lim_rx = na->num_rx_rings; 1057 /* how many queues we are scanning */ 1058 if (priv->np_qfirst == NETMAP_SW_RING) { 1059 if (priv->np_txpoll || want_tx) { 1060 /* push any packets up, then we are always ready */ 1061 kring = &na->tx_rings[lim_tx]; 1062 netmap_sync_to_host(na); 1063 revents |= want_tx; 1064 } 1065 if (want_rx) { 1066 kring = &na->rx_rings[lim_rx]; 1067 if (kring->ring->avail == 0) 1068 netmap_sync_from_host(na, td, dev); 1069 if (kring->ring->avail > 0) { 1070 revents |= want_rx; 1071 } 1072 } 1073 return (revents); 1074 } 1075 1076 /* 1077 * check_all is set if the card has more than one queue and 1078 * the client is polling all of them. If true, we sleep on 1079 * the "global" selfd, otherwise we sleep on individual selfd 1080 * (we can only sleep on one of them per direction). 1081 * The interrupt routine in the driver should always wake on 1082 * the individual selfd, and also on the global one if the card 1083 * has more than one ring. 1084 * 1085 * If the card has only one lock, we just use that. 1086 * If the card has separate ring locks, we just use those 1087 * unless we are doing check_all, in which case the whole 1088 * loop is wrapped by the global lock. 1089 * We acquire locks only when necessary: if poll is called 1090 * when buffers are available, we can just return without locks. 1091 * 1092 * rxsync() is only called if we run out of buffers on a POLLIN. 1093 * txsync() is called if we run out of buffers on POLLOUT, or 1094 * there are pending packets to send. The latter can be disabled 1095 * passing NETMAP_NO_TX_POLL in the NIOCREG call. 1096 */ 1097 check_all = (priv->np_qlast == NETMAP_HW_RING) && (lim_tx > 1 || lim_rx > 1); 1098 1099 /* 1100 * core_lock indicates what to do with the core lock. 1101 * The core lock is used when either the card has no individual 1102 * locks, or it has individual locks but we are cheking all 1103 * rings so we need the core lock to avoid missing wakeup events. 1104 * 1105 * It has three possible states: 1106 * NO_CL we don't need to use the core lock, e.g. 1107 * because we are protected by individual locks. 1108 * NEED_CL we need the core lock. In this case, when we 1109 * call the lock routine, move to LOCKED_CL 1110 * to remember to release the lock once done. 1111 * LOCKED_CL core lock is set, so we need to release it. 1112 */ 1113 core_lock = (check_all || !na->separate_locks) ? NEED_CL : NO_CL; 1114 #ifdef NM_BRIDGE 1115 /* the bridge uses separate locks */ 1116 if (na->nm_register == bdg_netmap_reg) { 1117 ND("not using core lock for %s", ifp->if_xname); 1118 core_lock = NO_CL; 1119 } 1120 #endif /* NM_BRIDGE */ 1121 if (priv->np_qlast != NETMAP_HW_RING) { 1122 lim_tx = lim_rx = priv->np_qlast; 1123 } 1124 1125 /* 1126 * We start with a lock free round which is good if we have 1127 * data available. If this fails, then lock and call the sync 1128 * routines. 1129 */ 1130 for (i = priv->np_qfirst; want_rx && i < lim_rx; i++) { 1131 kring = &na->rx_rings[i]; 1132 if (kring->ring->avail > 0) { 1133 revents |= want_rx; 1134 want_rx = 0; /* also breaks the loop */ 1135 } 1136 } 1137 for (i = priv->np_qfirst; want_tx && i < lim_tx; i++) { 1138 kring = &na->tx_rings[i]; 1139 if (kring->ring->avail > 0) { 1140 revents |= want_tx; 1141 want_tx = 0; /* also breaks the loop */ 1142 } 1143 } 1144 1145 /* 1146 * If we to push packets out (priv->np_txpoll) or want_tx is 1147 * still set, we do need to run the txsync calls (on all rings, 1148 * to avoid that the tx rings stall). 1149 */ 1150 if (priv->np_txpoll || want_tx) { 1151 for (i = priv->np_qfirst; i < lim_tx; i++) { 1152 kring = &na->tx_rings[i]; 1153 /* 1154 * Skip the current ring if want_tx == 0 1155 * (we have already done a successful sync on 1156 * a previous ring) AND kring->cur == kring->hwcur 1157 * (there are no pending transmissions for this ring). 1158 */ 1159 if (!want_tx && kring->ring->cur == kring->nr_hwcur) 1160 continue; 1161 if (core_lock == NEED_CL) { 1162 na->nm_lock(ifp, NETMAP_CORE_LOCK, 0); 1163 core_lock = LOCKED_CL; 1164 } 1165 if (na->separate_locks) 1166 na->nm_lock(ifp, NETMAP_TX_LOCK, i); 1167 if (netmap_verbose & NM_VERB_TXSYNC) 1168 D("send %d on %s %d", 1169 kring->ring->cur, 1170 ifp->if_xname, i); 1171 if (na->nm_txsync(ifp, i, 0 /* no lock */)) 1172 revents |= POLLERR; 1173 1174 /* Check avail/call selrecord only if called with POLLOUT */ 1175 if (want_tx) { 1176 if (kring->ring->avail > 0) { 1177 /* stop at the first ring. We don't risk 1178 * starvation. 1179 */ 1180 revents |= want_tx; 1181 want_tx = 0; 1182 } else if (!check_all) 1183 selrecord(td, &kring->si); 1184 } 1185 if (na->separate_locks) 1186 na->nm_lock(ifp, NETMAP_TX_UNLOCK, i); 1187 } 1188 } 1189 1190 /* 1191 * now if want_rx is still set we need to lock and rxsync. 1192 * Do it on all rings because otherwise we starve. 1193 */ 1194 if (want_rx) { 1195 for (i = priv->np_qfirst; i < lim_rx; i++) { 1196 kring = &na->rx_rings[i]; 1197 if (core_lock == NEED_CL) { 1198 na->nm_lock(ifp, NETMAP_CORE_LOCK, 0); 1199 core_lock = LOCKED_CL; 1200 } 1201 if (na->separate_locks) 1202 na->nm_lock(ifp, NETMAP_RX_LOCK, i); 1203 1204 if (na->nm_rxsync(ifp, i, 0 /* no lock */)) 1205 revents |= POLLERR; 1206 if (netmap_no_timestamp == 0 || 1207 kring->ring->flags & NR_TIMESTAMP) { 1208 microtime(&kring->ring->ts); 1209 } 1210 1211 if (kring->ring->avail > 0) 1212 revents |= want_rx; 1213 else if (!check_all) 1214 selrecord(td, &kring->si); 1215 if (na->separate_locks) 1216 na->nm_lock(ifp, NETMAP_RX_UNLOCK, i); 1217 } 1218 } 1219 if (check_all && revents == 0) { /* signal on the global queue */ 1220 if (want_tx) 1221 selrecord(td, &na->tx_si); 1222 if (want_rx) 1223 selrecord(td, &na->rx_si); 1224 } 1225 if (core_lock == LOCKED_CL) 1226 na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0); 1227 1228 return (revents); 1229 } 1230 1231 /*------- driver support routines ------*/ 1232 1233 /* 1234 * default lock wrapper. 1235 */ 1236 static void 1237 netmap_lock_wrapper(struct ifnet *dev, int what, u_int queueid) 1238 { 1239 struct netmap_adapter *na = NA(dev); 1240 1241 switch (what) { 1242 #ifdef linux /* some system do not need lock on register */ 1243 case NETMAP_REG_LOCK: 1244 case NETMAP_REG_UNLOCK: 1245 break; 1246 #endif /* linux */ 1247 1248 case NETMAP_CORE_LOCK: 1249 mtx_lock(&na->core_lock); 1250 break; 1251 1252 case NETMAP_CORE_UNLOCK: 1253 mtx_unlock(&na->core_lock); 1254 break; 1255 1256 case NETMAP_TX_LOCK: 1257 mtx_lock(&na->tx_rings[queueid].q_lock); 1258 break; 1259 1260 case NETMAP_TX_UNLOCK: 1261 mtx_unlock(&na->tx_rings[queueid].q_lock); 1262 break; 1263 1264 case NETMAP_RX_LOCK: 1265 mtx_lock(&na->rx_rings[queueid].q_lock); 1266 break; 1267 1268 case NETMAP_RX_UNLOCK: 1269 mtx_unlock(&na->rx_rings[queueid].q_lock); 1270 break; 1271 } 1272 } 1273 1274 1275 /* 1276 * Initialize a ``netmap_adapter`` object created by driver on attach. 1277 * We allocate a block of memory with room for a struct netmap_adapter 1278 * plus two sets of N+2 struct netmap_kring (where N is the number 1279 * of hardware rings): 1280 * krings 0..N-1 are for the hardware queues. 1281 * kring N is for the host stack queue 1282 * kring N+1 is only used for the selinfo for all queues. 1283 * Return 0 on success, ENOMEM otherwise. 1284 * 1285 * na->num_tx_rings can be set for cards with different tx/rx setups 1286 */ 1287 int 1288 netmap_attach(struct netmap_adapter *na, int num_queues) 1289 { 1290 int n, size; 1291 void *buf; 1292 struct ifnet *ifp = na->ifp; 1293 1294 if (ifp == NULL) { 1295 D("ifp not set, giving up"); 1296 return EINVAL; 1297 } 1298 /* clear other fields ? */ 1299 na->refcount = 0; 1300 if (na->num_tx_rings == 0) 1301 na->num_tx_rings = num_queues; 1302 na->num_rx_rings = num_queues; 1303 /* on each direction we have N+1 resources 1304 * 0..n-1 are the hardware rings 1305 * n is the ring attached to the stack. 1306 */ 1307 n = na->num_rx_rings + na->num_tx_rings + 2; 1308 size = sizeof(*na) + n * sizeof(struct netmap_kring); 1309 1310 buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 1311 if (buf) { 1312 WNA(ifp) = buf; 1313 na->tx_rings = (void *)((char *)buf + sizeof(*na)); 1314 na->rx_rings = na->tx_rings + na->num_tx_rings + 1; 1315 bcopy(na, buf, sizeof(*na)); 1316 ifp->if_capabilities |= IFCAP_NETMAP; 1317 1318 na = buf; 1319 if (na->nm_lock == NULL) { 1320 ND("using default locks for %s", ifp->if_xname); 1321 na->nm_lock = netmap_lock_wrapper; 1322 /* core lock initialized here. 1323 * others initialized after netmap_if_new 1324 */ 1325 mtx_init(&na->core_lock, "netmap core lock", MTX_NETWORK_LOCK, MTX_DEF); 1326 } 1327 } 1328 #ifdef linux 1329 if (ifp->netdev_ops) { 1330 D("netdev_ops %p", ifp->netdev_ops); 1331 /* prepare a clone of the netdev ops */ 1332 na->nm_ndo = *ifp->netdev_ops; 1333 } 1334 na->nm_ndo.ndo_start_xmit = linux_netmap_start; 1335 #endif 1336 D("%s for %s", buf ? "ok" : "failed", ifp->if_xname); 1337 1338 return (buf ? 0 : ENOMEM); 1339 } 1340 1341 1342 /* 1343 * Free the allocated memory linked to the given ``netmap_adapter`` 1344 * object. 1345 */ 1346 void 1347 netmap_detach(struct ifnet *ifp) 1348 { 1349 u_int i; 1350 struct netmap_adapter *na = NA(ifp); 1351 1352 if (!na) 1353 return; 1354 1355 for (i = 0; i < na->num_tx_rings + 1; i++) { 1356 knlist_destroy(&na->tx_rings[i].si.si_note); 1357 mtx_destroy(&na->tx_rings[i].q_lock); 1358 } 1359 for (i = 0; i < na->num_rx_rings + 1; i++) { 1360 knlist_destroy(&na->rx_rings[i].si.si_note); 1361 mtx_destroy(&na->rx_rings[i].q_lock); 1362 } 1363 knlist_destroy(&na->tx_si.si_note); 1364 knlist_destroy(&na->rx_si.si_note); 1365 bzero(na, sizeof(*na)); 1366 WNA(ifp) = NULL; 1367 free(na, M_DEVBUF); 1368 } 1369 1370 1371 /* 1372 * Intercept packets from the network stack and pass them 1373 * to netmap as incoming packets on the 'software' ring. 1374 * We are not locked when called. 1375 */ 1376 int 1377 netmap_start(struct ifnet *ifp, struct mbuf *m) 1378 { 1379 struct netmap_adapter *na = NA(ifp); 1380 struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings]; 1381 u_int i, len = MBUF_LEN(m); 1382 u_int error = EBUSY, lim = kring->nkr_num_slots - 1; 1383 struct netmap_slot *slot; 1384 1385 if (netmap_verbose & NM_VERB_HOST) 1386 D("%s packet %d len %d from the stack", ifp->if_xname, 1387 kring->nr_hwcur + kring->nr_hwavail, len); 1388 na->nm_lock(ifp, NETMAP_CORE_LOCK, 0); 1389 if (kring->nr_hwavail >= lim) { 1390 if (netmap_verbose) 1391 D("stack ring %s full\n", ifp->if_xname); 1392 goto done; /* no space */ 1393 } 1394 if (len > NETMAP_BUF_SIZE) { 1395 D("drop packet size %d > %d", len, NETMAP_BUF_SIZE); 1396 goto done; /* too long for us */ 1397 } 1398 1399 /* compute the insert position */ 1400 i = kring->nr_hwcur + kring->nr_hwavail; 1401 if (i > lim) 1402 i -= lim + 1; 1403 slot = &kring->ring->slot[i]; 1404 m_copydata(m, 0, len, NMB(slot)); 1405 slot->len = len; 1406 kring->nr_hwavail++; 1407 if (netmap_verbose & NM_VERB_HOST) 1408 D("wake up host ring %s %d", na->ifp->if_xname, na->num_rx_rings); 1409 selwakeuppri(&kring->si, PI_NET); 1410 error = 0; 1411 done: 1412 na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0); 1413 1414 /* release the mbuf in either cases of success or failure. As an 1415 * alternative, put the mbuf in a free list and free the list 1416 * only when really necessary. 1417 */ 1418 m_freem(m); 1419 1420 return (error); 1421 } 1422 1423 1424 /* 1425 * netmap_reset() is called by the driver routines when reinitializing 1426 * a ring. The driver is in charge of locking to protect the kring. 1427 * If netmap mode is not set just return NULL. 1428 */ 1429 struct netmap_slot * 1430 netmap_reset(struct netmap_adapter *na, enum txrx tx, int n, 1431 u_int new_cur) 1432 { 1433 struct netmap_kring *kring; 1434 int new_hwofs, lim; 1435 1436 if (na == NULL) 1437 return NULL; /* no netmap support here */ 1438 if (!(na->ifp->if_capenable & IFCAP_NETMAP)) 1439 return NULL; /* nothing to reinitialize */ 1440 1441 if (tx == NR_TX) { 1442 kring = na->tx_rings + n; 1443 new_hwofs = kring->nr_hwcur - new_cur; 1444 } else { 1445 kring = na->rx_rings + n; 1446 new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur; 1447 } 1448 lim = kring->nkr_num_slots - 1; 1449 if (new_hwofs > lim) 1450 new_hwofs -= lim + 1; 1451 1452 /* Alwayws set the new offset value and realign the ring. */ 1453 kring->nkr_hwofs = new_hwofs; 1454 if (tx == NR_TX) 1455 kring->nr_hwavail = kring->nkr_num_slots - 1; 1456 D("new hwofs %d on %s %s[%d]", 1457 kring->nkr_hwofs, na->ifp->if_xname, 1458 tx == NR_TX ? "TX" : "RX", n); 1459 1460 #if 0 // def linux 1461 /* XXX check that the mappings are correct */ 1462 /* need ring_nr, adapter->pdev, direction */ 1463 buffer_info->dma = dma_map_single(&pdev->dev, addr, adapter->rx_buffer_len, DMA_FROM_DEVICE); 1464 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { 1465 D("error mapping rx netmap buffer %d", i); 1466 // XXX fix error handling 1467 } 1468 1469 #endif /* linux */ 1470 /* 1471 * Wakeup on the individual and global lock 1472 * We do the wakeup here, but the ring is not yet reconfigured. 1473 * However, we are under lock so there are no races. 1474 */ 1475 selwakeuppri(&kring->si, PI_NET); 1476 selwakeuppri(tx == NR_TX ? &na->tx_si : &na->rx_si, PI_NET); 1477 return kring->ring->slot; 1478 } 1479 1480 1481 /* 1482 * Default functions to handle rx/tx interrupts 1483 * we have 4 cases: 1484 * 1 ring, single lock: 1485 * lock(core); wake(i=0); unlock(core) 1486 * N rings, single lock: 1487 * lock(core); wake(i); wake(N+1) unlock(core) 1488 * 1 ring, separate locks: (i=0) 1489 * lock(i); wake(i); unlock(i) 1490 * N rings, separate locks: 1491 * lock(i); wake(i); unlock(i); lock(core) wake(N+1) unlock(core) 1492 * work_done is non-null on the RX path. 1493 */ 1494 int 1495 netmap_rx_irq(struct ifnet *ifp, int q, int *work_done) 1496 { 1497 struct netmap_adapter *na; 1498 struct netmap_kring *r; 1499 NM_SELINFO_T *main_wq; 1500 1501 if (!(ifp->if_capenable & IFCAP_NETMAP)) 1502 return 0; 1503 na = NA(ifp); 1504 if (work_done) { /* RX path */ 1505 r = na->rx_rings + q; 1506 r->nr_kflags |= NKR_PENDINTR; 1507 main_wq = (na->num_rx_rings > 1) ? &na->rx_si : NULL; 1508 } else { /* tx path */ 1509 r = na->tx_rings + q; 1510 main_wq = (na->num_tx_rings > 1) ? &na->tx_si : NULL; 1511 work_done = &q; /* dummy */ 1512 } 1513 if (na->separate_locks) { 1514 mtx_lock(&r->q_lock); 1515 selwakeuppri(&r->si, PI_NET); 1516 mtx_unlock(&r->q_lock); 1517 if (main_wq) { 1518 mtx_lock(&na->core_lock); 1519 selwakeuppri(main_wq, PI_NET); 1520 mtx_unlock(&na->core_lock); 1521 } 1522 } else { 1523 mtx_lock(&na->core_lock); 1524 selwakeuppri(&r->si, PI_NET); 1525 if (main_wq) 1526 selwakeuppri(main_wq, PI_NET); 1527 mtx_unlock(&na->core_lock); 1528 } 1529 *work_done = 1; /* do not fire napi again */ 1530 return 1; 1531 } 1532 1533 1534 #ifdef linux /* linux-specific routines */ 1535 1536 /* 1537 * Remap linux arguments into the FreeBSD call. 1538 * - pwait is the poll table, passed as 'dev'; 1539 * If pwait == NULL someone else already woke up before. We can report 1540 * events but they are filtered upstream. 1541 * If pwait != NULL, then pwait->key contains the list of events. 1542 * - events is computed from pwait as above. 1543 * - file is passed as 'td'; 1544 */ 1545 static u_int 1546 linux_netmap_poll(struct file * file, struct poll_table_struct *pwait) 1547 { 1548 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) 1549 int events = pwait ? pwait->key : POLLIN | POLLOUT; 1550 #else /* in 3.4.0 field 'key' was renamed to '_key' */ 1551 int events = pwait ? pwait->_key : POLLIN | POLLOUT; 1552 #endif 1553 return netmap_poll((void *)pwait, events, (void *)file); 1554 } 1555 1556 static int 1557 linux_netmap_mmap(struct file *f, struct vm_area_struct *vma) 1558 { 1559 int lut_skip, i, j; 1560 int user_skip = 0; 1561 struct lut_entry *l_entry; 1562 const struct netmap_obj_pool *p[] = { 1563 nm_mem->nm_if_pool, 1564 nm_mem->nm_ring_pool, 1565 nm_mem->nm_buf_pool }; 1566 /* 1567 * vma->vm_start: start of mapping user address space 1568 * vma->vm_end: end of the mapping user address space 1569 */ 1570 1571 (void)f; /* UNUSED */ 1572 // XXX security checks 1573 1574 for (i = 0; i < 3; i++) { /* loop through obj_pools */ 1575 /* 1576 * In each pool memory is allocated in clusters 1577 * of size _clustsize , each containing clustentries 1578 * entries. For each object k we already store the 1579 * vtophys malling in lut[k] so we use that, scanning 1580 * the lut[] array in steps of clustentries, 1581 * and we map each cluster (not individual pages, 1582 * it would be overkill). 1583 */ 1584 for (lut_skip = 0, j = 0; j < p[i]->_numclusters; j++) { 1585 l_entry = &p[i]->lut[lut_skip]; 1586 if (remap_pfn_range(vma, vma->vm_start + user_skip, 1587 l_entry->paddr >> PAGE_SHIFT, p[i]->_clustsize, 1588 vma->vm_page_prot)) 1589 return -EAGAIN; // XXX check return value 1590 lut_skip += p[i]->clustentries; 1591 user_skip += p[i]->_clustsize; 1592 } 1593 } 1594 1595 return 0; 1596 } 1597 1598 static netdev_tx_t 1599 linux_netmap_start(struct sk_buff *skb, struct net_device *dev) 1600 { 1601 netmap_start(dev, skb); 1602 return (NETDEV_TX_OK); 1603 } 1604 1605 1606 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) // XXX was 38 1607 #define LIN_IOCTL_NAME .ioctl 1608 int 1609 linux_netmap_ioctl(struct inode *inode, struct file *file, u_int cmd, u_long data /* arg */) 1610 #else 1611 #define LIN_IOCTL_NAME .unlocked_ioctl 1612 long 1613 linux_netmap_ioctl(struct file *file, u_int cmd, u_long data /* arg */) 1614 #endif 1615 { 1616 int ret; 1617 struct nmreq nmr; 1618 bzero(&nmr, sizeof(nmr)); 1619 1620 if (data && copy_from_user(&nmr, (void *)data, sizeof(nmr) ) != 0) 1621 return -EFAULT; 1622 ret = netmap_ioctl(NULL, cmd, (caddr_t)&nmr, 0, (void *)file); 1623 if (data && copy_to_user((void*)data, &nmr, sizeof(nmr) ) != 0) 1624 return -EFAULT; 1625 return -ret; 1626 } 1627 1628 1629 static int 1630 netmap_release(struct inode *inode, struct file *file) 1631 { 1632 (void)inode; /* UNUSED */ 1633 if (file->private_data) 1634 netmap_dtor(file->private_data); 1635 return (0); 1636 } 1637 1638 1639 static struct file_operations netmap_fops = { 1640 .mmap = linux_netmap_mmap, 1641 LIN_IOCTL_NAME = linux_netmap_ioctl, 1642 .poll = linux_netmap_poll, 1643 .release = netmap_release, 1644 }; 1645 1646 static struct miscdevice netmap_cdevsw = { /* same name as FreeBSD */ 1647 MISC_DYNAMIC_MINOR, 1648 "netmap", 1649 &netmap_fops, 1650 }; 1651 1652 static int netmap_init(void); 1653 static void netmap_fini(void); 1654 1655 /* Errors have negative values on linux */ 1656 static int linux_netmap_init(void) 1657 { 1658 return -netmap_init(); 1659 } 1660 1661 module_init(linux_netmap_init); 1662 module_exit(netmap_fini); 1663 /* export certain symbols to other modules */ 1664 EXPORT_SYMBOL(netmap_attach); // driver attach routines 1665 EXPORT_SYMBOL(netmap_detach); // driver detach routines 1666 EXPORT_SYMBOL(netmap_ring_reinit); // ring init on error 1667 EXPORT_SYMBOL(netmap_buffer_lut); 1668 EXPORT_SYMBOL(netmap_total_buffers); // index check 1669 EXPORT_SYMBOL(netmap_buffer_base); 1670 EXPORT_SYMBOL(netmap_reset); // ring init routines 1671 EXPORT_SYMBOL(netmap_buf_size); 1672 EXPORT_SYMBOL(netmap_rx_irq); // default irq handler 1673 EXPORT_SYMBOL(netmap_no_pendintr); // XXX mitigation - should go away 1674 1675 1676 MODULE_AUTHOR("http://info.iet.unipi.it/~luigi/netmap/"); 1677 MODULE_DESCRIPTION("The netmap packet I/O framework"); 1678 MODULE_LICENSE("Dual BSD/GPL"); /* the code here is all BSD. */ 1679 1680 #else /* __FreeBSD__ */ 1681 1682 static struct cdevsw netmap_cdevsw = { 1683 .d_version = D_VERSION, 1684 .d_name = "netmap", 1685 .d_mmap = netmap_mmap, 1686 .d_ioctl = netmap_ioctl, 1687 .d_poll = netmap_poll, 1688 }; 1689 #endif /* __FreeBSD__ */ 1690 1691 #ifdef NM_BRIDGE 1692 /* 1693 *---- support for virtual bridge ----- 1694 */ 1695 1696 /* ----- FreeBSD if_bridge hash function ------- */ 1697 1698 /* 1699 * The following hash function is adapted from "Hash Functions" by Bob Jenkins 1700 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). 1701 * 1702 * http://www.burtleburtle.net/bob/hash/spooky.html 1703 */ 1704 #define mix(a, b, c) \ 1705 do { \ 1706 a -= b; a -= c; a ^= (c >> 13); \ 1707 b -= c; b -= a; b ^= (a << 8); \ 1708 c -= a; c -= b; c ^= (b >> 13); \ 1709 a -= b; a -= c; a ^= (c >> 12); \ 1710 b -= c; b -= a; b ^= (a << 16); \ 1711 c -= a; c -= b; c ^= (b >> 5); \ 1712 a -= b; a -= c; a ^= (c >> 3); \ 1713 b -= c; b -= a; b ^= (a << 10); \ 1714 c -= a; c -= b; c ^= (b >> 15); \ 1715 } while (/*CONSTCOND*/0) 1716 1717 static __inline uint32_t 1718 nm_bridge_rthash(const uint8_t *addr) 1719 { 1720 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hask key 1721 1722 b += addr[5] << 8; 1723 b += addr[4]; 1724 a += addr[3] << 24; 1725 a += addr[2] << 16; 1726 a += addr[1] << 8; 1727 a += addr[0]; 1728 1729 mix(a, b, c); 1730 #define BRIDGE_RTHASH_MASK (NM_BDG_HASH-1) 1731 return (c & BRIDGE_RTHASH_MASK); 1732 } 1733 1734 #undef mix 1735 1736 1737 static int 1738 bdg_netmap_reg(struct ifnet *ifp, int onoff) 1739 { 1740 int i, err = 0; 1741 struct nm_bridge *b = ifp->if_bridge; 1742 1743 BDG_LOCK(b); 1744 if (onoff) { 1745 /* the interface must be already in the list. 1746 * only need to mark the port as active 1747 */ 1748 ND("should attach %s to the bridge", ifp->if_xname); 1749 for (i=0; i < NM_BDG_MAXPORTS; i++) 1750 if (b->bdg_ports[i] == ifp) 1751 break; 1752 if (i == NM_BDG_MAXPORTS) { 1753 D("no more ports available"); 1754 err = EINVAL; 1755 goto done; 1756 } 1757 ND("setting %s in netmap mode", ifp->if_xname); 1758 ifp->if_capenable |= IFCAP_NETMAP; 1759 NA(ifp)->bdg_port = i; 1760 b->act_ports |= (1<<i); 1761 b->bdg_ports[i] = ifp; 1762 } else { 1763 /* should be in the list, too -- remove from the mask */ 1764 ND("removing %s from netmap mode", ifp->if_xname); 1765 ifp->if_capenable &= ~IFCAP_NETMAP; 1766 i = NA(ifp)->bdg_port; 1767 b->act_ports &= ~(1<<i); 1768 } 1769 done: 1770 BDG_UNLOCK(b); 1771 return err; 1772 } 1773 1774 1775 static int 1776 nm_bdg_flush(struct nm_bdg_fwd *ft, int n, struct ifnet *ifp) 1777 { 1778 int i, ifn; 1779 uint64_t all_dst, dst; 1780 uint32_t sh, dh; 1781 uint64_t mysrc = 1 << NA(ifp)->bdg_port; 1782 uint64_t smac, dmac; 1783 struct netmap_slot *slot; 1784 struct nm_bridge *b = ifp->if_bridge; 1785 1786 ND("prepare to send %d packets, act_ports 0x%x", n, b->act_ports); 1787 /* only consider valid destinations */ 1788 all_dst = (b->act_ports & ~mysrc); 1789 /* first pass: hash and find destinations */ 1790 for (i = 0; likely(i < n); i++) { 1791 uint8_t *buf = ft[i].buf; 1792 dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff; 1793 smac = le64toh(*(uint64_t *)(buf + 4)); 1794 smac >>= 16; 1795 if (unlikely(netmap_verbose)) { 1796 uint8_t *s = buf+6, *d = buf; 1797 D("%d len %4d %02x:%02x:%02x:%02x:%02x:%02x -> %02x:%02x:%02x:%02x:%02x:%02x", 1798 i, 1799 ft[i].len, 1800 s[0], s[1], s[2], s[3], s[4], s[5], 1801 d[0], d[1], d[2], d[3], d[4], d[5]); 1802 } 1803 /* 1804 * The hash is somewhat expensive, there might be some 1805 * worthwhile optimizations here. 1806 */ 1807 if ((buf[6] & 1) == 0) { /* valid src */ 1808 uint8_t *s = buf+6; 1809 sh = nm_bridge_rthash(buf+6); // XXX hash of source 1810 /* update source port forwarding entry */ 1811 b->ht[sh].mac = smac; /* XXX expire ? */ 1812 b->ht[sh].ports = mysrc; 1813 if (netmap_verbose) 1814 D("src %02x:%02x:%02x:%02x:%02x:%02x on port %d", 1815 s[0], s[1], s[2], s[3], s[4], s[5], NA(ifp)->bdg_port); 1816 } 1817 dst = 0; 1818 if ( (buf[0] & 1) == 0) { /* unicast */ 1819 uint8_t *d = buf; 1820 dh = nm_bridge_rthash(buf); // XXX hash of dst 1821 if (b->ht[dh].mac == dmac) { /* found dst */ 1822 dst = b->ht[dh].ports; 1823 if (netmap_verbose) 1824 D("dst %02x:%02x:%02x:%02x:%02x:%02x to port %x", 1825 d[0], d[1], d[2], d[3], d[4], d[5], (uint32_t)(dst >> 16)); 1826 } 1827 } 1828 if (dst == 0) 1829 dst = all_dst; 1830 dst &= all_dst; /* only consider valid ports */ 1831 if (unlikely(netmap_verbose)) 1832 D("pkt goes to ports 0x%x", (uint32_t)dst); 1833 ft[i].dst = dst; 1834 } 1835 1836 /* second pass, scan interfaces and forward */ 1837 all_dst = (b->act_ports & ~mysrc); 1838 for (ifn = 0; all_dst; ifn++) { 1839 struct ifnet *dst_ifp = b->bdg_ports[ifn]; 1840 struct netmap_adapter *na; 1841 struct netmap_kring *kring; 1842 struct netmap_ring *ring; 1843 int j, lim, sent, locked; 1844 1845 if (!dst_ifp) 1846 continue; 1847 ND("scan port %d %s", ifn, dst_ifp->if_xname); 1848 dst = 1 << ifn; 1849 if ((dst & all_dst) == 0) /* skip if not set */ 1850 continue; 1851 all_dst &= ~dst; /* clear current node */ 1852 na = NA(dst_ifp); 1853 1854 ring = NULL; 1855 kring = NULL; 1856 lim = sent = locked = 0; 1857 /* inside, scan slots */ 1858 for (i = 0; likely(i < n); i++) { 1859 if ((ft[i].dst & dst) == 0) 1860 continue; /* not here */ 1861 if (!locked) { 1862 kring = &na->rx_rings[0]; 1863 ring = kring->ring; 1864 lim = kring->nkr_num_slots - 1; 1865 na->nm_lock(dst_ifp, NETMAP_RX_LOCK, 0); 1866 locked = 1; 1867 } 1868 if (unlikely(kring->nr_hwavail >= lim)) { 1869 if (netmap_verbose) 1870 D("rx ring full on %s", ifp->if_xname); 1871 break; 1872 } 1873 j = kring->nr_hwcur + kring->nr_hwavail; 1874 if (j > lim) 1875 j -= kring->nkr_num_slots; 1876 slot = &ring->slot[j]; 1877 ND("send %d %d bytes at %s:%d", i, ft[i].len, dst_ifp->if_xname, j); 1878 pkt_copy(ft[i].buf, NMB(slot), ft[i].len); 1879 slot->len = ft[i].len; 1880 kring->nr_hwavail++; 1881 sent++; 1882 } 1883 if (locked) { 1884 ND("sent %d on %s", sent, dst_ifp->if_xname); 1885 if (sent) 1886 selwakeuppri(&kring->si, PI_NET); 1887 na->nm_lock(dst_ifp, NETMAP_RX_UNLOCK, 0); 1888 } 1889 } 1890 return 0; 1891 } 1892 1893 /* 1894 * main dispatch routine 1895 */ 1896 static int 1897 bdg_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock) 1898 { 1899 struct netmap_adapter *na = NA(ifp); 1900 struct netmap_kring *kring = &na->tx_rings[ring_nr]; 1901 struct netmap_ring *ring = kring->ring; 1902 int i, j, k, lim = kring->nkr_num_slots - 1; 1903 struct nm_bdg_fwd *ft = (struct nm_bdg_fwd *)(ifp + 1); 1904 int ft_i; /* position in the forwarding table */ 1905 1906 k = ring->cur; 1907 if (k > lim) 1908 return netmap_ring_reinit(kring); 1909 if (do_lock) 1910 na->nm_lock(ifp, NETMAP_TX_LOCK, ring_nr); 1911 1912 if (netmap_bridge <= 0) { /* testing only */ 1913 j = k; // used all 1914 goto done; 1915 } 1916 if (netmap_bridge > NM_BDG_BATCH) 1917 netmap_bridge = NM_BDG_BATCH; 1918 1919 ft_i = 0; /* start from 0 */ 1920 for (j = kring->nr_hwcur; likely(j != k); j = unlikely(j == lim) ? 0 : j+1) { 1921 struct netmap_slot *slot = &ring->slot[j]; 1922 int len = ft[ft_i].len = slot->len; 1923 char *buf = ft[ft_i].buf = NMB(slot); 1924 1925 prefetch(buf); 1926 if (unlikely(len < 14)) 1927 continue; 1928 if (unlikely(++ft_i == netmap_bridge)) 1929 ft_i = nm_bdg_flush(ft, ft_i, ifp); 1930 } 1931 if (ft_i) 1932 ft_i = nm_bdg_flush(ft, ft_i, ifp); 1933 /* count how many packets we sent */ 1934 i = k - j; 1935 if (i < 0) 1936 i += kring->nkr_num_slots; 1937 kring->nr_hwavail = kring->nkr_num_slots - 1 - i; 1938 if (j != k) 1939 D("early break at %d/ %d, avail %d", j, k, kring->nr_hwavail); 1940 1941 done: 1942 kring->nr_hwcur = j; 1943 ring->avail = kring->nr_hwavail; 1944 if (do_lock) 1945 na->nm_lock(ifp, NETMAP_TX_UNLOCK, ring_nr); 1946 1947 if (netmap_verbose) 1948 D("%s ring %d lock %d", ifp->if_xname, ring_nr, do_lock); 1949 return 0; 1950 } 1951 1952 static int 1953 bdg_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock) 1954 { 1955 struct netmap_adapter *na = NA(ifp); 1956 struct netmap_kring *kring = &na->rx_rings[ring_nr]; 1957 struct netmap_ring *ring = kring->ring; 1958 u_int j, n, lim = kring->nkr_num_slots - 1; 1959 u_int k = ring->cur, resvd = ring->reserved; 1960 1961 ND("%s ring %d lock %d avail %d", 1962 ifp->if_xname, ring_nr, do_lock, kring->nr_hwavail); 1963 1964 if (k > lim) 1965 return netmap_ring_reinit(kring); 1966 if (do_lock) 1967 na->nm_lock(ifp, NETMAP_RX_LOCK, ring_nr); 1968 1969 /* skip past packets that userspace has released */ 1970 j = kring->nr_hwcur; /* netmap ring index */ 1971 if (resvd > 0) { 1972 if (resvd + ring->avail >= lim + 1) { 1973 D("XXX invalid reserve/avail %d %d", resvd, ring->avail); 1974 ring->reserved = resvd = 0; // XXX panic... 1975 } 1976 k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; 1977 } 1978 1979 if (j != k) { /* userspace has released some packets. */ 1980 n = k - j; 1981 if (n < 0) 1982 n += kring->nkr_num_slots; 1983 ND("userspace releases %d packets", n); 1984 for (n = 0; likely(j != k); n++) { 1985 struct netmap_slot *slot = &ring->slot[j]; 1986 void *addr = NMB(slot); 1987 1988 if (addr == netmap_buffer_base) { /* bad buf */ 1989 if (do_lock) 1990 na->nm_lock(ifp, NETMAP_RX_UNLOCK, ring_nr); 1991 return netmap_ring_reinit(kring); 1992 } 1993 /* decrease refcount for buffer */ 1994 1995 slot->flags &= ~NS_BUF_CHANGED; 1996 j = unlikely(j == lim) ? 0 : j + 1; 1997 } 1998 kring->nr_hwavail -= n; 1999 kring->nr_hwcur = k; 2000 } 2001 /* tell userspace that there are new packets */ 2002 ring->avail = kring->nr_hwavail - resvd; 2003 2004 if (do_lock) 2005 na->nm_lock(ifp, NETMAP_RX_UNLOCK, ring_nr); 2006 return 0; 2007 } 2008 2009 static void 2010 bdg_netmap_attach(struct ifnet *ifp) 2011 { 2012 struct netmap_adapter na; 2013 2014 ND("attaching virtual bridge"); 2015 bzero(&na, sizeof(na)); 2016 2017 na.ifp = ifp; 2018 na.separate_locks = 1; 2019 na.num_tx_desc = NM_BRIDGE_RINGSIZE; 2020 na.num_rx_desc = NM_BRIDGE_RINGSIZE; 2021 na.nm_txsync = bdg_netmap_txsync; 2022 na.nm_rxsync = bdg_netmap_rxsync; 2023 na.nm_register = bdg_netmap_reg; 2024 netmap_attach(&na, 1); 2025 } 2026 2027 #endif /* NM_BRIDGE */ 2028 2029 static struct cdev *netmap_dev; /* /dev/netmap character device. */ 2030 2031 2032 /* 2033 * Module loader. 2034 * 2035 * Create the /dev/netmap device and initialize all global 2036 * variables. 2037 * 2038 * Return 0 on success, errno on failure. 2039 */ 2040 static int 2041 netmap_init(void) 2042 { 2043 int error; 2044 2045 error = netmap_memory_init(); 2046 if (error != 0) { 2047 printf("netmap: unable to initialize the memory allocator.\n"); 2048 return (error); 2049 } 2050 printf("netmap: loaded module with %d Mbytes\n", 2051 (int)(nm_mem->nm_totalsize >> 20)); 2052 netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660, 2053 "netmap"); 2054 2055 #ifdef NM_BRIDGE 2056 { 2057 int i; 2058 for (i = 0; i < NM_BRIDGES; i++) 2059 mtx_init(&nm_bridges[i].bdg_lock, "bdg lock", "bdg_lock", MTX_DEF); 2060 } 2061 #endif 2062 return (error); 2063 } 2064 2065 2066 /* 2067 * Module unloader. 2068 * 2069 * Free all the memory, and destroy the ``/dev/netmap`` device. 2070 */ 2071 static void 2072 netmap_fini(void) 2073 { 2074 destroy_dev(netmap_dev); 2075 netmap_memory_fini(); 2076 printf("netmap: unloaded module.\n"); 2077 } 2078 2079 2080 #ifdef __FreeBSD__ 2081 /* 2082 * Kernel entry point. 2083 * 2084 * Initialize/finalize the module and return. 2085 * 2086 * Return 0 on success, errno on failure. 2087 */ 2088 static int 2089 netmap_loader(__unused struct module *module, int event, __unused void *arg) 2090 { 2091 int error = 0; 2092 2093 switch (event) { 2094 case MOD_LOAD: 2095 error = netmap_init(); 2096 break; 2097 2098 case MOD_UNLOAD: 2099 netmap_fini(); 2100 break; 2101 2102 default: 2103 error = EOPNOTSUPP; 2104 break; 2105 } 2106 2107 return (error); 2108 } 2109 2110 2111 DEV_MODULE(netmap, netmap_loader, NULL); 2112 #endif /* __FreeBSD__ */ 2113