/*
 * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap.c 9795 2011-12-02 11:39:08Z luigi $
 *
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    select()able file descriptors on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
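 *
 * Purely as an illustration of the steps above (this sketch is not part
 * of the module; "em0" is just an example interface and error handling
 * is omitted, while the ioctls and nmreq fields are the ones defined in
 * net/netmap.h):
 *
 *	struct nmreq req;
 *	int fd = open("/dev/netmap", O_RDWR);			// step 1
 *	bzero(&req, sizeof(req));
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);				// step 2
 *	char *mem = mmap(0, req.nr_memsize,			// step 3
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp =				// step 4
 *	    (struct netmap_if *)(mem + req.nr_offset);
 *	for (;;) {
 *		struct pollfd x = { .fd = fd, .events = POLLIN };
 *		poll(&x, 1, -1);				// step 6
 *		// step 5: walk the rings reachable from nifp, consume
 *		// or fill slots, advance ring->cur, then NIOCRXSYNC or
 *		// NIOCTXSYNC resynchronize with the kernel when needed
 *	}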
56 */ 57 58 #include <sys/cdefs.h> /* prerequisite */ 59 __FBSDID("$FreeBSD$"); 60 61 #include <sys/types.h> 62 #include <sys/module.h> 63 #include <sys/errno.h> 64 #include <sys/param.h> /* defines used in kernel.h */ 65 #include <sys/jail.h> 66 #include <sys/kernel.h> /* types used in module initialization */ 67 #include <sys/conf.h> /* cdevsw struct */ 68 #include <sys/uio.h> /* uio struct */ 69 #include <sys/sockio.h> 70 #include <sys/socketvar.h> /* struct socket */ 71 #include <sys/malloc.h> 72 #include <sys/mman.h> /* PROT_EXEC */ 73 #include <sys/poll.h> 74 #include <sys/proc.h> 75 #include <vm/vm.h> /* vtophys */ 76 #include <vm/pmap.h> /* vtophys */ 77 #include <sys/socket.h> /* sockaddrs */ 78 #include <machine/bus.h> 79 #include <sys/selinfo.h> 80 #include <sys/sysctl.h> 81 #include <net/if.h> 82 #include <net/bpf.h> /* BIOCIMMEDIATE */ 83 #include <net/vnet.h> 84 #include <net/netmap.h> 85 #include <dev/netmap/netmap_kern.h> 86 #include <machine/bus.h> /* bus_dmamap_* */ 87 88 MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map"); 89 90 /* 91 * lock and unlock for the netmap memory allocator 92 */ 93 #define NMA_LOCK() mtx_lock(&netmap_mem_d->nm_mtx); 94 #define NMA_UNLOCK() mtx_unlock(&netmap_mem_d->nm_mtx); 95 struct netmap_mem_d; 96 static struct netmap_mem_d *netmap_mem_d; /* Our memory allocator. */ 97 98 u_int netmap_total_buffers; 99 char *netmap_buffer_base; /* address of an invalid buffer */ 100 101 /* user-controlled variables */ 102 int netmap_verbose; 103 104 static int netmap_no_timestamp; /* don't timestamp on rxsync */ 105 106 SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args"); 107 SYSCTL_INT(_dev_netmap, OID_AUTO, verbose, 108 CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode"); 109 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp, 110 CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp"); 111 int netmap_buf_size = 2048; 112 TUNABLE_INT("hw.netmap.buf_size", &netmap_buf_size); 113 SYSCTL_INT(_dev_netmap, OID_AUTO, buf_size, 114 CTLFLAG_RD, &netmap_buf_size, 0, "Size of packet buffers"); 115 int netmap_mitigate = 1; 116 SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, ""); 117 int netmap_no_pendintr; 118 SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, 119 CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets."); 120 121 122 123 /*----- memory allocator -----------------*/ 124 /* 125 * Here we have the low level routines for memory allocator 126 * and its primary users. 127 */ 128 129 /* 130 * Default amount of memory pre-allocated by the module. 131 * We start with a large size and then shrink our demand 132 * according to what is avalable when the module is loaded. 133 * At the moment the block is contiguous, but we can easily 134 * restrict our demand to smaller units (16..64k) 135 */ 136 #define NETMAP_MEMORY_SIZE (64 * 1024 * PAGE_SIZE) 137 static void * netmap_malloc(size_t size, const char *msg); 138 static void netmap_free(void *addr, const char *msg); 139 140 #define netmap_if_malloc(len) netmap_malloc(len, "nifp") 141 #define netmap_if_free(v) netmap_free((v), "nifp") 142 143 #define netmap_ring_malloc(len) netmap_malloc(len, "ring") 144 #define netmap_free_rings(na) \ 145 netmap_free((na)->tx_rings[0].ring, "shadow rings"); 146 147 /* 148 * Allocator for a pool of packet buffers. For each buffer we have 149 * one entry in the bitmap to signal the state. 
Allocation scans 150 * the bitmap, but since this is done only on attach, we are not 151 * too worried about performance 152 * XXX if we need to allocate small blocks, a translation 153 * table is used both for kernel virtual address and physical 154 * addresses. 155 */ 156 struct netmap_buf_pool { 157 u_int total_buffers; /* total buffers. */ 158 u_int free; 159 u_int bufsize; 160 char *base; /* buffer base address */ 161 uint32_t *bitmap; /* one bit per buffer, 1 means free */ 162 }; 163 struct netmap_buf_pool nm_buf_pool; 164 SYSCTL_INT(_dev_netmap, OID_AUTO, total_buffers, 165 CTLFLAG_RD, &nm_buf_pool.total_buffers, 0, "total_buffers"); 166 SYSCTL_INT(_dev_netmap, OID_AUTO, free_buffers, 167 CTLFLAG_RD, &nm_buf_pool.free, 0, "free_buffers"); 168 169 170 171 172 /* 173 * Allocate n buffers from the ring, and fill the slot. 174 * Buffer 0 is the 'junk' buffer. 175 */ 176 static void 177 netmap_new_bufs(struct netmap_if *nifp __unused, 178 struct netmap_slot *slot, u_int n) 179 { 180 struct netmap_buf_pool *p = &nm_buf_pool; 181 uint32_t bi = 0; /* index in the bitmap */ 182 uint32_t mask, j, i = 0; /* slot counter */ 183 184 if (n > p->free) { 185 D("only %d out of %d buffers available", i, n); 186 return; 187 } 188 /* termination is guaranteed by p->free */ 189 while (i < n && p->free > 0) { 190 uint32_t cur = p->bitmap[bi]; 191 if (cur == 0) { /* bitmask is fully used */ 192 bi++; 193 continue; 194 } 195 /* locate a slot */ 196 for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) ; 197 p->bitmap[bi] &= ~mask; /* slot in use */ 198 p->free--; 199 slot[i].buf_idx = bi*32+j; 200 slot[i].len = p->bufsize; 201 slot[i].flags = NS_BUF_CHANGED; 202 i++; 203 } 204 ND("allocated %d buffers, %d available", n, p->free); 205 } 206 207 208 static void 209 netmap_free_buf(struct netmap_if *nifp __unused, uint32_t i) 210 { 211 struct netmap_buf_pool *p = &nm_buf_pool; 212 213 uint32_t pos, mask; 214 if (i >= p->total_buffers) { 215 D("invalid free index %d", i); 216 return; 217 } 218 pos = i / 32; 219 mask = 1 << (i % 32); 220 if (p->bitmap[pos] & mask) { 221 D("slot %d already free", i); 222 return; 223 } 224 p->bitmap[pos] |= mask; 225 p->free++; 226 } 227 228 229 /* Descriptor of the memory objects handled by our memory allocator. */ 230 struct netmap_mem_obj { 231 TAILQ_ENTRY(netmap_mem_obj) nmo_next; /* next object in the 232 chain. */ 233 int nmo_used; /* flag set on used memory objects. */ 234 size_t nmo_size; /* size of the memory area reserved for the 235 object. */ 236 void *nmo_data; /* pointer to the memory area. */ 237 }; 238 239 /* Wrap our memory objects to make them ``chainable``. */ 240 TAILQ_HEAD(netmap_mem_obj_h, netmap_mem_obj); 241 242 243 /* Descriptor of our custom memory allocator. */ 244 struct netmap_mem_d { 245 struct mtx nm_mtx; /* lock used to handle the chain of memory 246 objects. */ 247 struct netmap_mem_obj_h nm_molist; /* list of memory objects */ 248 size_t nm_size; /* total amount of memory used for rings etc. */ 249 size_t nm_totalsize; /* total amount of allocated memory 250 (the difference is used for buffers) */ 251 size_t nm_buf_start; /* offset of packet buffers. 252 This is page-aligned. */ 253 size_t nm_buf_len; /* total memory for buffers */ 254 void *nm_buffer; /* pointer to the whole pre-allocated memory 255 area. */ 256 }; 257 258 /* Shorthand to compute a netmap interface offset. */ 259 #define netmap_if_offset(v) \ 260 ((char *) (v) - (char *) netmap_mem_d->nm_buffer) 261 /* .. 
and get a physical address given a memory offset */ 262 #define netmap_ofstophys(o) \ 263 (vtophys(netmap_mem_d->nm_buffer) + (o)) 264 265 266 /*------ netmap memory allocator -------*/ 267 /* 268 * Request for a chunk of memory. 269 * 270 * Memory objects are arranged into a list, hence we need to walk this 271 * list until we find an object with the needed amount of data free. 272 * This sounds like a completely inefficient implementation, but given 273 * the fact that data allocation is done once, we can handle it 274 * flawlessly. 275 * 276 * Return NULL on failure. 277 */ 278 static void * 279 netmap_malloc(size_t size, __unused const char *msg) 280 { 281 struct netmap_mem_obj *mem_obj, *new_mem_obj; 282 void *ret = NULL; 283 284 NMA_LOCK(); 285 TAILQ_FOREACH(mem_obj, &netmap_mem_d->nm_molist, nmo_next) { 286 if (mem_obj->nmo_used != 0 || mem_obj->nmo_size < size) 287 continue; 288 289 new_mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP, 290 M_WAITOK | M_ZERO); 291 TAILQ_INSERT_BEFORE(mem_obj, new_mem_obj, nmo_next); 292 293 new_mem_obj->nmo_used = 1; 294 new_mem_obj->nmo_size = size; 295 new_mem_obj->nmo_data = mem_obj->nmo_data; 296 memset(new_mem_obj->nmo_data, 0, new_mem_obj->nmo_size); 297 298 mem_obj->nmo_size -= size; 299 mem_obj->nmo_data = (char *) mem_obj->nmo_data + size; 300 if (mem_obj->nmo_size == 0) { 301 TAILQ_REMOVE(&netmap_mem_d->nm_molist, mem_obj, 302 nmo_next); 303 free(mem_obj, M_NETMAP); 304 } 305 306 ret = new_mem_obj->nmo_data; 307 308 break; 309 } 310 NMA_UNLOCK(); 311 ND("%s: %d bytes at %p", msg, size, ret); 312 313 return (ret); 314 } 315 316 /* 317 * Return the memory to the allocator. 318 * 319 * While freeing a memory object, we try to merge adjacent chunks in 320 * order to reduce memory fragmentation. 321 */ 322 static void 323 netmap_free(void *addr, const char *msg) 324 { 325 size_t size; 326 struct netmap_mem_obj *cur, *prev, *next; 327 328 if (addr == NULL) { 329 D("NULL addr for %s", msg); 330 return; 331 } 332 333 NMA_LOCK(); 334 TAILQ_FOREACH(cur, &netmap_mem_d->nm_molist, nmo_next) { 335 if (cur->nmo_data == addr && cur->nmo_used) 336 break; 337 } 338 if (cur == NULL) { 339 NMA_UNLOCK(); 340 D("invalid addr %s %p", msg, addr); 341 return; 342 } 343 344 size = cur->nmo_size; 345 cur->nmo_used = 0; 346 347 /* merge current chunk of memory with the previous one, 348 if present. */ 349 prev = TAILQ_PREV(cur, netmap_mem_obj_h, nmo_next); 350 if (prev && prev->nmo_used == 0) { 351 TAILQ_REMOVE(&netmap_mem_d->nm_molist, cur, nmo_next); 352 prev->nmo_size += cur->nmo_size; 353 free(cur, M_NETMAP); 354 cur = prev; 355 } 356 357 /* merge with the next one */ 358 next = TAILQ_NEXT(cur, nmo_next); 359 if (next && next->nmo_used == 0) { 360 TAILQ_REMOVE(&netmap_mem_d->nm_molist, next, nmo_next); 361 cur->nmo_size += next->nmo_size; 362 free(next, M_NETMAP); 363 } 364 NMA_UNLOCK(); 365 ND("freed %s %d bytes at %p", msg, size, addr); 366 } 367 368 369 /* 370 * Create and return a new ``netmap_if`` object, and possibly also 371 * rings and packet buffors. 372 * 373 * Return NULL on failure. 374 */ 375 static void * 376 netmap_if_new(const char *ifname, struct netmap_adapter *na) 377 { 378 struct netmap_if *nifp; 379 struct netmap_ring *ring; 380 char *buff; 381 u_int i, len, ofs; 382 u_int n = na->num_queues + 1; /* shorthand, include stack queue */ 383 384 /* 385 * the descriptor is followed inline by an array of offsets 386 * to the tx and rx rings in the shared memory region. 
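	 *
	 * To make the layout concrete (an informal sketch, not a normative
	 * definition), what userspace finds at nr_offset is
	 *
	 *	struct netmap_if nifp;
	 *	ssize_t ring_ofs[2*n];	// n = num_queues + 1 (hw + host)
	 *
	 * where ring_ofs[i] is the offset of tx ring i and ring_ofs[i+n]
	 * that of rx ring i, both relative to nifp itself, so a ring can
	 * be reached with pointer arithmetic such as
	 *
	 *	struct netmap_ring *txr = (struct netmap_ring *)
	 *	    ((char *)nifp + nifp->ring_ofs[i]);
	 *
	 * (this is essentially what the NETMAP_TXRING()/NETMAP_RXRING()
	 * helpers in the userspace headers boil down to).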
	 */
	len = sizeof(struct netmap_if) + 2 * n * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL)
		return (NULL);

	/* initialize base fields */
	*(int *)(uintptr_t)&nifp->ni_num_queues = na->num_queues;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1)
		goto final;

	/*
	 * If this is the first instance, allocate the shadow rings and
	 * buffers for this card (one for each hw queue, one for the host).
	 * The rings are contiguous, but have variable size.
	 * The entire block is reachable at
	 *	na->tx_rings[0].ring
	 */

	len = n * (2 * sizeof(struct netmap_ring) +
		  (na->num_tx_desc + na->num_rx_desc) *
			sizeof(struct netmap_slot) );
	buff = netmap_ring_malloc(len);
	if (buff == NULL) {
		D("failed to allocate %d bytes for %s shadow ring",
			len, ifname);
error:
		(na->refcount)--;
		netmap_if_free(nifp);
		return (NULL);
	}
	/* do we have the buffers ? we need num_tx_desc buffers for
	 * each tx ring and num_rx_desc buffers for each rx ring. */
	len = n * (na->num_tx_desc + na->num_rx_desc);
	NMA_LOCK();
	if (nm_buf_pool.free < len) {
		NMA_UNLOCK();
		netmap_free(buff, "not enough bufs");
		goto error;
	}
	/*
	 * in the kring, store the pointers to the shared rings
	 * and initialize the rings. We are under NMA_LOCK().
	 */
	ofs = 0;
	for (i = 0; i < n; i++) {
		struct netmap_kring *kring;
		int numdesc;

		/* Transmit rings */
		kring = &na->tx_rings[i];
		numdesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		kring->na = na;

		ring = kring->ring = (struct netmap_ring *)(buff + ofs);
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
			nm_buf_pool.base - (char *)ring;
		ND("txring[%d] at %p ofs %d", i, ring, ring->buf_ofs);
		*(uint32_t *)(uintptr_t)&ring->num_slots =
			kring->nkr_num_slots = numdesc;

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = numdesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		netmap_new_bufs(nifp, ring->slot, numdesc);

		ofs += sizeof(struct netmap_ring) +
			numdesc * sizeof(struct netmap_slot);

		/* Receive rings */
		kring = &na->rx_rings[i];
		numdesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		kring->na = na;

		ring = kring->ring = (struct netmap_ring *)(buff + ofs);
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
			nm_buf_pool.base - (char *)ring;
		ND("rxring[%d] at %p offset %d", i, ring, ring->buf_ofs);
		*(uint32_t *)(uintptr_t)&ring->num_slots =
			kring->nkr_num_slots = numdesc;
		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		netmap_new_bufs(nifp, ring->slot, numdesc);
		ofs += sizeof(struct netmap_ring) +
			numdesc * sizeof(struct netmap_slot);
	}
	NMA_UNLOCK();
	for (i = 0; i < n+1; i++) {
		// XXX initialize the selrecord structs.
	}
final:
	/*
	 * fill the slots for the rx and tx queues. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	for (i = 0; i < n; i++) {
		char *base = (char *)nifp;
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			(char *)na->tx_rings[i].ring - base;
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n] =
			(char *)na->rx_rings[i].ring - base;
	}
	return (nifp);
}

/*
 * Initialize the memory allocator.
 *
 * Create the descriptor for the memory, allocate the pool of memory
 * and initialize the list of memory objects with a single chunk
 * containing the whole pre-allocated memory marked as free.
 *
 * Start with a large size, then halve as needed if we fail to
 * allocate the block. While halving, always add one extra page
 * because buffers 0 and 1 are used for special purposes.
 * Return 0 on success, errno otherwise.
 */
static int
netmap_memory_init(void)
{
	struct netmap_mem_obj *mem_obj;
	void *buf = NULL;
	int i, n, sz = NETMAP_MEMORY_SIZE;
	int extra_sz = 0; // space for rings and two spare buffers

	for (; sz >= 1<<20; sz >>=1) {
		extra_sz = sz/200;
		extra_sz = (extra_sz + 2*PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
		buf = contigmalloc(sz + extra_sz,
			     M_NETMAP,
			     M_WAITOK | M_ZERO,
			     0, /* low address */
			     -1UL, /* high address */
			     PAGE_SIZE, /* alignment */
			     0 /* boundary */
			    );
		if (buf)
			break;
	}
	if (buf == NULL)
		return (ENOMEM);
	sz += extra_sz;
	netmap_mem_d = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
			      M_WAITOK | M_ZERO);
	mtx_init(&netmap_mem_d->nm_mtx, "netmap memory allocator lock", NULL,
		 MTX_DEF);
	TAILQ_INIT(&netmap_mem_d->nm_molist);
	netmap_mem_d->nm_buffer = buf;
	netmap_mem_d->nm_totalsize = sz;

	/*
	 * A buffer takes 2k, a slot takes 8 bytes + ring overhead,
	 * so the ratio is 200:1. In other words, we can use 1/200 of
	 * the memory for the rings, and the rest for the buffers,
	 * and be sure we never run out.
	 */
	netmap_mem_d->nm_size = sz/200;
	netmap_mem_d->nm_buf_start =
		(netmap_mem_d->nm_size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
	netmap_mem_d->nm_buf_len = sz - netmap_mem_d->nm_buf_start;

	nm_buf_pool.base = netmap_mem_d->nm_buffer;
	nm_buf_pool.base += netmap_mem_d->nm_buf_start;
	netmap_buffer_base = nm_buf_pool.base;
	D("netmap_buffer_base %p (offset %d)",
		netmap_buffer_base, (int)netmap_mem_d->nm_buf_start);
	/* number of buffers, they all start as free */

	netmap_total_buffers = nm_buf_pool.total_buffers =
		netmap_mem_d->nm_buf_len / NETMAP_BUF_SIZE;
	nm_buf_pool.bufsize = NETMAP_BUF_SIZE;

	D("Have %d MB, use %dKB for rings, %d buffers at %p",
		(sz >> 20), (int)(netmap_mem_d->nm_size >> 10),
		nm_buf_pool.total_buffers, nm_buf_pool.base);

	/* allocate and initialize the bitmap. Entries 0 and 1 are considered
	 * always busy (entry 0 is also used as the default when there are
	 * no buffers left).
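	 *
	 * As a worked example (numbers only for illustration): with 65536
	 * buffers the bitmap needs 65536/32 = 2048 uint32_t words, and
	 * buffer i is tracked by bit (i % 32) of word (i / 32), the same
	 * mapping used by netmap_new_bufs() and netmap_free_buf() above:
	 *
	 *	pos  = i / 32;
	 *	mask = 1 << (i % 32);
	 *	// bit set   -> buffer i is free
	 *	// bit clear -> buffer i is in use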
578 */ 579 n = (nm_buf_pool.total_buffers + 31) / 32; 580 nm_buf_pool.bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, 581 M_WAITOK | M_ZERO); 582 nm_buf_pool.bitmap[0] = ~3; /* slot 0 and 1 always busy */ 583 for (i = 1; i < n; i++) 584 nm_buf_pool.bitmap[i] = ~0; 585 nm_buf_pool.free = nm_buf_pool.total_buffers - 2; 586 587 mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP, 588 M_WAITOK | M_ZERO); 589 TAILQ_INSERT_HEAD(&netmap_mem_d->nm_molist, mem_obj, nmo_next); 590 mem_obj->nmo_used = 0; 591 mem_obj->nmo_size = netmap_mem_d->nm_size; 592 mem_obj->nmo_data = netmap_mem_d->nm_buffer; 593 594 return (0); 595 } 596 597 598 /* 599 * Finalize the memory allocator. 600 * 601 * Free all the memory objects contained inside the list, and deallocate 602 * the pool of memory; finally free the memory allocator descriptor. 603 */ 604 static void 605 netmap_memory_fini(void) 606 { 607 struct netmap_mem_obj *mem_obj; 608 609 while (!TAILQ_EMPTY(&netmap_mem_d->nm_molist)) { 610 mem_obj = TAILQ_FIRST(&netmap_mem_d->nm_molist); 611 TAILQ_REMOVE(&netmap_mem_d->nm_molist, mem_obj, nmo_next); 612 if (mem_obj->nmo_used == 1) { 613 printf("netmap: leaked %d bytes at %p\n", 614 (int)mem_obj->nmo_size, 615 mem_obj->nmo_data); 616 } 617 free(mem_obj, M_NETMAP); 618 } 619 contigfree(netmap_mem_d->nm_buffer, netmap_mem_d->nm_totalsize, M_NETMAP); 620 // XXX mutex_destroy(nm_mtx); 621 free(netmap_mem_d, M_NETMAP); 622 } 623 /*------------- end of memory allocator -----------------*/ 624 625 626 /* Structure associated to each thread which registered an interface. */ 627 struct netmap_priv_d { 628 struct netmap_if *np_nifp; /* netmap interface descriptor. */ 629 630 struct ifnet *np_ifp; /* device for which we hold a reference */ 631 int np_ringid; /* from the ioctl */ 632 u_int np_qfirst, np_qlast; /* range of rings to scan */ 633 uint16_t np_txpoll; 634 }; 635 636 637 static struct cdev *netmap_dev; /* /dev/netmap character device. */ 638 639 640 static d_mmap_t netmap_mmap; 641 static d_ioctl_t netmap_ioctl; 642 static d_poll_t netmap_poll; 643 644 #ifdef NETMAP_KEVENT 645 static d_kqfilter_t netmap_kqfilter; 646 #endif 647 648 static struct cdevsw netmap_cdevsw = { 649 .d_version = D_VERSION, 650 .d_name = "netmap", 651 .d_mmap = netmap_mmap, 652 .d_ioctl = netmap_ioctl, 653 .d_poll = netmap_poll, 654 #ifdef NETMAP_KEVENT 655 .d_kqfilter = netmap_kqfilter, 656 #endif 657 }; 658 659 #ifdef NETMAP_KEVENT 660 static int netmap_kqread(struct knote *, long); 661 static int netmap_kqwrite(struct knote *, long); 662 static void netmap_kqdetach(struct knote *); 663 664 static struct filterops netmap_read_filterops = { 665 .f_isfd = 1, 666 .f_attach = NULL, 667 .f_detach = netmap_kqdetach, 668 .f_event = netmap_kqread, 669 }; 670 671 static struct filterops netmap_write_filterops = { 672 .f_isfd = 1, 673 .f_attach = NULL, 674 .f_detach = netmap_kqdetach, 675 .f_event = netmap_kqwrite, 676 }; 677 678 /* 679 * support for the kevent() system call. 680 * 681 * This is the kevent filter, and is executed each time a new event 682 * is triggered on the device. This function execute some operation 683 * depending on the received filter. 684 * 685 * The implementation should test the filters and should implement 686 * filter operations we are interested on (a full list in /sys/event.h). 687 * 688 * On a match we should: 689 * - set kn->kn_fop 690 * - set kn->kn_hook 691 * - call knlist_add() to deliver the event to the application. 692 * 693 * Return 0 if the event should be delivered to the application. 
694 */ 695 static int 696 netmap_kqfilter(struct cdev *dev, struct knote *kn) 697 { 698 /* declare variables needed to read/write */ 699 700 switch(kn->kn_filter) { 701 case EVFILT_READ: 702 if (netmap_verbose) 703 D("%s kqfilter: EVFILT_READ" ifp->if_xname); 704 705 /* read operations */ 706 kn->kn_fop = &netmap_read_filterops; 707 break; 708 709 case EVFILT_WRITE: 710 if (netmap_verbose) 711 D("%s kqfilter: EVFILT_WRITE" ifp->if_xname); 712 713 /* write operations */ 714 kn->kn_fop = &netmap_write_filterops; 715 break; 716 717 default: 718 if (netmap_verbose) 719 D("%s kqfilter: invalid filter" ifp->if_xname); 720 return(EINVAL); 721 } 722 723 kn->kn_hook = 0;// 724 knlist_add(&netmap_sc->tun_rsel.si_note, kn, 0); 725 726 return (0); 727 } 728 #endif /* NETMAP_KEVENT */ 729 730 731 /* 732 * File descriptor's private data destructor. 733 * 734 * Call nm_register(ifp,0) to stop netmap mode on the interface and 735 * revert to normal operation. We expect that np_ifp has not gone. 736 */ 737 static void 738 netmap_dtor_locked(void *data) 739 { 740 struct netmap_priv_d *priv = data; 741 struct ifnet *ifp = priv->np_ifp; 742 struct netmap_adapter *na = NA(ifp); 743 struct netmap_if *nifp = priv->np_nifp; 744 745 na->refcount--; 746 if (na->refcount <= 0) { /* last instance */ 747 u_int i; 748 749 D("deleting last netmap instance for %s", ifp->if_xname); 750 /* 751 * there is a race here with *_netmap_task() and 752 * netmap_poll(), which don't run under NETMAP_CORE_LOCK. 753 * na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP 754 * (aka NETMAP_DELETING(na)) are a unique marker that the 755 * device is dying. 756 * Before destroying stuff we sleep a bit, and then complete 757 * the job. NIOCREG should realize the condition and 758 * loop until they can continue; the other routines 759 * should check the condition at entry and quit if 760 * they cannot run. 761 */ 762 na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0); 763 tsleep(na, 0, "NIOCUNREG", 4); 764 na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0); 765 na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */ 766 /* Wake up any sleeping threads. netmap_poll will 767 * then return POLLERR 768 */ 769 for (i = 0; i < na->num_queues + 2; i++) { 770 selwakeuppri(&na->tx_rings[i].si, PI_NET); 771 selwakeuppri(&na->rx_rings[i].si, PI_NET); 772 } 773 /* release all buffers */ 774 NMA_LOCK(); 775 for (i = 0; i < na->num_queues + 1; i++) { 776 int j, lim; 777 struct netmap_ring *ring; 778 779 ND("tx queue %d", i); 780 ring = na->tx_rings[i].ring; 781 lim = na->tx_rings[i].nkr_num_slots; 782 for (j = 0; j < lim; j++) 783 netmap_free_buf(nifp, ring->slot[j].buf_idx); 784 785 ND("rx queue %d", i); 786 ring = na->rx_rings[i].ring; 787 lim = na->rx_rings[i].nkr_num_slots; 788 for (j = 0; j < lim; j++) 789 netmap_free_buf(nifp, ring->slot[j].buf_idx); 790 } 791 NMA_UNLOCK(); 792 netmap_free_rings(na); 793 wakeup(na); 794 } 795 netmap_if_free(nifp); 796 } 797 798 799 static void 800 netmap_dtor(void *data) 801 { 802 struct netmap_priv_d *priv = data; 803 struct ifnet *ifp = priv->np_ifp; 804 struct netmap_adapter *na = NA(ifp); 805 806 na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0); 807 netmap_dtor_locked(data); 808 na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0); 809 810 if_rele(ifp); 811 bzero(priv, sizeof(*priv)); /* XXX for safety */ 812 free(priv, M_DEVBUF); 813 } 814 815 816 /* 817 * mmap(2) support for the "netmap" device. 
818 * 819 * Expose all the memory previously allocated by our custom memory 820 * allocator: this way the user has only to issue a single mmap(2), and 821 * can work on all the data structures flawlessly. 822 * 823 * Return 0 on success, -1 otherwise. 824 */ 825 static int 826 #if __FreeBSD_version < 900000 827 netmap_mmap(__unused struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, 828 int nprot) 829 #else 830 netmap_mmap(__unused struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, 831 int nprot, __unused vm_memattr_t *memattr) 832 #endif 833 { 834 if (nprot & PROT_EXEC) 835 return (-1); // XXX -1 or EINVAL ? 836 837 ND("request for offset 0x%x", (uint32_t)offset); 838 *paddr = netmap_ofstophys(offset); 839 840 return (0); 841 } 842 843 844 /* 845 * Handlers for synchronization of the queues from/to the host. 846 * 847 * netmap_sync_to_host() passes packets up. We are called from a 848 * system call in user process context, and the only contention 849 * can be among multiple user threads erroneously calling 850 * this routine concurrently. In principle we should not even 851 * need to lock. 852 */ 853 static void 854 netmap_sync_to_host(struct netmap_adapter *na) 855 { 856 struct netmap_kring *kring = &na->tx_rings[na->num_queues]; 857 struct netmap_ring *ring = kring->ring; 858 struct mbuf *head = NULL, *tail = NULL, *m; 859 u_int k, n, lim = kring->nkr_num_slots - 1; 860 861 k = ring->cur; 862 if (k > lim) { 863 netmap_ring_reinit(kring); 864 return; 865 } 866 // na->nm_lock(na->ifp->if_softc, NETMAP_CORE_LOCK, 0); 867 868 /* Take packets from hwcur to cur and pass them up. 869 * In case of no buffers we give up. At the end of the loop, 870 * the queue is drained in all cases. 871 */ 872 for (n = kring->nr_hwcur; n != k;) { 873 struct netmap_slot *slot = &ring->slot[n]; 874 875 n = (n == lim) ? 0 : n + 1; 876 if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) { 877 D("bad pkt at %d len %d", n, slot->len); 878 continue; 879 } 880 m = m_devget(NMB(slot), slot->len, 0, na->ifp, NULL); 881 882 if (m == NULL) 883 break; 884 if (tail) 885 tail->m_nextpkt = m; 886 else 887 head = m; 888 tail = m; 889 m->m_nextpkt = NULL; 890 } 891 kring->nr_hwcur = k; 892 kring->nr_hwavail = ring->avail = lim; 893 // na->nm_lock(na->ifp->if_softc, NETMAP_CORE_UNLOCK, 0); 894 895 /* send packets up, outside the lock */ 896 while ((m = head) != NULL) { 897 head = head->m_nextpkt; 898 m->m_nextpkt = NULL; 899 m->m_pkthdr.rcvif = na->ifp; 900 if (netmap_verbose & NM_VERB_HOST) 901 D("sending up pkt %p size %d", m, m->m_pkthdr.len); 902 (na->ifp->if_input)(na->ifp, m); 903 } 904 } 905 906 /* 907 * rxsync backend for packets coming from the host stack. 908 * They have been put in the queue by netmap_start() so we 909 * need to protect access to the kring using a lock. 910 * 911 * This routine also does the selrecord if called from the poll handler 912 * (we know because td != NULL). 
913 */ 914 static void 915 netmap_sync_from_host(struct netmap_adapter *na, struct thread *td) 916 { 917 struct netmap_kring *kring = &na->rx_rings[na->num_queues]; 918 struct netmap_ring *ring = kring->ring; 919 int error = 1, delta; 920 u_int k = ring->cur, lim = kring->nkr_num_slots; 921 922 na->nm_lock(na->ifp->if_softc, NETMAP_CORE_LOCK, 0); 923 if (k >= lim) /* bad value */ 924 goto done; 925 delta = k - kring->nr_hwcur; 926 if (delta < 0) 927 delta += lim; 928 kring->nr_hwavail -= delta; 929 if (kring->nr_hwavail < 0) /* error */ 930 goto done; 931 kring->nr_hwcur = k; 932 error = 0; 933 k = ring->avail = kring->nr_hwavail; 934 if (k == 0 && td) 935 selrecord(td, &kring->si); 936 if (k && (netmap_verbose & NM_VERB_HOST)) 937 D("%d pkts from stack", k); 938 done: 939 na->nm_lock(na->ifp->if_softc, NETMAP_CORE_UNLOCK, 0); 940 if (error) 941 netmap_ring_reinit(kring); 942 } 943 944 945 /* 946 * get a refcounted reference to an interface. 947 * Return ENXIO if the interface does not exist, EINVAL if netmap 948 * is not supported by the interface. 949 * If successful, hold a reference. 950 */ 951 static int 952 get_ifp(const char *name, struct ifnet **ifp) 953 { 954 *ifp = ifunit_ref(name); 955 if (*ifp == NULL) 956 return (ENXIO); 957 /* can do this if the capability exists and if_pspare[0] 958 * points to the netmap descriptor. 959 */ 960 if ((*ifp)->if_capabilities & IFCAP_NETMAP && NA(*ifp)) 961 return 0; /* valid pointer, we hold the refcount */ 962 if_rele(*ifp); 963 return EINVAL; // not NETMAP capable 964 } 965 966 967 /* 968 * Error routine called when txsync/rxsync detects an error. 969 * Can't do much more than resetting cur = hwcur, avail = hwavail. 970 * Return 1 on reinit. 971 * 972 * This routine is only called by the upper half of the kernel. 973 * It only reads hwcur (which is changed only by the upper half, too) 974 * and hwavail (which may be changed by the lower half, but only on 975 * a tx ring and only to increase it, so any error will be recovered 976 * on the next call). For the above, we don't strictly need to call 977 * it under lock. 978 */ 979 int 980 netmap_ring_reinit(struct netmap_kring *kring) 981 { 982 struct netmap_ring *ring = kring->ring; 983 u_int i, lim = kring->nkr_num_slots - 1; 984 int errors = 0; 985 986 D("called for %s", kring->na->ifp->if_xname); 987 if (ring->cur > lim) 988 errors++; 989 for (i = 0; i <= lim; i++) { 990 u_int idx = ring->slot[i].buf_idx; 991 u_int len = ring->slot[i].len; 992 if (idx < 2 || idx >= netmap_total_buffers) { 993 if (!errors++) 994 D("bad buffer at slot %d idx %d len %d ", i, idx, len); 995 ring->slot[i].buf_idx = 0; 996 ring->slot[i].len = 0; 997 } else if (len > NETMAP_BUF_SIZE) { 998 ring->slot[i].len = 0; 999 if (!errors++) 1000 D("bad len %d at slot %d idx %d", 1001 len, i, idx); 1002 } 1003 } 1004 if (errors) { 1005 int pos = kring - kring->na->tx_rings; 1006 int n = kring->na->num_queues + 2; 1007 1008 D("total %d errors", errors); 1009 errors++; 1010 D("%s %s[%d] reinit, cur %d -> %d avail %d -> %d", 1011 kring->na->ifp->if_xname, 1012 pos < n ? "TX" : "RX", pos < n ? pos : pos - n, 1013 ring->cur, kring->nr_hwcur, 1014 ring->avail, kring->nr_hwavail); 1015 ring->cur = kring->nr_hwcur; 1016 ring->avail = kring->nr_hwavail; 1017 } 1018 return (errors ? 1 : 0); 1019 } 1020 1021 1022 /* 1023 * Set the ring ID. For devices with a single queue, a request 1024 * for all rings is the same as a single ring. 
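 *
 * For reference, the encodings handled below (the NETMAP_* flags are
 * the constants from net/netmap.h used in this function; the values
 * are just example requests):
 *
 *	0				bind to all hardware rings
 *	NETMAP_HW_RING | i		bind to hardware ring i only
 *	NETMAP_SW_RING			bind to the host (stack) ring
 *	... | NETMAP_NO_TX_POLL		don't txsync on every poll()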
1025 */ 1026 static int 1027 netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid) 1028 { 1029 struct ifnet *ifp = priv->np_ifp; 1030 struct netmap_adapter *na = NA(ifp); 1031 void *adapter = na->ifp->if_softc; /* shorthand */ 1032 u_int i = ringid & NETMAP_RING_MASK; 1033 /* first time we don't lock */ 1034 int need_lock = (priv->np_qfirst != priv->np_qlast); 1035 1036 if ( (ringid & NETMAP_HW_RING) && i >= na->num_queues) { 1037 D("invalid ring id %d", i); 1038 return (EINVAL); 1039 } 1040 if (need_lock) 1041 na->nm_lock(adapter, NETMAP_CORE_LOCK, 0); 1042 priv->np_ringid = ringid; 1043 if (ringid & NETMAP_SW_RING) { 1044 priv->np_qfirst = na->num_queues; 1045 priv->np_qlast = na->num_queues + 1; 1046 } else if (ringid & NETMAP_HW_RING) { 1047 priv->np_qfirst = i; 1048 priv->np_qlast = i + 1; 1049 } else { 1050 priv->np_qfirst = 0; 1051 priv->np_qlast = na->num_queues; 1052 } 1053 priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1; 1054 if (need_lock) 1055 na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0); 1056 if (ringid & NETMAP_SW_RING) 1057 D("ringid %s set to SW RING", ifp->if_xname); 1058 else if (ringid & NETMAP_HW_RING) 1059 D("ringid %s set to HW RING %d", ifp->if_xname, 1060 priv->np_qfirst); 1061 else 1062 D("ringid %s set to all %d HW RINGS", ifp->if_xname, 1063 priv->np_qlast); 1064 return 0; 1065 } 1066 1067 /* 1068 * ioctl(2) support for the "netmap" device. 1069 * 1070 * Following a list of accepted commands: 1071 * - NIOCGINFO 1072 * - SIOCGIFADDR just for convenience 1073 * - NIOCREGIF 1074 * - NIOCUNREGIF 1075 * - NIOCTXSYNC 1076 * - NIOCRXSYNC 1077 * 1078 * Return 0 on success, errno otherwise. 1079 */ 1080 static int 1081 netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data, 1082 __unused int fflag, struct thread *td) 1083 { 1084 struct netmap_priv_d *priv = NULL; 1085 struct ifnet *ifp; 1086 struct nmreq *nmr = (struct nmreq *) data; 1087 struct netmap_adapter *na; 1088 void *adapter; 1089 int error; 1090 u_int i; 1091 struct netmap_if *nifp; 1092 1093 CURVNET_SET(TD_TO_VNET(td)); 1094 1095 error = devfs_get_cdevpriv((void **)&priv); 1096 if (error != ENOENT && error != 0) { 1097 CURVNET_RESTORE(); 1098 return (error); 1099 } 1100 1101 error = 0; /* Could be ENOENT */ 1102 switch (cmd) { 1103 case NIOCGINFO: /* return capabilities etc */ 1104 /* memsize is always valid */ 1105 nmr->nr_memsize = netmap_mem_d->nm_totalsize; 1106 nmr->nr_offset = 0; 1107 nmr->nr_numrings = 0; 1108 nmr->nr_numslots = 0; 1109 if (nmr->nr_name[0] == '\0') /* just get memory info */ 1110 break; 1111 error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */ 1112 if (error) 1113 break; 1114 na = NA(ifp); /* retrieve netmap_adapter */ 1115 nmr->nr_numrings = na->num_queues; 1116 nmr->nr_numslots = na->num_tx_desc; 1117 if_rele(ifp); /* return the refcount */ 1118 break; 1119 1120 case NIOCREGIF: 1121 if (priv != NULL) { /* thread already registered */ 1122 error = netmap_set_ringid(priv, nmr->nr_ringid); 1123 break; 1124 } 1125 /* find the interface and a reference */ 1126 error = get_ifp(nmr->nr_name, &ifp); /* keep reference */ 1127 if (error) 1128 break; 1129 na = NA(ifp); /* retrieve netmap adapter */ 1130 adapter = na->ifp->if_softc; /* shorthand */ 1131 /* 1132 * Allocate the private per-thread structure. 1133 * XXX perhaps we can use a blocking malloc ? 
1134 */ 1135 priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF, 1136 M_NOWAIT | M_ZERO); 1137 if (priv == NULL) { 1138 error = ENOMEM; 1139 if_rele(ifp); /* return the refcount */ 1140 break; 1141 } 1142 1143 for (i = 10; i > 0; i--) { 1144 na->nm_lock(adapter, NETMAP_CORE_LOCK, 0); 1145 if (!NETMAP_DELETING(na)) 1146 break; 1147 na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0); 1148 tsleep(na, 0, "NIOCREGIF", hz/10); 1149 } 1150 if (i == 0) { 1151 D("too many NIOCREGIF attempts, give up"); 1152 error = EINVAL; 1153 free(priv, M_DEVBUF); 1154 if_rele(ifp); /* return the refcount */ 1155 break; 1156 } 1157 1158 priv->np_ifp = ifp; /* store the reference */ 1159 error = netmap_set_ringid(priv, nmr->nr_ringid); 1160 if (error) 1161 goto error; 1162 priv->np_nifp = nifp = netmap_if_new(nmr->nr_name, na); 1163 if (nifp == NULL) { /* allocation failed */ 1164 error = ENOMEM; 1165 } else if (ifp->if_capenable & IFCAP_NETMAP) { 1166 /* was already set */ 1167 } else { 1168 /* Otherwise set the card in netmap mode 1169 * and make it use the shared buffers. 1170 */ 1171 error = na->nm_register(ifp, 1); /* mode on */ 1172 if (error) 1173 netmap_dtor_locked(priv); 1174 } 1175 1176 if (error) { /* reg. failed, release priv and ref */ 1177 error: 1178 na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0); 1179 if_rele(ifp); /* return the refcount */ 1180 bzero(priv, sizeof(*priv)); 1181 free(priv, M_DEVBUF); 1182 break; 1183 } 1184 1185 na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0); 1186 error = devfs_set_cdevpriv(priv, netmap_dtor); 1187 1188 if (error != 0) { 1189 /* could not assign the private storage for the 1190 * thread, call the destructor explicitly. 1191 */ 1192 netmap_dtor(priv); 1193 break; 1194 } 1195 1196 /* return the offset of the netmap_if object */ 1197 nmr->nr_numrings = na->num_queues; 1198 nmr->nr_numslots = na->num_tx_desc; 1199 nmr->nr_memsize = netmap_mem_d->nm_totalsize; 1200 nmr->nr_offset = netmap_if_offset(nifp); 1201 break; 1202 1203 case NIOCUNREGIF: 1204 if (priv == NULL) { 1205 error = ENXIO; 1206 break; 1207 } 1208 1209 /* the interface is unregistered inside the 1210 destructor of the private data. 
*/ 1211 devfs_clear_cdevpriv(); 1212 break; 1213 1214 case NIOCTXSYNC: 1215 case NIOCRXSYNC: 1216 if (priv == NULL) { 1217 error = ENXIO; 1218 break; 1219 } 1220 ifp = priv->np_ifp; /* we have a reference */ 1221 na = NA(ifp); /* retrieve netmap adapter */ 1222 adapter = ifp->if_softc; /* shorthand */ 1223 1224 if (priv->np_qfirst == na->num_queues) { 1225 /* queues to/from host */ 1226 if (cmd == NIOCTXSYNC) 1227 netmap_sync_to_host(na); 1228 else 1229 netmap_sync_from_host(na, NULL); 1230 break; 1231 } 1232 1233 for (i = priv->np_qfirst; i < priv->np_qlast; i++) { 1234 if (cmd == NIOCTXSYNC) { 1235 struct netmap_kring *kring = &na->tx_rings[i]; 1236 if (netmap_verbose & NM_VERB_TXSYNC) 1237 D("sync tx ring %d cur %d hwcur %d", 1238 i, kring->ring->cur, 1239 kring->nr_hwcur); 1240 na->nm_txsync(adapter, i, 1 /* do lock */); 1241 if (netmap_verbose & NM_VERB_TXSYNC) 1242 D("after sync tx ring %d cur %d hwcur %d", 1243 i, kring->ring->cur, 1244 kring->nr_hwcur); 1245 } else { 1246 na->nm_rxsync(adapter, i, 1 /* do lock */); 1247 microtime(&na->rx_rings[i].ring->ts); 1248 } 1249 } 1250 1251 break; 1252 1253 case BIOCIMMEDIATE: 1254 case BIOCGHDRCMPLT: 1255 case BIOCSHDRCMPLT: 1256 case BIOCSSEESENT: 1257 D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT"); 1258 break; 1259 1260 default: 1261 { 1262 /* 1263 * allow device calls 1264 */ 1265 struct socket so; 1266 bzero(&so, sizeof(so)); 1267 error = get_ifp(nmr->nr_name, &ifp); /* keep reference */ 1268 if (error) 1269 break; 1270 so.so_vnet = ifp->if_vnet; 1271 // so->so_proto not null. 1272 error = ifioctl(&so, cmd, data, td); 1273 if_rele(ifp); 1274 } 1275 } 1276 1277 CURVNET_RESTORE(); 1278 return (error); 1279 } 1280 1281 1282 /* 1283 * select(2) and poll(2) handlers for the "netmap" device. 1284 * 1285 * Can be called for one or more queues. 1286 * Return true the event mask corresponding to ready events. 1287 * If there are no ready events, do a selrecord on either individual 1288 * selfd or on the global one. 1289 * Device-dependent parts (locking and sync of tx/rx rings) 1290 * are done through callbacks. 1291 */ 1292 static int 1293 netmap_poll(__unused struct cdev *dev, int events, struct thread *td) 1294 { 1295 struct netmap_priv_d *priv = NULL; 1296 struct netmap_adapter *na; 1297 struct ifnet *ifp; 1298 struct netmap_kring *kring; 1299 u_int core_lock, i, check_all, want_tx, want_rx, revents = 0; 1300 void *adapter; 1301 enum {NO_CL, NEED_CL, LOCKED_CL }; /* see below */ 1302 1303 if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL) 1304 return POLLERR; 1305 1306 ifp = priv->np_ifp; 1307 // XXX check for deleting() ? 
1308 if ( (ifp->if_capenable & IFCAP_NETMAP) == 0) 1309 return POLLERR; 1310 1311 if (netmap_verbose & 0x8000) 1312 D("device %s events 0x%x", ifp->if_xname, events); 1313 want_tx = events & (POLLOUT | POLLWRNORM); 1314 want_rx = events & (POLLIN | POLLRDNORM); 1315 1316 adapter = ifp->if_softc; 1317 na = NA(ifp); /* retrieve netmap adapter */ 1318 1319 /* how many queues we are scanning */ 1320 i = priv->np_qfirst; 1321 if (i == na->num_queues) { /* from/to host */ 1322 if (priv->np_txpoll || want_tx) { 1323 /* push any packets up, then we are always ready */ 1324 kring = &na->tx_rings[i]; 1325 netmap_sync_to_host(na); 1326 revents |= want_tx; 1327 } 1328 if (want_rx) { 1329 kring = &na->rx_rings[i]; 1330 if (kring->ring->avail == 0) 1331 netmap_sync_from_host(na, td); 1332 if (kring->ring->avail > 0) { 1333 revents |= want_rx; 1334 } 1335 } 1336 return (revents); 1337 } 1338 1339 /* 1340 * check_all is set if the card has more than one queue and 1341 * the client is polling all of them. If true, we sleep on 1342 * the "global" selfd, otherwise we sleep on individual selfd 1343 * (we can only sleep on one of them per direction). 1344 * The interrupt routine in the driver should always wake on 1345 * the individual selfd, and also on the global one if the card 1346 * has more than one ring. 1347 * 1348 * If the card has only one lock, we just use that. 1349 * If the card has separate ring locks, we just use those 1350 * unless we are doing check_all, in which case the whole 1351 * loop is wrapped by the global lock. 1352 * We acquire locks only when necessary: if poll is called 1353 * when buffers are available, we can just return without locks. 1354 * 1355 * rxsync() is only called if we run out of buffers on a POLLIN. 1356 * txsync() is called if we run out of buffers on POLLOUT, or 1357 * there are pending packets to send. The latter can be disabled 1358 * passing NETMAP_NO_TX_POLL in the NIOCREG call. 1359 */ 1360 check_all = (i + 1 != priv->np_qlast); 1361 1362 /* 1363 * core_lock indicates what to do with the core lock. 1364 * The core lock is used when either the card has no individual 1365 * locks, or it has individual locks but we are cheking all 1366 * rings so we need the core lock to avoid missing wakeup events. 1367 * 1368 * It has three possible states: 1369 * NO_CL we don't need to use the core lock, e.g. 1370 * because we are protected by individual locks. 1371 * NEED_CL we need the core lock. In this case, when we 1372 * call the lock routine, move to LOCKED_CL 1373 * to remember to release the lock once done. 1374 * LOCKED_CL core lock is set, so we need to release it. 1375 */ 1376 core_lock = (check_all || !na->separate_locks) ? NEED_CL : NO_CL; 1377 /* 1378 * We start with a lock free round which is good if we have 1379 * data available. If this fails, then lock and call the sync 1380 * routines. 1381 */ 1382 for (i = priv->np_qfirst; want_rx && i < priv->np_qlast; i++) { 1383 kring = &na->rx_rings[i]; 1384 if (kring->ring->avail > 0) { 1385 revents |= want_rx; 1386 want_rx = 0; /* also breaks the loop */ 1387 } 1388 } 1389 for (i = priv->np_qfirst; want_tx && i < priv->np_qlast; i++) { 1390 kring = &na->tx_rings[i]; 1391 if (kring->ring->avail > 0) { 1392 revents |= want_tx; 1393 want_tx = 0; /* also breaks the loop */ 1394 } 1395 } 1396 1397 /* 1398 * If we to push packets out (priv->np_txpoll) or want_tx is 1399 * still set, we do need to run the txsync calls (on all rings, 1400 * to avoid that the tx rings stall). 
1401 */ 1402 if (priv->np_txpoll || want_tx) { 1403 for (i = priv->np_qfirst; i < priv->np_qlast; i++) { 1404 kring = &na->tx_rings[i]; 1405 /* 1406 * Skip the current ring if want_tx == 0 1407 * (we have already done a successful sync on 1408 * a previous ring) AND kring->cur == kring->hwcur 1409 * (there are no pending transmissions for this ring). 1410 */ 1411 if (!want_tx && kring->ring->cur == kring->nr_hwcur) 1412 continue; 1413 if (core_lock == NEED_CL) { 1414 na->nm_lock(adapter, NETMAP_CORE_LOCK, 0); 1415 core_lock = LOCKED_CL; 1416 } 1417 if (na->separate_locks) 1418 na->nm_lock(adapter, NETMAP_TX_LOCK, i); 1419 if (netmap_verbose & NM_VERB_TXSYNC) 1420 D("send %d on %s %d", 1421 kring->ring->cur, 1422 ifp->if_xname, i); 1423 if (na->nm_txsync(adapter, i, 0 /* no lock */)) 1424 revents |= POLLERR; 1425 1426 /* Check avail/call selrecord only if called with POLLOUT */ 1427 if (want_tx) { 1428 if (kring->ring->avail > 0) { 1429 /* stop at the first ring. We don't risk 1430 * starvation. 1431 */ 1432 revents |= want_tx; 1433 want_tx = 0; 1434 } else if (!check_all) 1435 selrecord(td, &kring->si); 1436 } 1437 if (na->separate_locks) 1438 na->nm_lock(adapter, NETMAP_TX_UNLOCK, i); 1439 } 1440 } 1441 1442 /* 1443 * now if want_rx is still set we need to lock and rxsync. 1444 * Do it on all rings because otherwise we starve. 1445 */ 1446 if (want_rx) { 1447 for (i = priv->np_qfirst; i < priv->np_qlast; i++) { 1448 kring = &na->rx_rings[i]; 1449 if (core_lock == NEED_CL) { 1450 na->nm_lock(adapter, NETMAP_CORE_LOCK, 0); 1451 core_lock = LOCKED_CL; 1452 } 1453 if (na->separate_locks) 1454 na->nm_lock(adapter, NETMAP_RX_LOCK, i); 1455 1456 if (na->nm_rxsync(adapter, i, 0 /* no lock */)) 1457 revents |= POLLERR; 1458 if (netmap_no_timestamp == 0 || 1459 kring->ring->flags & NR_TIMESTAMP) { 1460 microtime(&kring->ring->ts); 1461 } 1462 1463 if (kring->ring->avail > 0) 1464 revents |= want_rx; 1465 else if (!check_all) 1466 selrecord(td, &kring->si); 1467 if (na->separate_locks) 1468 na->nm_lock(adapter, NETMAP_RX_UNLOCK, i); 1469 } 1470 } 1471 if (check_all && revents == 0) { 1472 i = na->num_queues + 1; /* the global queue */ 1473 if (want_tx) 1474 selrecord(td, &na->tx_rings[i].si); 1475 if (want_rx) 1476 selrecord(td, &na->rx_rings[i].si); 1477 } 1478 if (core_lock == LOCKED_CL) 1479 na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0); 1480 1481 return (revents); 1482 } 1483 1484 /*------- driver support routines ------*/ 1485 1486 /* 1487 * Initialize a ``netmap_adapter`` object created by driver on attach. 1488 * We allocate a block of memory with room for a struct netmap_adapter 1489 * plus two sets of N+2 struct netmap_kring (where N is the number 1490 * of hardware rings): 1491 * krings 0..N-1 are for the hardware queues. 1492 * kring N is for the host stack queue 1493 * kring N+1 is only used for the selinfo for all queues. 1494 * Return 0 on success, ENOMEM otherwise. 
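 *
 * A driver calls this from its attach routine; a hypothetical sketch
 * follows (the na fields are the ones used in this file, while the
 * foo_* callbacks and the "adapter" softc are made-up names):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = adapter->ifp;
 *	na.separate_locks = 0;
 *	na.num_tx_desc = adapter->num_tx_desc;
 *	na.num_rx_desc = adapter->num_rx_desc;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	na.nm_lock = foo_netmap_lock;
 *	netmap_attach(&na, adapter->num_queues);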
1495 */ 1496 int 1497 netmap_attach(struct netmap_adapter *na, int num_queues) 1498 { 1499 int n = num_queues + 2; 1500 int size = sizeof(*na) + 2 * n * sizeof(struct netmap_kring); 1501 void *buf; 1502 struct ifnet *ifp = na->ifp; 1503 1504 if (ifp == NULL) { 1505 D("ifp not set, giving up"); 1506 return EINVAL; 1507 } 1508 na->refcount = 0; 1509 na->num_queues = num_queues; 1510 1511 buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 1512 if (buf) { 1513 WNA(ifp) = buf; 1514 na->tx_rings = (void *)((char *)buf + sizeof(*na)); 1515 na->rx_rings = na->tx_rings + n; 1516 na->buff_size = NETMAP_BUF_SIZE; 1517 bcopy(na, buf, sizeof(*na)); 1518 ifp->if_capabilities |= IFCAP_NETMAP; 1519 } 1520 D("%s for %s", buf ? "ok" : "failed", ifp->if_xname); 1521 1522 return (buf ? 0 : ENOMEM); 1523 } 1524 1525 1526 /* 1527 * Free the allocated memory linked to the given ``netmap_adapter`` 1528 * object. 1529 */ 1530 void 1531 netmap_detach(struct ifnet *ifp) 1532 { 1533 u_int i; 1534 struct netmap_adapter *na = NA(ifp); 1535 1536 if (!na) 1537 return; 1538 1539 for (i = 0; i < na->num_queues + 2; i++) { 1540 knlist_destroy(&na->tx_rings[i].si.si_note); 1541 knlist_destroy(&na->rx_rings[i].si.si_note); 1542 } 1543 bzero(na, sizeof(*na)); 1544 WNA(ifp) = NULL; 1545 free(na, M_DEVBUF); 1546 } 1547 1548 1549 /* 1550 * Intercept packets from the network stack and pass them 1551 * to netmap as incoming packets on the 'software' ring. 1552 * We are not locked when called. 1553 */ 1554 int 1555 netmap_start(struct ifnet *ifp, struct mbuf *m) 1556 { 1557 struct netmap_adapter *na = NA(ifp); 1558 struct netmap_kring *kring = &na->rx_rings[na->num_queues]; 1559 u_int i, len = m->m_pkthdr.len; 1560 int error = EBUSY, lim = kring->nkr_num_slots - 1; 1561 struct netmap_slot *slot; 1562 1563 if (netmap_verbose & NM_VERB_HOST) 1564 D("%s packet %d len %d from the stack", ifp->if_xname, 1565 kring->nr_hwcur + kring->nr_hwavail, len); 1566 na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0); 1567 if (kring->nr_hwavail >= lim) { 1568 D("stack ring %s full\n", ifp->if_xname); 1569 goto done; /* no space */ 1570 } 1571 if (len > na->buff_size) { 1572 D("drop packet size %d > %d", len, na->buff_size); 1573 goto done; /* too long for us */ 1574 } 1575 1576 /* compute the insert position */ 1577 i = kring->nr_hwcur + kring->nr_hwavail; 1578 if (i > lim) 1579 i -= lim + 1; 1580 slot = &kring->ring->slot[i]; 1581 m_copydata(m, 0, len, NMB(slot)); 1582 slot->len = len; 1583 kring->nr_hwavail++; 1584 if (netmap_verbose & NM_VERB_HOST) 1585 D("wake up host ring %s %d", na->ifp->if_xname, na->num_queues); 1586 selwakeuppri(&kring->si, PI_NET); 1587 error = 0; 1588 done: 1589 na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0); 1590 1591 /* release the mbuf in either cases of success or failure. As an 1592 * alternative, put the mbuf in a free list and free the list 1593 * only when really necessary. 1594 */ 1595 m_freem(m); 1596 1597 return (error); 1598 } 1599 1600 1601 /* 1602 * netmap_reset() is called by the driver routines when reinitializing 1603 * a ring. The driver is in charge of locking to protect the kring. 1604 * If netmap mode is not set just return NULL. 
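 *
 * Hypothetical driver-side usage (a sketch only; the foo_* context,
 * ring_nr, nslots and the descriptor-programming details are made up):
 *
 *	struct netmap_slot *slot = netmap_reset(na, NR_RX, ring_nr, 0);
 *
 *	if (slot != NULL) {	// the ring is in netmap mode
 *		for (j = 0; j < nslots; j++) {
 *			// program hw rx descriptor j with the netmap
 *			// buffer of slot[j] (e.g. NMB(&slot[j]) and
 *			// vtophys()) instead of an mbuf cluster
 *		}
 *	}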
1605 */ 1606 struct netmap_slot * 1607 netmap_reset(struct netmap_adapter *na, enum txrx tx, int n, 1608 u_int new_cur) 1609 { 1610 struct netmap_kring *kring; 1611 struct netmap_ring *ring; 1612 int new_hwofs, lim; 1613 1614 if (na == NULL) 1615 return NULL; /* no netmap support here */ 1616 if (!(na->ifp->if_capenable & IFCAP_NETMAP)) 1617 return NULL; /* nothing to reinitialize */ 1618 kring = tx == NR_TX ? na->tx_rings + n : na->rx_rings + n; 1619 ring = kring->ring; 1620 lim = kring->nkr_num_slots - 1; 1621 1622 if (tx == NR_TX) 1623 new_hwofs = kring->nr_hwcur - new_cur; 1624 else 1625 new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur; 1626 if (new_hwofs > lim) 1627 new_hwofs -= lim + 1; 1628 1629 /* Alwayws set the new offset value and realign the ring. */ 1630 kring->nkr_hwofs = new_hwofs; 1631 if (tx == NR_TX) 1632 kring->nr_hwavail = kring->nkr_num_slots - 1; 1633 D("new hwofs %d on %s %s[%d]", 1634 kring->nkr_hwofs, na->ifp->if_xname, 1635 tx == NR_TX ? "TX" : "RX", n); 1636 1637 /* 1638 * We do the wakeup here, but the ring is not yet reconfigured. 1639 * However, we are under lock so there are no races. 1640 */ 1641 selwakeuppri(&kring->si, PI_NET); 1642 selwakeuppri(&kring[na->num_queues + 1 - n].si, PI_NET); 1643 return kring->ring->slot; 1644 } 1645 1646 1647 /* 1648 * Module loader. 1649 * 1650 * Create the /dev/netmap device and initialize all global 1651 * variables. 1652 * 1653 * Return 0 on success, errno on failure. 1654 */ 1655 static int 1656 netmap_init(void) 1657 { 1658 int error; 1659 1660 1661 error = netmap_memory_init(); 1662 if (error != 0) { 1663 printf("netmap: unable to initialize the memory allocator."); 1664 return (error); 1665 } 1666 printf("netmap: loaded module with %d Mbytes\n", 1667 (int)(netmap_mem_d->nm_totalsize >> 20)); 1668 1669 netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660, 1670 "netmap"); 1671 1672 return (0); 1673 } 1674 1675 1676 /* 1677 * Module unloader. 1678 * 1679 * Free all the memory, and destroy the ``/dev/netmap`` device. 1680 */ 1681 static void 1682 netmap_fini(void) 1683 { 1684 destroy_dev(netmap_dev); 1685 1686 netmap_memory_fini(); 1687 1688 printf("netmap: unloaded module.\n"); 1689 } 1690 1691 1692 /* 1693 * Kernel entry point. 1694 * 1695 * Initialize/finalize the module and return. 1696 * 1697 * Return 0 on success, errno on failure. 1698 */ 1699 static int 1700 netmap_loader(__unused struct module *module, int event, __unused void *arg) 1701 { 1702 int error = 0; 1703 1704 switch (event) { 1705 case MOD_LOAD: 1706 error = netmap_init(); 1707 break; 1708 1709 case MOD_UNLOAD: 1710 netmap_fini(); 1711 break; 1712 1713 default: 1714 error = EOPNOTSUPP; 1715 break; 1716 } 1717 1718 return (error); 1719 } 1720 1721 1722 DEV_MODULE(netmap, netmap_loader, NULL); 1723