/*
 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap_mem2.c 10830 2012-03-22 18:06:01Z luigi $
 *
 * New memory allocator for netmap
 */

/*
 * The new version allocates three regions:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * All regions need to be page-sized as we export them to
 * userspace through mmap. Only the last one needs to be dma-able,
 * but for convenience we use the same type of allocator for all.
 *
 * Once mapped, the three regions are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *	[ . . . ][ . . . . . .][ . . . . . . . . . .]
 *	 nm_if     nm_ring            nm_buf
 *
 * The areas exported to userspace contain offsets of the objects
 * rather than pointers. When (at init time) we write these offsets,
 * we find the index of the object, and from there locate the offset
 * from the beginning of the region.
 */
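/*
 * Worked example of the layout (illustrative sizes, not the actual
 * defaults): with a 512KB if pool, an 8MB ring pool and 2KB buffers,
 * userspace offset 0x900000 lies past the first two pools
 * (0x80000 + 0x800000 = 0x880000), so it falls in the buffer region
 * at relative offset 0x900000 - 0x880000 = 0x80000, i.e. buffer
 * index 0x80000 / 2048 = 256.
 */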
/*
 * Allocator for a pool of memory objects of the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * To be polite with the cache, objects are aligned to
 * the cache line, or 64 bytes. Sizes are rounded to multiples of 64.
 * For each object we have
 * one entry in the bitmap to signal the state. Allocation scans
 * the bitmap, but since this is done only on attach, we are not
 * too worried about performance.
 */

/*
 * MEMORY SIZES:
 *
 * (all the parameters below will become tunables)
 *
 * struct netmap_if is variable size but small.
 * Assuming each NIC has 8+2 rings (4+1 tx, 4+1 rx), the netmap_if
 * uses 120 bytes on a 64-bit machine.
 * We allocate NETMAP_IF_MAX_SIZE (1024) which should work even for
 * cards with 48 ring pairs.
 * The total number of 'struct netmap_if' could be slightly larger
 * than the total number of rings on all interfaces on the system.
 */
#define NETMAP_IF_MAX_SIZE	1024
#define NETMAP_IF_MAX_NUM	512

/*
 * netmap rings are up to 2..4k descriptors, 8 bytes each,
 * plus some glue at the beginning (32 bytes).
 * We set the default ring size to 9 pages (36K) and enable
 * a few hundred of them.
 */
#define NETMAP_RING_MAX_SIZE	(9*PAGE_SIZE)
#define NETMAP_RING_MAX_NUM	200	/* approx 8MB */

/*
 * Buffers: the more the better. Buffer size is NETMAP_BUF_SIZE,
 * 2k or slightly less, aligned to 64 bytes.
 * A large 10G interface can have 2k*18 = 36k buffers per interface,
 * or about 72MB of memory. Up to us to use more.
 */
#ifndef CONSERVATIVE
#define NETMAP_BUF_MAX_NUM	100000	/* 200MB */
#else /* CONSERVATIVE */
#define NETMAP_BUF_MAX_NUM	20000	/* 40MB */
#endif
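/*
 * Each pool keeps a lookup table with one entry per object. The entry
 * type lives in netmap_kern.h; for reference it is (approximately)
 * a pair of addresses:
 *
 *	struct lut_entry {
 *		void		*vaddr;	// kernel virtual address
 *		vm_paddr_t	paddr;	// physical address
 *	};
 */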
struct netmap_obj_pool {
	char name[16];		/* name of the allocator */
	u_int objtotal;		/* actual total number of objects. */
	u_int objfree;		/* number of free objects. */
	u_int clustentries;	/* actual objects per cluster */

	/* the total memory space is _numclusters*_clustsize */
	u_int _numclusters;	/* how many clusters */
	u_int _clustsize;	/* cluster size */
	u_int _objsize;		/* actual object size */

	u_int _memtotal;	/* _numclusters*_clustsize */
	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
};

struct netmap_mem_d {
	NM_LOCK_T nm_mtx;	/* protect the allocator ? */
	u_int nm_totalsize;	/* shorthand */

	/* pointers to the three allocators */
	struct netmap_obj_pool *nm_if_pool;
	struct netmap_obj_pool *nm_ring_pool;
	struct netmap_obj_pool *nm_buf_pool;
};

struct lut_entry *netmap_buffer_lut;	/* exported */


/*
 * Convert a userspace offset to a physical address.
 * XXX re-do in a simpler way.
 *
 * The idea here is to hide from userspace applications the fact that
 * pre-allocated memory is not contiguous, but fragmented across different
 * clusters and smaller memory allocators. Consequently, first of all we
 * need to find which allocator owns the provided offset, then we need to
 * find the physical address of the target page (this is done using the
 * look-up table).
 */
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
	const struct netmap_obj_pool *p[] = {
		nm_mem->nm_if_pool,
		nm_mem->nm_ring_pool,
		nm_mem->nm_buf_pool };
	int i;
	vm_offset_t o = offset;


	for (i = 0; i < 3; offset -= p[i]->_memtotal, i++) {
		if (offset >= p[i]->_memtotal)
			continue;
		// XXX now scan the clusters
		return p[i]->lut[offset / p[i]->_objsize].paddr +
			offset % p[i]->_objsize;
	}
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", o,
		(u_int)p[0]->_memtotal, p[0]->_memtotal + p[1]->_memtotal,
		p[0]->_memtotal + p[1]->_memtotal + p[2]->_memtotal);
	return 0;	// XXX bad address
}
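/*
 * netmap_ofstophys() backs the driver's d_mmap handler (in netmap.c),
 * which for every faulted page schematically does the following
 * (sketch only, signature abridged):
 *
 *	static int
 *	netmap_mmap(struct cdev *dev, vm_ooffset_t offset,
 *	    vm_paddr_t *paddr, int nprot)
 *	{
 *		*paddr = netmap_ofstophys(offset);
 *		return (0);
 *	}
 */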
/*
 * We store objects by kernel address; to export the value to userspace
 * we need to find the offset within the pool.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v)					\
	netmap_obj_offset(nm_mem->nm_if_pool, (v))

#define netmap_ring_offset(v)					\
	(nm_mem->nm_if_pool->_memtotal +			\
	netmap_obj_offset(nm_mem->nm_ring_pool, (v)))

#define netmap_buf_offset(v)					\
	(nm_mem->nm_if_pool->_memtotal +			\
	nm_mem->nm_ring_pool->_memtotal +			\
	netmap_obj_offset(nm_mem->nm_buf_pool, (v)))
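/*
 * These offsets are what userspace consumes. After mmap()ing the
 * region, a client rebuilds the pointers with the macros from
 * net/netmap_user.h, along these lines (illustrative sketch; the
 * NIOCREGIF ioctl setup that fills struct nmreq req is omitted):
 *
 *	char *mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txring = NETMAP_TXRING(nifp, 0);
 *	char *buf = NETMAP_BUF(txring, txring->slot[0].buf_idx);
 */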
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len)
{
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j;	/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("%s allocator: ran out of memory", p->name);
		return NULL;
	}

	/* termination is guaranteed by p->objfree */
	while (vaddr == NULL) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* this bitmap word is fully used */
			i++;
			continue;
		}
		/* locate a slot: find the lowest set bit */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	return vaddr;
}


/*
 * free by index, not by address
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	int i, j, n = p->_memtotal / p->_clustsize;

	for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
		void *base = p->lut[i * p->clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* the given address is outside the current cluster */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		KASSERT(j != 0, ("Cannot free object 0"));
		netmap_obj_free(p, j);
		return;
	}
	ND("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(len)	netmap_obj_malloc(nm_mem->nm_if_pool, len)
#define netmap_if_free(v)	netmap_obj_free_va(nm_mem->nm_if_pool, (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(nm_mem->nm_ring_pool, len)
#define netmap_buf_malloc()			\
	netmap_obj_malloc(nm_mem->nm_buf_pool, NETMAP_BUF_SIZE)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v)						\
	(netmap_obj_offset(nm_mem->nm_buf_pool, (v)) / nm_mem->nm_buf_pool->_objsize)
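/*
 * Bitmap bookkeeping in a nutshell: object j maps to word j/32,
 * bit j%32, and a set bit means "free". E.g. freeing object 70
 * sets bit 6 of word 2:
 *
 *	p->bitmap[70 / 32] |= 1 << (70 % 32);	// bitmap[2] |= 0x40
 */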
static void
netmap_new_bufs(struct netmap_if *nifp __unused,
		struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
	uint32_t i = 0;	/* slot counter */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc();
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}

		slot[i].buf_idx = netmap_buf_index(vaddr);
		KASSERT(slot[i].buf_idx != 0,
		    ("Assigning buf_idx=0 to just created slot"));
		slot[i].len = p->_objsize;
		slot[i].flags = NS_BUF_CHANGED; // XXX GAETANO hack
	}

	ND("allocated %d buffers, %d available", n, p->objfree);
	return;

cleanup:
	/* i is unsigned, so free slots 0..i-1 without testing i >= 0 */
	while (i > 0) {
		i--;
		netmap_obj_free(nm_mem->nm_buf_pool, slot[i].buf_idx);
	}
}


static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(nm_mem->nm_buf_pool, i);
}


/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	if (p->lut) {
		int i;

		/* one contigfree() per cluster, i.e. every clustentries objects */
		for (i = 0; i < p->objtotal; i += p->clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		free(p->lut, M_NETMAP);
	}
	bzero(p, sizeof(*p));
	free(p, M_NETMAP);
}
/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */
static struct netmap_obj_pool *
netmap_new_obj_allocator(const char *name, u_int objtotal, u_int objsize)
{
	struct netmap_obj_pool *p;
	int i, n;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return NULL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	D("objsize %d clustsize %d objects %d",
	    objsize, clustsize, clustentries);
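	/*
	 * Worked example of the search above, assuming PAGE_SIZE = 4096:
	 * for 2048-byte objects the first exact fit is i = 2, since
	 * 2 * 2048 = 4096 wastes nothing, so clustentries = 2 and
	 * clustsize = PAGE_SIZE. When no multiple of objsize hits a page
	 * boundary within MAX_CLUSTSIZE, the loop keeps the i that leaves
	 * the smallest gap to the next page boundary, i.e. the largest
	 * (i * objsize) % PAGE_SIZE.
	 */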
	p = malloc(sizeof(struct netmap_obj_pool), M_NETMAP,
	    M_WAITOK | M_ZERO);
	if (p == NULL) {
		D("Unable to create '%s' allocator", name);
		return NULL;
	}
	/*
	 * Allocate and initialize the lookup table.
	 *
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	strncpy(p->name, name, sizeof(p->name));
	p->clustentries = clustentries;
	p->_clustsize = clustsize;
	n = (objtotal + clustentries - 1) / clustentries;
	p->_numclusters = n;
	p->objtotal = n * clustentries;
	p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
	p->_objsize = objsize;
	p->_memtotal = p->_numclusters * p->_clustsize;

	p->lut = malloc(sizeof(struct lut_entry) * p->objtotal,
	    M_NETMAP, M_WAITOK | M_ZERO);
	if (p->lut == NULL) {
		D("Unable to create lookup table for '%s' allocator", name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", n,
		    name);
		goto clean;
	}
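	/*
	 * Rounding illustration (hypothetical request): objtotal = 100000
	 * buffers of 2048 bytes with clustentries = 2 gives
	 * n = (100000 + 1) / 2 = 50000 clusters, objtotal' = 100000, and
	 * a bitmap of (100000 + 31) / 32 = 3125 uint32_t words.
	 */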
	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	for (i = 0; i < p->objtotal;) {
		int lim = i + clustentries;
		char *clust;

		clust = contigmalloc(clustsize, M_NETMAP, M_WAITOK | M_ZERO,
		    0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, name);
			lim = i / 2;
			for (; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
				if (i % clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						p->_clustsize, M_NETMAP);
			}
			p->objtotal = i;
			p->objfree = p->objtotal - 2;
			p->_numclusters = i / clustentries;
			p->_memtotal = p->_numclusters * p->_clustsize;
			break;
		}
		for (; i < lim; i++, clust += objsize) {
			p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
	D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
	    p->_numclusters, p->_clustsize >> 10,
	    p->_memtotal >> 10, name);

	return p;

clean:
	netmap_destroy_obj_allocator(p);
	return NULL;
}
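/*
 * Usage sketch, mirroring netmap_memory_init() below: pools are
 * created once at module load and destroyed symmetrically:
 *
 *	struct netmap_obj_pool *p;
 *
 *	p = netmap_new_obj_allocator("netmap_if",
 *	    NETMAP_IF_MAX_NUM, NETMAP_IF_MAX_SIZE);
 *	if (p == NULL)
 *		return ENOMEM;
 *	...
 *	netmap_destroy_obj_allocator(p);
 */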
static int
netmap_memory_init(void)
{
	struct netmap_obj_pool *p;

	nm_mem = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
	    M_WAITOK | M_ZERO);
	if (nm_mem == NULL)
		goto clean;

	p = netmap_new_obj_allocator("netmap_if",
	    NETMAP_IF_MAX_NUM, NETMAP_IF_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_if_pool = p;

	p = netmap_new_obj_allocator("netmap_ring",
	    NETMAP_RING_MAX_NUM, NETMAP_RING_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_ring_pool = p;

	p = netmap_new_obj_allocator("netmap_buf",
	    NETMAP_BUF_MAX_NUM, NETMAP_BUF_SIZE);
	if (p == NULL)
		goto clean;
	netmap_total_buffers = p->objtotal;
	netmap_buffer_lut = p->lut;
	nm_mem->nm_buf_pool = p;
	netmap_buffer_base = p->lut[0].vaddr;

	mtx_init(&nm_mem->nm_mtx, "netmap memory allocator lock", NULL,
	    MTX_DEF);
	nm_mem->nm_totalsize =
	    nm_mem->nm_if_pool->_memtotal +
	    nm_mem->nm_ring_pool->_memtotal +
	    nm_mem->nm_buf_pool->_memtotal;

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nm_mem->nm_if_pool->_memtotal >> 10,
	    nm_mem->nm_ring_pool->_memtotal >> 10,
	    nm_mem->nm_buf_pool->_memtotal >> 20);
	return 0;

clean:
	if (nm_mem) {
		netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
		netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
		free(nm_mem, M_NETMAP);
	}
	return ENOMEM;
}


static void
netmap_memory_fini(void)
{
	if (!nm_mem)
		return;
	netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_buf_pool);
	mtx_destroy(&nm_mem->nm_mtx);
	free(nm_mem, M_NETMAP);
}
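/*
 * For reference, the shared descriptor built by netmap_if_new() below
 * is struct netmap_if from net/netmap.h, followed inline by one
 * relative offset per ring; schematically (field list abridged):
 *
 *	struct netmap_if {
 *		char		ni_name[IFNAMSIZ];
 *		const u_int	ni_rx_rings, ni_tx_rings;
 *		const ssize_t	ring_ofs[0];	// ntx + nrx entries
 *	};
 */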
static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc;
	u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	struct netmap_kring *kring;

	NMA_LOCK();
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL) {
		NMA_UNLOCK();
		return NULL;
	}

	/* initialize base fields -- override const */
	*(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1) { /* already setup, we are done */
		NMA_UNLOCK();
		goto final;
	}

	/*
	 * First instance, allocate netmap rings and buffers for this card.
	 * The rings are contiguous, but have variable size.
	 */
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
		    ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		/* distance from the ring to the start of the buffer pool */
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem->nm_if_pool->_memtotal +
			nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for txring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}
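	/*
	 * Note on buf_ofs, set above: it is the distance from the ring
	 * to the start of the buffer region, so userspace (e.g. the
	 * NETMAP_BUF() macro in net/netmap_user.h) can reach buffer
	 * idx as
	 *
	 *	buf = (char *)ring + ring->buf_ofs + idx * ring->nr_buf_size;
	 */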
	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
		    ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p", i, ring);

		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem->nm_if_pool->_memtotal +
			nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for rxring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}
	NMA_UNLOCK();
#ifdef linux
	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->tx_si);
	init_waitqueue_head(&na->rx_si);
#endif
final:
	/*
	 * fill the ring offsets for the rx and tx rings. Each entry
	 * contains the offset between the ring and nifp, so the
	 * information is usable in userspace to reach the ring
	 * from the nifp.
	 */
	base = netmap_if_offset(nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
		    netmap_ring_offset(na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
		    netmap_ring_offset(na->rx_rings[i].ring) - base;
	}
	return (nifp);
cleanup:
	// XXX missing
	NMA_UNLOCK();
	return NULL;
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	int i;

	for (i = 0; i < na->num_tx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
		    na->tx_rings[i].ring);
	for (i = 0; i < na->num_rx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
		    na->rx_rings[i].ring);
}
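/*
 * For completeness, the userspace counterpart of the ring_ofs[]
 * entries filled in above: the lookup macro in net/netmap_user.h
 * is essentially
 *
 *	#define NETMAP_TXRING(nifp, index)			\
 *		((struct netmap_ring *)((char *)(nifp) +	\
 *		    (nifp)->ring_ofs[index]))
 */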