1ccdc3305SLuigi Rizzo /* 217885a7bSLuigi Rizzo * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved. 3ccdc3305SLuigi Rizzo * 4ccdc3305SLuigi Rizzo * Redistribution and use in source and binary forms, with or without 5ccdc3305SLuigi Rizzo * modification, are permitted provided that the following conditions 6ccdc3305SLuigi Rizzo * are met: 7ccdc3305SLuigi Rizzo * 1. Redistributions of source code must retain the above copyright 8ccdc3305SLuigi Rizzo * notice, this list of conditions and the following disclaimer. 9ccdc3305SLuigi Rizzo * 2. Redistributions in binary form must reproduce the above copyright 10ccdc3305SLuigi Rizzo * notice, this list of conditions and the following disclaimer in the 11ccdc3305SLuigi Rizzo * documentation and/or other materials provided with the distribution. 12ccdc3305SLuigi Rizzo * 13ccdc3305SLuigi Rizzo * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14ccdc3305SLuigi Rizzo * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15ccdc3305SLuigi Rizzo * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16ccdc3305SLuigi Rizzo * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17ccdc3305SLuigi Rizzo * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18ccdc3305SLuigi Rizzo * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19ccdc3305SLuigi Rizzo * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20ccdc3305SLuigi Rizzo * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21ccdc3305SLuigi Rizzo * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22ccdc3305SLuigi Rizzo * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23ccdc3305SLuigi Rizzo * SUCH DAMAGE. 
24ccdc3305SLuigi Rizzo */ 25ccdc3305SLuigi Rizzo 268241616dSLuigi Rizzo #ifdef linux 27ce3ee1e7SLuigi Rizzo #include "bsd_glue.h" 288241616dSLuigi Rizzo #endif /* linux */ 298241616dSLuigi Rizzo 30ce3ee1e7SLuigi Rizzo #ifdef __APPLE__ 31ce3ee1e7SLuigi Rizzo #include "osx_glue.h" 32ce3ee1e7SLuigi Rizzo #endif /* __APPLE__ */ 338241616dSLuigi Rizzo 34ce3ee1e7SLuigi Rizzo #ifdef __FreeBSD__ 35ce3ee1e7SLuigi Rizzo #include <sys/cdefs.h> /* prerequisite */ 36ce3ee1e7SLuigi Rizzo __FBSDID("$FreeBSD$"); 378241616dSLuigi Rizzo 38ce3ee1e7SLuigi Rizzo #include <sys/types.h> 39ce3ee1e7SLuigi Rizzo #include <sys/malloc.h> 40ce3ee1e7SLuigi Rizzo #include <sys/proc.h> 41ce3ee1e7SLuigi Rizzo #include <vm/vm.h> /* vtophys */ 42ce3ee1e7SLuigi Rizzo #include <vm/pmap.h> /* vtophys */ 43ce3ee1e7SLuigi Rizzo #include <sys/socket.h> /* sockaddrs */ 44ce3ee1e7SLuigi Rizzo #include <sys/selinfo.h> 45ce3ee1e7SLuigi Rizzo #include <sys/sysctl.h> 46ce3ee1e7SLuigi Rizzo #include <net/if.h> 47ce3ee1e7SLuigi Rizzo #include <net/if_var.h> 48ce3ee1e7SLuigi Rizzo #include <net/vnet.h> 49ce3ee1e7SLuigi Rizzo #include <machine/bus.h> /* bus_dmamap_* */ 50ce3ee1e7SLuigi Rizzo 51ce3ee1e7SLuigi Rizzo #endif /* __FreeBSD__ */ 52ce3ee1e7SLuigi Rizzo 53ce3ee1e7SLuigi Rizzo #include <net/netmap.h> 54ce3ee1e7SLuigi Rizzo #include <dev/netmap/netmap_kern.h> 55ce3ee1e7SLuigi Rizzo #include "netmap_mem2.h" 56ce3ee1e7SLuigi Rizzo 574bf50f18SLuigi Rizzo #define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ 584bf50f18SLuigi Rizzo 594bf50f18SLuigi Rizzo #define NETMAP_POOL_MAX_NAMSZ 32 604bf50f18SLuigi Rizzo 614bf50f18SLuigi Rizzo 624bf50f18SLuigi Rizzo enum { 634bf50f18SLuigi Rizzo NETMAP_IF_POOL = 0, 644bf50f18SLuigi Rizzo NETMAP_RING_POOL, 654bf50f18SLuigi Rizzo NETMAP_BUF_POOL, 664bf50f18SLuigi Rizzo NETMAP_POOLS_NR 674bf50f18SLuigi Rizzo }; 684bf50f18SLuigi Rizzo 694bf50f18SLuigi Rizzo 704bf50f18SLuigi Rizzo struct netmap_obj_params { 714bf50f18SLuigi Rizzo u_int size; 724bf50f18SLuigi Rizzo u_int num; 
734bf50f18SLuigi Rizzo }; 74*847bf383SLuigi Rizzo 754bf50f18SLuigi Rizzo struct netmap_obj_pool { 764bf50f18SLuigi Rizzo char name[NETMAP_POOL_MAX_NAMSZ]; /* name of the allocator */ 774bf50f18SLuigi Rizzo 784bf50f18SLuigi Rizzo /* ---------------------------------------------------*/ 794bf50f18SLuigi Rizzo /* these are only meaningful if the pool is finalized */ 804bf50f18SLuigi Rizzo /* (see 'finalized' field in netmap_mem_d) */ 814bf50f18SLuigi Rizzo u_int objtotal; /* actual total number of objects. */ 824bf50f18SLuigi Rizzo u_int memtotal; /* actual total memory space */ 834bf50f18SLuigi Rizzo u_int numclusters; /* actual number of clusters */ 844bf50f18SLuigi Rizzo 854bf50f18SLuigi Rizzo u_int objfree; /* number of free objects. */ 864bf50f18SLuigi Rizzo 874bf50f18SLuigi Rizzo struct lut_entry *lut; /* virt,phys addresses, objtotal entries */ 884bf50f18SLuigi Rizzo uint32_t *bitmap; /* one bit per buffer, 1 means free */ 894bf50f18SLuigi Rizzo uint32_t bitmap_slots; /* number of uint32 entries in bitmap */ 904bf50f18SLuigi Rizzo /* ---------------------------------------------------*/ 914bf50f18SLuigi Rizzo 924bf50f18SLuigi Rizzo /* limits */ 934bf50f18SLuigi Rizzo u_int objminsize; /* minimum object size */ 944bf50f18SLuigi Rizzo u_int objmaxsize; /* maximum object size */ 954bf50f18SLuigi Rizzo u_int nummin; /* minimum number of objects */ 964bf50f18SLuigi Rizzo u_int nummax; /* maximum number of objects */ 974bf50f18SLuigi Rizzo 984bf50f18SLuigi Rizzo /* these are changed only by config */ 994bf50f18SLuigi Rizzo u_int _objtotal; /* total number of objects */ 1004bf50f18SLuigi Rizzo u_int _objsize; /* object size */ 1014bf50f18SLuigi Rizzo u_int _clustsize; /* cluster size */ 1024bf50f18SLuigi Rizzo u_int _clustentries; /* objects per cluster */ 1034bf50f18SLuigi Rizzo u_int _numclusters; /* number of clusters */ 1044bf50f18SLuigi Rizzo 1054bf50f18SLuigi Rizzo /* requested values */ 1064bf50f18SLuigi Rizzo u_int r_objtotal; 1074bf50f18SLuigi Rizzo u_int 
r_objsize; 1084bf50f18SLuigi Rizzo }; 1094bf50f18SLuigi Rizzo 110*847bf383SLuigi Rizzo #define NMA_LOCK_T NM_MTX_T 1114bf50f18SLuigi Rizzo 112*847bf383SLuigi Rizzo 113*847bf383SLuigi Rizzo struct netmap_mem_ops { 114*847bf383SLuigi Rizzo void (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*); 115*847bf383SLuigi Rizzo int (*nmd_get_info)(struct netmap_mem_d *, u_int *size, 116*847bf383SLuigi Rizzo u_int *memflags, uint16_t *id); 117*847bf383SLuigi Rizzo 118*847bf383SLuigi Rizzo vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t); 119*847bf383SLuigi Rizzo int (*nmd_config)(struct netmap_mem_d *); 120*847bf383SLuigi Rizzo int (*nmd_finalize)(struct netmap_mem_d *); 121*847bf383SLuigi Rizzo void (*nmd_deref)(struct netmap_mem_d *); 122*847bf383SLuigi Rizzo ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr); 123*847bf383SLuigi Rizzo void (*nmd_delete)(struct netmap_mem_d *); 124*847bf383SLuigi Rizzo 125*847bf383SLuigi Rizzo struct netmap_if * (*nmd_if_new)(struct netmap_adapter *); 126*847bf383SLuigi Rizzo void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *); 127*847bf383SLuigi Rizzo int (*nmd_rings_create)(struct netmap_adapter *); 128*847bf383SLuigi Rizzo void (*nmd_rings_delete)(struct netmap_adapter *); 129*847bf383SLuigi Rizzo }; 1304bf50f18SLuigi Rizzo 1314bf50f18SLuigi Rizzo typedef uint16_t nm_memid_t; 1324bf50f18SLuigi Rizzo 1334bf50f18SLuigi Rizzo struct netmap_mem_d { 1344bf50f18SLuigi Rizzo NMA_LOCK_T nm_mtx; /* protect the allocator */ 1354bf50f18SLuigi Rizzo u_int nm_totalsize; /* shorthand */ 1364bf50f18SLuigi Rizzo 1374bf50f18SLuigi Rizzo u_int flags; 1384bf50f18SLuigi Rizzo #define NETMAP_MEM_FINALIZED 0x1 /* preallocation done */ 1394bf50f18SLuigi Rizzo int lasterr; /* last error for curr config */ 140*847bf383SLuigi Rizzo int active; /* active users */ 141*847bf383SLuigi Rizzo int refcount; 1424bf50f18SLuigi Rizzo /* the three allocators */ 1434bf50f18SLuigi Rizzo struct netmap_obj_pool 
pools[NETMAP_POOLS_NR]; 1444bf50f18SLuigi Rizzo 1454bf50f18SLuigi Rizzo nm_memid_t nm_id; /* allocator identifier */ 1464bf50f18SLuigi Rizzo int nm_grp; /* iommu groupd id */ 1474bf50f18SLuigi Rizzo 1484bf50f18SLuigi Rizzo /* list of all existing allocators, sorted by nm_id */ 1494bf50f18SLuigi Rizzo struct netmap_mem_d *prev, *next; 150*847bf383SLuigi Rizzo 151*847bf383SLuigi Rizzo struct netmap_mem_ops *ops; 1524bf50f18SLuigi Rizzo }; 1534bf50f18SLuigi Rizzo 154*847bf383SLuigi Rizzo #define NMD_DEFCB(t0, name) \ 155*847bf383SLuigi Rizzo t0 \ 156*847bf383SLuigi Rizzo netmap_mem_##name(struct netmap_mem_d *nmd) \ 157*847bf383SLuigi Rizzo { \ 158*847bf383SLuigi Rizzo return nmd->ops->nmd_##name(nmd); \ 159*847bf383SLuigi Rizzo } 160*847bf383SLuigi Rizzo 161*847bf383SLuigi Rizzo #define NMD_DEFCB1(t0, name, t1) \ 162*847bf383SLuigi Rizzo t0 \ 163*847bf383SLuigi Rizzo netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1) \ 164*847bf383SLuigi Rizzo { \ 165*847bf383SLuigi Rizzo return nmd->ops->nmd_##name(nmd, a1); \ 166*847bf383SLuigi Rizzo } 167*847bf383SLuigi Rizzo 168*847bf383SLuigi Rizzo #define NMD_DEFCB3(t0, name, t1, t2, t3) \ 169*847bf383SLuigi Rizzo t0 \ 170*847bf383SLuigi Rizzo netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1, t2 a2, t3 a3) \ 171*847bf383SLuigi Rizzo { \ 172*847bf383SLuigi Rizzo return nmd->ops->nmd_##name(nmd, a1, a2, a3); \ 173*847bf383SLuigi Rizzo } 174*847bf383SLuigi Rizzo 175*847bf383SLuigi Rizzo #define NMD_DEFNACB(t0, name) \ 176*847bf383SLuigi Rizzo t0 \ 177*847bf383SLuigi Rizzo netmap_mem_##name(struct netmap_adapter *na) \ 178*847bf383SLuigi Rizzo { \ 179*847bf383SLuigi Rizzo return na->nm_mem->ops->nmd_##name(na); \ 180*847bf383SLuigi Rizzo } 181*847bf383SLuigi Rizzo 182*847bf383SLuigi Rizzo #define NMD_DEFNACB1(t0, name, t1) \ 183*847bf383SLuigi Rizzo t0 \ 184*847bf383SLuigi Rizzo netmap_mem_##name(struct netmap_adapter *na, t1 a1) \ 185*847bf383SLuigi Rizzo { \ 186*847bf383SLuigi Rizzo return na->nm_mem->ops->nmd_##name(na, a1); \ 
187*847bf383SLuigi Rizzo } 188*847bf383SLuigi Rizzo 189*847bf383SLuigi Rizzo NMD_DEFCB1(void, get_lut, struct netmap_lut *); 190*847bf383SLuigi Rizzo NMD_DEFCB3(int, get_info, u_int *, u_int *, uint16_t *); 191*847bf383SLuigi Rizzo NMD_DEFCB1(vm_paddr_t, ofstophys, vm_ooffset_t); 192*847bf383SLuigi Rizzo static int netmap_mem_config(struct netmap_mem_d *); 193*847bf383SLuigi Rizzo NMD_DEFCB(int, config); 194*847bf383SLuigi Rizzo NMD_DEFCB1(ssize_t, if_offset, const void *); 195*847bf383SLuigi Rizzo NMD_DEFCB(void, delete); 196*847bf383SLuigi Rizzo 197*847bf383SLuigi Rizzo NMD_DEFNACB(struct netmap_if *, if_new); 198*847bf383SLuigi Rizzo NMD_DEFNACB1(void, if_delete, struct netmap_if *); 199*847bf383SLuigi Rizzo NMD_DEFNACB(int, rings_create); 200*847bf383SLuigi Rizzo NMD_DEFNACB(void, rings_delete); 201*847bf383SLuigi Rizzo 202*847bf383SLuigi Rizzo static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *); 203*847bf383SLuigi Rizzo static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *); 204*847bf383SLuigi Rizzo static int nm_mem_assign_group(struct netmap_mem_d *, struct device *); 205*847bf383SLuigi Rizzo 206*847bf383SLuigi Rizzo #define NMA_LOCK_INIT(n) NM_MTX_INIT((n)->nm_mtx) 207*847bf383SLuigi Rizzo #define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx) 208*847bf383SLuigi Rizzo #define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx) 209*847bf383SLuigi Rizzo #define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx) 210*847bf383SLuigi Rizzo 211*847bf383SLuigi Rizzo #ifdef NM_DEBUG_MEM_PUTGET 212*847bf383SLuigi Rizzo #define NM_DBG_REFC(nmd, func, line) \ 213*847bf383SLuigi Rizzo printf("%s:%d mem[%d] -> %d\n", func, line, (nmd)->nm_id, (nmd)->refcount); 214*847bf383SLuigi Rizzo #else 215*847bf383SLuigi Rizzo #define NM_DBG_REFC(nmd, func, line) 216*847bf383SLuigi Rizzo #endif 217*847bf383SLuigi Rizzo 218*847bf383SLuigi Rizzo #ifdef NM_DEBUG_MEM_PUTGET 219*847bf383SLuigi Rizzo void __netmap_mem_get(struct netmap_mem_d *nmd, const char 
*func, int line) 220*847bf383SLuigi Rizzo #else 221*847bf383SLuigi Rizzo void netmap_mem_get(struct netmap_mem_d *nmd) 222*847bf383SLuigi Rizzo #endif 223*847bf383SLuigi Rizzo { 224*847bf383SLuigi Rizzo NMA_LOCK(nmd); 225*847bf383SLuigi Rizzo nmd->refcount++; 226*847bf383SLuigi Rizzo NM_DBG_REFC(nmd, func, line); 227*847bf383SLuigi Rizzo NMA_UNLOCK(nmd); 228*847bf383SLuigi Rizzo } 229*847bf383SLuigi Rizzo 230*847bf383SLuigi Rizzo #ifdef NM_DEBUG_MEM_PUTGET 231*847bf383SLuigi Rizzo void __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line) 232*847bf383SLuigi Rizzo #else 233*847bf383SLuigi Rizzo void netmap_mem_put(struct netmap_mem_d *nmd) 234*847bf383SLuigi Rizzo #endif 235*847bf383SLuigi Rizzo { 236*847bf383SLuigi Rizzo int last; 237*847bf383SLuigi Rizzo NMA_LOCK(nmd); 238*847bf383SLuigi Rizzo last = (--nmd->refcount == 0); 239*847bf383SLuigi Rizzo NM_DBG_REFC(nmd, func, line); 240*847bf383SLuigi Rizzo NMA_UNLOCK(nmd); 241*847bf383SLuigi Rizzo if (last) 242*847bf383SLuigi Rizzo netmap_mem_delete(nmd); 243*847bf383SLuigi Rizzo } 244*847bf383SLuigi Rizzo 245*847bf383SLuigi Rizzo int 246*847bf383SLuigi Rizzo netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na) 247*847bf383SLuigi Rizzo { 248*847bf383SLuigi Rizzo if (nm_mem_assign_group(nmd, na->pdev) < 0) { 249*847bf383SLuigi Rizzo return ENOMEM; 250*847bf383SLuigi Rizzo } else { 251*847bf383SLuigi Rizzo nmd->ops->nmd_finalize(nmd); 252*847bf383SLuigi Rizzo } 253*847bf383SLuigi Rizzo 254*847bf383SLuigi Rizzo if (!nmd->lasterr && na->pdev) 255*847bf383SLuigi Rizzo netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na); 256*847bf383SLuigi Rizzo 257*847bf383SLuigi Rizzo return nmd->lasterr; 258*847bf383SLuigi Rizzo } 259*847bf383SLuigi Rizzo 260*847bf383SLuigi Rizzo void 261*847bf383SLuigi Rizzo netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na) 262*847bf383SLuigi Rizzo { 263*847bf383SLuigi Rizzo NMA_LOCK(nmd); 264*847bf383SLuigi Rizzo 
netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na); 265*847bf383SLuigi Rizzo NMA_UNLOCK(nmd); 266*847bf383SLuigi Rizzo return nmd->ops->nmd_deref(nmd); 267*847bf383SLuigi Rizzo } 268*847bf383SLuigi Rizzo 269*847bf383SLuigi Rizzo 2704bf50f18SLuigi Rizzo /* accessor functions */ 271*847bf383SLuigi Rizzo static void 272*847bf383SLuigi Rizzo netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) 2734bf50f18SLuigi Rizzo { 274*847bf383SLuigi Rizzo lut->lut = nmd->pools[NETMAP_BUF_POOL].lut; 275*847bf383SLuigi Rizzo lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; 276*847bf383SLuigi Rizzo lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; 2774bf50f18SLuigi Rizzo } 2784bf50f18SLuigi Rizzo 2798241616dSLuigi Rizzo struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = { 2808241616dSLuigi Rizzo [NETMAP_IF_POOL] = { 2818241616dSLuigi Rizzo .size = 1024, 2828241616dSLuigi Rizzo .num = 100, 2838241616dSLuigi Rizzo }, 2848241616dSLuigi Rizzo [NETMAP_RING_POOL] = { 2858241616dSLuigi Rizzo .size = 9*PAGE_SIZE, 2868241616dSLuigi Rizzo .num = 200, 2878241616dSLuigi Rizzo }, 2888241616dSLuigi Rizzo [NETMAP_BUF_POOL] = { 2898241616dSLuigi Rizzo .size = 2048, 2908241616dSLuigi Rizzo .num = NETMAP_BUF_MAX_NUM, 2918241616dSLuigi Rizzo }, 2928241616dSLuigi Rizzo }; 2938241616dSLuigi Rizzo 294f0ea3689SLuigi Rizzo struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = { 295f0ea3689SLuigi Rizzo [NETMAP_IF_POOL] = { 296f0ea3689SLuigi Rizzo .size = 1024, 297f0ea3689SLuigi Rizzo .num = 1, 298f0ea3689SLuigi Rizzo }, 299f0ea3689SLuigi Rizzo [NETMAP_RING_POOL] = { 300f0ea3689SLuigi Rizzo .size = 5*PAGE_SIZE, 301f0ea3689SLuigi Rizzo .num = 4, 302f0ea3689SLuigi Rizzo }, 303f0ea3689SLuigi Rizzo [NETMAP_BUF_POOL] = { 304f0ea3689SLuigi Rizzo .size = 2048, 305f0ea3689SLuigi Rizzo .num = 4098, 306f0ea3689SLuigi Rizzo }, 307f0ea3689SLuigi Rizzo }; 308f0ea3689SLuigi Rizzo 309ccdc3305SLuigi Rizzo 3102579e2d7SLuigi Rizzo /* 3112579e2d7SLuigi Rizzo * nm_mem is the memory 
allocator used for all physical interfaces 3122579e2d7SLuigi Rizzo * running in netmap mode. 3132579e2d7SLuigi Rizzo * Virtual (VALE) ports will have each its own allocator. 3142579e2d7SLuigi Rizzo */ 315*847bf383SLuigi Rizzo extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */ 316ce3ee1e7SLuigi Rizzo struct netmap_mem_d nm_mem = { /* Our memory allocator. */ 3178241616dSLuigi Rizzo .pools = { 3188241616dSLuigi Rizzo [NETMAP_IF_POOL] = { 3198241616dSLuigi Rizzo .name = "netmap_if", 3208241616dSLuigi Rizzo .objminsize = sizeof(struct netmap_if), 3218241616dSLuigi Rizzo .objmaxsize = 4096, 3228241616dSLuigi Rizzo .nummin = 10, /* don't be stingy */ 3238241616dSLuigi Rizzo .nummax = 10000, /* XXX very large */ 3248241616dSLuigi Rizzo }, 3258241616dSLuigi Rizzo [NETMAP_RING_POOL] = { 3268241616dSLuigi Rizzo .name = "netmap_ring", 3278241616dSLuigi Rizzo .objminsize = sizeof(struct netmap_ring), 3288241616dSLuigi Rizzo .objmaxsize = 32*PAGE_SIZE, 3298241616dSLuigi Rizzo .nummin = 2, 3308241616dSLuigi Rizzo .nummax = 1024, 3318241616dSLuigi Rizzo }, 3328241616dSLuigi Rizzo [NETMAP_BUF_POOL] = { 3338241616dSLuigi Rizzo .name = "netmap_buf", 3348241616dSLuigi Rizzo .objminsize = 64, 3358241616dSLuigi Rizzo .objmaxsize = 65536, 3368241616dSLuigi Rizzo .nummin = 4, 3378241616dSLuigi Rizzo .nummax = 1000000, /* one million! 
*/ 3388241616dSLuigi Rizzo }, 3398241616dSLuigi Rizzo }, 340f0ea3689SLuigi Rizzo 341f0ea3689SLuigi Rizzo .nm_id = 1, 3424bf50f18SLuigi Rizzo .nm_grp = -1, 343f0ea3689SLuigi Rizzo 344f0ea3689SLuigi Rizzo .prev = &nm_mem, 345f0ea3689SLuigi Rizzo .next = &nm_mem, 346*847bf383SLuigi Rizzo 347*847bf383SLuigi Rizzo .ops = &netmap_mem_global_ops 348ccdc3305SLuigi Rizzo }; 349ccdc3305SLuigi Rizzo 350ce3ee1e7SLuigi Rizzo 351f0ea3689SLuigi Rizzo struct netmap_mem_d *netmap_last_mem_d = &nm_mem; 352f0ea3689SLuigi Rizzo 353ce3ee1e7SLuigi Rizzo /* blueprint for the private memory allocators */ 354*847bf383SLuigi Rizzo extern struct netmap_mem_ops netmap_mem_private_ops; /* forward */ 355ce3ee1e7SLuigi Rizzo const struct netmap_mem_d nm_blueprint = { 356ce3ee1e7SLuigi Rizzo .pools = { 357ce3ee1e7SLuigi Rizzo [NETMAP_IF_POOL] = { 358ce3ee1e7SLuigi Rizzo .name = "%s_if", 359ce3ee1e7SLuigi Rizzo .objminsize = sizeof(struct netmap_if), 360ce3ee1e7SLuigi Rizzo .objmaxsize = 4096, 361ce3ee1e7SLuigi Rizzo .nummin = 1, 362f0ea3689SLuigi Rizzo .nummax = 100, 363ce3ee1e7SLuigi Rizzo }, 364ce3ee1e7SLuigi Rizzo [NETMAP_RING_POOL] = { 365ce3ee1e7SLuigi Rizzo .name = "%s_ring", 366ce3ee1e7SLuigi Rizzo .objminsize = sizeof(struct netmap_ring), 367ce3ee1e7SLuigi Rizzo .objmaxsize = 32*PAGE_SIZE, 368ce3ee1e7SLuigi Rizzo .nummin = 2, 369ce3ee1e7SLuigi Rizzo .nummax = 1024, 370ce3ee1e7SLuigi Rizzo }, 371ce3ee1e7SLuigi Rizzo [NETMAP_BUF_POOL] = { 372ce3ee1e7SLuigi Rizzo .name = "%s_buf", 373ce3ee1e7SLuigi Rizzo .objminsize = 64, 374ce3ee1e7SLuigi Rizzo .objmaxsize = 65536, 375ce3ee1e7SLuigi Rizzo .nummin = 4, 376ce3ee1e7SLuigi Rizzo .nummax = 1000000, /* one million! 
*/ 377ce3ee1e7SLuigi Rizzo }, 378ce3ee1e7SLuigi Rizzo }, 379ce3ee1e7SLuigi Rizzo 380ce3ee1e7SLuigi Rizzo .flags = NETMAP_MEM_PRIVATE, 381*847bf383SLuigi Rizzo 382*847bf383SLuigi Rizzo .ops = &netmap_mem_private_ops 383ce3ee1e7SLuigi Rizzo }; 384ce3ee1e7SLuigi Rizzo 3858241616dSLuigi Rizzo /* memory allocator related sysctls */ 3868241616dSLuigi Rizzo 3878241616dSLuigi Rizzo #define STRINGIFY(x) #x 3888241616dSLuigi Rizzo 389ce3ee1e7SLuigi Rizzo 3908241616dSLuigi Rizzo #define DECLARE_SYSCTLS(id, name) \ 3918241616dSLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ 3928241616dSLuigi Rizzo CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ 3938241616dSLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ 3948241616dSLuigi Rizzo CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ 3958241616dSLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ 3968241616dSLuigi Rizzo CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ 3978241616dSLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ 398f0ea3689SLuigi Rizzo CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \ 399f0ea3689SLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \ 400f0ea3689SLuigi Rizzo CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \ 401f0ea3689SLuigi Rizzo "Default size of private netmap " STRINGIFY(name) "s"); \ 402f0ea3689SLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \ 403f0ea3689SLuigi Rizzo CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \ 404f0ea3689SLuigi Rizzo "Default number of private netmap " STRINGIFY(name) "s") 4058241616dSLuigi Rizzo 406ce3ee1e7SLuigi Rizzo SYSCTL_DECL(_dev_netmap); 4078241616dSLuigi Rizzo DECLARE_SYSCTLS(NETMAP_IF_POOL, if); 4088241616dSLuigi Rizzo DECLARE_SYSCTLS(NETMAP_RING_POOL, ring); 4098241616dSLuigi Rizzo 
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); 410ccdc3305SLuigi Rizzo 411f0ea3689SLuigi Rizzo static int 412f0ea3689SLuigi Rizzo nm_mem_assign_id(struct netmap_mem_d *nmd) 413f0ea3689SLuigi Rizzo { 414f0ea3689SLuigi Rizzo nm_memid_t id; 415f0ea3689SLuigi Rizzo struct netmap_mem_d *scan = netmap_last_mem_d; 416f0ea3689SLuigi Rizzo int error = ENOMEM; 417f0ea3689SLuigi Rizzo 418f0ea3689SLuigi Rizzo NMA_LOCK(&nm_mem); 419f0ea3689SLuigi Rizzo 420f0ea3689SLuigi Rizzo do { 421f0ea3689SLuigi Rizzo /* we rely on unsigned wrap around */ 422f0ea3689SLuigi Rizzo id = scan->nm_id + 1; 423f0ea3689SLuigi Rizzo if (id == 0) /* reserve 0 as error value */ 424f0ea3689SLuigi Rizzo id = 1; 425f0ea3689SLuigi Rizzo scan = scan->next; 426f0ea3689SLuigi Rizzo if (id != scan->nm_id) { 427f0ea3689SLuigi Rizzo nmd->nm_id = id; 428f0ea3689SLuigi Rizzo nmd->prev = scan->prev; 429f0ea3689SLuigi Rizzo nmd->next = scan; 430f0ea3689SLuigi Rizzo scan->prev->next = nmd; 431f0ea3689SLuigi Rizzo scan->prev = nmd; 432f0ea3689SLuigi Rizzo netmap_last_mem_d = nmd; 433f0ea3689SLuigi Rizzo error = 0; 434f0ea3689SLuigi Rizzo break; 435f0ea3689SLuigi Rizzo } 436f0ea3689SLuigi Rizzo } while (scan != netmap_last_mem_d); 437f0ea3689SLuigi Rizzo 438f0ea3689SLuigi Rizzo NMA_UNLOCK(&nm_mem); 439f0ea3689SLuigi Rizzo return error; 440f0ea3689SLuigi Rizzo } 441f0ea3689SLuigi Rizzo 442f0ea3689SLuigi Rizzo static void 443f0ea3689SLuigi Rizzo nm_mem_release_id(struct netmap_mem_d *nmd) 444f0ea3689SLuigi Rizzo { 445f0ea3689SLuigi Rizzo NMA_LOCK(&nm_mem); 446f0ea3689SLuigi Rizzo 447f0ea3689SLuigi Rizzo nmd->prev->next = nmd->next; 448f0ea3689SLuigi Rizzo nmd->next->prev = nmd->prev; 449f0ea3689SLuigi Rizzo 450f0ea3689SLuigi Rizzo if (netmap_last_mem_d == nmd) 451f0ea3689SLuigi Rizzo netmap_last_mem_d = nmd->prev; 452f0ea3689SLuigi Rizzo 453f0ea3689SLuigi Rizzo nmd->prev = nmd->next = NULL; 454f0ea3689SLuigi Rizzo 455f0ea3689SLuigi Rizzo NMA_UNLOCK(&nm_mem); 456f0ea3689SLuigi Rizzo } 457f0ea3689SLuigi Rizzo 4584bf50f18SLuigi 
Rizzo static int 4594bf50f18SLuigi Rizzo nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev) 4604bf50f18SLuigi Rizzo { 4614bf50f18SLuigi Rizzo int err = 0, id; 4624bf50f18SLuigi Rizzo id = nm_iommu_group_id(dev); 4634bf50f18SLuigi Rizzo if (netmap_verbose) 4644bf50f18SLuigi Rizzo D("iommu_group %d", id); 4654bf50f18SLuigi Rizzo 4664bf50f18SLuigi Rizzo NMA_LOCK(nmd); 4674bf50f18SLuigi Rizzo 4684bf50f18SLuigi Rizzo if (nmd->nm_grp < 0) 4694bf50f18SLuigi Rizzo nmd->nm_grp = id; 4704bf50f18SLuigi Rizzo 4714bf50f18SLuigi Rizzo if (nmd->nm_grp != id) 4724bf50f18SLuigi Rizzo nmd->lasterr = err = ENOMEM; 4734bf50f18SLuigi Rizzo 4744bf50f18SLuigi Rizzo NMA_UNLOCK(nmd); 4754bf50f18SLuigi Rizzo return err; 4764bf50f18SLuigi Rizzo } 477f0ea3689SLuigi Rizzo 478ccdc3305SLuigi Rizzo /* 4792579e2d7SLuigi Rizzo * First, find the allocator that contains the requested offset, 4802579e2d7SLuigi Rizzo * then locate the cluster through a lookup table. 481ccdc3305SLuigi Rizzo */ 482*847bf383SLuigi Rizzo static vm_paddr_t 483*847bf383SLuigi Rizzo netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset) 484ccdc3305SLuigi Rizzo { 485ccdc3305SLuigi Rizzo int i; 486ce3ee1e7SLuigi Rizzo vm_ooffset_t o = offset; 487ce3ee1e7SLuigi Rizzo vm_paddr_t pa; 488ce3ee1e7SLuigi Rizzo struct netmap_obj_pool *p; 489ccdc3305SLuigi Rizzo 490ce3ee1e7SLuigi Rizzo NMA_LOCK(nmd); 491ce3ee1e7SLuigi Rizzo p = nmd->pools; 492ce3ee1e7SLuigi Rizzo 493ce3ee1e7SLuigi Rizzo for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { 494ce3ee1e7SLuigi Rizzo if (offset >= p[i].memtotal) 495ccdc3305SLuigi Rizzo continue; 4962579e2d7SLuigi Rizzo // now lookup the cluster's address 4974bf50f18SLuigi Rizzo pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) + 4988241616dSLuigi Rizzo offset % p[i]._objsize; 499ce3ee1e7SLuigi Rizzo NMA_UNLOCK(nmd); 500ce3ee1e7SLuigi Rizzo return pa; 501ccdc3305SLuigi Rizzo } 5028241616dSLuigi Rizzo /* this is only in case of errors */ 503b1123b01SLuigi Rizzo 
D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, 504ce3ee1e7SLuigi Rizzo p[NETMAP_IF_POOL].memtotal, 505ce3ee1e7SLuigi Rizzo p[NETMAP_IF_POOL].memtotal 506ce3ee1e7SLuigi Rizzo + p[NETMAP_RING_POOL].memtotal, 507ce3ee1e7SLuigi Rizzo p[NETMAP_IF_POOL].memtotal 508ce3ee1e7SLuigi Rizzo + p[NETMAP_RING_POOL].memtotal 509ce3ee1e7SLuigi Rizzo + p[NETMAP_BUF_POOL].memtotal); 510ce3ee1e7SLuigi Rizzo NMA_UNLOCK(nmd); 511ccdc3305SLuigi Rizzo return 0; // XXX bad address 512ccdc3305SLuigi Rizzo } 513ccdc3305SLuigi Rizzo 514*847bf383SLuigi Rizzo static int 515*847bf383SLuigi Rizzo netmap_mem2_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags, 516f0ea3689SLuigi Rizzo nm_memid_t *id) 517ce3ee1e7SLuigi Rizzo { 518ce3ee1e7SLuigi Rizzo int error = 0; 519ce3ee1e7SLuigi Rizzo NMA_LOCK(nmd); 520*847bf383SLuigi Rizzo error = netmap_mem_config(nmd); 521ce3ee1e7SLuigi Rizzo if (error) 522ce3ee1e7SLuigi Rizzo goto out; 5234bf50f18SLuigi Rizzo if (size) { 524ce3ee1e7SLuigi Rizzo if (nmd->flags & NETMAP_MEM_FINALIZED) { 525ce3ee1e7SLuigi Rizzo *size = nmd->nm_totalsize; 526ce3ee1e7SLuigi Rizzo } else { 527ce3ee1e7SLuigi Rizzo int i; 528ce3ee1e7SLuigi Rizzo *size = 0; 529ce3ee1e7SLuigi Rizzo for (i = 0; i < NETMAP_POOLS_NR; i++) { 530ce3ee1e7SLuigi Rizzo struct netmap_obj_pool *p = nmd->pools + i; 531ce3ee1e7SLuigi Rizzo *size += (p->_numclusters * p->_clustsize); 532ce3ee1e7SLuigi Rizzo } 533ce3ee1e7SLuigi Rizzo } 5344bf50f18SLuigi Rizzo } 5354bf50f18SLuigi Rizzo if (memflags) 536ce3ee1e7SLuigi Rizzo *memflags = nmd->flags; 5374bf50f18SLuigi Rizzo if (id) 538f0ea3689SLuigi Rizzo *id = nmd->nm_id; 539ce3ee1e7SLuigi Rizzo out: 540ce3ee1e7SLuigi Rizzo NMA_UNLOCK(nmd); 541ce3ee1e7SLuigi Rizzo return error; 542ce3ee1e7SLuigi Rizzo } 543ce3ee1e7SLuigi Rizzo 544ccdc3305SLuigi Rizzo /* 545ccdc3305SLuigi Rizzo * we store objects by kernel address, need to find the offset 546ccdc3305SLuigi Rizzo * within the pool to export the value to userspace. 
547ccdc3305SLuigi Rizzo * Algorithm: scan until we find the cluster, then add the 548ccdc3305SLuigi Rizzo * actual offset in the cluster 549ccdc3305SLuigi Rizzo */ 550ce2cb792SLuigi Rizzo static ssize_t 551ccdc3305SLuigi Rizzo netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) 552ccdc3305SLuigi Rizzo { 553ce3ee1e7SLuigi Rizzo int i, k = p->_clustentries, n = p->objtotal; 554ccdc3305SLuigi Rizzo ssize_t ofs = 0; 555ccdc3305SLuigi Rizzo 556ccdc3305SLuigi Rizzo for (i = 0; i < n; i += k, ofs += p->_clustsize) { 557ccdc3305SLuigi Rizzo const char *base = p->lut[i].vaddr; 558ccdc3305SLuigi Rizzo ssize_t relofs = (const char *) vaddr - base; 559ccdc3305SLuigi Rizzo 560aa76317cSLuigi Rizzo if (relofs < 0 || relofs >= p->_clustsize) 561ccdc3305SLuigi Rizzo continue; 562ccdc3305SLuigi Rizzo 563ccdc3305SLuigi Rizzo ofs = ofs + relofs; 564ccdc3305SLuigi Rizzo ND("%s: return offset %d (cluster %d) for pointer %p", 565ccdc3305SLuigi Rizzo p->name, ofs, i, vaddr); 566ccdc3305SLuigi Rizzo return ofs; 567ccdc3305SLuigi Rizzo } 568ccdc3305SLuigi Rizzo D("address %p is not contained inside any cluster (%s)", 569ccdc3305SLuigi Rizzo vaddr, p->name); 570ccdc3305SLuigi Rizzo return 0; /* An error occurred */ 571ccdc3305SLuigi Rizzo } 572ccdc3305SLuigi Rizzo 573ccdc3305SLuigi Rizzo /* Helper functions which convert virtual addresses to offsets */ 574ce3ee1e7SLuigi Rizzo #define netmap_if_offset(n, v) \ 575ce3ee1e7SLuigi Rizzo netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) 576ccdc3305SLuigi Rizzo 577ce3ee1e7SLuigi Rizzo #define netmap_ring_offset(n, v) \ 578ce3ee1e7SLuigi Rizzo ((n)->pools[NETMAP_IF_POOL].memtotal + \ 579ce3ee1e7SLuigi Rizzo netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) 580ccdc3305SLuigi Rizzo 581ce3ee1e7SLuigi Rizzo #define netmap_buf_offset(n, v) \ 582ce3ee1e7SLuigi Rizzo ((n)->pools[NETMAP_IF_POOL].memtotal + \ 583ce3ee1e7SLuigi Rizzo (n)->pools[NETMAP_RING_POOL].memtotal + \ 584ce3ee1e7SLuigi Rizzo 
netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v))) 585ccdc3305SLuigi Rizzo 586ccdc3305SLuigi Rizzo 587*847bf383SLuigi Rizzo static ssize_t 588*847bf383SLuigi Rizzo netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr) 589ce3ee1e7SLuigi Rizzo { 590ce3ee1e7SLuigi Rizzo ssize_t v; 591ce3ee1e7SLuigi Rizzo NMA_LOCK(nmd); 592ce3ee1e7SLuigi Rizzo v = netmap_if_offset(nmd, addr); 593ce3ee1e7SLuigi Rizzo NMA_UNLOCK(nmd); 594ce3ee1e7SLuigi Rizzo return v; 595ce3ee1e7SLuigi Rizzo } 596ce3ee1e7SLuigi Rizzo 5978241616dSLuigi Rizzo /* 5988241616dSLuigi Rizzo * report the index, and use start position as a hint, 5998241616dSLuigi Rizzo * otherwise buffer allocation becomes terribly expensive. 6008241616dSLuigi Rizzo */ 601ccdc3305SLuigi Rizzo static void * 602ce3ee1e7SLuigi Rizzo netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index) 603ccdc3305SLuigi Rizzo { 604ccdc3305SLuigi Rizzo uint32_t i = 0; /* index in the bitmap */ 605ccdc3305SLuigi Rizzo uint32_t mask, j; /* slot counter */ 606ccdc3305SLuigi Rizzo void *vaddr = NULL; 607ccdc3305SLuigi Rizzo 608ccdc3305SLuigi Rizzo if (len > p->_objsize) { 609ccdc3305SLuigi Rizzo D("%s request size %d too large", p->name, len); 610ccdc3305SLuigi Rizzo // XXX cannot reduce the size 611ccdc3305SLuigi Rizzo return NULL; 612ccdc3305SLuigi Rizzo } 613ccdc3305SLuigi Rizzo 614ccdc3305SLuigi Rizzo if (p->objfree == 0) { 615f9790aebSLuigi Rizzo D("no more %s objects", p->name); 616ccdc3305SLuigi Rizzo return NULL; 617ccdc3305SLuigi Rizzo } 6188241616dSLuigi Rizzo if (start) 6198241616dSLuigi Rizzo i = *start; 620ccdc3305SLuigi Rizzo 6218241616dSLuigi Rizzo /* termination is guaranteed by p->free, but better check bounds on i */ 6228241616dSLuigi Rizzo while (vaddr == NULL && i < p->bitmap_slots) { 623ccdc3305SLuigi Rizzo uint32_t cur = p->bitmap[i]; 624ccdc3305SLuigi Rizzo if (cur == 0) { /* bitmask is fully used */ 625ccdc3305SLuigi Rizzo i++; 626ccdc3305SLuigi Rizzo continue; 627ccdc3305SLuigi 
Rizzo } 628ccdc3305SLuigi Rizzo /* locate a slot */ 629ccdc3305SLuigi Rizzo for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) 630ccdc3305SLuigi Rizzo ; 631ccdc3305SLuigi Rizzo 632ccdc3305SLuigi Rizzo p->bitmap[i] &= ~mask; /* mark object as in use */ 633ccdc3305SLuigi Rizzo p->objfree--; 634ccdc3305SLuigi Rizzo 635ccdc3305SLuigi Rizzo vaddr = p->lut[i * 32 + j].vaddr; 6368241616dSLuigi Rizzo if (index) 6378241616dSLuigi Rizzo *index = i * 32 + j; 638ccdc3305SLuigi Rizzo } 639ccdc3305SLuigi Rizzo ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr); 640ccdc3305SLuigi Rizzo 6418241616dSLuigi Rizzo if (start) 6428241616dSLuigi Rizzo *start = i; 643ccdc3305SLuigi Rizzo return vaddr; 644ccdc3305SLuigi Rizzo } 645ccdc3305SLuigi Rizzo 646ccdc3305SLuigi Rizzo 647ccdc3305SLuigi Rizzo /* 648f0ea3689SLuigi Rizzo * free by index, not by address. 649f0ea3689SLuigi Rizzo * XXX should we also cleanup the content ? 650ccdc3305SLuigi Rizzo */ 651f0ea3689SLuigi Rizzo static int 652ccdc3305SLuigi Rizzo netmap_obj_free(struct netmap_obj_pool *p, uint32_t j) 653ccdc3305SLuigi Rizzo { 654f0ea3689SLuigi Rizzo uint32_t *ptr, mask; 655f0ea3689SLuigi Rizzo 656ccdc3305SLuigi Rizzo if (j >= p->objtotal) { 657ccdc3305SLuigi Rizzo D("invalid index %u, max %u", j, p->objtotal); 658f0ea3689SLuigi Rizzo return 1; 659ccdc3305SLuigi Rizzo } 660f0ea3689SLuigi Rizzo ptr = &p->bitmap[j / 32]; 661f0ea3689SLuigi Rizzo mask = (1 << (j % 32)); 662f0ea3689SLuigi Rizzo if (*ptr & mask) { 663f0ea3689SLuigi Rizzo D("ouch, double free on buffer %d", j); 664f0ea3689SLuigi Rizzo return 1; 665f0ea3689SLuigi Rizzo } else { 666f0ea3689SLuigi Rizzo *ptr |= mask; 667ccdc3305SLuigi Rizzo p->objfree++; 668f0ea3689SLuigi Rizzo return 0; 669f0ea3689SLuigi Rizzo } 670ccdc3305SLuigi Rizzo } 671ccdc3305SLuigi Rizzo 672f0ea3689SLuigi Rizzo /* 673f0ea3689SLuigi Rizzo * free by address. 
This is slow but is only used for a few 674f0ea3689SLuigi Rizzo * objects (rings, nifp) 675f0ea3689SLuigi Rizzo */ 676ccdc3305SLuigi Rizzo static void 677ccdc3305SLuigi Rizzo netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) 678ccdc3305SLuigi Rizzo { 679ce3ee1e7SLuigi Rizzo u_int i, j, n = p->numclusters; 680ccdc3305SLuigi Rizzo 681ce3ee1e7SLuigi Rizzo for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { 682ce3ee1e7SLuigi Rizzo void *base = p->lut[i * p->_clustentries].vaddr; 683ccdc3305SLuigi Rizzo ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; 684ccdc3305SLuigi Rizzo 685ccdc3305SLuigi Rizzo /* Given address, is out of the scope of the current cluster.*/ 686ede69cffSLuigi Rizzo if (vaddr < base || relofs >= p->_clustsize) 687ccdc3305SLuigi Rizzo continue; 688ccdc3305SLuigi Rizzo 689ccdc3305SLuigi Rizzo j = j + relofs / p->_objsize; 690ce3ee1e7SLuigi Rizzo /* KASSERT(j != 0, ("Cannot free object 0")); */ 691ccdc3305SLuigi Rizzo netmap_obj_free(p, j); 692ccdc3305SLuigi Rizzo return; 693ccdc3305SLuigi Rizzo } 694ae10d1afSLuigi Rizzo D("address %p is not contained inside any cluster (%s)", 695ccdc3305SLuigi Rizzo vaddr, p->name); 696ccdc3305SLuigi Rizzo } 697ccdc3305SLuigi Rizzo 6984bf50f18SLuigi Rizzo #define netmap_mem_bufsize(n) \ 6994bf50f18SLuigi Rizzo ((n)->pools[NETMAP_BUF_POOL]._objsize) 7004bf50f18SLuigi Rizzo 701ce3ee1e7SLuigi Rizzo #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL) 702ce3ee1e7SLuigi Rizzo #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v)) 703ce3ee1e7SLuigi Rizzo #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL) 704ce3ee1e7SLuigi Rizzo #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v)) 705ce3ee1e7SLuigi Rizzo #define netmap_buf_malloc(n, _pos, _index) \ 7064bf50f18SLuigi Rizzo netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index) 
707ccdc3305SLuigi Rizzo 708ccdc3305SLuigi Rizzo 709f0ea3689SLuigi Rizzo #if 0 // XXX unused 710ccdc3305SLuigi Rizzo /* Return the index associated to the given packet buffer */ 711ce3ee1e7SLuigi Rizzo #define netmap_buf_index(n, v) \ 712ce3ee1e7SLuigi Rizzo (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n)) 713f0ea3689SLuigi Rizzo #endif 714f0ea3689SLuigi Rizzo 715f0ea3689SLuigi Rizzo /* 716f0ea3689SLuigi Rizzo * allocate extra buffers in a linked list. 717f0ea3689SLuigi Rizzo * returns the actual number. 718f0ea3689SLuigi Rizzo */ 719f0ea3689SLuigi Rizzo uint32_t 720f0ea3689SLuigi Rizzo netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n) 721f0ea3689SLuigi Rizzo { 722f0ea3689SLuigi Rizzo struct netmap_mem_d *nmd = na->nm_mem; 723f0ea3689SLuigi Rizzo uint32_t i, pos = 0; /* opaque, scan position in the bitmap */ 724f0ea3689SLuigi Rizzo 725f0ea3689SLuigi Rizzo NMA_LOCK(nmd); 726f0ea3689SLuigi Rizzo 727f0ea3689SLuigi Rizzo *head = 0; /* default, 'null' index ie empty list */ 728f0ea3689SLuigi Rizzo for (i = 0 ; i < n; i++) { 729f0ea3689SLuigi Rizzo uint32_t cur = *head; /* save current head */ 730f0ea3689SLuigi Rizzo uint32_t *p = netmap_buf_malloc(nmd, &pos, head); 731f0ea3689SLuigi Rizzo if (p == NULL) { 732f0ea3689SLuigi Rizzo D("no more buffers after %d of %d", i, n); 733f0ea3689SLuigi Rizzo *head = cur; /* restore */ 734f0ea3689SLuigi Rizzo break; 735f0ea3689SLuigi Rizzo } 736f0ea3689SLuigi Rizzo RD(5, "allocate buffer %d -> %d", *head, cur); 737f0ea3689SLuigi Rizzo *p = cur; /* link to previous head */ 738f0ea3689SLuigi Rizzo } 739f0ea3689SLuigi Rizzo 740f0ea3689SLuigi Rizzo NMA_UNLOCK(nmd); 741f0ea3689SLuigi Rizzo 742f0ea3689SLuigi Rizzo return i; 743f0ea3689SLuigi Rizzo } 744f0ea3689SLuigi Rizzo 745f0ea3689SLuigi Rizzo static void 746f0ea3689SLuigi Rizzo netmap_extra_free(struct netmap_adapter *na, uint32_t head) 747f0ea3689SLuigi Rizzo { 748*847bf383SLuigi Rizzo struct lut_entry *lut = na->na_lut.lut; 
749f0ea3689SLuigi Rizzo struct netmap_mem_d *nmd = na->nm_mem; 750f0ea3689SLuigi Rizzo struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 751f0ea3689SLuigi Rizzo uint32_t i, cur, *buf; 752f0ea3689SLuigi Rizzo 753f0ea3689SLuigi Rizzo D("freeing the extra list"); 754f0ea3689SLuigi Rizzo for (i = 0; head >=2 && head < p->objtotal; i++) { 755f0ea3689SLuigi Rizzo cur = head; 756f0ea3689SLuigi Rizzo buf = lut[head].vaddr; 757f0ea3689SLuigi Rizzo head = *buf; 758f0ea3689SLuigi Rizzo *buf = 0; 759f0ea3689SLuigi Rizzo if (netmap_obj_free(p, cur)) 760f0ea3689SLuigi Rizzo break; 761f0ea3689SLuigi Rizzo } 762f0ea3689SLuigi Rizzo if (head != 0) 763f0ea3689SLuigi Rizzo D("breaking with head %d", head); 764f0ea3689SLuigi Rizzo D("freed %d buffers", i); 765f0ea3689SLuigi Rizzo } 766ccdc3305SLuigi Rizzo 767ccdc3305SLuigi Rizzo 7688241616dSLuigi Rizzo /* Return nonzero on error */ 7698241616dSLuigi Rizzo static int 770f9790aebSLuigi Rizzo netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 771ccdc3305SLuigi Rizzo { 772ce3ee1e7SLuigi Rizzo struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 773ce3ee1e7SLuigi Rizzo u_int i = 0; /* slot counter */ 7748241616dSLuigi Rizzo uint32_t pos = 0; /* slot in p->bitmap */ 7758241616dSLuigi Rizzo uint32_t index = 0; /* buffer index */ 776ccdc3305SLuigi Rizzo 777ccdc3305SLuigi Rizzo for (i = 0; i < n; i++) { 778ce3ee1e7SLuigi Rizzo void *vaddr = netmap_buf_malloc(nmd, &pos, &index); 779ccdc3305SLuigi Rizzo if (vaddr == NULL) { 780f9790aebSLuigi Rizzo D("no more buffers after %d of %d", i, n); 781ccdc3305SLuigi Rizzo goto cleanup; 782ccdc3305SLuigi Rizzo } 7838241616dSLuigi Rizzo slot[i].buf_idx = index; 784ccdc3305SLuigi Rizzo slot[i].len = p->_objsize; 785f9790aebSLuigi Rizzo slot[i].flags = 0; 786ccdc3305SLuigi Rizzo } 787ccdc3305SLuigi Rizzo 7888241616dSLuigi Rizzo ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos); 7898241616dSLuigi Rizzo return (0); 790ccdc3305SLuigi Rizzo 
791ccdc3305SLuigi Rizzo cleanup: 7924cf8455fSEd Maste while (i > 0) { 7934cf8455fSEd Maste i--; 7948241616dSLuigi Rizzo netmap_obj_free(p, slot[i].buf_idx); 795ccdc3305SLuigi Rizzo } 7968241616dSLuigi Rizzo bzero(slot, n * sizeof(slot[0])); 7978241616dSLuigi Rizzo return (ENOMEM); 798ccdc3305SLuigi Rizzo } 799ccdc3305SLuigi Rizzo 800f0ea3689SLuigi Rizzo static void 801f0ea3689SLuigi Rizzo netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index) 802f0ea3689SLuigi Rizzo { 803f0ea3689SLuigi Rizzo struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 804f0ea3689SLuigi Rizzo u_int i; 805f0ea3689SLuigi Rizzo 806f0ea3689SLuigi Rizzo for (i = 0; i < n; i++) { 807f0ea3689SLuigi Rizzo slot[i].buf_idx = index; 808f0ea3689SLuigi Rizzo slot[i].len = p->_objsize; 809f0ea3689SLuigi Rizzo slot[i].flags = 0; 810f0ea3689SLuigi Rizzo } 811f0ea3689SLuigi Rizzo } 812f0ea3689SLuigi Rizzo 813ccdc3305SLuigi Rizzo 814ccdc3305SLuigi Rizzo static void 815f9790aebSLuigi Rizzo netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i) 816ccdc3305SLuigi Rizzo { 817ce3ee1e7SLuigi Rizzo struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 8188241616dSLuigi Rizzo 819ccdc3305SLuigi Rizzo if (i < 2 || i >= p->objtotal) { 820ccdc3305SLuigi Rizzo D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); 821ccdc3305SLuigi Rizzo return; 822ccdc3305SLuigi Rizzo } 8238241616dSLuigi Rizzo netmap_obj_free(p, i); 824ccdc3305SLuigi Rizzo } 825ccdc3305SLuigi Rizzo 826f0ea3689SLuigi Rizzo 827f0ea3689SLuigi Rizzo static void 828f0ea3689SLuigi Rizzo netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 829f0ea3689SLuigi Rizzo { 830f0ea3689SLuigi Rizzo u_int i; 831f0ea3689SLuigi Rizzo 832f0ea3689SLuigi Rizzo for (i = 0; i < n; i++) { 833f0ea3689SLuigi Rizzo if (slot[i].buf_idx > 2) 834f0ea3689SLuigi Rizzo netmap_free_buf(nmd, slot[i].buf_idx); 835f0ea3689SLuigi Rizzo } 836f0ea3689SLuigi Rizzo } 837f0ea3689SLuigi Rizzo 8388241616dSLuigi Rizzo 
/*
 * Release the bitmap, the per-cluster memory and the lookup table of
 * an object pool, and zero its bookkeeping so the pool can be
 * configured and finalized again. The pool descriptor itself is kept.
 */
static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		u_int i;
		size_t sz = p->_clustsize;

		/*
		 * Free each cluster allocated in
		 * netmap_finalize_obj_allocator(). The cluster start
		 * addresses are stored at multiples of p->_clusterentries
		 * in the lut.
		 */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		/* the lut itself was vmalloc'ed on linux, malloc'ed elsewhere */
#ifdef linux
		vfree(p->lut);
#else
		free(p->lut, M_NETMAP);
#endif
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per entry */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
#define LINE_ROUND	NM_CACHE_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	/* reject requests outside the per-pool [min, max] limits */
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
	}
	/* exact solution not found */
	if (clustentries == 0) {
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* compute clustsize */
	clustsize = clustentries * objsize;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}


/*
 * Materialize a configured pool: allocate the lookup table, the
 * allocation bitmap and the physically-contiguous clusters.
 * On partial cluster-allocation failure the pool is halved rather
 * than failing outright; on total failure everything is released.
 * call with NMA_LOCK held
 */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed */
	size_t n;

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;

	n = sizeof(struct lut_entry) * p->objtotal;
#ifdef linux
	p->lut = vmalloc(n);
#else
	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
#endif
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
		goto clean;
	}

	/* Allocate the bitmap (one bit per object, packed in uint32_t) */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */

	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			/* walk back, clearing bits and freeing whole clusters */
			for (i--; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						n, M_NETMAP);
				p->lut[i].vaddr = NULL;
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		/*
		 * Set bitmap and lut state for all buffers in the current
		 * cluster.
		 *
		 * [i, lim) is the set of buffer indexes that cover the
		 * current cluster.
		 *
		 * 'clust' is really the address of the current buffer in
		 * the current cluster as we index through it with a stride
		 * of p->_objsize.
		 */
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->objfree = p->objtotal;
	p->memtotal = p->numclusters * p->_clustsize;
	if (p->objfree == 0)
		goto clean;
	if (netmap_verbose)
		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/*
 * Return 1 if the requested (size, num) of any pool differs from the
 * current global netmap_params. call with lock held
 */
static int
netmap_memory_config_changed(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
		    nmd->pools[i].r_objtotal != netmap_params[i].num)
			return 1;
	}
	return 0;
}

/* Reset all pools of an allocator and clear its FINALIZED flag. */
static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;

	if (netmap_verbose)
		D("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags  &= ~NETMAP_MEM_FINALIZED;
}

/*
 * Unload the DMA mappings of all non-reserved buffers of a pool
 * (linux only; a no-op stub on FreeBSD).
 */
static int
netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int i, lim = p->_objtotal;

	if (na->pdev == NULL)
		return 0;

#ifdef __FreeBSD__
	(void)i;
	(void)lim;
	D("unsupported on FreeBSD");
#else /* linux */
	/* buffers 0 and 1 are reserved, start at 2 */
	for (i = 2; i < lim; i++) {
		netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr);
	}
#endif /* linux */

	return 0;
}

/*
 * Create DMA mappings for all non-reserved buffers of a pool
 * (linux only; a no-op stub on FreeBSD).
 */
static int
netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
#ifdef __FreeBSD__
	D("unsupported on FreeBSD");
#else /* linux */
	int i, lim = p->_objtotal;

	if (na->pdev == NULL)
		return 0;

	for (i = 2; i < lim; i++) {
		netmap_load_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr,
			p->lut[i].vaddr);
	}
#endif /* linux */

	return 0;
}

/*
 * Finalize every pool of the allocator, reserve buffers 0 and 1,
 * and mark the allocator FINALIZED. On any pool failure, everything
 * is rolled back via netmap_mem_reset_all().
 */
static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	/* buffers 0 and 1 are reserved */
	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		D("interfaces %d KB, rings %d KB, buffers %d MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);


	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}



/* Destroy a private allocator created by netmap_mem_private_new(). */
static void
netmap_mem_private_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	if (netmap_verbose)
		D("deleting %p", nmd);
	if (nmd->active > 0)
		D("bug: deleting mem allocator with active=%d!", nmd->active);
	nm_mem_release_id(nmd);
	if (netmap_verbose)
		D("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	free(nmd, M_DEVBUF);
}

static int
netmap_mem_private_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
	 */
	return 0;
}

/*
 * Finalize a private allocator, taking an 'active' reference.
 * NOTE(review): 'active' is incremented even when finalization fails;
 * presumably the caller is expected to balance with
 * netmap_mem_private_deref() in all cases — confirm against callers.
 */
static int
netmap_mem_private_finalize(struct netmap_mem_d *nmd)
{
	int err;
	NMA_LOCK(nmd);
	nmd->active++;
	err = netmap_mem_finalize_all(nmd);
	NMA_UNLOCK(nmd);
	return err;

}

/* Drop an 'active' reference; on the last one, release all pools. */
static void
netmap_mem_private_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);
	if (--nmd->active <= 0)
		netmap_mem_reset_all(nmd);
	NMA_UNLOCK(nmd);
}


/*
 * allocator for private memory
 *
 * Create a private allocator sized for txr/rxr rings of txd/rxd
 * descriptors, plus 'npipes' pipes and 'extra_bufs' extra buffers.
 * Pools start from netmap_min_priv_params and are grown to fit the
 * request. On failure returns NULL and stores the error in *perr
 * (if non-NULL).
 */
struct netmap_mem_d *
netmap_mem_private_new(const char *name, u_int txr, u_int txd,
	u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i, err;
	u_int v, maxd;

	d = malloc(sizeof(struct netmap_mem_d),
			M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		err = ENOMEM;
		goto error;
	}

	*d = nm_blueprint;

	err = nm_mem_assign_id(d);
	if (err)
		goto error;

	/* account for the fake host rings */
	txr++;
	rxr++;

	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	}

	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	v = 2 + 4 * npipes;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	 */
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other end, the pipe ring dimension may be different from
	 * the parent port ring dimension. As a compromise, we allocate twice the
	 * space actually needed if the pipe rings were the same size as the parent rings
	 */
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;

	if (netmap_verbose)
		D("req if %d*%d ring %d*%d buf %d*%d",
			p[NETMAP_IF_POOL].num,
			p[NETMAP_IF_POOL].size,
			p[NETMAP_RING_POOL].num,
			p[NETMAP_RING_POOL].size,
			p[NETMAP_BUF_POOL].num,
			p[NETMAP_BUF_POOL].size);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		/* the blueprint pool names are printf formats taking 'name' */
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
				nm_blueprint.pools[i].name,
				name);
		err = netmap_config_obj_allocator(&d->pools[i],
				p[i].num, p[i].size);
		if (err)
			goto error;
	}

	d->flags &= ~NETMAP_MEM_FINALIZED;

	NMA_LOCK_INIT(d);

	return d;
error:
	netmap_mem_private_delete(d);
	if (perr)
		*perr = err;
	return NULL;
}


/*
 * (Re)configure the global allocator from netmap_params, unless it
 * is in use or the configuration is unchanged. call with lock held
 */
static int
netmap_mem_global_config(struct netmap_mem_d *nmd)
{
	int i;

	if (nmd->active)
		/* already in use, we cannot change the configuration */
		goto out;

	if (!netmap_memory_config_changed(nmd))
		goto out;

	ND("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
			netmap_params[i].num, netmap_params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:

	return nmd->lasterr;
}

/*
 * Reconfigure (if needed) and finalize the global allocator, taking
 * an 'active' reference on success.
 * NOTE(review): if netmap_mem_global_config() fails we jump to 'out'
 * without having incremented 'active', yet 'active--' runs there
 * because lasterr is set — looks like a possible refcount underflow;
 * confirm against the callers' error handling.
 */
static int
netmap_mem_global_finalize(struct netmap_mem_d *nmd)
{
	int err;

	/* update configuration if changed */
	if (netmap_mem_global_config(nmd))
		goto out;

	nmd->active++;

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* may happen if config is not changed */
		ND("nothing to do");
		goto out;
	}

	if (netmap_mem_finalize_all(nmd))
		goto out;

	nmd->lasterr = 0;

out:
	if (nmd->lasterr)
		nmd->active--;
	err = nmd->lasterr;

	return err;

}

/*
 * Tear down the global allocator. Note: operates on the global
 * 'nm_mem' singleton; the 'nmd' parameter is not used.
 */
static void
netmap_mem_global_delete(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	}

	NMA_LOCK_DESTROY(&nm_mem);
}

/* Module init: set up the global allocator's lock and take a ref. */
int
netmap_mem_init(void)
{
	NMA_LOCK_INIT(&nm_mem);
	netmap_mem_get(&nm_mem);
	return (0);
}

/* Module teardown: drop the global allocator's reference. */
void
netmap_mem_fini(void)
{
	netmap_mem_put(&nm_mem);
}

/*
 * Release the rings (and the buffers they reference) of every
 * tx and rx kring of the adapter.
 */
static void
netmap_free_rings(struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < netmap_real_rings(na, t); i++) {
			struct netmap_kring *kring = &NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;

			if (ring == NULL)
				continue;
			netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
			netmap_ring_free(na->nm_mem, ring);
			kring->ring = NULL;
		}
	}
}

/* call with NMA_LOCK held *
 *
 * Allocate netmap rings and buffers for this card
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
1439f9790aebSLuigi Rizzo */ 1440*847bf383SLuigi Rizzo static int 1441*847bf383SLuigi Rizzo netmap_mem2_rings_create(struct netmap_adapter *na) 1442f9790aebSLuigi Rizzo { 1443*847bf383SLuigi Rizzo enum txrx t; 1444f9790aebSLuigi Rizzo 1445f9790aebSLuigi Rizzo NMA_LOCK(na->nm_mem); 1446f9790aebSLuigi Rizzo 1447*847bf383SLuigi Rizzo for_rx_tx(t) { 1448*847bf383SLuigi Rizzo u_int i; 1449*847bf383SLuigi Rizzo 1450*847bf383SLuigi Rizzo for (i = 0; i <= nma_get_nrings(na, t); i++) { 1451*847bf383SLuigi Rizzo struct netmap_kring *kring = &NMR(na, t)[i]; 1452*847bf383SLuigi Rizzo struct netmap_ring *ring = kring->ring; 1453*847bf383SLuigi Rizzo u_int len, ndesc; 1454*847bf383SLuigi Rizzo 1455*847bf383SLuigi Rizzo if (ring) { 1456*847bf383SLuigi Rizzo ND("%s already created", kring->name); 1457f0ea3689SLuigi Rizzo continue; /* already created by somebody else */ 1458f0ea3689SLuigi Rizzo } 1459f9790aebSLuigi Rizzo ndesc = kring->nkr_num_slots; 1460f9790aebSLuigi Rizzo len = sizeof(struct netmap_ring) + 1461f9790aebSLuigi Rizzo ndesc * sizeof(struct netmap_slot); 1462f9790aebSLuigi Rizzo ring = netmap_ring_malloc(na->nm_mem, len); 1463f9790aebSLuigi Rizzo if (ring == NULL) { 1464*847bf383SLuigi Rizzo D("Cannot allocate %s_ring", nm_txrx2str(t)); 1465f9790aebSLuigi Rizzo goto cleanup; 1466f9790aebSLuigi Rizzo } 1467f6c2a31fSLuigi Rizzo ND("txring at %p", ring); 1468f9790aebSLuigi Rizzo kring->ring = ring; 1469f9790aebSLuigi Rizzo *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc; 147017885a7bSLuigi Rizzo *(int64_t *)(uintptr_t)&ring->buf_ofs = 1471f9790aebSLuigi Rizzo (na->nm_mem->pools[NETMAP_IF_POOL].memtotal + 1472f9790aebSLuigi Rizzo na->nm_mem->pools[NETMAP_RING_POOL].memtotal) - 1473f9790aebSLuigi Rizzo netmap_ring_offset(na->nm_mem, ring); 1474f9790aebSLuigi Rizzo 147517885a7bSLuigi Rizzo /* copy values from kring */ 147617885a7bSLuigi Rizzo ring->head = kring->rhead; 147717885a7bSLuigi Rizzo ring->cur = kring->rcur; 147817885a7bSLuigi Rizzo ring->tail = kring->rtail; 
1479f9790aebSLuigi Rizzo *(uint16_t *)(uintptr_t)&ring->nr_buf_size = 14804bf50f18SLuigi Rizzo netmap_mem_bufsize(na->nm_mem); 1481f0ea3689SLuigi Rizzo ND("%s h %d c %d t %d", kring->name, 1482f0ea3689SLuigi Rizzo ring->head, ring->cur, ring->tail); 1483*847bf383SLuigi Rizzo ND("initializing slots for %s_ring", nm_txrx2str(txrx)); 1484*847bf383SLuigi Rizzo if (i != nma_get_nrings(na, t) || (na->na_flags & NAF_HOST_RINGS)) { 1485f0ea3689SLuigi Rizzo /* this is a real ring */ 1486f9790aebSLuigi Rizzo if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) { 1487*847bf383SLuigi Rizzo D("Cannot allocate buffers for %s_ring", nm_txrx2str(t)); 1488f9790aebSLuigi Rizzo goto cleanup; 1489f9790aebSLuigi Rizzo } 1490f0ea3689SLuigi Rizzo } else { 1491*847bf383SLuigi Rizzo /* this is a fake ring, set all indices to 0 */ 1492f0ea3689SLuigi Rizzo netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0); 1493f0ea3689SLuigi Rizzo } 1494*847bf383SLuigi Rizzo /* ring info */ 1495*847bf383SLuigi Rizzo *(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id; 1496*847bf383SLuigi Rizzo *(uint16_t *)(uintptr_t)&ring->dir = kring->tx; 1497f0ea3689SLuigi Rizzo } 1498f9790aebSLuigi Rizzo } 1499f9790aebSLuigi Rizzo 1500f9790aebSLuigi Rizzo NMA_UNLOCK(na->nm_mem); 1501f9790aebSLuigi Rizzo 1502f9790aebSLuigi Rizzo return 0; 1503f9790aebSLuigi Rizzo 1504f9790aebSLuigi Rizzo cleanup: 1505f9790aebSLuigi Rizzo netmap_free_rings(na); 1506f9790aebSLuigi Rizzo 1507f9790aebSLuigi Rizzo NMA_UNLOCK(na->nm_mem); 1508f9790aebSLuigi Rizzo 1509f9790aebSLuigi Rizzo return ENOMEM; 1510f9790aebSLuigi Rizzo } 1511f9790aebSLuigi Rizzo 1512*847bf383SLuigi Rizzo static void 1513*847bf383SLuigi Rizzo netmap_mem2_rings_delete(struct netmap_adapter *na) 1514f9790aebSLuigi Rizzo { 1515f9790aebSLuigi Rizzo /* last instance, release bufs and rings */ 1516f9790aebSLuigi Rizzo NMA_LOCK(na->nm_mem); 1517f9790aebSLuigi Rizzo 1518f9790aebSLuigi Rizzo netmap_free_rings(na); 1519f9790aebSLuigi Rizzo 1520f9790aebSLuigi Rizzo 
NMA_UNLOCK(na->nm_mem); 1521f9790aebSLuigi Rizzo } 1522ccdc3305SLuigi Rizzo 1523ccdc3305SLuigi Rizzo 15248241616dSLuigi Rizzo /* call with NMA_LOCK held */ 1525ae10d1afSLuigi Rizzo /* 1526ae10d1afSLuigi Rizzo * Allocate the per-fd structure netmap_if. 1527ce3ee1e7SLuigi Rizzo * 1528ce3ee1e7SLuigi Rizzo * We assume that the configuration stored in na 1529ce3ee1e7SLuigi Rizzo * (number of tx/rx rings and descs) does not change while 1530ce3ee1e7SLuigi Rizzo * the interface is in netmap mode. 1531ae10d1afSLuigi Rizzo */ 1532*847bf383SLuigi Rizzo static struct netmap_if * 1533*847bf383SLuigi Rizzo netmap_mem2_if_new(struct netmap_adapter *na) 1534ccdc3305SLuigi Rizzo { 1535ccdc3305SLuigi Rizzo struct netmap_if *nifp; 1536ccdc3305SLuigi Rizzo ssize_t base; /* handy for relative offsets between rings and nifp */ 1537*847bf383SLuigi Rizzo u_int i, len, n[NR_TXRX], ntot; 1538*847bf383SLuigi Rizzo enum txrx t; 1539ccdc3305SLuigi Rizzo 1540*847bf383SLuigi Rizzo ntot = 0; 1541*847bf383SLuigi Rizzo for_rx_tx(t) { 1542f0ea3689SLuigi Rizzo /* account for the (eventually fake) host rings */ 1543*847bf383SLuigi Rizzo n[t] = nma_get_nrings(na, t) + 1; 1544*847bf383SLuigi Rizzo ntot += n[t]; 1545*847bf383SLuigi Rizzo } 1546ccdc3305SLuigi Rizzo /* 1547ccdc3305SLuigi Rizzo * the descriptor is followed inline by an array of offsets 1548ccdc3305SLuigi Rizzo * to the tx and rx rings in the shared memory region. 
1549ccdc3305SLuigi Rizzo */ 1550ce3ee1e7SLuigi Rizzo 1551ce3ee1e7SLuigi Rizzo NMA_LOCK(na->nm_mem); 1552ce3ee1e7SLuigi Rizzo 1553*847bf383SLuigi Rizzo len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t)); 1554ce3ee1e7SLuigi Rizzo nifp = netmap_if_malloc(na->nm_mem, len); 1555ccdc3305SLuigi Rizzo if (nifp == NULL) { 1556ce3ee1e7SLuigi Rizzo NMA_UNLOCK(na->nm_mem); 1557ccdc3305SLuigi Rizzo return NULL; 1558ccdc3305SLuigi Rizzo } 1559ccdc3305SLuigi Rizzo 1560ccdc3305SLuigi Rizzo /* initialize base fields -- override const */ 1561ce3ee1e7SLuigi Rizzo *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; 1562ce3ee1e7SLuigi Rizzo *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; 15634bf50f18SLuigi Rizzo strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ); 1564ccdc3305SLuigi Rizzo 1565ccdc3305SLuigi Rizzo /* 1566ccdc3305SLuigi Rizzo * fill the slots for the rx and tx rings. They contain the offset 1567ccdc3305SLuigi Rizzo * between the ring and nifp, so the information is usable in 1568ccdc3305SLuigi Rizzo * userspace to reach the ring from the nifp. 
1569ccdc3305SLuigi Rizzo */ 1570ce3ee1e7SLuigi Rizzo base = netmap_if_offset(na->nm_mem, nifp); 1571*847bf383SLuigi Rizzo for (i = 0; i < n[NR_TX]; i++) { 1572ccdc3305SLuigi Rizzo *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = 1573ce3ee1e7SLuigi Rizzo netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base; 1574ccdc3305SLuigi Rizzo } 1575*847bf383SLuigi Rizzo for (i = 0; i < n[NR_RX]; i++) { 1576*847bf383SLuigi Rizzo *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = 1577ce3ee1e7SLuigi Rizzo netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base; 1578ccdc3305SLuigi Rizzo } 1579ce3ee1e7SLuigi Rizzo 1580ce3ee1e7SLuigi Rizzo NMA_UNLOCK(na->nm_mem); 1581ce3ee1e7SLuigi Rizzo 1582ccdc3305SLuigi Rizzo return (nifp); 1583ccdc3305SLuigi Rizzo } 1584ccdc3305SLuigi Rizzo 1585*847bf383SLuigi Rizzo static void 1586*847bf383SLuigi Rizzo netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) 1587ccdc3305SLuigi Rizzo { 1588ce3ee1e7SLuigi Rizzo if (nifp == NULL) 1589ce3ee1e7SLuigi Rizzo /* nothing to do */ 1590ce3ee1e7SLuigi Rizzo return; 1591ce3ee1e7SLuigi Rizzo NMA_LOCK(na->nm_mem); 1592f0ea3689SLuigi Rizzo if (nifp->ni_bufs_head) 1593f0ea3689SLuigi Rizzo netmap_extra_free(na, nifp->ni_bufs_head); 1594ce3ee1e7SLuigi Rizzo netmap_if_free(na->nm_mem, nifp); 1595ce3ee1e7SLuigi Rizzo 1596ce3ee1e7SLuigi Rizzo NMA_UNLOCK(na->nm_mem); 1597ce3ee1e7SLuigi Rizzo } 1598ce3ee1e7SLuigi Rizzo 1599ce3ee1e7SLuigi Rizzo static void 1600ce3ee1e7SLuigi Rizzo netmap_mem_global_deref(struct netmap_mem_d *nmd) 1601ce3ee1e7SLuigi Rizzo { 1602ce3ee1e7SLuigi Rizzo 1603*847bf383SLuigi Rizzo nmd->active--; 1604*847bf383SLuigi Rizzo if (!nmd->active) 16054bf50f18SLuigi Rizzo nmd->nm_grp = -1; 1606ae10d1afSLuigi Rizzo if (netmap_verbose) 1607*847bf383SLuigi Rizzo D("active = %d", nmd->active); 1608ce3ee1e7SLuigi Rizzo 1609ce3ee1e7SLuigi Rizzo } 1610ce3ee1e7SLuigi Rizzo 1611*847bf383SLuigi Rizzo struct netmap_mem_ops netmap_mem_global_ops = { 1612*847bf383SLuigi Rizzo .nmd_get_lut = 
netmap_mem2_get_lut, 1613*847bf383SLuigi Rizzo .nmd_get_info = netmap_mem2_get_info, 1614*847bf383SLuigi Rizzo .nmd_ofstophys = netmap_mem2_ofstophys, 1615*847bf383SLuigi Rizzo .nmd_config = netmap_mem_global_config, 1616*847bf383SLuigi Rizzo .nmd_finalize = netmap_mem_global_finalize, 1617*847bf383SLuigi Rizzo .nmd_deref = netmap_mem_global_deref, 1618*847bf383SLuigi Rizzo .nmd_delete = netmap_mem_global_delete, 1619*847bf383SLuigi Rizzo .nmd_if_offset = netmap_mem2_if_offset, 1620*847bf383SLuigi Rizzo .nmd_if_new = netmap_mem2_if_new, 1621*847bf383SLuigi Rizzo .nmd_if_delete = netmap_mem2_if_delete, 1622*847bf383SLuigi Rizzo .nmd_rings_create = netmap_mem2_rings_create, 1623*847bf383SLuigi Rizzo .nmd_rings_delete = netmap_mem2_rings_delete 1624*847bf383SLuigi Rizzo }; 1625*847bf383SLuigi Rizzo struct netmap_mem_ops netmap_mem_private_ops = { 1626*847bf383SLuigi Rizzo .nmd_get_lut = netmap_mem2_get_lut, 1627*847bf383SLuigi Rizzo .nmd_get_info = netmap_mem2_get_info, 1628*847bf383SLuigi Rizzo .nmd_ofstophys = netmap_mem2_ofstophys, 1629*847bf383SLuigi Rizzo .nmd_config = netmap_mem_private_config, 1630*847bf383SLuigi Rizzo .nmd_finalize = netmap_mem_private_finalize, 1631*847bf383SLuigi Rizzo .nmd_deref = netmap_mem_private_deref, 1632*847bf383SLuigi Rizzo .nmd_if_offset = netmap_mem2_if_offset, 1633*847bf383SLuigi Rizzo .nmd_delete = netmap_mem_private_delete, 1634*847bf383SLuigi Rizzo .nmd_if_new = netmap_mem2_if_new, 1635*847bf383SLuigi Rizzo .nmd_if_delete = netmap_mem2_if_delete, 1636*847bf383SLuigi Rizzo .nmd_rings_create = netmap_mem2_rings_create, 1637*847bf383SLuigi Rizzo .nmd_rings_delete = netmap_mem2_rings_delete 1638*847bf383SLuigi Rizzo }; 1639