1718cf2ccSPedro F. Giffuni /*- 2718cf2ccSPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3718cf2ccSPedro F. Giffuni * 437e3a6d3SLuigi Rizzo * Copyright (C) 2012-2014 Matteo Landi 537e3a6d3SLuigi Rizzo * Copyright (C) 2012-2016 Luigi Rizzo 637e3a6d3SLuigi Rizzo * Copyright (C) 2012-2016 Giuseppe Lettieri 737e3a6d3SLuigi Rizzo * All rights reserved. 8ccdc3305SLuigi Rizzo * 9ccdc3305SLuigi Rizzo * Redistribution and use in source and binary forms, with or without 10ccdc3305SLuigi Rizzo * modification, are permitted provided that the following conditions 11ccdc3305SLuigi Rizzo * are met: 12ccdc3305SLuigi Rizzo * 1. Redistributions of source code must retain the above copyright 13ccdc3305SLuigi Rizzo * notice, this list of conditions and the following disclaimer. 14ccdc3305SLuigi Rizzo * 2. Redistributions in binary form must reproduce the above copyright 15ccdc3305SLuigi Rizzo * notice, this list of conditions and the following disclaimer in the 16ccdc3305SLuigi Rizzo * documentation and/or other materials provided with the distribution. 17ccdc3305SLuigi Rizzo * 18ccdc3305SLuigi Rizzo * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19ccdc3305SLuigi Rizzo * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20ccdc3305SLuigi Rizzo * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21ccdc3305SLuigi Rizzo * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22ccdc3305SLuigi Rizzo * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23ccdc3305SLuigi Rizzo * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24ccdc3305SLuigi Rizzo * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25ccdc3305SLuigi Rizzo * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26ccdc3305SLuigi Rizzo * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27ccdc3305SLuigi Rizzo * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28ccdc3305SLuigi Rizzo * SUCH DAMAGE. 29ccdc3305SLuigi Rizzo */ 30ccdc3305SLuigi Rizzo 318241616dSLuigi Rizzo #ifdef linux 32ce3ee1e7SLuigi Rizzo #include "bsd_glue.h" 338241616dSLuigi Rizzo #endif /* linux */ 348241616dSLuigi Rizzo 35ce3ee1e7SLuigi Rizzo #ifdef __APPLE__ 36ce3ee1e7SLuigi Rizzo #include "osx_glue.h" 37ce3ee1e7SLuigi Rizzo #endif /* __APPLE__ */ 388241616dSLuigi Rizzo 39ce3ee1e7SLuigi Rizzo #ifdef __FreeBSD__ 40ce3ee1e7SLuigi Rizzo #include <sys/cdefs.h> /* prerequisite */ 41ce3ee1e7SLuigi Rizzo __FBSDID("$FreeBSD$"); 428241616dSLuigi Rizzo 43ce3ee1e7SLuigi Rizzo #include <sys/types.h> 44ce3ee1e7SLuigi Rizzo #include <sys/malloc.h> 4537e3a6d3SLuigi Rizzo #include <sys/kernel.h> /* MALLOC_DEFINE */ 46ce3ee1e7SLuigi Rizzo #include <sys/proc.h> 47ce3ee1e7SLuigi Rizzo #include <vm/vm.h> /* vtophys */ 48ce3ee1e7SLuigi Rizzo #include <vm/pmap.h> /* vtophys */ 49ce3ee1e7SLuigi Rizzo #include <sys/socket.h> /* sockaddrs */ 50ce3ee1e7SLuigi Rizzo #include <sys/selinfo.h> 51ce3ee1e7SLuigi Rizzo #include <sys/sysctl.h> 52ce3ee1e7SLuigi Rizzo #include <net/if.h> 53ce3ee1e7SLuigi Rizzo #include <net/if_var.h> 54ce3ee1e7SLuigi Rizzo #include <net/vnet.h> 55ce3ee1e7SLuigi Rizzo #include <machine/bus.h> /* bus_dmamap_* */ 56ce3ee1e7SLuigi Rizzo 5737e3a6d3SLuigi Rizzo /* M_NETMAP only used in here */ 5837e3a6d3SLuigi Rizzo 
MALLOC_DECLARE(M_NETMAP);
MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");

#endif /* __FreeBSD__ */

#ifdef _WIN32
#include <win_glue.h>
#endif

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include "netmap_mem2.h"

#ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
#define NETMAP_BUF_MAX_NUM 8*4096 /* if too big takes too much time to allocate */
#else
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */
#endif

#define NETMAP_POOL_MAX_NAMSZ	32


/*
 * Indices of the three object pools managed by each allocator:
 * netmap_if headers, netmap_ring descriptors and packet buffers.
 * NETMAP_POOLS_NR is the number of pools, used to size arrays.
 */
enum {
	NETMAP_IF_POOL   = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};


/*
 * Requested configuration for one object pool: object size and count,
 * plus the values from the previous (re)configuration so changes can
 * be detected.
 */
struct netmap_obj_params {
	u_int size;		/* requested object size */
	u_int num;		/* requested number of objects */

	u_int last_size;	/* size at the last config */
	u_int last_num;		/* number at the last config */
};

/*
 * State of one object pool (if/ring/buf). The first group of fields is
 * only valid once the pool has been finalized (memory preallocated).
 */
struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)	      */
	u_int objtotal;         /* actual total number of objects. */
	u_int memtotal;		/* actual total memory space */
	u_int numclusters;	/* actual number of clusters */

	u_int objfree;          /* number of free objects. */

	struct lut_entry *lut;  /* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;       /* one bit per buffer, 1 means free */
	uint32_t *invalid_bitmap;/* one bit per buffer, 1 means invalid */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;       /* cluster size */
	u_int _clustentries;    /* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};

#define NMA_LOCK_T		NM_MTX_T


/*
 * Method table implemented by each memory allocator backend; the
 * netmap_mem_*() wrappers below dispatch through this table.
 */
struct netmap_mem_ops {
	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *);
	void (*nmd_deref)(struct netmap_mem_d *);
	ssize_t  (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	struct netmap_if * (*nmd_if_new)(struct netmap_adapter *,
					 struct netmap_priv_d *);
	void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *);
	int (*nmd_rings_create)(struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_adapter *);
};

/*
 * A netmap memory allocator: three object pools plus bookkeeping.
 * All allocators are kept in a circular doubly-linked list sorted by
 * nm_id (see nm_mem_assign_id_locked()).
 */
struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;  /* protect the allocator */
	u_int nm_totalsize; /* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
#define NETMAP_MEM_HIDDEN	0x8	/* being prepared */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;	/* iommu group id */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	struct netmap_mem_ops *ops;

	struct netmap_obj_params params[NETMAP_POOLS_NR];

#define NM_MEM_NAMESZ	16
	char name[NM_MEM_NAMESZ];
};

/*
 * Macros generating the netmap_mem_*() trampolines that forward to the
 * allocator's ops table. DEFCB* take a netmap_mem_d, DEFNACB* take a
 * netmap_adapter and dispatch through na->nm_mem.
 * XXX need to fix the case of t0 == void
 */
#define NMD_DEFCB(t0, name) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd) \
{ \
	return nmd->ops->nmd_##name(nmd); \
}

#define NMD_DEFCB1(t0, name, t1) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1) \
{ \
	return nmd->ops->nmd_##name(nmd, a1); \
}

#define NMD_DEFCB3(t0, name, t1, t2, t3) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1, t2 a2, t3 a3) \
{ \
	return nmd->ops->nmd_##name(nmd, a1, a2, a3); \
}

#define NMD_DEFNACB(t0, name) \
t0 \
netmap_mem_##name(struct netmap_adapter *na) \
{ \
	return na->nm_mem->ops->nmd_##name(na); \
}

#define NMD_DEFNACB1(t0, name, t1) \
t0 \
netmap_mem_##name(struct netmap_adapter *na, t1 a1) \
{ \
	return na->nm_mem->ops->nmd_##name(na, a1); \
}

NMD_DEFCB1(int, get_lut, struct netmap_lut *);
NMD_DEFCB3(int, get_info, uint64_t *, u_int *, uint16_t *);
NMD_DEFCB1(vm_paddr_t, ofstophys, vm_ooffset_t);
static int netmap_mem_config(struct netmap_mem_d *);
NMD_DEFCB(int, config);
NMD_DEFCB1(ssize_t, if_offset, const void *);
NMD_DEFCB(void, delete);

NMD_DEFNACB1(struct netmap_if *, if_new, struct netmap_priv_d *);
NMD_DEFNACB1(void, if_delete, struct netmap_if *);
NMD_DEFNACB(int, rings_create);
NMD_DEFNACB(void, rings_delete);

static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_assign_group(struct netmap_mem_d *, struct device *);
static void nm_mem_release_id(struct netmap_mem_d *);

/* Return the numeric identifier of the given allocator. */
nm_memid_t
netmap_mem_get_id(struct netmap_mem_d *nmd)
{
	return nmd->nm_id;
}

#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_SPINLOCK(n)         NM_MTX_SPINLOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)
250847bf383SLuigi Rizzo #ifdef NM_DEBUG_MEM_PUTGET 251847bf383SLuigi Rizzo #define NM_DBG_REFC(nmd, func, line) \ 252c3e9b4dbSLuiz Otavio O Souza nm_prinf("%s:%d mem[%d] -> %d\n", func, line, (nmd)->nm_id, (nmd)->refcount); 253847bf383SLuigi Rizzo #else 254847bf383SLuigi Rizzo #define NM_DBG_REFC(nmd, func, line) 255847bf383SLuigi Rizzo #endif 256847bf383SLuigi Rizzo 257c3e9b4dbSLuiz Otavio O Souza /* circular list of all existing allocators */ 258c3e9b4dbSLuiz Otavio O Souza static struct netmap_mem_d *netmap_last_mem_d = &nm_mem; 259c3e9b4dbSLuiz Otavio O Souza NM_MTX_T nm_mem_list_lock; 260c3e9b4dbSLuiz Otavio O Souza 261c3e9b4dbSLuiz Otavio O Souza struct netmap_mem_d * 262c3e9b4dbSLuiz Otavio O Souza __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line) 263847bf383SLuigi Rizzo { 264c3e9b4dbSLuiz Otavio O Souza NM_MTX_LOCK(nm_mem_list_lock); 265847bf383SLuigi Rizzo nmd->refcount++; 266847bf383SLuigi Rizzo NM_DBG_REFC(nmd, func, line); 267c3e9b4dbSLuiz Otavio O Souza NM_MTX_UNLOCK(nm_mem_list_lock); 268c3e9b4dbSLuiz Otavio O Souza return nmd; 269847bf383SLuigi Rizzo } 270847bf383SLuigi Rizzo 271c3e9b4dbSLuiz Otavio O Souza void 272c3e9b4dbSLuiz Otavio O Souza __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line) 273847bf383SLuigi Rizzo { 274847bf383SLuigi Rizzo int last; 275c3e9b4dbSLuiz Otavio O Souza NM_MTX_LOCK(nm_mem_list_lock); 276847bf383SLuigi Rizzo last = (--nmd->refcount == 0); 277c3e9b4dbSLuiz Otavio O Souza if (last) 278c3e9b4dbSLuiz Otavio O Souza nm_mem_release_id(nmd); 279847bf383SLuigi Rizzo NM_DBG_REFC(nmd, func, line); 280c3e9b4dbSLuiz Otavio O Souza NM_MTX_UNLOCK(nm_mem_list_lock); 281847bf383SLuigi Rizzo if (last) 282847bf383SLuigi Rizzo netmap_mem_delete(nmd); 283847bf383SLuigi Rizzo } 284847bf383SLuigi Rizzo 285847bf383SLuigi Rizzo int 286847bf383SLuigi Rizzo netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na) 287847bf383SLuigi Rizzo { 288847bf383SLuigi Rizzo if (nm_mem_assign_group(nmd, 
na->pdev) < 0) { 289847bf383SLuigi Rizzo return ENOMEM; 290847bf383SLuigi Rizzo } else { 29137e3a6d3SLuigi Rizzo NMA_LOCK(nmd); 29237e3a6d3SLuigi Rizzo nmd->lasterr = nmd->ops->nmd_finalize(nmd); 29337e3a6d3SLuigi Rizzo NMA_UNLOCK(nmd); 294847bf383SLuigi Rizzo } 295847bf383SLuigi Rizzo 296*4f80b14cSVincenzo Maffione if (!nmd->lasterr && na->pdev) { 297*4f80b14cSVincenzo Maffione nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na); 298*4f80b14cSVincenzo Maffione if (nmd->lasterr) { 299*4f80b14cSVincenzo Maffione netmap_mem_deref(nmd, na); 300*4f80b14cSVincenzo Maffione } 301*4f80b14cSVincenzo Maffione } 302847bf383SLuigi Rizzo 303847bf383SLuigi Rizzo return nmd->lasterr; 304847bf383SLuigi Rizzo } 305847bf383SLuigi Rizzo 306*4f80b14cSVincenzo Maffione static int 307*4f80b14cSVincenzo Maffione nm_isset(uint32_t *bitmap, u_int i) 308847bf383SLuigi Rizzo { 309*4f80b14cSVincenzo Maffione return bitmap[ (i>>5) ] & ( 1U << (i & 31U) ); 310*4f80b14cSVincenzo Maffione } 31137e3a6d3SLuigi Rizzo 31237e3a6d3SLuigi Rizzo 313*4f80b14cSVincenzo Maffione static int 314*4f80b14cSVincenzo Maffione netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p) 315*4f80b14cSVincenzo Maffione { 316*4f80b14cSVincenzo Maffione u_int n, j; 31737e3a6d3SLuigi Rizzo 318*4f80b14cSVincenzo Maffione if (p->bitmap == NULL) { 319*4f80b14cSVincenzo Maffione /* Allocate the bitmap */ 320*4f80b14cSVincenzo Maffione n = (p->objtotal + 31) / 32; 321*4f80b14cSVincenzo Maffione p->bitmap = nm_os_malloc(sizeof(uint32_t) * n); 322*4f80b14cSVincenzo Maffione if (p->bitmap == NULL) { 323*4f80b14cSVincenzo Maffione D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n, 324*4f80b14cSVincenzo Maffione p->name); 325*4f80b14cSVincenzo Maffione return ENOMEM; 326*4f80b14cSVincenzo Maffione } 327*4f80b14cSVincenzo Maffione p->bitmap_slots = n; 328*4f80b14cSVincenzo Maffione } else { 329*4f80b14cSVincenzo Maffione memset(p->bitmap, 0, p->bitmap_slots); 330*4f80b14cSVincenzo Maffione } 
331*4f80b14cSVincenzo Maffione 332*4f80b14cSVincenzo Maffione p->objfree = 0; 33337e3a6d3SLuigi Rizzo /* 33437e3a6d3SLuigi Rizzo * Set all the bits in the bitmap that have 33537e3a6d3SLuigi Rizzo * corresponding buffers to 1 to indicate they are 33637e3a6d3SLuigi Rizzo * free. 33737e3a6d3SLuigi Rizzo */ 33837e3a6d3SLuigi Rizzo for (j = 0; j < p->objtotal; j++) { 339*4f80b14cSVincenzo Maffione if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) { 340*4f80b14cSVincenzo Maffione D("skipping %s %d", p->name, j); 341*4f80b14cSVincenzo Maffione continue; 34237e3a6d3SLuigi Rizzo } 343*4f80b14cSVincenzo Maffione p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) ); 344*4f80b14cSVincenzo Maffione p->objfree++; 34537e3a6d3SLuigi Rizzo } 346*4f80b14cSVincenzo Maffione 347*4f80b14cSVincenzo Maffione ND("%s free %u", p->name, p->objfree); 348*4f80b14cSVincenzo Maffione if (p->objfree == 0) 349*4f80b14cSVincenzo Maffione return ENOMEM; 350*4f80b14cSVincenzo Maffione 351*4f80b14cSVincenzo Maffione return 0; 352*4f80b14cSVincenzo Maffione } 353*4f80b14cSVincenzo Maffione 354*4f80b14cSVincenzo Maffione static int 355*4f80b14cSVincenzo Maffione netmap_mem_init_bitmaps(struct netmap_mem_d *nmd) 356*4f80b14cSVincenzo Maffione { 357*4f80b14cSVincenzo Maffione int i, error = 0; 358*4f80b14cSVincenzo Maffione 359*4f80b14cSVincenzo Maffione for (i = 0; i < NETMAP_POOLS_NR; i++) { 360*4f80b14cSVincenzo Maffione struct netmap_obj_pool *p = &nmd->pools[i]; 361*4f80b14cSVincenzo Maffione 362*4f80b14cSVincenzo Maffione error = netmap_init_obj_allocator_bitmap(p); 363*4f80b14cSVincenzo Maffione if (error) 364*4f80b14cSVincenzo Maffione return error; 36537e3a6d3SLuigi Rizzo } 36637e3a6d3SLuigi Rizzo 36737e3a6d3SLuigi Rizzo /* 36837e3a6d3SLuigi Rizzo * buffers 0 and 1 are reserved 36937e3a6d3SLuigi Rizzo */ 370*4f80b14cSVincenzo Maffione if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) { 371*4f80b14cSVincenzo Maffione return ENOMEM; 372*4f80b14cSVincenzo Maffione } 373*4f80b14cSVincenzo Maffione 
37437e3a6d3SLuigi Rizzo nmd->pools[NETMAP_BUF_POOL].objfree -= 2; 37537e3a6d3SLuigi Rizzo if (nmd->pools[NETMAP_BUF_POOL].bitmap) { 37637e3a6d3SLuigi Rizzo /* XXX This check is a workaround that prevents a 37737e3a6d3SLuigi Rizzo * NULL pointer crash which currently happens only 378844a6f0cSLuigi Rizzo * with ptnetmap guests. 379844a6f0cSLuigi Rizzo * Removed shared-info --> is the bug still there? */ 380*4f80b14cSVincenzo Maffione nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U; 38137e3a6d3SLuigi Rizzo } 382*4f80b14cSVincenzo Maffione return 0; 383*4f80b14cSVincenzo Maffione } 384*4f80b14cSVincenzo Maffione 385*4f80b14cSVincenzo Maffione int 386*4f80b14cSVincenzo Maffione netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na) 387*4f80b14cSVincenzo Maffione { 388*4f80b14cSVincenzo Maffione int last_user = 0; 389*4f80b14cSVincenzo Maffione NMA_LOCK(nmd); 390*4f80b14cSVincenzo Maffione if (na->active_fds <= 0) 391*4f80b14cSVincenzo Maffione netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na); 392*4f80b14cSVincenzo Maffione if (nmd->active == 1) { 393*4f80b14cSVincenzo Maffione last_user = 1; 394*4f80b14cSVincenzo Maffione /* 395*4f80b14cSVincenzo Maffione * Reset the allocator when it falls out of use so that any 396*4f80b14cSVincenzo Maffione * pool resources leaked by unclean application exits are 397*4f80b14cSVincenzo Maffione * reclaimed. 
398*4f80b14cSVincenzo Maffione */ 399*4f80b14cSVincenzo Maffione netmap_mem_init_bitmaps(nmd); 40037e3a6d3SLuigi Rizzo } 40137e3a6d3SLuigi Rizzo nmd->ops->nmd_deref(nmd); 40237e3a6d3SLuigi Rizzo 403847bf383SLuigi Rizzo NMA_UNLOCK(nmd); 404*4f80b14cSVincenzo Maffione return last_user; 405847bf383SLuigi Rizzo } 406847bf383SLuigi Rizzo 407847bf383SLuigi Rizzo 4084bf50f18SLuigi Rizzo /* accessor functions */ 40937e3a6d3SLuigi Rizzo static int 410847bf383SLuigi Rizzo netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) 4114bf50f18SLuigi Rizzo { 412847bf383SLuigi Rizzo lut->lut = nmd->pools[NETMAP_BUF_POOL].lut; 413*4f80b14cSVincenzo Maffione #ifdef __FreeBSD__ 414*4f80b14cSVincenzo Maffione lut->plut = lut->lut; 415*4f80b14cSVincenzo Maffione #endif 416847bf383SLuigi Rizzo lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; 417847bf383SLuigi Rizzo lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; 41837e3a6d3SLuigi Rizzo 41937e3a6d3SLuigi Rizzo return 0; 4204bf50f18SLuigi Rizzo } 4214bf50f18SLuigi Rizzo 42237e3a6d3SLuigi Rizzo static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = { 423f0ea3689SLuigi Rizzo [NETMAP_IF_POOL] = { 424f0ea3689SLuigi Rizzo .size = 1024, 42537e3a6d3SLuigi Rizzo .num = 2, 426f0ea3689SLuigi Rizzo }, 427f0ea3689SLuigi Rizzo [NETMAP_RING_POOL] = { 428f0ea3689SLuigi Rizzo .size = 5*PAGE_SIZE, 429f0ea3689SLuigi Rizzo .num = 4, 430f0ea3689SLuigi Rizzo }, 431f0ea3689SLuigi Rizzo [NETMAP_BUF_POOL] = { 432f0ea3689SLuigi Rizzo .size = 2048, 433f0ea3689SLuigi Rizzo .num = 4098, 434f0ea3689SLuigi Rizzo }, 435f0ea3689SLuigi Rizzo }; 436f0ea3689SLuigi Rizzo 437ccdc3305SLuigi Rizzo 4382579e2d7SLuigi Rizzo /* 4392579e2d7SLuigi Rizzo * nm_mem is the memory allocator used for all physical interfaces 4402579e2d7SLuigi Rizzo * running in netmap mode. 4412579e2d7SLuigi Rizzo * Virtual (VALE) ports will have each its own allocator. 
4422579e2d7SLuigi Rizzo */ 443847bf383SLuigi Rizzo extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */ 444ce3ee1e7SLuigi Rizzo struct netmap_mem_d nm_mem = { /* Our memory allocator. */ 4458241616dSLuigi Rizzo .pools = { 4468241616dSLuigi Rizzo [NETMAP_IF_POOL] = { 4478241616dSLuigi Rizzo .name = "netmap_if", 4488241616dSLuigi Rizzo .objminsize = sizeof(struct netmap_if), 4498241616dSLuigi Rizzo .objmaxsize = 4096, 4508241616dSLuigi Rizzo .nummin = 10, /* don't be stingy */ 4518241616dSLuigi Rizzo .nummax = 10000, /* XXX very large */ 4528241616dSLuigi Rizzo }, 4538241616dSLuigi Rizzo [NETMAP_RING_POOL] = { 4548241616dSLuigi Rizzo .name = "netmap_ring", 4558241616dSLuigi Rizzo .objminsize = sizeof(struct netmap_ring), 4568241616dSLuigi Rizzo .objmaxsize = 32*PAGE_SIZE, 4578241616dSLuigi Rizzo .nummin = 2, 4588241616dSLuigi Rizzo .nummax = 1024, 4598241616dSLuigi Rizzo }, 4608241616dSLuigi Rizzo [NETMAP_BUF_POOL] = { 4618241616dSLuigi Rizzo .name = "netmap_buf", 4628241616dSLuigi Rizzo .objminsize = 64, 4638241616dSLuigi Rizzo .objmaxsize = 65536, 4648241616dSLuigi Rizzo .nummin = 4, 4658241616dSLuigi Rizzo .nummax = 1000000, /* one million! 
*/ 4668241616dSLuigi Rizzo }, 4678241616dSLuigi Rizzo }, 468f0ea3689SLuigi Rizzo 469c3e9b4dbSLuiz Otavio O Souza .params = { 470c3e9b4dbSLuiz Otavio O Souza [NETMAP_IF_POOL] = { 471c3e9b4dbSLuiz Otavio O Souza .size = 1024, 472c3e9b4dbSLuiz Otavio O Souza .num = 100, 473c3e9b4dbSLuiz Otavio O Souza }, 474c3e9b4dbSLuiz Otavio O Souza [NETMAP_RING_POOL] = { 475c3e9b4dbSLuiz Otavio O Souza .size = 9*PAGE_SIZE, 476c3e9b4dbSLuiz Otavio O Souza .num = 200, 477c3e9b4dbSLuiz Otavio O Souza }, 478c3e9b4dbSLuiz Otavio O Souza [NETMAP_BUF_POOL] = { 479c3e9b4dbSLuiz Otavio O Souza .size = 2048, 480c3e9b4dbSLuiz Otavio O Souza .num = NETMAP_BUF_MAX_NUM, 481c3e9b4dbSLuiz Otavio O Souza }, 482c3e9b4dbSLuiz Otavio O Souza }, 483c3e9b4dbSLuiz Otavio O Souza 484f0ea3689SLuigi Rizzo .nm_id = 1, 4854bf50f18SLuigi Rizzo .nm_grp = -1, 486f0ea3689SLuigi Rizzo 487f0ea3689SLuigi Rizzo .prev = &nm_mem, 488f0ea3689SLuigi Rizzo .next = &nm_mem, 489847bf383SLuigi Rizzo 490c3e9b4dbSLuiz Otavio O Souza .ops = &netmap_mem_global_ops, 491c3e9b4dbSLuiz Otavio O Souza 492c3e9b4dbSLuiz Otavio O Souza .name = "1" 493ccdc3305SLuigi Rizzo }; 494ccdc3305SLuigi Rizzo 495ce3ee1e7SLuigi Rizzo 496ce3ee1e7SLuigi Rizzo /* blueprint for the private memory allocators */ 49737e3a6d3SLuigi Rizzo /* XXX clang is not happy about using name as a print format */ 49837e3a6d3SLuigi Rizzo static const struct netmap_mem_d nm_blueprint = { 499ce3ee1e7SLuigi Rizzo .pools = { 500ce3ee1e7SLuigi Rizzo [NETMAP_IF_POOL] = { 501ce3ee1e7SLuigi Rizzo .name = "%s_if", 502ce3ee1e7SLuigi Rizzo .objminsize = sizeof(struct netmap_if), 503ce3ee1e7SLuigi Rizzo .objmaxsize = 4096, 504ce3ee1e7SLuigi Rizzo .nummin = 1, 505f0ea3689SLuigi Rizzo .nummax = 100, 506ce3ee1e7SLuigi Rizzo }, 507ce3ee1e7SLuigi Rizzo [NETMAP_RING_POOL] = { 508ce3ee1e7SLuigi Rizzo .name = "%s_ring", 509ce3ee1e7SLuigi Rizzo .objminsize = sizeof(struct netmap_ring), 510ce3ee1e7SLuigi Rizzo .objmaxsize = 32*PAGE_SIZE, 511ce3ee1e7SLuigi Rizzo .nummin = 2, 512ce3ee1e7SLuigi 
Rizzo .nummax = 1024, 513ce3ee1e7SLuigi Rizzo }, 514ce3ee1e7SLuigi Rizzo [NETMAP_BUF_POOL] = { 515ce3ee1e7SLuigi Rizzo .name = "%s_buf", 516ce3ee1e7SLuigi Rizzo .objminsize = 64, 517ce3ee1e7SLuigi Rizzo .objmaxsize = 65536, 518ce3ee1e7SLuigi Rizzo .nummin = 4, 519ce3ee1e7SLuigi Rizzo .nummax = 1000000, /* one million! */ 520ce3ee1e7SLuigi Rizzo }, 521ce3ee1e7SLuigi Rizzo }, 522ce3ee1e7SLuigi Rizzo 523c3e9b4dbSLuiz Otavio O Souza .nm_grp = -1, 524c3e9b4dbSLuiz Otavio O Souza 525ce3ee1e7SLuigi Rizzo .flags = NETMAP_MEM_PRIVATE, 526847bf383SLuigi Rizzo 527c3e9b4dbSLuiz Otavio O Souza .ops = &netmap_mem_global_ops, 528ce3ee1e7SLuigi Rizzo }; 529ce3ee1e7SLuigi Rizzo 5308241616dSLuigi Rizzo /* memory allocator related sysctls */ 5318241616dSLuigi Rizzo 5328241616dSLuigi Rizzo #define STRINGIFY(x) #x 5338241616dSLuigi Rizzo 534ce3ee1e7SLuigi Rizzo 5358241616dSLuigi Rizzo #define DECLARE_SYSCTLS(id, name) \ 53637e3a6d3SLuigi Rizzo SYSBEGIN(mem2_ ## name); \ 5378241616dSLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ 538c3e9b4dbSLuiz Otavio O Souza CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ 5398241616dSLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ 5408241616dSLuigi Rizzo CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ 5418241616dSLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ 542c3e9b4dbSLuiz Otavio O Souza CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ 5438241616dSLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ 544f0ea3689SLuigi Rizzo CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \ 545f0ea3689SLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \ 546f0ea3689SLuigi Rizzo CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \ 547f0ea3689SLuigi Rizzo "Default size of private netmap " STRINGIFY(name) "s"); \ 
548f0ea3689SLuigi Rizzo SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \ 549f0ea3689SLuigi Rizzo CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \ 55037e3a6d3SLuigi Rizzo "Default number of private netmap " STRINGIFY(name) "s"); \ 55137e3a6d3SLuigi Rizzo SYSEND 5528241616dSLuigi Rizzo 553984ff0d9SEd Maste SYSCTL_DECL(_dev_netmap); 5548241616dSLuigi Rizzo DECLARE_SYSCTLS(NETMAP_IF_POOL, if); 5558241616dSLuigi Rizzo DECLARE_SYSCTLS(NETMAP_RING_POOL, ring); 5568241616dSLuigi Rizzo DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); 557ccdc3305SLuigi Rizzo 558c3e9b4dbSLuiz Otavio O Souza /* call with nm_mem_list_lock held */ 559f0ea3689SLuigi Rizzo static int 56037e3a6d3SLuigi Rizzo nm_mem_assign_id_locked(struct netmap_mem_d *nmd) 561f0ea3689SLuigi Rizzo { 562f0ea3689SLuigi Rizzo nm_memid_t id; 563f0ea3689SLuigi Rizzo struct netmap_mem_d *scan = netmap_last_mem_d; 564f0ea3689SLuigi Rizzo int error = ENOMEM; 565f0ea3689SLuigi Rizzo 566f0ea3689SLuigi Rizzo do { 567f0ea3689SLuigi Rizzo /* we rely on unsigned wrap around */ 568f0ea3689SLuigi Rizzo id = scan->nm_id + 1; 569f0ea3689SLuigi Rizzo if (id == 0) /* reserve 0 as error value */ 570f0ea3689SLuigi Rizzo id = 1; 571f0ea3689SLuigi Rizzo scan = scan->next; 572f0ea3689SLuigi Rizzo if (id != scan->nm_id) { 573f0ea3689SLuigi Rizzo nmd->nm_id = id; 574f0ea3689SLuigi Rizzo nmd->prev = scan->prev; 575f0ea3689SLuigi Rizzo nmd->next = scan; 576f0ea3689SLuigi Rizzo scan->prev->next = nmd; 577f0ea3689SLuigi Rizzo scan->prev = nmd; 578f0ea3689SLuigi Rizzo netmap_last_mem_d = nmd; 579c3e9b4dbSLuiz Otavio O Souza nmd->refcount = 1; 580c3e9b4dbSLuiz Otavio O Souza NM_DBG_REFC(nmd, __FUNCTION__, __LINE__); 581f0ea3689SLuigi Rizzo error = 0; 582f0ea3689SLuigi Rizzo break; 583f0ea3689SLuigi Rizzo } 584f0ea3689SLuigi Rizzo } while (scan != netmap_last_mem_d); 585f0ea3689SLuigi Rizzo 586f0ea3689SLuigi Rizzo return error; 587f0ea3689SLuigi Rizzo } 588f0ea3689SLuigi Rizzo 589c3e9b4dbSLuiz Otavio O Souza /* call with nm_mem_list_lock *not* held */ 
/*
 * Assign a fresh id to nmd and link it into the allocator list.
 * Locked wrapper around nm_mem_assign_id_locked().
 */
static int
nm_mem_assign_id(struct netmap_mem_d *nmd)
{
	int ret;

	NM_MTX_LOCK(nm_mem_list_lock);
	ret = nm_mem_assign_id_locked(nmd);
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return ret;
}

/*
 * Unlink nmd from the circular allocator list, updating the list head
 * if needed. Call with nm_mem_list_lock held.
 */
static void
nm_mem_release_id(struct netmap_mem_d *nmd)
{
	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;

	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;

	nmd->prev = nmd->next = NULL;
}

/*
 * Look up an allocator by id, skipping allocators still being prepared
 * (NETMAP_MEM_HIDDEN). On success a reference is taken and the
 * allocator returned; NULL if no visible allocator has that id.
 */
struct netmap_mem_d *
netmap_mem_find(nm_memid_t id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_last_mem_d;
	do {
		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
			nmd->refcount++;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			NM_MTX_UNLOCK(nm_mem_list_lock);
			return nmd;
		}
		nmd = nmd->next;
	} while (nmd != netmap_last_mem_d);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return NULL;
}

/*
 * Bind the allocator to the iommu group of dev: adopt the group on
 * first use, and fail (ENOMEM, also recorded in lasterr) if the
 * allocator is already bound to a different group — devices in
 * different iommu groups cannot share the same memory.
 */
static int
nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
{
	int err = 0, id;
	id = nm_iommu_group_id(dev);
	if (netmap_verbose)
		D("iommu_group %d", id);

	NMA_LOCK(nmd);

	if (nmd->nm_grp < 0)
		nmd->nm_grp = id;

	if (nmd->nm_grp != id)
		nmd->lasterr = err = ENOMEM;

	NMA_UNLOCK(nmd);
	return err;
}

/*
 * Allocate a lookup table of nobj entries; vmalloc on Linux (may be
 * large), nm_os_malloc elsewhere. Returns NULL on failure.
 */
static struct lut_entry *
nm_alloc_lut(u_int nobj)
{
	size_t n = sizeof(struct lut_entry) * nobj;
	struct lut_entry *lut;
#ifdef linux
	lut = vmalloc(n);
#else
	lut = nm_os_malloc(n);
#endif
	return lut;
}

/*
 * Zero and free a lookup table of objtotal entries (zeroing defends
 * against stale pointer reuse).
 */
static void
nm_free_lut(struct lut_entry *lut, u_int objtotal)
{
	bzero(lut, sizeof(struct lut_entry) * objtotal);
#ifdef linux
	vfree(lut);
#else
	nm_os_free(lut);
#endif
}
679*4f80b14cSVincenzo Maffione #if defined(linux) || defined(_WIN32) 680*4f80b14cSVincenzo Maffione static struct plut_entry * 681*4f80b14cSVincenzo Maffione nm_alloc_plut(u_int nobj) 682*4f80b14cSVincenzo Maffione { 683*4f80b14cSVincenzo Maffione size_t n = sizeof(struct plut_entry) * nobj; 684*4f80b14cSVincenzo Maffione struct plut_entry *lut; 685*4f80b14cSVincenzo Maffione lut = vmalloc(n); 686*4f80b14cSVincenzo Maffione return lut; 687*4f80b14cSVincenzo Maffione } 688*4f80b14cSVincenzo Maffione 689*4f80b14cSVincenzo Maffione static void 690*4f80b14cSVincenzo Maffione nm_free_plut(struct plut_entry * lut) 691*4f80b14cSVincenzo Maffione { 692*4f80b14cSVincenzo Maffione vfree(lut); 693*4f80b14cSVincenzo Maffione } 694*4f80b14cSVincenzo Maffione #endif /* linux or _WIN32 */ 695*4f80b14cSVincenzo Maffione 696*4f80b14cSVincenzo Maffione 697ccdc3305SLuigi Rizzo /* 6982579e2d7SLuigi Rizzo * First, find the allocator that contains the requested offset, 6992579e2d7SLuigi Rizzo * then locate the cluster through a lookup table. 700ccdc3305SLuigi Rizzo */ 701847bf383SLuigi Rizzo static vm_paddr_t 702847bf383SLuigi Rizzo netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset) 703ccdc3305SLuigi Rizzo { 704ccdc3305SLuigi Rizzo int i; 705ce3ee1e7SLuigi Rizzo vm_ooffset_t o = offset; 706ce3ee1e7SLuigi Rizzo vm_paddr_t pa; 707ce3ee1e7SLuigi Rizzo struct netmap_obj_pool *p; 708ccdc3305SLuigi Rizzo 709*4f80b14cSVincenzo Maffione #if defined(__FreeBSD__) 710*4f80b14cSVincenzo Maffione /* This function is called by netmap_dev_pager_fault(), which holds a 711*4f80b14cSVincenzo Maffione * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we 712*4f80b14cSVincenzo Maffione * spin on the trylock. 
*/ 713*4f80b14cSVincenzo Maffione NMA_SPINLOCK(nmd); 714*4f80b14cSVincenzo Maffione #else 715ce3ee1e7SLuigi Rizzo NMA_LOCK(nmd); 716*4f80b14cSVincenzo Maffione #endif 717ce3ee1e7SLuigi Rizzo p = nmd->pools; 718ce3ee1e7SLuigi Rizzo 719ce3ee1e7SLuigi Rizzo for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { 720ce3ee1e7SLuigi Rizzo if (offset >= p[i].memtotal) 721ccdc3305SLuigi Rizzo continue; 7222579e2d7SLuigi Rizzo // now lookup the cluster's address 72337e3a6d3SLuigi Rizzo #ifndef _WIN32 7244bf50f18SLuigi Rizzo pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) + 7258241616dSLuigi Rizzo offset % p[i]._objsize; 72637e3a6d3SLuigi Rizzo #else 72737e3a6d3SLuigi Rizzo pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr); 72837e3a6d3SLuigi Rizzo pa.QuadPart += offset % p[i]._objsize; 72937e3a6d3SLuigi Rizzo #endif 730ce3ee1e7SLuigi Rizzo NMA_UNLOCK(nmd); 731ce3ee1e7SLuigi Rizzo return pa; 732ccdc3305SLuigi Rizzo } 7338241616dSLuigi Rizzo /* this is only in case of errors */ 734b1123b01SLuigi Rizzo D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, 735ce3ee1e7SLuigi Rizzo p[NETMAP_IF_POOL].memtotal, 736ce3ee1e7SLuigi Rizzo p[NETMAP_IF_POOL].memtotal 737ce3ee1e7SLuigi Rizzo + p[NETMAP_RING_POOL].memtotal, 738ce3ee1e7SLuigi Rizzo p[NETMAP_IF_POOL].memtotal 739ce3ee1e7SLuigi Rizzo + p[NETMAP_RING_POOL].memtotal 740ce3ee1e7SLuigi Rizzo + p[NETMAP_BUF_POOL].memtotal); 741ce3ee1e7SLuigi Rizzo NMA_UNLOCK(nmd); 74237e3a6d3SLuigi Rizzo #ifndef _WIN32 743*4f80b14cSVincenzo Maffione return 0; /* bad address */ 74437e3a6d3SLuigi Rizzo #else 74537e3a6d3SLuigi Rizzo vm_paddr_t res; 74637e3a6d3SLuigi Rizzo res.QuadPart = 0; 74737e3a6d3SLuigi Rizzo return res; 74837e3a6d3SLuigi Rizzo #endif 74937e3a6d3SLuigi Rizzo } 75037e3a6d3SLuigi Rizzo 75137e3a6d3SLuigi Rizzo #ifdef _WIN32 75237e3a6d3SLuigi Rizzo 75337e3a6d3SLuigi Rizzo /* 75437e3a6d3SLuigi Rizzo * win32_build_virtual_memory_for_userspace 75537e3a6d3SLuigi Rizzo * 75637e3a6d3SLuigi Rizzo * This function get all the 
object making part of the pools and maps 75737e3a6d3SLuigi Rizzo * a contiguous virtual memory space for the userspace 75837e3a6d3SLuigi Rizzo * It works this way 75937e3a6d3SLuigi Rizzo * 1 - allocate a Memory Descriptor List wide as the sum 76037e3a6d3SLuigi Rizzo * of the memory needed for the pools 76137e3a6d3SLuigi Rizzo * 2 - cycle all the objects in every pool and for every object do 76237e3a6d3SLuigi Rizzo * 76337e3a6d3SLuigi Rizzo * 2a - cycle all the objects in every pool, get the list 76437e3a6d3SLuigi Rizzo * of the physical address descriptors 76537e3a6d3SLuigi Rizzo * 2b - calculate the offset in the array of pages desciptor in the 76637e3a6d3SLuigi Rizzo * main MDL 76737e3a6d3SLuigi Rizzo * 2c - copy the descriptors of the object in the main MDL 76837e3a6d3SLuigi Rizzo * 76937e3a6d3SLuigi Rizzo * 3 - return the resulting MDL that needs to be mapped in userland 77037e3a6d3SLuigi Rizzo * 77137e3a6d3SLuigi Rizzo * In this way we will have an MDL that describes all the memory for the 77237e3a6d3SLuigi Rizzo * objects in a single object 77337e3a6d3SLuigi Rizzo */ 77437e3a6d3SLuigi Rizzo 77537e3a6d3SLuigi Rizzo PMDL 77637e3a6d3SLuigi Rizzo win32_build_user_vm_map(struct netmap_mem_d* nmd) 77737e3a6d3SLuigi Rizzo { 77837e3a6d3SLuigi Rizzo int i, j; 779*4f80b14cSVincenzo Maffione size_t memsize; 780*4f80b14cSVincenzo Maffione u_int memflags, ofs = 0; 78137e3a6d3SLuigi Rizzo PMDL mainMdl, tempMdl; 78237e3a6d3SLuigi Rizzo 78337e3a6d3SLuigi Rizzo if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) { 78437e3a6d3SLuigi Rizzo D("memory not finalised yet"); 78537e3a6d3SLuigi Rizzo return NULL; 78637e3a6d3SLuigi Rizzo } 78737e3a6d3SLuigi Rizzo 78837e3a6d3SLuigi Rizzo mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL); 78937e3a6d3SLuigi Rizzo if (mainMdl == NULL) { 79037e3a6d3SLuigi Rizzo D("failed to allocate mdl"); 79137e3a6d3SLuigi Rizzo return NULL; 79237e3a6d3SLuigi Rizzo } 79337e3a6d3SLuigi Rizzo 79437e3a6d3SLuigi Rizzo NMA_LOCK(nmd); 
79537e3a6d3SLuigi Rizzo for (i = 0; i < NETMAP_POOLS_NR; i++) { 79637e3a6d3SLuigi Rizzo struct netmap_obj_pool *p = &nmd->pools[i]; 79737e3a6d3SLuigi Rizzo int clsz = p->_clustsize; 79837e3a6d3SLuigi Rizzo int clobjs = p->_clustentries; /* objects per cluster */ 79937e3a6d3SLuigi Rizzo int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz); 80037e3a6d3SLuigi Rizzo PPFN_NUMBER pSrc, pDst; 80137e3a6d3SLuigi Rizzo 80237e3a6d3SLuigi Rizzo /* each pool has a different cluster size so we need to reallocate */ 80337e3a6d3SLuigi Rizzo tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL); 80437e3a6d3SLuigi Rizzo if (tempMdl == NULL) { 80537e3a6d3SLuigi Rizzo NMA_UNLOCK(nmd); 80637e3a6d3SLuigi Rizzo D("fail to allocate tempMdl"); 80737e3a6d3SLuigi Rizzo IoFreeMdl(mainMdl); 80837e3a6d3SLuigi Rizzo return NULL; 80937e3a6d3SLuigi Rizzo } 81037e3a6d3SLuigi Rizzo pSrc = MmGetMdlPfnArray(tempMdl); 81137e3a6d3SLuigi Rizzo /* create one entry per cluster, the lut[] has one entry per object */ 81237e3a6d3SLuigi Rizzo for (j = 0; j < p->numclusters; j++, ofs += clsz) { 81337e3a6d3SLuigi Rizzo pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)]; 81437e3a6d3SLuigi Rizzo MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz); 81537e3a6d3SLuigi Rizzo MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */ 81637e3a6d3SLuigi Rizzo RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */ 81737e3a6d3SLuigi Rizzo mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */ 81837e3a6d3SLuigi Rizzo } 81937e3a6d3SLuigi Rizzo IoFreeMdl(tempMdl); 82037e3a6d3SLuigi Rizzo } 82137e3a6d3SLuigi Rizzo NMA_UNLOCK(nmd); 82237e3a6d3SLuigi Rizzo return mainMdl; 82337e3a6d3SLuigi Rizzo } 82437e3a6d3SLuigi Rizzo 82537e3a6d3SLuigi Rizzo #endif /* _WIN32 */ 82637e3a6d3SLuigi Rizzo 82737e3a6d3SLuigi Rizzo /* 82837e3a6d3SLuigi Rizzo * helper function for OS-specific mmap routines (currently only windows). 
82937e3a6d3SLuigi Rizzo * Given an nmd and a pool index, returns the cluster size and number of clusters. 83037e3a6d3SLuigi Rizzo * Returns 0 if memory is finalised and the pool is valid, otherwise 1. 83137e3a6d3SLuigi Rizzo * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change. 83237e3a6d3SLuigi Rizzo */ 83337e3a6d3SLuigi Rizzo 83437e3a6d3SLuigi Rizzo int 83537e3a6d3SLuigi Rizzo netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters) 83637e3a6d3SLuigi Rizzo { 83737e3a6d3SLuigi Rizzo if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR) 83837e3a6d3SLuigi Rizzo return 1; /* invalid arguments */ 83937e3a6d3SLuigi Rizzo // NMA_LOCK_ASSERT(nmd); 84037e3a6d3SLuigi Rizzo if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { 84137e3a6d3SLuigi Rizzo *clustsize = *numclusters = 0; 84237e3a6d3SLuigi Rizzo return 1; /* not ready yet */ 84337e3a6d3SLuigi Rizzo } 84437e3a6d3SLuigi Rizzo *clustsize = nmd->pools[pool]._clustsize; 84537e3a6d3SLuigi Rizzo *numclusters = nmd->pools[pool].numclusters; 84637e3a6d3SLuigi Rizzo return 0; /* success */ 847ccdc3305SLuigi Rizzo } 848ccdc3305SLuigi Rizzo 849847bf383SLuigi Rizzo static int 850*4f80b14cSVincenzo Maffione netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size, u_int *memflags, 851f0ea3689SLuigi Rizzo nm_memid_t *id) 852ce3ee1e7SLuigi Rizzo { 853ce3ee1e7SLuigi Rizzo int error = 0; 854ce3ee1e7SLuigi Rizzo NMA_LOCK(nmd); 855847bf383SLuigi Rizzo error = netmap_mem_config(nmd); 856ce3ee1e7SLuigi Rizzo if (error) 857ce3ee1e7SLuigi Rizzo goto out; 8584bf50f18SLuigi Rizzo if (size) { 859ce3ee1e7SLuigi Rizzo if (nmd->flags & NETMAP_MEM_FINALIZED) { 860ce3ee1e7SLuigi Rizzo *size = nmd->nm_totalsize; 861ce3ee1e7SLuigi Rizzo } else { 862ce3ee1e7SLuigi Rizzo int i; 863ce3ee1e7SLuigi Rizzo *size = 0; 864ce3ee1e7SLuigi Rizzo for (i = 0; i < NETMAP_POOLS_NR; i++) { 865ce3ee1e7SLuigi Rizzo struct netmap_obj_pool *p = nmd->pools + i; 866ce3ee1e7SLuigi Rizzo 
*size += (p->_numclusters * p->_clustsize); 867ce3ee1e7SLuigi Rizzo } 868ce3ee1e7SLuigi Rizzo } 8694bf50f18SLuigi Rizzo } 8704bf50f18SLuigi Rizzo if (memflags) 871ce3ee1e7SLuigi Rizzo *memflags = nmd->flags; 8724bf50f18SLuigi Rizzo if (id) 873f0ea3689SLuigi Rizzo *id = nmd->nm_id; 874ce3ee1e7SLuigi Rizzo out: 875ce3ee1e7SLuigi Rizzo NMA_UNLOCK(nmd); 876ce3ee1e7SLuigi Rizzo return error; 877ce3ee1e7SLuigi Rizzo } 878ce3ee1e7SLuigi Rizzo 879ccdc3305SLuigi Rizzo /* 880ccdc3305SLuigi Rizzo * we store objects by kernel address, need to find the offset 881ccdc3305SLuigi Rizzo * within the pool to export the value to userspace. 882ccdc3305SLuigi Rizzo * Algorithm: scan until we find the cluster, then add the 883ccdc3305SLuigi Rizzo * actual offset in the cluster 884ccdc3305SLuigi Rizzo */ 885ce2cb792SLuigi Rizzo static ssize_t 886ccdc3305SLuigi Rizzo netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) 887ccdc3305SLuigi Rizzo { 888ce3ee1e7SLuigi Rizzo int i, k = p->_clustentries, n = p->objtotal; 889ccdc3305SLuigi Rizzo ssize_t ofs = 0; 890ccdc3305SLuigi Rizzo 891ccdc3305SLuigi Rizzo for (i = 0; i < n; i += k, ofs += p->_clustsize) { 892ccdc3305SLuigi Rizzo const char *base = p->lut[i].vaddr; 893ccdc3305SLuigi Rizzo ssize_t relofs = (const char *) vaddr - base; 894ccdc3305SLuigi Rizzo 895aa76317cSLuigi Rizzo if (relofs < 0 || relofs >= p->_clustsize) 896ccdc3305SLuigi Rizzo continue; 897ccdc3305SLuigi Rizzo 898ccdc3305SLuigi Rizzo ofs = ofs + relofs; 899ccdc3305SLuigi Rizzo ND("%s: return offset %d (cluster %d) for pointer %p", 900ccdc3305SLuigi Rizzo p->name, ofs, i, vaddr); 901ccdc3305SLuigi Rizzo return ofs; 902ccdc3305SLuigi Rizzo } 903ccdc3305SLuigi Rizzo D("address %p is not contained inside any cluster (%s)", 904ccdc3305SLuigi Rizzo vaddr, p->name); 905ccdc3305SLuigi Rizzo return 0; /* An error occurred */ 906ccdc3305SLuigi Rizzo } 907ccdc3305SLuigi Rizzo 908ccdc3305SLuigi Rizzo /* Helper functions which convert virtual addresses to offsets */ 
909ce3ee1e7SLuigi Rizzo #define netmap_if_offset(n, v) \ 910ce3ee1e7SLuigi Rizzo netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) 911ccdc3305SLuigi Rizzo 912ce3ee1e7SLuigi Rizzo #define netmap_ring_offset(n, v) \ 913ce3ee1e7SLuigi Rizzo ((n)->pools[NETMAP_IF_POOL].memtotal + \ 914ce3ee1e7SLuigi Rizzo netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) 915ccdc3305SLuigi Rizzo 916847bf383SLuigi Rizzo static ssize_t 917847bf383SLuigi Rizzo netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr) 918ce3ee1e7SLuigi Rizzo { 919ce3ee1e7SLuigi Rizzo ssize_t v; 920ce3ee1e7SLuigi Rizzo NMA_LOCK(nmd); 921ce3ee1e7SLuigi Rizzo v = netmap_if_offset(nmd, addr); 922ce3ee1e7SLuigi Rizzo NMA_UNLOCK(nmd); 923ce3ee1e7SLuigi Rizzo return v; 924ce3ee1e7SLuigi Rizzo } 925ce3ee1e7SLuigi Rizzo 9268241616dSLuigi Rizzo /* 9278241616dSLuigi Rizzo * report the index, and use start position as a hint, 9288241616dSLuigi Rizzo * otherwise buffer allocation becomes terribly expensive. 9298241616dSLuigi Rizzo */ 930ccdc3305SLuigi Rizzo static void * 931ce3ee1e7SLuigi Rizzo netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index) 932ccdc3305SLuigi Rizzo { 933ccdc3305SLuigi Rizzo uint32_t i = 0; /* index in the bitmap */ 93437e3a6d3SLuigi Rizzo uint32_t mask, j = 0; /* slot counter */ 935ccdc3305SLuigi Rizzo void *vaddr = NULL; 936ccdc3305SLuigi Rizzo 937ccdc3305SLuigi Rizzo if (len > p->_objsize) { 938ccdc3305SLuigi Rizzo D("%s request size %d too large", p->name, len); 939ccdc3305SLuigi Rizzo return NULL; 940ccdc3305SLuigi Rizzo } 941ccdc3305SLuigi Rizzo 942ccdc3305SLuigi Rizzo if (p->objfree == 0) { 943f9790aebSLuigi Rizzo D("no more %s objects", p->name); 944ccdc3305SLuigi Rizzo return NULL; 945ccdc3305SLuigi Rizzo } 9468241616dSLuigi Rizzo if (start) 9478241616dSLuigi Rizzo i = *start; 948ccdc3305SLuigi Rizzo 9498241616dSLuigi Rizzo /* termination is guaranteed by p->free, but better check bounds on i */ 9508241616dSLuigi Rizzo while (vaddr == NULL 
&& i < p->bitmap_slots) { 951ccdc3305SLuigi Rizzo uint32_t cur = p->bitmap[i]; 952ccdc3305SLuigi Rizzo if (cur == 0) { /* bitmask is fully used */ 953ccdc3305SLuigi Rizzo i++; 954ccdc3305SLuigi Rizzo continue; 955ccdc3305SLuigi Rizzo } 956ccdc3305SLuigi Rizzo /* locate a slot */ 957ccdc3305SLuigi Rizzo for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) 958ccdc3305SLuigi Rizzo ; 959ccdc3305SLuigi Rizzo 960ccdc3305SLuigi Rizzo p->bitmap[i] &= ~mask; /* mark object as in use */ 961ccdc3305SLuigi Rizzo p->objfree--; 962ccdc3305SLuigi Rizzo 963ccdc3305SLuigi Rizzo vaddr = p->lut[i * 32 + j].vaddr; 9648241616dSLuigi Rizzo if (index) 9658241616dSLuigi Rizzo *index = i * 32 + j; 966ccdc3305SLuigi Rizzo } 96737e3a6d3SLuigi Rizzo ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr); 968ccdc3305SLuigi Rizzo 9698241616dSLuigi Rizzo if (start) 9708241616dSLuigi Rizzo *start = i; 971ccdc3305SLuigi Rizzo return vaddr; 972ccdc3305SLuigi Rizzo } 973ccdc3305SLuigi Rizzo 974ccdc3305SLuigi Rizzo 975ccdc3305SLuigi Rizzo /* 976f0ea3689SLuigi Rizzo * free by index, not by address. 977f0ea3689SLuigi Rizzo * XXX should we also cleanup the content ? 
978ccdc3305SLuigi Rizzo */ 979f0ea3689SLuigi Rizzo static int 980ccdc3305SLuigi Rizzo netmap_obj_free(struct netmap_obj_pool *p, uint32_t j) 981ccdc3305SLuigi Rizzo { 982f0ea3689SLuigi Rizzo uint32_t *ptr, mask; 983f0ea3689SLuigi Rizzo 984ccdc3305SLuigi Rizzo if (j >= p->objtotal) { 985ccdc3305SLuigi Rizzo D("invalid index %u, max %u", j, p->objtotal); 986f0ea3689SLuigi Rizzo return 1; 987ccdc3305SLuigi Rizzo } 988f0ea3689SLuigi Rizzo ptr = &p->bitmap[j / 32]; 989f0ea3689SLuigi Rizzo mask = (1 << (j % 32)); 990f0ea3689SLuigi Rizzo if (*ptr & mask) { 991f0ea3689SLuigi Rizzo D("ouch, double free on buffer %d", j); 992f0ea3689SLuigi Rizzo return 1; 993f0ea3689SLuigi Rizzo } else { 994f0ea3689SLuigi Rizzo *ptr |= mask; 995ccdc3305SLuigi Rizzo p->objfree++; 996f0ea3689SLuigi Rizzo return 0; 997f0ea3689SLuigi Rizzo } 998ccdc3305SLuigi Rizzo } 999ccdc3305SLuigi Rizzo 1000f0ea3689SLuigi Rizzo /* 1001f0ea3689SLuigi Rizzo * free by address. This is slow but is only used for a few 1002f0ea3689SLuigi Rizzo * objects (rings, nifp) 1003f0ea3689SLuigi Rizzo */ 1004ccdc3305SLuigi Rizzo static void 1005ccdc3305SLuigi Rizzo netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) 1006ccdc3305SLuigi Rizzo { 1007ce3ee1e7SLuigi Rizzo u_int i, j, n = p->numclusters; 1008ccdc3305SLuigi Rizzo 1009ce3ee1e7SLuigi Rizzo for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { 1010ce3ee1e7SLuigi Rizzo void *base = p->lut[i * p->_clustentries].vaddr; 1011ccdc3305SLuigi Rizzo ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; 1012ccdc3305SLuigi Rizzo 1013ccdc3305SLuigi Rizzo /* Given address, is out of the scope of the current cluster.*/ 1014*4f80b14cSVincenzo Maffione if (base == NULL || vaddr < base || relofs >= p->_clustsize) 1015ccdc3305SLuigi Rizzo continue; 1016ccdc3305SLuigi Rizzo 1017ccdc3305SLuigi Rizzo j = j + relofs / p->_objsize; 1018ce3ee1e7SLuigi Rizzo /* KASSERT(j != 0, ("Cannot free object 0")); */ 1019ccdc3305SLuigi Rizzo netmap_obj_free(p, j); 1020ccdc3305SLuigi Rizzo return; 
1021ccdc3305SLuigi Rizzo } 1022ae10d1afSLuigi Rizzo D("address %p is not contained inside any cluster (%s)", 1023ccdc3305SLuigi Rizzo vaddr, p->name); 1024ccdc3305SLuigi Rizzo } 1025ccdc3305SLuigi Rizzo 1026*4f80b14cSVincenzo Maffione unsigned 1027*4f80b14cSVincenzo Maffione netmap_mem_bufsize(struct netmap_mem_d *nmd) 1028*4f80b14cSVincenzo Maffione { 1029*4f80b14cSVincenzo Maffione return nmd->pools[NETMAP_BUF_POOL]._objsize; 1030*4f80b14cSVincenzo Maffione } 10314bf50f18SLuigi Rizzo 1032ce3ee1e7SLuigi Rizzo #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL) 1033ce3ee1e7SLuigi Rizzo #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v)) 1034ce3ee1e7SLuigi Rizzo #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL) 1035ce3ee1e7SLuigi Rizzo #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v)) 1036ce3ee1e7SLuigi Rizzo #define netmap_buf_malloc(n, _pos, _index) \ 10374bf50f18SLuigi Rizzo netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index) 1038ccdc3305SLuigi Rizzo 1039ccdc3305SLuigi Rizzo 1040*4f80b14cSVincenzo Maffione #if 0 /* currently unused */ 1041ccdc3305SLuigi Rizzo /* Return the index associated to the given packet buffer */ 1042ce3ee1e7SLuigi Rizzo #define netmap_buf_index(n, v) \ 1043ce3ee1e7SLuigi Rizzo (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n)) 1044f0ea3689SLuigi Rizzo #endif 1045f0ea3689SLuigi Rizzo 1046f0ea3689SLuigi Rizzo /* 1047f0ea3689SLuigi Rizzo * allocate extra buffers in a linked list. 1048f0ea3689SLuigi Rizzo * returns the actual number. 
1049f0ea3689SLuigi Rizzo */ 1050f0ea3689SLuigi Rizzo uint32_t 1051f0ea3689SLuigi Rizzo netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n) 1052f0ea3689SLuigi Rizzo { 1053f0ea3689SLuigi Rizzo struct netmap_mem_d *nmd = na->nm_mem; 1054f0ea3689SLuigi Rizzo uint32_t i, pos = 0; /* opaque, scan position in the bitmap */ 1055f0ea3689SLuigi Rizzo 1056f0ea3689SLuigi Rizzo NMA_LOCK(nmd); 1057f0ea3689SLuigi Rizzo 1058f0ea3689SLuigi Rizzo *head = 0; /* default, 'null' index ie empty list */ 1059f0ea3689SLuigi Rizzo for (i = 0 ; i < n; i++) { 1060f0ea3689SLuigi Rizzo uint32_t cur = *head; /* save current head */ 1061f0ea3689SLuigi Rizzo uint32_t *p = netmap_buf_malloc(nmd, &pos, head); 1062f0ea3689SLuigi Rizzo if (p == NULL) { 1063f0ea3689SLuigi Rizzo D("no more buffers after %d of %d", i, n); 1064f0ea3689SLuigi Rizzo *head = cur; /* restore */ 1065f0ea3689SLuigi Rizzo break; 1066f0ea3689SLuigi Rizzo } 106737e3a6d3SLuigi Rizzo ND(5, "allocate buffer %d -> %d", *head, cur); 1068f0ea3689SLuigi Rizzo *p = cur; /* link to previous head */ 1069f0ea3689SLuigi Rizzo } 1070f0ea3689SLuigi Rizzo 1071f0ea3689SLuigi Rizzo NMA_UNLOCK(nmd); 1072f0ea3689SLuigi Rizzo 1073f0ea3689SLuigi Rizzo return i; 1074f0ea3689SLuigi Rizzo } 1075f0ea3689SLuigi Rizzo 1076f0ea3689SLuigi Rizzo static void 1077f0ea3689SLuigi Rizzo netmap_extra_free(struct netmap_adapter *na, uint32_t head) 1078f0ea3689SLuigi Rizzo { 1079847bf383SLuigi Rizzo struct lut_entry *lut = na->na_lut.lut; 1080f0ea3689SLuigi Rizzo struct netmap_mem_d *nmd = na->nm_mem; 1081f0ea3689SLuigi Rizzo struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1082f0ea3689SLuigi Rizzo uint32_t i, cur, *buf; 1083f0ea3689SLuigi Rizzo 108437e3a6d3SLuigi Rizzo ND("freeing the extra list"); 1085f0ea3689SLuigi Rizzo for (i = 0; head >=2 && head < p->objtotal; i++) { 1086f0ea3689SLuigi Rizzo cur = head; 1087f0ea3689SLuigi Rizzo buf = lut[head].vaddr; 1088f0ea3689SLuigi Rizzo head = *buf; 1089f0ea3689SLuigi Rizzo *buf = 0; 
1090f0ea3689SLuigi Rizzo if (netmap_obj_free(p, cur)) 1091f0ea3689SLuigi Rizzo break; 1092f0ea3689SLuigi Rizzo } 1093f0ea3689SLuigi Rizzo if (head != 0) 1094f0ea3689SLuigi Rizzo D("breaking with head %d", head); 109537e3a6d3SLuigi Rizzo if (netmap_verbose) 1096f0ea3689SLuigi Rizzo D("freed %d buffers", i); 1097f0ea3689SLuigi Rizzo } 1098ccdc3305SLuigi Rizzo 1099ccdc3305SLuigi Rizzo 11008241616dSLuigi Rizzo /* Return nonzero on error */ 11018241616dSLuigi Rizzo static int 1102f9790aebSLuigi Rizzo netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 1103ccdc3305SLuigi Rizzo { 1104ce3ee1e7SLuigi Rizzo struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1105ce3ee1e7SLuigi Rizzo u_int i = 0; /* slot counter */ 11068241616dSLuigi Rizzo uint32_t pos = 0; /* slot in p->bitmap */ 11078241616dSLuigi Rizzo uint32_t index = 0; /* buffer index */ 1108ccdc3305SLuigi Rizzo 1109ccdc3305SLuigi Rizzo for (i = 0; i < n; i++) { 1110ce3ee1e7SLuigi Rizzo void *vaddr = netmap_buf_malloc(nmd, &pos, &index); 1111ccdc3305SLuigi Rizzo if (vaddr == NULL) { 1112f9790aebSLuigi Rizzo D("no more buffers after %d of %d", i, n); 1113ccdc3305SLuigi Rizzo goto cleanup; 1114ccdc3305SLuigi Rizzo } 11158241616dSLuigi Rizzo slot[i].buf_idx = index; 1116ccdc3305SLuigi Rizzo slot[i].len = p->_objsize; 1117f9790aebSLuigi Rizzo slot[i].flags = 0; 1118*4f80b14cSVincenzo Maffione slot[i].ptr = 0; 1119ccdc3305SLuigi Rizzo } 1120ccdc3305SLuigi Rizzo 11218241616dSLuigi Rizzo ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos); 11228241616dSLuigi Rizzo return (0); 1123ccdc3305SLuigi Rizzo 1124ccdc3305SLuigi Rizzo cleanup: 11254cf8455fSEd Maste while (i > 0) { 11264cf8455fSEd Maste i--; 11278241616dSLuigi Rizzo netmap_obj_free(p, slot[i].buf_idx); 1128ccdc3305SLuigi Rizzo } 11298241616dSLuigi Rizzo bzero(slot, n * sizeof(slot[0])); 11308241616dSLuigi Rizzo return (ENOMEM); 1131ccdc3305SLuigi Rizzo } 1132ccdc3305SLuigi Rizzo 1133f0ea3689SLuigi Rizzo static void 
1134f0ea3689SLuigi Rizzo netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index) 1135f0ea3689SLuigi Rizzo { 1136f0ea3689SLuigi Rizzo struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1137f0ea3689SLuigi Rizzo u_int i; 1138f0ea3689SLuigi Rizzo 1139f0ea3689SLuigi Rizzo for (i = 0; i < n; i++) { 1140f0ea3689SLuigi Rizzo slot[i].buf_idx = index; 1141f0ea3689SLuigi Rizzo slot[i].len = p->_objsize; 1142f0ea3689SLuigi Rizzo slot[i].flags = 0; 1143f0ea3689SLuigi Rizzo } 1144f0ea3689SLuigi Rizzo } 1145f0ea3689SLuigi Rizzo 1146ccdc3305SLuigi Rizzo 1147ccdc3305SLuigi Rizzo static void 1148f9790aebSLuigi Rizzo netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i) 1149ccdc3305SLuigi Rizzo { 1150ce3ee1e7SLuigi Rizzo struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 11518241616dSLuigi Rizzo 1152ccdc3305SLuigi Rizzo if (i < 2 || i >= p->objtotal) { 1153ccdc3305SLuigi Rizzo D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); 1154ccdc3305SLuigi Rizzo return; 1155ccdc3305SLuigi Rizzo } 11568241616dSLuigi Rizzo netmap_obj_free(p, i); 1157ccdc3305SLuigi Rizzo } 1158ccdc3305SLuigi Rizzo 1159f0ea3689SLuigi Rizzo 1160f0ea3689SLuigi Rizzo static void 1161f0ea3689SLuigi Rizzo netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 1162f0ea3689SLuigi Rizzo { 1163f0ea3689SLuigi Rizzo u_int i; 1164f0ea3689SLuigi Rizzo 1165f0ea3689SLuigi Rizzo for (i = 0; i < n; i++) { 1166f0ea3689SLuigi Rizzo if (slot[i].buf_idx > 2) 1167f0ea3689SLuigi Rizzo netmap_free_buf(nmd, slot[i].buf_idx); 1168f0ea3689SLuigi Rizzo } 1169f0ea3689SLuigi Rizzo } 1170f0ea3689SLuigi Rizzo 11718241616dSLuigi Rizzo static void 11728241616dSLuigi Rizzo netmap_reset_obj_allocator(struct netmap_obj_pool *p) 11738241616dSLuigi Rizzo { 1174ce3ee1e7SLuigi Rizzo 11758241616dSLuigi Rizzo if (p == NULL) 11768241616dSLuigi Rizzo return; 11778241616dSLuigi Rizzo if (p->bitmap) 1178c3e9b4dbSLuiz Otavio O Souza nm_os_free(p->bitmap); 11798241616dSLuigi 
Rizzo p->bitmap = NULL; 1180*4f80b14cSVincenzo Maffione if (p->invalid_bitmap) 1181*4f80b14cSVincenzo Maffione nm_os_free(p->invalid_bitmap); 1182*4f80b14cSVincenzo Maffione p->invalid_bitmap = NULL; 11838241616dSLuigi Rizzo if (p->lut) { 1184ce3ee1e7SLuigi Rizzo u_int i; 1185ce3ee1e7SLuigi Rizzo 1186dd4fcbc5SPatrick Kelsey /* 1187dd4fcbc5SPatrick Kelsey * Free each cluster allocated in 1188dd4fcbc5SPatrick Kelsey * netmap_finalize_obj_allocator(). The cluster start 1189dd4fcbc5SPatrick Kelsey * addresses are stored at multiples of p->_clusterentries 1190dd4fcbc5SPatrick Kelsey * in the lut. 1191dd4fcbc5SPatrick Kelsey */ 1192ce3ee1e7SLuigi Rizzo for (i = 0; i < p->objtotal; i += p->_clustentries) { 119337e3a6d3SLuigi Rizzo contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP); 11948241616dSLuigi Rizzo } 1195*4f80b14cSVincenzo Maffione nm_free_lut(p->lut, p->objtotal); 11968241616dSLuigi Rizzo } 11978241616dSLuigi Rizzo p->lut = NULL; 1198ce3ee1e7SLuigi Rizzo p->objtotal = 0; 1199ce3ee1e7SLuigi Rizzo p->memtotal = 0; 1200ce3ee1e7SLuigi Rizzo p->numclusters = 0; 1201ce3ee1e7SLuigi Rizzo p->objfree = 0; 12028241616dSLuigi Rizzo } 1203ccdc3305SLuigi Rizzo 1204ccdc3305SLuigi Rizzo /* 1205ccdc3305SLuigi Rizzo * Free all resources related to an allocator. 1206ccdc3305SLuigi Rizzo */ 1207ccdc3305SLuigi Rizzo static void 1208ccdc3305SLuigi Rizzo netmap_destroy_obj_allocator(struct netmap_obj_pool *p) 1209ccdc3305SLuigi Rizzo { 1210ccdc3305SLuigi Rizzo if (p == NULL) 1211ccdc3305SLuigi Rizzo return; 12128241616dSLuigi Rizzo netmap_reset_obj_allocator(p); 1213ccdc3305SLuigi Rizzo } 1214ccdc3305SLuigi Rizzo 1215ccdc3305SLuigi Rizzo /* 1216ccdc3305SLuigi Rizzo * We receive a request for objtotal objects, of size objsize each. 1217ccdc3305SLuigi Rizzo * Internally we may round up both numbers, as we allocate objects 1218ccdc3305SLuigi Rizzo * in small clusters multiple of the page size. 
1219ce3ee1e7SLuigi Rizzo * We need to keep track of objtotal and clustentries, 1220ccdc3305SLuigi Rizzo * as they are needed when freeing memory. 1221ccdc3305SLuigi Rizzo * 1222ccdc3305SLuigi Rizzo * XXX note -- userspace needs the buffers to be contiguous, 1223ccdc3305SLuigi Rizzo * so we cannot afford gaps at the end of a cluster. 1224ccdc3305SLuigi Rizzo */ 12258241616dSLuigi Rizzo 12268241616dSLuigi Rizzo 12278241616dSLuigi Rizzo /* call with NMA_LOCK held */ 12288241616dSLuigi Rizzo static int 12298241616dSLuigi Rizzo netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) 1230ccdc3305SLuigi Rizzo { 1231ce3ee1e7SLuigi Rizzo int i; 1232ccdc3305SLuigi Rizzo u_int clustsize; /* the cluster size, multiple of page size */ 1233ccdc3305SLuigi Rizzo u_int clustentries; /* how many objects per entry */ 1234ccdc3305SLuigi Rizzo 1235ce3ee1e7SLuigi Rizzo /* we store the current request, so we can 1236ce3ee1e7SLuigi Rizzo * detect configuration changes later */ 1237ce3ee1e7SLuigi Rizzo p->r_objtotal = objtotal; 1238ce3ee1e7SLuigi Rizzo p->r_objsize = objsize; 1239ce3ee1e7SLuigi Rizzo 12404bf50f18SLuigi Rizzo #define MAX_CLUSTSIZE (1<<22) // 4 MB 124117885a7bSLuigi Rizzo #define LINE_ROUND NM_CACHE_ALIGN // 64 1242ccdc3305SLuigi Rizzo if (objsize >= MAX_CLUSTSIZE) { 1243ccdc3305SLuigi Rizzo /* we could do it but there is no point */ 1244ccdc3305SLuigi Rizzo D("unsupported allocation for %d bytes", objsize); 1245ce3ee1e7SLuigi Rizzo return EINVAL; 1246ccdc3305SLuigi Rizzo } 1247ccdc3305SLuigi Rizzo /* make sure objsize is a multiple of LINE_ROUND */ 1248ccdc3305SLuigi Rizzo i = (objsize & (LINE_ROUND - 1)); 1249ccdc3305SLuigi Rizzo if (i) { 1250ccdc3305SLuigi Rizzo D("XXX aligning object by %d bytes", LINE_ROUND - i); 1251ccdc3305SLuigi Rizzo objsize += LINE_ROUND - i; 1252ccdc3305SLuigi Rizzo } 12538241616dSLuigi Rizzo if (objsize < p->objminsize || objsize > p->objmaxsize) { 12548241616dSLuigi Rizzo D("requested objsize %d out of range [%d, 
%d]", objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
	}
	/* exact solution not found */
	if (clustentries == 0) {
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* compute clustsize */
	clustsize = clustentries * objsize;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}

/*
 * Materialize the memory described by the '_'-prefixed configuration
 * fields of pool 'p': allocate the lookup table and then one contiguous,
 * page-aligned cluster at a time until p->_objtotal objects are covered.
 *
 * On partial cluster-allocation failure the function does not fail
 * outright: it frees half of what was obtained (to relieve memory
 * pressure) and records the reduced totals in p->objtotal /
 * p->numclusters, so the pool comes up smaller than requested.
 * Returns 0 on (possibly partial) success, ENOMEM if even the lookup
 * table cannot be created.
 *
 * call with NMA_LOCK held
 */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed, the halving loop below counts down past 0 */
	size_t n;

	if (p->lut) {
		/* already finalized, nothing to do */
		return 0;
	}

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;

	p->lut = nm_alloc_lut(p->objtotal);
	if (p->lut == NULL) {
		D("Unable to create lookup table for '%s'", p->name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers
	 */

	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		/*
		 * XXX Note, we only need contigmalloc() for buffers attached
		 * to native interfaces. In all other cases (nifp, netmap rings
		 * and even buffers for VALE ports or emulated interfaces) we
		 * can live with standard malloc, because the hardware will not
		 * access the pages directly.
		 */
		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				/* only the first object of each cluster holds
				 * the contigmalloc'ed address; free it once */
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						n, M_NETMAP);
				p->lut[i].vaddr = NULL;
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		/*
		 * Set lut state for all buffers in the current cluster.
		 *
		 * [i, lim) is the set of buffer indexes that cover the
		 * current cluster.
		 *
		 * 'clust' is really the address of the current buffer in
		 * the current cluster as we index through it with a stride
		 * of p->_objsize.
		 */
		for (; i < lim; i++, clust += p->_objsize) {
			p->lut[i].vaddr = clust;
#if !defined(linux) && !defined(_WIN32)
			/* on FreeBSD/OSX the physical address is resolved
			 * here; on linux/_WIN32 it is handled elsewhere */
			p->lut[i].paddr = vtophys(clust);
#endif
		}
	}
	p->memtotal = p->numclusters * p->_clustsize;
	if (netmap_verbose)
		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/*
 * Compare each pool's requested (size, num) against the values seen at
 * the previous call, caching the new values as a side effect.
 * Returns 1 if anything changed, 0 otherwise.
 *
 * call with lock held
 */
static int
netmap_mem_params_changed(struct netmap_obj_params* p)
{
	int i, rv = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) {
			p[i].last_size = p[i].size;
			p[i].last_num = p[i].num;
			rv = 1;
		}
	}
	return rv;
}

static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	/* Release the backing memory of every pool of 'nmd' and clear
	 * the FINALIZED flag, returning the allocator to the configured
	 * but unmaterialized state. */
	int i;

	if (netmap_verbose)
		D("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
}

/*
 * Undo netmap_mem_map(): DMA-unmap each cluster of pool 'p' for adapter
 * 'na' and free the physical-address lookup table. Only implemented for
 * linux here; on FreeBSD and Windows it is a stub.
 * Always returns 0.
 */
static int
netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int i, lim = p->_objtotal;
	struct netmap_lut *lut = &na->na_lut;

	if (na == NULL || na->pdev == NULL)
		return 0;

#if defined(__FreeBSD__)
	(void)i;
	(void)lim;
	(void)lut;
	D("unsupported on FreeBSD");
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	D("unsupported on Windows");
#else /* linux */
	ND("unmapping and freeing plut for %s", na->name);
	if (lut->plut == NULL)
		return 0;
	/* one DMA mapping per cluster, keyed by the first object index */
	for (i = 0; i < lim; i += p->_clustentries) {
		if (lut->plut[i].paddr)
			netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
	}
	nm_free_plut(lut->plut);
	lut->plut = NULL;
#endif /* linux */

	return 0;
}

/*
 * Create the physical-address lookup table for adapter 'na' and DMA-map
 * each allocated cluster of pool 'p' through the adapter's device.
 * Within a cluster, objects are physically contiguous, so only the first
 * object is mapped and the others get paddr = previous + p->_objsize.
 * Only implemented for linux here; stubbed on FreeBSD and Windows.
 * Returns 0 on success or an errno; on failure any partial mapping is
 * torn down via netmap_mem_unmap().
 */
static int
netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int error = 0;
	int i, lim = p->objtotal;
	struct netmap_lut *lut = &na->na_lut;

	if (na->pdev == NULL)
		return 0;

#if defined(__FreeBSD__)
	(void)i;
	(void)lim;
	(void)lut;
	D("unsupported on FreeBSD");
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	D("unsupported on Windows");
#else /* linux */

	if (lut->plut != NULL) {
		ND("plut already allocated for %s", na->name);
		return 0;
	}

	ND("allocating physical lut for %s", na->name);
	lut->plut = nm_alloc_plut(lim);
	if (lut->plut == NULL) {
		D("Failed to allocate physical lut for %s", na->name);
		return ENOMEM;
	}

	/* clear the per-cluster anchor entries first, so a failed mapping
	 * below leaves well-defined zero paddrs for the unmap path */
	for (i = 0; i < lim; i += p->_clustentries) {
		lut->plut[i].paddr = 0;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		int j;

		/* cluster may be missing after a partial finalize */
		if (p->lut[i].vaddr == NULL)
			continue;

		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
			p->lut[i].vaddr, p->_clustsize);
		if (error) {
			D("Failed to map cluster #%d from the %s pool", i, p->name);
			break;
		}

		/* remaining objects in the cluster are contiguous */
		for (j = 1; j < p->_clustentries; j++) {
			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
		}
	}

	if (error)
		netmap_mem_unmap(p, na);

#endif /* linux */

	return error;
}

/*
 * Finalize every pool of 'nmd', initialize the allocation bitmaps and
 * set NETMAP_MEM_FINALIZED. No-op if already finalized. On any error the
 * whole allocator is reset and nmd->lasterr is returned.
 */
static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
	if (nmd->lasterr)
		goto error;

	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		D("interfaces %d KB, rings %d KB, buffers %d MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);


	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}

/*
 * allocator for private memory
 *
 * Allocate a descriptor of 'size' bytes (callers may over-allocate for
 * trailing private state), copy the nm_blueprint template into it,
 * install 'ops', assign a unique allocator id, record the per-pool
 * (num, size) requests from 'p' and run the initial configuration.
 * On failure, returns NULL and stores an errno in *perr (if non-NULL).
 */
static void *
_netmap_mem_private_new(size_t size, struct netmap_obj_params *p,
		struct netmap_mem_ops *ops, int *perr)
{
	struct netmap_mem_d *d = NULL;
	int i, err = 0;

	d = nm_os_malloc(size);
	if (d == NULL) {
		err = ENOMEM;
		goto error;
	}

	*d = nm_blueprint;
	d->ops = ops;

	err = nm_mem_assign_id(d);
	if (err)
		goto error_free;
	snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);

	/* derive per-pool names from the blueprint's printf-style name
	 * templates, parameterized by this allocator's id string */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
				nm_blueprint.pools[i].name,
				d->name);
		d->params[i].num = p[i].num;
		d->params[i].size = p[i].size;
	}

	NMA_LOCK_INIT(d);

	err = netmap_mem_config(d);
	if (err)
		goto error_rel_id;

	d->flags &= ~NETMAP_MEM_FINALIZED;

	return d;

	/* unwind in reverse order of construction */
error_rel_id:
	NMA_LOCK_DESTROY(d);
	nm_mem_release_id(d);
error_free:
	nm_os_free(d);
error:
	if (perr)
		*perr = err;
	return NULL;
}

/*
 * Create a private memory allocator sized for an adapter with the given
 * number of tx/rx rings and descriptors, plus 'extra_bufs' spare buffers
 * and up to 'npipes' pipes. Starts from the netmap_min_priv_params
 * minimums and grows each pool only as needed to fit the request.
 * On failure returns NULL and stores an errno in *perr (if non-NULL).
 */
struct netmap_mem_d *
netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
	u_int extra_bufs, u_int npipes, int *perr)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i;
	u_int v, maxd;
	/* account for the fake host rings */
	txr++;
	rxr++;

	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	}

	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	v = 2 + 4 * npipes;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	 */
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other end, the pipe ring dimension may be different from
	 * the parent port ring dimension. As a compromise, we allocate twice the
	 * space actually needed if the pipe rings were the same size as the parent rings
	 */
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;

	if (netmap_verbose)
		D("req if %d*%d ring %d*%d buf %d*%d",
			p[NETMAP_IF_POOL].num,
			p[NETMAP_IF_POOL].size,
			p[NETMAP_RING_POOL].num,
			p[NETMAP_RING_POOL].size,
			p[NETMAP_BUF_POOL].num,
			p[NETMAP_BUF_POOL].size);

	d = _netmap_mem_private_new(sizeof(*d), p, &netmap_mem_global_ops, perr);

	return d;
}


/*
 * Reconfigure the pools of 'nmd' if the requested parameters changed
 * since the last call. Skipped while the allocator is in active use.
 * If memory was already finalized, it is released first.
 *
 * call with lock held
 */
static int
netmap_mem2_config(struct netmap_mem_d *nmd)
{
	int i;

	if (nmd->active)
		/* already in use, we cannot change the configuration */
		goto out;

	if (!netmap_mem_params_changed(nmd->params))
		goto out;

	ND("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++)
		{
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				nmd->params[i].num, nmd->params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:

	return nmd->lasterr;
}

/*
 * Bring the allocator into the finalized (usable) state and take an
 * active reference on it. The reference is dropped again if
 * finalization fails; errors are reported through nmd->lasterr.
 */
static int
netmap_mem2_finalize(struct netmap_mem_d *nmd)
{
	int err;

	/* update configuration if changed */
	if (netmap_mem_config(nmd))
		goto out1;

	nmd->active++;

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* may happen if config is not changed */
		D("nothing to do");
		goto out;
	}

	if (netmap_mem_finalize_all(nmd))
		goto out;

	nmd->lasterr = 0;

out:
	if (nmd->lasterr)
		nmd->active--;
out1:
	err = nmd->lasterr;

	return err;

}


/*
 * Destroy all pools of 'nmd' and free the descriptor itself, unless it
 * is the statically-allocated global allocator nm_mem.
 */
static void
netmap_mem2_delete(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nmd->pools[i]);
	}

	NMA_LOCK_DESTROY(nmd);
	if (nmd != &nm_mem)
		nm_os_free(nmd);
}

#ifdef WITH_EXTMEM
/* doubly linked list of all existing external allocators */
static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
NM_MTX_T nm_mem_ext_list_lock;
#endif /* WITH_EXTMEM */

/*
 * Module-load initialization: set up the global locks and take the
 * initial reference on the global allocator. Always returns 0.
 */
int
netmap_mem_init(void)
{
	NM_MTX_INIT(nm_mem_list_lock);
	NMA_LOCK_INIT(&nm_mem);
	netmap_mem_get(&nm_mem);
#ifdef WITH_EXTMEM
	NM_MTX_INIT(nm_mem_ext_list_lock);
#endif /* WITH_EXTMEM */
	return (0);
}

/* Module-unload teardown: drop the reference taken in netmap_mem_init(). */
void
netmap_mem_fini(void)
{
	netmap_mem_put(&nm_mem);
}

/*
 * Free the netmap rings (and, for real rings, their buffers) of adapter
 * 'na'. Rings that are still in use (kring->users > 0) or still needed
 * (NKR_NEEDRING) are skipped.
 */
static void
netmap_free_rings(struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;

		/* +1 covers the host ring slot at the end of the array */
		for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
			struct netmap_kring *kring = &NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;

			if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) {
				if (netmap_verbose)
					D("NOT deleting ring %s (ring %p, users %d neekring %d)",
						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_verbose)
				D("deleting ring %s", kring->name);
			/* fake host rings (when NAF_HOST_RINGS is unset) own
			 * no buffers, so only free bufs for real rings */
			if (i != nma_get_nrings(na, t) || na->na_flags & NAF_HOST_RINGS)
				netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
			netmap_ring_free(na->nm_mem, ring);
			kring->ring = NULL;
		}
	}
}

/* call with NMA_LOCK held *
 *
 * Allocate netmap rings and buffers for this card
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
 */
static int
netmap_mem2_rings_create(struct netmap_adapter *na)
{
	enum txrx t;

	NMA_LOCK(na->nm_mem);

	for_rx_tx(t) {
		u_int i;

		/* <= so the loop also covers the host ring slot */
		for (i = 0; i <= nma_get_nrings(na, t); i++) {
			struct netmap_kring *kring = &NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;
			u_int len, ndesc;

			if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) {
				/* unneeded, or already created by somebody else */
				if (netmap_verbose)
					D("NOT creating ring %s (ring %p, users %d neekring %d)",
						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_verbose)
				D("creating %s", kring->name);
			ndesc = kring->nkr_num_slots;
			/* the shared ring header is followed inline by the
			 * slot array */
			len = sizeof(struct netmap_ring) +
				  ndesc * sizeof(struct netmap_slot);
			ring = netmap_ring_malloc(na->nm_mem, len);
			if (ring == NULL) {
				D("Cannot allocate %s_ring", nm_txrx2str(t));
				goto cleanup;
			}
			ND("txring at %p", ring);
			kring->ring = ring;
			/* the ring fields are const in the shared layout;
			 * cast away constness to initialize them once */
			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
			*(int64_t *)(uintptr_t)&ring->buf_ofs =
			    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
				na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
				netmap_ring_offset(na->nm_mem, ring);

			/* copy values from kring */
			ring->head = kring->rhead;
			ring->cur = kring->rcur;
			ring->tail = kring->rtail;
			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
				netmap_mem_bufsize(na->nm_mem);
			ND("%s h %d c %d t %d", kring->name,
				ring->head, ring->cur, ring->tail);
			ND("initializing slots for %s_ring", nm_txrx2str(txrx));
			if (i != nma_get_nrings(na, t) || (na->na_flags & NAF_HOST_RINGS)) {
				/* this is a real ring */
				if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
					D("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
					goto cleanup;
				}
			} else {
				/* this is a fake ring, set all indices to 0 */
				netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
			}
			/* ring info */
			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
		}
	}

	NMA_UNLOCK(na->nm_mem);

	return 0;

cleanup:
	/* releases only the rings/bufs created so far */
	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);

	return ENOMEM;
}

/* Release the rings and buffers of 'na' (last instance going away). */
static void
netmap_mem2_rings_delete(struct netmap_adapter *na)
{
	/* last instance, release bufs and rings */
	NMA_LOCK(na->nm_mem);

	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);
}


/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
static struct netmap_if *
netmap_mem2_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, n[NR_TXRX], ntot;
	enum txrx t;

	ntot = 0;
	for_rx_tx(t) {
		/* account for the (eventually fake) host rings */
		n[t] = nma_get_nrings(na, t) + 1;
		ntot += n[t];
	}
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */

	NMA_LOCK(na->nm_mem);

	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
	nifp = netmap_if_malloc(na->nm_mem, len);
	if (nifp == NULL) {
		NMA_UNLOCK(na->nm_mem);
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ);

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 * Rings outside the [np_qfirst, np_qlast) range owned by this
	 * file descriptor get offset 0.
	 */
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < n[NR_TX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		ssize_t ofs = 0;

		if (na->tx_rings[i].ring != NULL && i >= priv->np_qfirst[NR_TX]
				&& i < priv->np_qlast[NR_TX]) {
			ofs = netmap_ring_offset(na->nm_mem,
						 na->tx_rings[i].ring) - base;
		}
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
	}
	for (i = 0; i < n[NR_RX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		ssize_t ofs = 0;

		if (na->rx_rings[i].ring != NULL && i >= priv->np_qfirst[NR_RX]
				&& i < priv->np_qlast[NR_RX]) {
			ofs = netmap_ring_offset(na->nm_mem,
						 na->rx_rings[i].ring) - base;
		}
		/* rx offsets are stored after all tx offsets */
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
	}

	NMA_UNLOCK(na->nm_mem);

	return (nifp);
}

/*
 * Release a per-fd netmap_if, first returning any extra buffers still
 * chained off nifp->ni_bufs_head to the buffer pool.
 */
static void
netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	NMA_LOCK(na->nm_mem);
	if (nifp->ni_bufs_head)
		netmap_extra_free(na, nifp->ni_bufs_head);
	netmap_if_free(na->nm_mem, nifp);

	NMA_UNLOCK(na->nm_mem);
}

/*
 * Drop an active reference on 'nmd'; when the last one goes away the
 * device-group binding is cleared.
 */
static void
netmap_mem2_deref(struct netmap_mem_d *nmd)
{

	nmd->active--;
	if (!nmd->active)
		nmd->nm_grp = -1;
	if (netmap_verbose)
		D("active = %d", nmd->active);

}

/* method table for the standard ("mem2") allocator implementation */
struct netmap_mem_ops netmap_mem_global_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem2_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem2_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

/*
 * Fill a netmap_pools_info snapshot (sizes and offsets of the if, ring
 * and buffer pools of 'nmd') and copy it out to the userspace address
 * stored in nmr->nr_arg1. Returns 0 or an errno from the info query or
 * the copyout.
 */
int
netmap_mem_pools_info_get(struct nmreq *nmr, struct netmap_mem_d *nmd)
{
	/* nr_arg1 carries a userspace pointer squeezed into the request */
	uintptr_t *pp = (uintptr_t *)&nmr->nr_arg1;
	struct netmap_pools_info *upi = (struct netmap_pools_info *)(*pp);
	struct netmap_pools_info pi;
	uint64_t memsize;
	uint16_t memid;
	int ret;

	ret = netmap_mem_get_info(nmd, &memsize, NULL, &memid);
	if (ret) {
		return ret;
	}

	pi.memsize = memsize;
	pi.memid = memid;
	NMA_LOCK(nmd);
	/* pools are laid out back to back: if, then ring, then buf */
	pi.if_pool_offset = 0;
	pi.if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
	pi.if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;

	pi.ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
	pi.ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
	pi.ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;

	pi.buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
			     nmd->pools[NETMAP_RING_POOL].memtotal;
	pi.buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	pi.buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
	NMA_UNLOCK(nmd);

	ret = copyout(&pi, upi, sizeof(pi));
	if (ret) {
		return ret;
	}

	return 0;
}

#ifdef WITH_EXTMEM
/* an allocator backed by externally-provided (user) memory */
struct netmap_mem_ext {
	struct netmap_mem_d up;	/* base allocator, must be first */

	struct page **pages;	/* pinned pages backing the region */
	int nr_pages;
	struct netmap_mem_ext *next, *prev; /* links in netmap_mem_ext_list */
};

/*
 add e to the global extmem list; takes nm_mem_ext_list_lock itself,
 * so the caller must NOT hold it */
static void
netmap_mem_ext_register(struct netmap_mem_ext *e)
{
	NM_MTX_LOCK(nm_mem_ext_list_lock);
	if (netmap_mem_ext_list)
		netmap_mem_ext_list->prev = e;
	e->next = netmap_mem_ext_list;
	netmap_mem_ext_list = e;
	e->prev = NULL;
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
}

/* unlink e from the global extmem list. Unlike _register() this does
 * NOT take nm_mem_ext_list_lock — the caller is expected to hold it.
 * NOTE(review): the only visible caller, netmap_mem_ext_delete(), calls
 * this without the lock held — looks racy; verify against callers. */
static void
netmap_mem_ext_unregister(struct netmap_mem_ext *e)
{
	if (e->prev)
		e->prev->next = e->next;
	else
		netmap_mem_ext_list = e->next;
	if (e->next)
		e->next->prev = e->prev;
	e->prev = e->next = NULL;
}

/* return 1 iff e is backed by exactly the same page set (same order) */
static int
netmap_mem_ext_same_pages(struct netmap_mem_ext *e, struct page **pages, int nr_pages)
{
	int i;

	if (e->nr_pages != nr_pages)
		return 0;

	for (i = 0; i < nr_pages; i++)
		if (pages[i] != e->pages[i])
			return 0;

	return 1;
}

/* Look up an already-registered extmem allocator backed by these pages.
 * On a match, a reference is taken (netmap_mem_get) before returning;
 * returns NULL if no allocator matches. */
static struct netmap_mem_ext *
netmap_mem_ext_search(struct page **pages, int nr_pages)
{
	struct netmap_mem_ext *e;

	NM_MTX_LOCK(nm_mem_ext_list_lock);
	for (e = netmap_mem_ext_list; e; e = e->next) {
		if (netmap_mem_ext_same_pages(e, pages, nr_pages)) {
			netmap_mem_get(&e->up);
			break;
		}
	}
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
	return e;
}


/* Unmap (kunmap) and unpin (put_page) every page, then free the
 * pages[] array itself, which was allocated with nm_os_vmalloc(). */
static void
netmap_mem_ext_free_pages(struct page **pages, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		kunmap(pages[i]);
		put_page(pages[i]);
	}
	nm_os_vfree(pages);
}

/* Destructor for an extmem allocator: unregister it, release the
 * per-pool lookup tables, release the backing pages, then run the
 * common mem2 destructor. */
static void
netmap_mem_ext_delete(struct netmap_mem_d *d)
{
	int i;
	struct netmap_mem_ext *e =
		(struct netmap_mem_ext *)d;

	netmap_mem_ext_unregister(e);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &d->pools[i];

		if (p->lut) {
			nm_free_lut(p->lut, p->objtotal);
			p->lut = NULL;
		}
	}
	if (e->pages) {
		netmap_mem_ext_free_pages(e->pages, e->nr_pages);
		e->pages = NULL;
		e->nr_pages = 0;
	}
	netmap_mem2_delete(d);
}

/* extmem layout is fixed at creation time: nothing to (re)configure */
static int
netmap_mem_ext_config(struct netmap_mem_d *nmd)
{
	return 0;
}

/* Method table for the extmem allocator: mostly the standard mem2
 * methods, with config and delete overridden. */
struct netmap_mem_ops netmap_mem_ext_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem_ext_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem_ext_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

2183*4f80b14cSVincenzo Maffione struct netmap_mem_d * 2184*4f80b14cSVincenzo Maffione netmap_mem_ext_create(struct nmreq *nmr, int *perror) 2185*4f80b14cSVincenzo Maffione { 2186*4f80b14cSVincenzo Maffione uintptr_t p = *(uintptr_t *)&nmr->nr_arg1; 2187*4f80b14cSVincenzo Maffione struct netmap_pools_info pi; 2188*4f80b14cSVincenzo Maffione int error = 0; 2189*4f80b14cSVincenzo Maffione unsigned long end, start; 2190*4f80b14cSVincenzo Maffione int nr_pages, res, i, j; 2191*4f80b14cSVincenzo Maffione struct page **pages = NULL; 2192*4f80b14cSVincenzo Maffione struct netmap_mem_ext *nme; 2193*4f80b14cSVincenzo Maffione char *clust; 2194*4f80b14cSVincenzo Maffione size_t off; 2195*4f80b14cSVincenzo Maffione 2196*4f80b14cSVincenzo Maffione error = copyin((void *)p, &pi, sizeof(pi)); 2197*4f80b14cSVincenzo Maffione if (error) 2198*4f80b14cSVincenzo Maffione goto out; 2199*4f80b14cSVincenzo Maffione 2200*4f80b14cSVincenzo Maffione // XXX sanity checks 2201*4f80b14cSVincenzo Maffione if (pi.if_pool_objtotal == 0) 2202*4f80b14cSVincenzo Maffione pi.if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num; 2203*4f80b14cSVincenzo Maffione if (pi.if_pool_objsize == 0) 2204*4f80b14cSVincenzo Maffione pi.if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size; 2205*4f80b14cSVincenzo Maffione if (pi.ring_pool_objtotal == 0) 2206*4f80b14cSVincenzo Maffione pi.ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num; 2207*4f80b14cSVincenzo Maffione if (pi.ring_pool_objsize == 0) 2208*4f80b14cSVincenzo Maffione pi.ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size; 2209*4f80b14cSVincenzo Maffione if (pi.buf_pool_objtotal == 0) 2210*4f80b14cSVincenzo Maffione pi.buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num; 2211*4f80b14cSVincenzo Maffione if (pi.buf_pool_objsize == 0) 2212*4f80b14cSVincenzo Maffione pi.buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size; 2213*4f80b14cSVincenzo Maffione D("if %d %d ring %d %d buf %d 
%d", 2214*4f80b14cSVincenzo Maffione pi.if_pool_objtotal, pi.if_pool_objsize, 2215*4f80b14cSVincenzo Maffione pi.ring_pool_objtotal, pi.ring_pool_objsize, 2216*4f80b14cSVincenzo Maffione pi.buf_pool_objtotal, pi.buf_pool_objsize); 2217*4f80b14cSVincenzo Maffione 2218*4f80b14cSVincenzo Maffione end = (p + pi.memsize + PAGE_SIZE - 1) >> PAGE_SHIFT; 2219*4f80b14cSVincenzo Maffione start = p >> PAGE_SHIFT; 2220*4f80b14cSVincenzo Maffione nr_pages = end - start; 2221*4f80b14cSVincenzo Maffione 2222*4f80b14cSVincenzo Maffione pages = nm_os_vmalloc(nr_pages * sizeof(*pages)); 2223*4f80b14cSVincenzo Maffione if (pages == NULL) { 2224*4f80b14cSVincenzo Maffione error = ENOMEM; 2225*4f80b14cSVincenzo Maffione goto out; 2226*4f80b14cSVincenzo Maffione } 2227*4f80b14cSVincenzo Maffione 2228*4f80b14cSVincenzo Maffione #ifdef NETMAP_LINUX_HAVE_GUP_4ARGS 2229*4f80b14cSVincenzo Maffione res = get_user_pages_unlocked( 2230*4f80b14cSVincenzo Maffione p, 2231*4f80b14cSVincenzo Maffione nr_pages, 2232*4f80b14cSVincenzo Maffione pages, 2233*4f80b14cSVincenzo Maffione FOLL_WRITE | FOLL_GET | FOLL_SPLIT | FOLL_POPULATE); // XXX check other flags 2234*4f80b14cSVincenzo Maffione #elif defined(NETMAP_LINUX_HAVE_GUP_5ARGS) 2235*4f80b14cSVincenzo Maffione res = get_user_pages_unlocked( 2236*4f80b14cSVincenzo Maffione p, 2237*4f80b14cSVincenzo Maffione nr_pages, 2238*4f80b14cSVincenzo Maffione 1, /* write */ 2239*4f80b14cSVincenzo Maffione 0, /* don't force */ 2240*4f80b14cSVincenzo Maffione pages); 2241*4f80b14cSVincenzo Maffione #elif defined(NETMAP_LINUX_HAVE_GUP_7ARGS) 2242*4f80b14cSVincenzo Maffione res = get_user_pages_unlocked( 2243*4f80b14cSVincenzo Maffione current, 2244*4f80b14cSVincenzo Maffione current->mm, 2245*4f80b14cSVincenzo Maffione p, 2246*4f80b14cSVincenzo Maffione nr_pages, 2247*4f80b14cSVincenzo Maffione 1, /* write */ 2248*4f80b14cSVincenzo Maffione 0, /* don't force */ 2249*4f80b14cSVincenzo Maffione pages); 2250*4f80b14cSVincenzo Maffione #else 2251*4f80b14cSVincenzo 
Maffione down_read(¤t->mm->mmap_sem); 2252*4f80b14cSVincenzo Maffione res = get_user_pages( 2253*4f80b14cSVincenzo Maffione current, 2254*4f80b14cSVincenzo Maffione current->mm, 2255*4f80b14cSVincenzo Maffione p, 2256*4f80b14cSVincenzo Maffione nr_pages, 2257*4f80b14cSVincenzo Maffione 1, /* write */ 2258*4f80b14cSVincenzo Maffione 0, /* don't force */ 2259*4f80b14cSVincenzo Maffione pages, 2260*4f80b14cSVincenzo Maffione NULL); 2261*4f80b14cSVincenzo Maffione up_read(¤t->mm->mmap_sem); 2262*4f80b14cSVincenzo Maffione #endif /* NETMAP_LINUX_GUP */ 2263*4f80b14cSVincenzo Maffione 2264*4f80b14cSVincenzo Maffione if (res < nr_pages) { 2265*4f80b14cSVincenzo Maffione error = EFAULT; 2266*4f80b14cSVincenzo Maffione goto out_unmap; 2267*4f80b14cSVincenzo Maffione } 2268*4f80b14cSVincenzo Maffione 2269*4f80b14cSVincenzo Maffione nme = netmap_mem_ext_search(pages, nr_pages); 2270*4f80b14cSVincenzo Maffione if (nme) { 2271*4f80b14cSVincenzo Maffione netmap_mem_ext_free_pages(pages, nr_pages); 2272*4f80b14cSVincenzo Maffione return &nme->up; 2273*4f80b14cSVincenzo Maffione } 2274*4f80b14cSVincenzo Maffione D("not found, creating new"); 2275*4f80b14cSVincenzo Maffione 2276*4f80b14cSVincenzo Maffione nme = _netmap_mem_private_new(sizeof(*nme), 2277*4f80b14cSVincenzo Maffione (struct netmap_obj_params[]){ 2278*4f80b14cSVincenzo Maffione { pi.if_pool_objsize, pi.if_pool_objtotal }, 2279*4f80b14cSVincenzo Maffione { pi.ring_pool_objsize, pi.ring_pool_objtotal }, 2280*4f80b14cSVincenzo Maffione { pi.buf_pool_objsize, pi.buf_pool_objtotal }}, 2281*4f80b14cSVincenzo Maffione &netmap_mem_ext_ops, 2282*4f80b14cSVincenzo Maffione &error); 2283*4f80b14cSVincenzo Maffione if (nme == NULL) 2284*4f80b14cSVincenzo Maffione goto out_unmap; 2285*4f80b14cSVincenzo Maffione 2286*4f80b14cSVincenzo Maffione /* from now on pages will be released by nme destructor; 2287*4f80b14cSVincenzo Maffione * we let res = 0 to prevent release in out_unmap below 2288*4f80b14cSVincenzo Maffione */ 
2289*4f80b14cSVincenzo Maffione res = 0; 2290*4f80b14cSVincenzo Maffione nme->pages = pages; 2291*4f80b14cSVincenzo Maffione nme->nr_pages = nr_pages; 2292*4f80b14cSVincenzo Maffione nme->up.flags |= NETMAP_MEM_EXT; 2293*4f80b14cSVincenzo Maffione 2294*4f80b14cSVincenzo Maffione clust = kmap(*pages); 2295*4f80b14cSVincenzo Maffione off = 0; 2296*4f80b14cSVincenzo Maffione for (i = 0; i < NETMAP_POOLS_NR; i++) { 2297*4f80b14cSVincenzo Maffione struct netmap_obj_pool *p = &nme->up.pools[i]; 2298*4f80b14cSVincenzo Maffione struct netmap_obj_params *o = &nme->up.params[i]; 2299*4f80b14cSVincenzo Maffione 2300*4f80b14cSVincenzo Maffione p->_objsize = o->size; 2301*4f80b14cSVincenzo Maffione p->_clustsize = o->size; 2302*4f80b14cSVincenzo Maffione p->_clustentries = 1; 2303*4f80b14cSVincenzo Maffione 2304*4f80b14cSVincenzo Maffione p->lut = nm_alloc_lut(o->num); 2305*4f80b14cSVincenzo Maffione if (p->lut == NULL) { 2306*4f80b14cSVincenzo Maffione error = ENOMEM; 2307*4f80b14cSVincenzo Maffione goto out_delete; 2308*4f80b14cSVincenzo Maffione } 2309*4f80b14cSVincenzo Maffione 2310*4f80b14cSVincenzo Maffione p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t); 2311*4f80b14cSVincenzo Maffione p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots); 2312*4f80b14cSVincenzo Maffione if (p->invalid_bitmap == NULL) { 2313*4f80b14cSVincenzo Maffione error = ENOMEM; 2314*4f80b14cSVincenzo Maffione goto out_delete; 2315*4f80b14cSVincenzo Maffione } 2316*4f80b14cSVincenzo Maffione 2317*4f80b14cSVincenzo Maffione if (nr_pages == 0) { 2318*4f80b14cSVincenzo Maffione p->objtotal = 0; 2319*4f80b14cSVincenzo Maffione p->memtotal = 0; 2320*4f80b14cSVincenzo Maffione p->objfree = 0; 2321*4f80b14cSVincenzo Maffione continue; 2322*4f80b14cSVincenzo Maffione } 2323*4f80b14cSVincenzo Maffione 2324*4f80b14cSVincenzo Maffione for (j = 0; j < o->num && nr_pages > 0; j++) { 2325*4f80b14cSVincenzo Maffione size_t noff; 2326*4f80b14cSVincenzo Maffione size_t skip; 
2327*4f80b14cSVincenzo Maffione 2328*4f80b14cSVincenzo Maffione p->lut[j].vaddr = clust + off; 2329*4f80b14cSVincenzo Maffione ND("%s %d at %p", p->name, j, p->lut[j].vaddr); 2330*4f80b14cSVincenzo Maffione noff = off + p->_objsize; 2331*4f80b14cSVincenzo Maffione if (noff < PAGE_SIZE) { 2332*4f80b14cSVincenzo Maffione off = noff; 2333*4f80b14cSVincenzo Maffione continue; 2334*4f80b14cSVincenzo Maffione } 2335*4f80b14cSVincenzo Maffione ND("too big, recomputing offset..."); 2336*4f80b14cSVincenzo Maffione skip = PAGE_SIZE - (off & PAGE_MASK); 2337*4f80b14cSVincenzo Maffione while (noff >= PAGE_SIZE) { 2338*4f80b14cSVincenzo Maffione noff -= skip; 2339*4f80b14cSVincenzo Maffione pages++; 2340*4f80b14cSVincenzo Maffione nr_pages--; 2341*4f80b14cSVincenzo Maffione ND("noff %zu page %p nr_pages %d", noff, 2342*4f80b14cSVincenzo Maffione page_to_virt(*pages), nr_pages); 2343*4f80b14cSVincenzo Maffione if (noff > 0 && !nm_isset(p->invalid_bitmap, j) && 2344*4f80b14cSVincenzo Maffione (nr_pages == 0 || *pages != *(pages - 1) + 1)) 2345*4f80b14cSVincenzo Maffione { 2346*4f80b14cSVincenzo Maffione /* out of space or non contiguous, 2347*4f80b14cSVincenzo Maffione * drop this object 2348*4f80b14cSVincenzo Maffione * */ 2349*4f80b14cSVincenzo Maffione p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U); 2350*4f80b14cSVincenzo Maffione ND("non contiguous at off %zu, drop", noff); 2351*4f80b14cSVincenzo Maffione } 2352*4f80b14cSVincenzo Maffione if (nr_pages == 0) 2353*4f80b14cSVincenzo Maffione break; 2354*4f80b14cSVincenzo Maffione skip = PAGE_SIZE; 2355*4f80b14cSVincenzo Maffione } 2356*4f80b14cSVincenzo Maffione off = noff; 2357*4f80b14cSVincenzo Maffione if (nr_pages > 0) 2358*4f80b14cSVincenzo Maffione clust = kmap(*pages); 2359*4f80b14cSVincenzo Maffione } 2360*4f80b14cSVincenzo Maffione p->objtotal = j; 2361*4f80b14cSVincenzo Maffione p->numclusters = p->objtotal; 2362*4f80b14cSVincenzo Maffione p->memtotal = j * p->_objsize; 2363*4f80b14cSVincenzo Maffione ND("%d memtotal 
%u", j, p->memtotal); 2364*4f80b14cSVincenzo Maffione } 2365*4f80b14cSVincenzo Maffione 2366*4f80b14cSVincenzo Maffione /* skip the first netmap_if, where the pools info reside */ 2367*4f80b14cSVincenzo Maffione { 2368*4f80b14cSVincenzo Maffione struct netmap_obj_pool *p = &nme->up.pools[NETMAP_IF_POOL]; 2369*4f80b14cSVincenzo Maffione p->invalid_bitmap[0] |= 1U; 2370*4f80b14cSVincenzo Maffione } 2371*4f80b14cSVincenzo Maffione 2372*4f80b14cSVincenzo Maffione netmap_mem_ext_register(nme); 2373*4f80b14cSVincenzo Maffione 2374*4f80b14cSVincenzo Maffione return &nme->up; 2375*4f80b14cSVincenzo Maffione 2376*4f80b14cSVincenzo Maffione out_delete: 2377*4f80b14cSVincenzo Maffione netmap_mem_put(&nme->up); 2378*4f80b14cSVincenzo Maffione out_unmap: 2379*4f80b14cSVincenzo Maffione for (i = 0; i < res; i++) 2380*4f80b14cSVincenzo Maffione put_page(pages[i]); 2381*4f80b14cSVincenzo Maffione if (res) 2382*4f80b14cSVincenzo Maffione nm_os_free(pages); 2383*4f80b14cSVincenzo Maffione out: 2384*4f80b14cSVincenzo Maffione if (perror) 2385*4f80b14cSVincenzo Maffione *perror = error; 2386*4f80b14cSVincenzo Maffione return NULL; 2387*4f80b14cSVincenzo Maffione 2388*4f80b14cSVincenzo Maffione } 2389*4f80b14cSVincenzo Maffione #endif /* WITH_EXTMEM */ 2390*4f80b14cSVincenzo Maffione 2391*4f80b14cSVincenzo Maffione 239237e3a6d3SLuigi Rizzo #ifdef WITH_PTNETMAP_GUEST 239337e3a6d3SLuigi Rizzo struct mem_pt_if { 239437e3a6d3SLuigi Rizzo struct mem_pt_if *next; 239537e3a6d3SLuigi Rizzo struct ifnet *ifp; 239637e3a6d3SLuigi Rizzo unsigned int nifp_offset; 239737e3a6d3SLuigi Rizzo }; 239837e3a6d3SLuigi Rizzo 239937e3a6d3SLuigi Rizzo /* Netmap allocator for ptnetmap guests. 
 */
struct netmap_mem_ptg {
	struct netmap_mem_d up;		/* base allocator, must be first */

	vm_paddr_t nm_paddr;		/* physical address in the guest */
	void *nm_addr;			/* virtual address in the guest */
	struct netmap_lut buf_lut;	/* lookup table for BUF pool in the guest */
	nm_memid_t host_mem_id;		/* allocator identifier in the host */
	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
};

/* Link a passthrough interface to a passthrough netmap allocator. */
static int
netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp,
			    unsigned int nifp_offset)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));

	if (!ptif) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	ptif->ifp = ifp;
	ptif->nifp_offset = nifp_offset;

	/* push at the head of the list.
	 * NOTE(review): when pt_ifs is empty, ptif->next is never assigned
	 * here — this relies on nm_os_malloc() returning zeroed memory;
	 * confirm against the nm_os_malloc implementation. */
	if (ptnmd->pt_ifs) {
		ptif->next = ptnmd->pt_ifs;
	}
	ptnmd->pt_ifs = ptif;

	NMA_UNLOCK(nmd);

	D("added (ifp=%p,nifp_offset=%u)", ptif->ifp, ptif->nifp_offset);

	return 0;
}

244037e3a6d3SLuigi Rizzo /* Called with NMA_LOCK(nmd) held. */ 244137e3a6d3SLuigi Rizzo static struct mem_pt_if * 244237e3a6d3SLuigi Rizzo netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp) 244337e3a6d3SLuigi Rizzo { 244437e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 244537e3a6d3SLuigi Rizzo struct mem_pt_if *curr; 244637e3a6d3SLuigi Rizzo 244737e3a6d3SLuigi Rizzo for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { 244837e3a6d3SLuigi Rizzo if (curr->ifp == ifp) { 244937e3a6d3SLuigi Rizzo return curr; 245037e3a6d3SLuigi Rizzo } 245137e3a6d3SLuigi Rizzo } 245237e3a6d3SLuigi Rizzo 245337e3a6d3SLuigi Rizzo return NULL; 245437e3a6d3SLuigi Rizzo } 245537e3a6d3SLuigi Rizzo 245637e3a6d3SLuigi Rizzo /* Unlink a passthrough interface from a passthrough netmap allocator. */ 245737e3a6d3SLuigi Rizzo int 245837e3a6d3SLuigi Rizzo netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp) 245937e3a6d3SLuigi Rizzo { 246037e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 246137e3a6d3SLuigi Rizzo struct mem_pt_if *prev = NULL; 246237e3a6d3SLuigi Rizzo struct mem_pt_if *curr; 246337e3a6d3SLuigi Rizzo int ret = -1; 246437e3a6d3SLuigi Rizzo 246537e3a6d3SLuigi Rizzo NMA_LOCK(nmd); 246637e3a6d3SLuigi Rizzo 246737e3a6d3SLuigi Rizzo for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { 246837e3a6d3SLuigi Rizzo if (curr->ifp == ifp) { 246937e3a6d3SLuigi Rizzo if (prev) { 247037e3a6d3SLuigi Rizzo prev->next = curr->next; 247137e3a6d3SLuigi Rizzo } else { 247237e3a6d3SLuigi Rizzo ptnmd->pt_ifs = curr->next; 247337e3a6d3SLuigi Rizzo } 247437e3a6d3SLuigi Rizzo D("removed (ifp=%p,nifp_offset=%u)", 247537e3a6d3SLuigi Rizzo curr->ifp, curr->nifp_offset); 2476c3e9b4dbSLuiz Otavio O Souza nm_os_free(curr); 247737e3a6d3SLuigi Rizzo ret = 0; 247837e3a6d3SLuigi Rizzo break; 247937e3a6d3SLuigi Rizzo } 248037e3a6d3SLuigi Rizzo prev = curr; 248137e3a6d3SLuigi Rizzo } 248237e3a6d3SLuigi Rizzo 
248337e3a6d3SLuigi Rizzo NMA_UNLOCK(nmd); 248437e3a6d3SLuigi Rizzo 248537e3a6d3SLuigi Rizzo return ret; 248637e3a6d3SLuigi Rizzo } 248737e3a6d3SLuigi Rizzo 248837e3a6d3SLuigi Rizzo static int 248937e3a6d3SLuigi Rizzo netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) 249037e3a6d3SLuigi Rizzo { 249137e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 249237e3a6d3SLuigi Rizzo 249337e3a6d3SLuigi Rizzo if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { 249437e3a6d3SLuigi Rizzo return EINVAL; 249537e3a6d3SLuigi Rizzo } 249637e3a6d3SLuigi Rizzo 249737e3a6d3SLuigi Rizzo *lut = ptnmd->buf_lut; 249837e3a6d3SLuigi Rizzo return 0; 249937e3a6d3SLuigi Rizzo } 250037e3a6d3SLuigi Rizzo 250137e3a6d3SLuigi Rizzo static int 2502*4f80b14cSVincenzo Maffione netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size, 250337e3a6d3SLuigi Rizzo u_int *memflags, uint16_t *id) 250437e3a6d3SLuigi Rizzo { 250537e3a6d3SLuigi Rizzo int error = 0; 250637e3a6d3SLuigi Rizzo 250737e3a6d3SLuigi Rizzo NMA_LOCK(nmd); 250837e3a6d3SLuigi Rizzo 250937e3a6d3SLuigi Rizzo error = nmd->ops->nmd_config(nmd); 251037e3a6d3SLuigi Rizzo if (error) 251137e3a6d3SLuigi Rizzo goto out; 251237e3a6d3SLuigi Rizzo 251337e3a6d3SLuigi Rizzo if (size) 251437e3a6d3SLuigi Rizzo *size = nmd->nm_totalsize; 251537e3a6d3SLuigi Rizzo if (memflags) 251637e3a6d3SLuigi Rizzo *memflags = nmd->flags; 251737e3a6d3SLuigi Rizzo if (id) 251837e3a6d3SLuigi Rizzo *id = nmd->nm_id; 251937e3a6d3SLuigi Rizzo 252037e3a6d3SLuigi Rizzo out: 252137e3a6d3SLuigi Rizzo NMA_UNLOCK(nmd); 252237e3a6d3SLuigi Rizzo 252337e3a6d3SLuigi Rizzo return error; 252437e3a6d3SLuigi Rizzo } 252537e3a6d3SLuigi Rizzo 252637e3a6d3SLuigi Rizzo static vm_paddr_t 252737e3a6d3SLuigi Rizzo netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off) 252837e3a6d3SLuigi Rizzo { 252937e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 253037e3a6d3SLuigi Rizzo vm_paddr_t paddr; 
253137e3a6d3SLuigi Rizzo /* if the offset is valid, just return csb->base_addr + off */ 253237e3a6d3SLuigi Rizzo paddr = (vm_paddr_t)(ptnmd->nm_paddr + off); 253337e3a6d3SLuigi Rizzo ND("off %lx padr %lx", off, (unsigned long)paddr); 253437e3a6d3SLuigi Rizzo return paddr; 253537e3a6d3SLuigi Rizzo } 253637e3a6d3SLuigi Rizzo 253737e3a6d3SLuigi Rizzo static int 253837e3a6d3SLuigi Rizzo netmap_mem_pt_guest_config(struct netmap_mem_d *nmd) 253937e3a6d3SLuigi Rizzo { 254037e3a6d3SLuigi Rizzo /* nothing to do, we are configured on creation 254137e3a6d3SLuigi Rizzo * and configuration never changes thereafter 254237e3a6d3SLuigi Rizzo */ 254337e3a6d3SLuigi Rizzo return 0; 254437e3a6d3SLuigi Rizzo } 254537e3a6d3SLuigi Rizzo 254637e3a6d3SLuigi Rizzo static int 254737e3a6d3SLuigi Rizzo netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd) 254837e3a6d3SLuigi Rizzo { 254937e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 2550844a6f0cSLuigi Rizzo uint64_t mem_size; 2551844a6f0cSLuigi Rizzo uint32_t bufsize; 2552844a6f0cSLuigi Rizzo uint32_t nbuffers; 2553844a6f0cSLuigi Rizzo uint32_t poolofs; 2554844a6f0cSLuigi Rizzo vm_paddr_t paddr; 2555844a6f0cSLuigi Rizzo char *vaddr; 2556844a6f0cSLuigi Rizzo int i; 255737e3a6d3SLuigi Rizzo int error = 0; 255837e3a6d3SLuigi Rizzo 255937e3a6d3SLuigi Rizzo nmd->active++; 256037e3a6d3SLuigi Rizzo 256137e3a6d3SLuigi Rizzo if (nmd->flags & NETMAP_MEM_FINALIZED) 256237e3a6d3SLuigi Rizzo goto out; 256337e3a6d3SLuigi Rizzo 256437e3a6d3SLuigi Rizzo if (ptnmd->ptn_dev == NULL) { 256537e3a6d3SLuigi Rizzo D("ptnetmap memdev not attached"); 256637e3a6d3SLuigi Rizzo error = ENOMEM; 256737e3a6d3SLuigi Rizzo goto err; 256837e3a6d3SLuigi Rizzo } 2569844a6f0cSLuigi Rizzo /* Map memory through ptnetmap-memdev BAR. 
*/ 257037e3a6d3SLuigi Rizzo error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr, 2571844a6f0cSLuigi Rizzo &ptnmd->nm_addr, &mem_size); 257237e3a6d3SLuigi Rizzo if (error) 257337e3a6d3SLuigi Rizzo goto err; 257437e3a6d3SLuigi Rizzo 2575844a6f0cSLuigi Rizzo /* Initialize the lut using the information contained in the 2576844a6f0cSLuigi Rizzo * ptnetmap memory device. */ 2577844a6f0cSLuigi Rizzo bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, 2578844a6f0cSLuigi Rizzo PTNET_MDEV_IO_BUF_POOL_OBJSZ); 2579844a6f0cSLuigi Rizzo nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, 2580844a6f0cSLuigi Rizzo PTNET_MDEV_IO_BUF_POOL_OBJNUM); 2581844a6f0cSLuigi Rizzo 2582844a6f0cSLuigi Rizzo /* allocate the lut */ 2583844a6f0cSLuigi Rizzo if (ptnmd->buf_lut.lut == NULL) { 2584844a6f0cSLuigi Rizzo D("allocating lut"); 2585844a6f0cSLuigi Rizzo ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers); 2586844a6f0cSLuigi Rizzo if (ptnmd->buf_lut.lut == NULL) { 2587844a6f0cSLuigi Rizzo D("lut allocation failed"); 2588844a6f0cSLuigi Rizzo return ENOMEM; 2589844a6f0cSLuigi Rizzo } 2590844a6f0cSLuigi Rizzo } 2591844a6f0cSLuigi Rizzo 2592844a6f0cSLuigi Rizzo /* we have physically contiguous memory mapped through PCI BAR */ 2593844a6f0cSLuigi Rizzo poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, 2594844a6f0cSLuigi Rizzo PTNET_MDEV_IO_BUF_POOL_OFS); 2595844a6f0cSLuigi Rizzo vaddr = (char *)(ptnmd->nm_addr) + poolofs; 2596844a6f0cSLuigi Rizzo paddr = ptnmd->nm_paddr + poolofs; 2597844a6f0cSLuigi Rizzo 2598844a6f0cSLuigi Rizzo for (i = 0; i < nbuffers; i++) { 2599844a6f0cSLuigi Rizzo ptnmd->buf_lut.lut[i].vaddr = vaddr; 2600844a6f0cSLuigi Rizzo vaddr += bufsize; 2601844a6f0cSLuigi Rizzo paddr += bufsize; 2602844a6f0cSLuigi Rizzo } 2603844a6f0cSLuigi Rizzo 2604844a6f0cSLuigi Rizzo ptnmd->buf_lut.objtotal = nbuffers; 2605844a6f0cSLuigi Rizzo ptnmd->buf_lut.objsize = bufsize; 2606844a6f0cSLuigi Rizzo nmd->nm_totalsize = (unsigned int)mem_size; 260737e3a6d3SLuigi Rizzo 260837e3a6d3SLuigi Rizzo 
nmd->flags |= NETMAP_MEM_FINALIZED; 260937e3a6d3SLuigi Rizzo out: 261037e3a6d3SLuigi Rizzo return 0; 261137e3a6d3SLuigi Rizzo err: 261237e3a6d3SLuigi Rizzo nmd->active--; 261337e3a6d3SLuigi Rizzo return error; 261437e3a6d3SLuigi Rizzo } 261537e3a6d3SLuigi Rizzo 261637e3a6d3SLuigi Rizzo static void 261737e3a6d3SLuigi Rizzo netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd) 261837e3a6d3SLuigi Rizzo { 261937e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 262037e3a6d3SLuigi Rizzo 262137e3a6d3SLuigi Rizzo nmd->active--; 262237e3a6d3SLuigi Rizzo if (nmd->active <= 0 && 262337e3a6d3SLuigi Rizzo (nmd->flags & NETMAP_MEM_FINALIZED)) { 262437e3a6d3SLuigi Rizzo nmd->flags &= ~NETMAP_MEM_FINALIZED; 262537e3a6d3SLuigi Rizzo /* unmap ptnetmap-memdev memory */ 262637e3a6d3SLuigi Rizzo if (ptnmd->ptn_dev) { 262737e3a6d3SLuigi Rizzo nm_os_pt_memdev_iounmap(ptnmd->ptn_dev); 262837e3a6d3SLuigi Rizzo } 26294dd44461SLuiz Otavio O Souza ptnmd->nm_addr = NULL; 263037e3a6d3SLuigi Rizzo ptnmd->nm_paddr = 0; 263137e3a6d3SLuigi Rizzo } 263237e3a6d3SLuigi Rizzo } 263337e3a6d3SLuigi Rizzo 263437e3a6d3SLuigi Rizzo static ssize_t 263537e3a6d3SLuigi Rizzo netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr) 263637e3a6d3SLuigi Rizzo { 263737e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; 263837e3a6d3SLuigi Rizzo 263937e3a6d3SLuigi Rizzo return (const char *)(vaddr) - (char *)(ptnmd->nm_addr); 264037e3a6d3SLuigi Rizzo } 264137e3a6d3SLuigi Rizzo 264237e3a6d3SLuigi Rizzo static void 264337e3a6d3SLuigi Rizzo netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd) 264437e3a6d3SLuigi Rizzo { 264537e3a6d3SLuigi Rizzo if (nmd == NULL) 264637e3a6d3SLuigi Rizzo return; 264737e3a6d3SLuigi Rizzo if (netmap_verbose) 264837e3a6d3SLuigi Rizzo D("deleting %p", nmd); 264937e3a6d3SLuigi Rizzo if (nmd->active > 0) 265037e3a6d3SLuigi Rizzo D("bug: deleting mem allocator with active=%d!", nmd->active); 265137e3a6d3SLuigi Rizzo if 
(netmap_verbose) 265237e3a6d3SLuigi Rizzo D("done deleting %p", nmd); 265337e3a6d3SLuigi Rizzo NMA_LOCK_DESTROY(nmd); 2654c3e9b4dbSLuiz Otavio O Souza nm_os_free(nmd); 265537e3a6d3SLuigi Rizzo } 265637e3a6d3SLuigi Rizzo 265737e3a6d3SLuigi Rizzo static struct netmap_if * 2658c3e9b4dbSLuiz Otavio O Souza netmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) 265937e3a6d3SLuigi Rizzo { 266037e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem; 266137e3a6d3SLuigi Rizzo struct mem_pt_if *ptif; 266237e3a6d3SLuigi Rizzo struct netmap_if *nifp = NULL; 266337e3a6d3SLuigi Rizzo 266437e3a6d3SLuigi Rizzo NMA_LOCK(na->nm_mem); 266537e3a6d3SLuigi Rizzo 266637e3a6d3SLuigi Rizzo ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); 266737e3a6d3SLuigi Rizzo if (ptif == NULL) { 266837e3a6d3SLuigi Rizzo D("Error: interface %p is not in passthrough", na->ifp); 266937e3a6d3SLuigi Rizzo goto out; 267037e3a6d3SLuigi Rizzo } 267137e3a6d3SLuigi Rizzo 267237e3a6d3SLuigi Rizzo nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) + 267337e3a6d3SLuigi Rizzo ptif->nifp_offset); 267437e3a6d3SLuigi Rizzo NMA_UNLOCK(na->nm_mem); 267537e3a6d3SLuigi Rizzo out: 267637e3a6d3SLuigi Rizzo return nifp; 267737e3a6d3SLuigi Rizzo } 267837e3a6d3SLuigi Rizzo 267937e3a6d3SLuigi Rizzo static void 268037e3a6d3SLuigi Rizzo netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) 268137e3a6d3SLuigi Rizzo { 268237e3a6d3SLuigi Rizzo struct mem_pt_if *ptif; 268337e3a6d3SLuigi Rizzo 268437e3a6d3SLuigi Rizzo NMA_LOCK(na->nm_mem); 268537e3a6d3SLuigi Rizzo ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); 268637e3a6d3SLuigi Rizzo if (ptif == NULL) { 268737e3a6d3SLuigi Rizzo D("Error: interface %p is not in passthrough", na->ifp); 268837e3a6d3SLuigi Rizzo } 268937e3a6d3SLuigi Rizzo NMA_UNLOCK(na->nm_mem); 269037e3a6d3SLuigi Rizzo } 269137e3a6d3SLuigi Rizzo 269237e3a6d3SLuigi Rizzo static int 269337e3a6d3SLuigi Rizzo 
netmap_mem_pt_guest_rings_create(struct netmap_adapter *na) 269437e3a6d3SLuigi Rizzo { 269537e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem; 269637e3a6d3SLuigi Rizzo struct mem_pt_if *ptif; 269737e3a6d3SLuigi Rizzo struct netmap_if *nifp; 269837e3a6d3SLuigi Rizzo int i, error = -1; 269937e3a6d3SLuigi Rizzo 270037e3a6d3SLuigi Rizzo NMA_LOCK(na->nm_mem); 270137e3a6d3SLuigi Rizzo 270237e3a6d3SLuigi Rizzo ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); 270337e3a6d3SLuigi Rizzo if (ptif == NULL) { 270437e3a6d3SLuigi Rizzo D("Error: interface %p is not in passthrough", na->ifp); 270537e3a6d3SLuigi Rizzo goto out; 270637e3a6d3SLuigi Rizzo } 270737e3a6d3SLuigi Rizzo 270837e3a6d3SLuigi Rizzo 270937e3a6d3SLuigi Rizzo /* point each kring to the corresponding backend ring */ 271037e3a6d3SLuigi Rizzo nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset); 271137e3a6d3SLuigi Rizzo for (i = 0; i <= na->num_tx_rings; i++) { 271237e3a6d3SLuigi Rizzo struct netmap_kring *kring = na->tx_rings + i; 271337e3a6d3SLuigi Rizzo if (kring->ring) 271437e3a6d3SLuigi Rizzo continue; 271537e3a6d3SLuigi Rizzo kring->ring = (struct netmap_ring *) 271637e3a6d3SLuigi Rizzo ((char *)nifp + nifp->ring_ofs[i]); 271737e3a6d3SLuigi Rizzo } 271837e3a6d3SLuigi Rizzo for (i = 0; i <= na->num_rx_rings; i++) { 271937e3a6d3SLuigi Rizzo struct netmap_kring *kring = na->rx_rings + i; 272037e3a6d3SLuigi Rizzo if (kring->ring) 272137e3a6d3SLuigi Rizzo continue; 272237e3a6d3SLuigi Rizzo kring->ring = (struct netmap_ring *) 272337e3a6d3SLuigi Rizzo ((char *)nifp + 272437e3a6d3SLuigi Rizzo nifp->ring_ofs[i + na->num_tx_rings + 1]); 272537e3a6d3SLuigi Rizzo } 272637e3a6d3SLuigi Rizzo 272737e3a6d3SLuigi Rizzo error = 0; 272837e3a6d3SLuigi Rizzo out: 272937e3a6d3SLuigi Rizzo NMA_UNLOCK(na->nm_mem); 273037e3a6d3SLuigi Rizzo 273137e3a6d3SLuigi Rizzo return error; 273237e3a6d3SLuigi Rizzo } 273337e3a6d3SLuigi Rizzo 273437e3a6d3SLuigi Rizzo static void 
273537e3a6d3SLuigi Rizzo netmap_mem_pt_guest_rings_delete(struct netmap_adapter *na) 273637e3a6d3SLuigi Rizzo { 273737e3a6d3SLuigi Rizzo #if 0 2738*4f80b14cSVincenzo Maffione enum txrx t; 2739*4f80b14cSVincenzo Maffione 2740*4f80b14cSVincenzo Maffione for_rx_tx(t) { 2741*4f80b14cSVincenzo Maffione u_int i; 2742*4f80b14cSVincenzo Maffione for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { 2743*4f80b14cSVincenzo Maffione struct netmap_kring *kring = &NMR(na, t)[i]; 2744*4f80b14cSVincenzo Maffione 2745*4f80b14cSVincenzo Maffione kring->ring = NULL; 2746*4f80b14cSVincenzo Maffione } 2747*4f80b14cSVincenzo Maffione } 274837e3a6d3SLuigi Rizzo #endif 274937e3a6d3SLuigi Rizzo } 275037e3a6d3SLuigi Rizzo 275137e3a6d3SLuigi Rizzo static struct netmap_mem_ops netmap_mem_pt_guest_ops = { 275237e3a6d3SLuigi Rizzo .nmd_get_lut = netmap_mem_pt_guest_get_lut, 275337e3a6d3SLuigi Rizzo .nmd_get_info = netmap_mem_pt_guest_get_info, 275437e3a6d3SLuigi Rizzo .nmd_ofstophys = netmap_mem_pt_guest_ofstophys, 275537e3a6d3SLuigi Rizzo .nmd_config = netmap_mem_pt_guest_config, 275637e3a6d3SLuigi Rizzo .nmd_finalize = netmap_mem_pt_guest_finalize, 275737e3a6d3SLuigi Rizzo .nmd_deref = netmap_mem_pt_guest_deref, 275837e3a6d3SLuigi Rizzo .nmd_if_offset = netmap_mem_pt_guest_if_offset, 275937e3a6d3SLuigi Rizzo .nmd_delete = netmap_mem_pt_guest_delete, 276037e3a6d3SLuigi Rizzo .nmd_if_new = netmap_mem_pt_guest_if_new, 276137e3a6d3SLuigi Rizzo .nmd_if_delete = netmap_mem_pt_guest_if_delete, 276237e3a6d3SLuigi Rizzo .nmd_rings_create = netmap_mem_pt_guest_rings_create, 276337e3a6d3SLuigi Rizzo .nmd_rings_delete = netmap_mem_pt_guest_rings_delete 276437e3a6d3SLuigi Rizzo }; 276537e3a6d3SLuigi Rizzo 2766c3e9b4dbSLuiz Otavio O Souza /* Called with nm_mem_list_lock held. 
*/ 276737e3a6d3SLuigi Rizzo static struct netmap_mem_d * 2768844a6f0cSLuigi Rizzo netmap_mem_pt_guest_find_memid(nm_memid_t mem_id) 276937e3a6d3SLuigi Rizzo { 277037e3a6d3SLuigi Rizzo struct netmap_mem_d *mem = NULL; 277137e3a6d3SLuigi Rizzo struct netmap_mem_d *scan = netmap_last_mem_d; 277237e3a6d3SLuigi Rizzo 277337e3a6d3SLuigi Rizzo do { 277437e3a6d3SLuigi Rizzo /* find ptnetmap allocator through host ID */ 277537e3a6d3SLuigi Rizzo if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref && 2776844a6f0cSLuigi Rizzo ((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) { 277737e3a6d3SLuigi Rizzo mem = scan; 2778c3e9b4dbSLuiz Otavio O Souza mem->refcount++; 2779c3e9b4dbSLuiz Otavio O Souza NM_DBG_REFC(mem, __FUNCTION__, __LINE__); 278037e3a6d3SLuigi Rizzo break; 278137e3a6d3SLuigi Rizzo } 278237e3a6d3SLuigi Rizzo scan = scan->next; 278337e3a6d3SLuigi Rizzo } while (scan != netmap_last_mem_d); 278437e3a6d3SLuigi Rizzo 278537e3a6d3SLuigi Rizzo return mem; 278637e3a6d3SLuigi Rizzo } 278737e3a6d3SLuigi Rizzo 2788c3e9b4dbSLuiz Otavio O Souza /* Called with nm_mem_list_lock held. 
*/ 278937e3a6d3SLuigi Rizzo static struct netmap_mem_d * 2790844a6f0cSLuigi Rizzo netmap_mem_pt_guest_create(nm_memid_t mem_id) 279137e3a6d3SLuigi Rizzo { 279237e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd; 279337e3a6d3SLuigi Rizzo int err = 0; 279437e3a6d3SLuigi Rizzo 2795c3e9b4dbSLuiz Otavio O Souza ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg)); 279637e3a6d3SLuigi Rizzo if (ptnmd == NULL) { 279737e3a6d3SLuigi Rizzo err = ENOMEM; 279837e3a6d3SLuigi Rizzo goto error; 279937e3a6d3SLuigi Rizzo } 280037e3a6d3SLuigi Rizzo 280137e3a6d3SLuigi Rizzo ptnmd->up.ops = &netmap_mem_pt_guest_ops; 2802844a6f0cSLuigi Rizzo ptnmd->host_mem_id = mem_id; 280337e3a6d3SLuigi Rizzo ptnmd->pt_ifs = NULL; 280437e3a6d3SLuigi Rizzo 280537e3a6d3SLuigi Rizzo /* Assign new id in the guest (We have the lock) */ 280637e3a6d3SLuigi Rizzo err = nm_mem_assign_id_locked(&ptnmd->up); 280737e3a6d3SLuigi Rizzo if (err) 280837e3a6d3SLuigi Rizzo goto error; 280937e3a6d3SLuigi Rizzo 281037e3a6d3SLuigi Rizzo ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED; 281137e3a6d3SLuigi Rizzo ptnmd->up.flags |= NETMAP_MEM_IO; 281237e3a6d3SLuigi Rizzo 281337e3a6d3SLuigi Rizzo NMA_LOCK_INIT(&ptnmd->up); 281437e3a6d3SLuigi Rizzo 2815c3e9b4dbSLuiz Otavio O Souza snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id); 2816c3e9b4dbSLuiz Otavio O Souza 2817c3e9b4dbSLuiz Otavio O Souza 281837e3a6d3SLuigi Rizzo return &ptnmd->up; 281937e3a6d3SLuigi Rizzo error: 282037e3a6d3SLuigi Rizzo netmap_mem_pt_guest_delete(&ptnmd->up); 282137e3a6d3SLuigi Rizzo return NULL; 282237e3a6d3SLuigi Rizzo } 282337e3a6d3SLuigi Rizzo 282437e3a6d3SLuigi Rizzo /* 282537e3a6d3SLuigi Rizzo * find host id in guest allocators and create guest allocator 282637e3a6d3SLuigi Rizzo * if it is not there 282737e3a6d3SLuigi Rizzo */ 282837e3a6d3SLuigi Rizzo static struct netmap_mem_d * 2829844a6f0cSLuigi Rizzo netmap_mem_pt_guest_get(nm_memid_t mem_id) 283037e3a6d3SLuigi Rizzo { 283137e3a6d3SLuigi Rizzo struct netmap_mem_d *nmd; 283237e3a6d3SLuigi 
Rizzo 2833c3e9b4dbSLuiz Otavio O Souza NM_MTX_LOCK(nm_mem_list_lock); 2834844a6f0cSLuigi Rizzo nmd = netmap_mem_pt_guest_find_memid(mem_id); 283537e3a6d3SLuigi Rizzo if (nmd == NULL) { 2836844a6f0cSLuigi Rizzo nmd = netmap_mem_pt_guest_create(mem_id); 283737e3a6d3SLuigi Rizzo } 2838c3e9b4dbSLuiz Otavio O Souza NM_MTX_UNLOCK(nm_mem_list_lock); 283937e3a6d3SLuigi Rizzo 284037e3a6d3SLuigi Rizzo return nmd; 284137e3a6d3SLuigi Rizzo } 284237e3a6d3SLuigi Rizzo 284337e3a6d3SLuigi Rizzo /* 284437e3a6d3SLuigi Rizzo * The guest allocator can be created by ptnetmap_memdev (during the device 2845844a6f0cSLuigi Rizzo * attach) or by ptnetmap device (ptnet), during the netmap_attach. 284637e3a6d3SLuigi Rizzo * 284737e3a6d3SLuigi Rizzo * The order is not important (we have different order in LINUX and FreeBSD). 284837e3a6d3SLuigi Rizzo * The first one, creates the device, and the second one simply attaches it. 284937e3a6d3SLuigi Rizzo */ 285037e3a6d3SLuigi Rizzo 285137e3a6d3SLuigi Rizzo /* Called when ptnetmap_memdev is attaching, to attach a new allocator in 285237e3a6d3SLuigi Rizzo * the guest */ 285337e3a6d3SLuigi Rizzo struct netmap_mem_d * 2854844a6f0cSLuigi Rizzo netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id) 285537e3a6d3SLuigi Rizzo { 285637e3a6d3SLuigi Rizzo struct netmap_mem_d *nmd; 285737e3a6d3SLuigi Rizzo struct netmap_mem_ptg *ptnmd; 285837e3a6d3SLuigi Rizzo 2859844a6f0cSLuigi Rizzo nmd = netmap_mem_pt_guest_get(mem_id); 286037e3a6d3SLuigi Rizzo 286137e3a6d3SLuigi Rizzo /* assign this device to the guest allocator */ 286237e3a6d3SLuigi Rizzo if (nmd) { 286337e3a6d3SLuigi Rizzo ptnmd = (struct netmap_mem_ptg *)nmd; 286437e3a6d3SLuigi Rizzo ptnmd->ptn_dev = ptn_dev; 286537e3a6d3SLuigi Rizzo } 286637e3a6d3SLuigi Rizzo 286737e3a6d3SLuigi Rizzo return nmd; 286837e3a6d3SLuigi Rizzo } 286937e3a6d3SLuigi Rizzo 2870844a6f0cSLuigi Rizzo /* Called when ptnet device is attaching */ 287137e3a6d3SLuigi Rizzo struct netmap_mem_d * 287237e3a6d3SLuigi 
Rizzo netmap_mem_pt_guest_new(struct ifnet *ifp, 287337e3a6d3SLuigi Rizzo unsigned int nifp_offset, 2874844a6f0cSLuigi Rizzo unsigned int memid) 287537e3a6d3SLuigi Rizzo { 287637e3a6d3SLuigi Rizzo struct netmap_mem_d *nmd; 287737e3a6d3SLuigi Rizzo 2878844a6f0cSLuigi Rizzo if (ifp == NULL) { 287937e3a6d3SLuigi Rizzo return NULL; 288037e3a6d3SLuigi Rizzo } 288137e3a6d3SLuigi Rizzo 2882844a6f0cSLuigi Rizzo nmd = netmap_mem_pt_guest_get((nm_memid_t)memid); 288337e3a6d3SLuigi Rizzo 288437e3a6d3SLuigi Rizzo if (nmd) { 2885844a6f0cSLuigi Rizzo netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset); 288637e3a6d3SLuigi Rizzo } 288737e3a6d3SLuigi Rizzo 288837e3a6d3SLuigi Rizzo return nmd; 288937e3a6d3SLuigi Rizzo } 289037e3a6d3SLuigi Rizzo 289137e3a6d3SLuigi Rizzo #endif /* WITH_PTNETMAP_GUEST */ 2892