/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Matteo Landi
 * Copyright (C) 2012-2016 Luigi Rizzo
 * Copyright (C) 2012-2016 Giuseppe Lettieri
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef linux
#include "bsd_glue.h"
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/kernel.h>		/* MALLOC_DEFINE */
#include <sys/proc.h>
#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */

/* M_NETMAP only used in here */
MALLOC_DECLARE(M_NETMAP);
MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");

#endif /* __FreeBSD__ */

#ifdef _WIN32
#include <win_glue.h>
#endif

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include "netmap_mem2.h"

#ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
#define NETMAP_BUF_MAX_NUM  8*4096	/* if too big takes too much time to allocate */
#else
#define NETMAP_BUF_MAX_NUM 20*4096*2	/* large machine */
#endif

#define NETMAP_POOL_MAX_NAMSZ	32


enum {
	NETMAP_IF_POOL = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};


struct netmap_obj_params {
	u_int size;
	u_int num;

	u_int last_size;
	u_int last_num;
};

struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)            */
	size_t memtotal;	/* actual total memory space */

	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t *invalid_bitmap; /* one bit per buffer, 1 means invalid */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */

	u_int objtotal;		/* actual total number of objects. */
	u_int numclusters;	/* actual number of clusters */
	u_int objfree;		/* number of free objects. */

	int alloc_done;		/* we have allocated the memory */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;	/* cluster size */
	u_int _clustentries;	/* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};

#define NMA_LOCK_T		NM_MTX_T
#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_SPINLOCK(n)		NM_MTX_SPINLOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)

struct netmap_mem_ops {
	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *, struct netmap_adapter *);
	void (*nmd_deref)(struct netmap_mem_d *, struct netmap_adapter *);
	ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	struct netmap_if * (*nmd_if_new)(struct netmap_mem_d *,
			struct netmap_adapter *, struct netmap_priv_d *);
	void (*nmd_if_delete)(struct netmap_mem_d *,
			struct netmap_adapter *, struct netmap_if *);
	int (*nmd_rings_create)(struct netmap_mem_d *,
			struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_mem_d *,
			struct netmap_adapter *);
};

struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	size_t nm_totalsize;	/* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
#define NETMAP_MEM_HIDDEN	0x8	/* being prepared */
#define NETMAP_MEM_NOMAP	0x10	/* do not map/unmap pdevs */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;		/* iommu group id */
	int nm_numa_domain;	/* local NUMA domain */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	const struct netmap_mem_ops *ops;

	struct netmap_obj_params params[NETMAP_POOLS_NR];

#define NM_MEM_NAMESZ	16
	char name[NM_MEM_NAMESZ];
};

int
netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_lut(nmd, lut);
	NMA_UNLOCK(nmd);

	return rv;
}

int
netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
    u_int *memflags, nm_memid_t *memid)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
	NMA_UNLOCK(nmd);

	return rv;
}

vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	vm_paddr_t pa;

#if defined(__FreeBSD__)
	/* This function is called by netmap_dev_pager_fault(), which holds a
	 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
	 * spin on the trylock. */
	NMA_SPINLOCK(nmd);
#else
	NMA_LOCK(nmd);
#endif
	pa = nmd->ops->nmd_ofstophys(nmd, off);
	NMA_UNLOCK(nmd);

	return pa;
}

static int
netmap_mem_config(struct netmap_mem_d *nmd)
{
	if (nmd->active) {
		/* already in use. Not fatal, but we
		 * cannot change the configuration
		 */
		return 0;
	}

	return nmd->ops->nmd_config(nmd);
}

ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
{
	ssize_t rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_if_offset(nmd, off);
	NMA_UNLOCK(nmd);

	return rv;
}

static void
netmap_mem_delete(struct netmap_mem_d *nmd)
{
	nmd->ops->nmd_delete(nmd);
}

struct netmap_if *
netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nifp = nmd->ops->nmd_if_new(nmd, na, priv);
	NMA_UNLOCK(nmd);

	return nifp;
}

void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_if_delete(nmd, na, nif);
	NMA_UNLOCK(nmd);
}

int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	int rv;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_rings_create(nmd, na);
	NMA_UNLOCK(nmd);

	return rv;
}

void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_rings_delete(nmd, na);
	NMA_UNLOCK(nmd);
}

static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_check_group(struct netmap_mem_d *, void *);
static void nm_mem_release_id(struct netmap_mem_d *);

nm_memid_t
netmap_mem_get_id(struct netmap_mem_d *nmd)
{
	return nmd->nm_id;
}

#ifdef NM_DEBUG_MEM_PUTGET
#define NM_DBG_REFC(nmd, func, line)	\
	nm_prinf("%s:%d mem[%d:%d] -> %d", func, line, (nmd)->nm_id, (nmd)->nm_grp, (nmd)->refcount);
#else
#define NM_DBG_REFC(nmd, func, line)
#endif

/* circular list of all existing allocators */
static struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
static NM_MTX_T nm_mem_list_lock;

struct netmap_mem_d *
__netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
{
	NM_MTX_LOCK(nm_mem_list_lock);
	nmd->refcount++;
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return nmd;
}

void
__netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
{
	int last;
	NM_MTX_LOCK(nm_mem_list_lock);
	last = (--nmd->refcount == 0);
	if (last)
		nm_mem_release_id(nmd);
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	if (last)
		netmap_mem_delete(nmd);
}
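
/*
 * Typical lifetime pattern, sketched for reference. The get/put
 * wrappers around the two functions above are assumed to be the
 * netmap_mem_get()/netmap_mem_put() macros supplying __FUNCTION__
 * and __LINE__, as used e.g. in netmap_mem_init() below:
 *
 *	struct netmap_mem_d *nmd = netmap_mem_get(&nm_mem); // refcount++
 *	// ... use nmd ...
 *	netmap_mem_put(nmd);	// refcount--; on the last put the id is
 *				// released and the allocator deleted
 */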

int
netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int lasterr = 0;
	if (nm_mem_check_group(nmd, na->pdev) < 0) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	if (netmap_mem_config(nmd))
		goto out;

	nmd->active++;

	nmd->lasterr = nmd->ops->nmd_finalize(nmd, na);

	if (!nmd->lasterr && !(nmd->flags & NETMAP_MEM_NOMAP)) {
		nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
	}

out:
	lasterr = nmd->lasterr;
	NMA_UNLOCK(nmd);

	if (lasterr)
		netmap_mem_deref(nmd, na);

	return lasterr;
}

static int
nm_isset(uint32_t *bitmap, u_int i)
{
	return bitmap[ (i>>5) ] & ( 1U << (i & 31U) );
}
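
/*
 * Illustrative sketch (not compiled in): the bitmap packs one bit per
 * object into 32-bit words, so object i lives in word i>>5 at bit
 * i&31. For example, marking object 70 free touches word 2, bit 6:
 *
 *	uint32_t bitmap[3] = { 0, 0, 0 };
 *	bitmap[70 >> 5] |= 1U << (70 & 31U);	// word 2 becomes 0x40
 *	// nm_isset(bitmap, 70) is now non-zero, nm_isset(bitmap, 71) is 0
 */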

static int
netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p)
{
	u_int n, j;

	if (p->bitmap == NULL) {
		/* Allocate the bitmap */
		n = (p->objtotal + 31) / 32;
		p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n);
		if (p->bitmap == NULL) {
			nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
			    p->name);
			return ENOMEM;
		}
		p->bitmap_slots = n;
	} else {
		memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0]));
	}

	p->objfree = 0;
	/*
	 * Set all the bits in the bitmap that have
	 * corresponding buffers to 1 to indicate they are
	 * free.
	 */
	for (j = 0; j < p->objtotal; j++) {
		if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("skipping %s %d", p->name, j);
			continue;
		}
		p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) );
		p->objfree++;
	}

	if (netmap_verbose)
		nm_prinf("%s free %u", p->name, p->objfree);
	if (p->objfree == 0) {
		if (netmap_verbose)
			nm_prerr("%s: no objects available", p->name);
		return ENOMEM;
	}

	return 0;
}

static int
netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
{
	int i, error = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];

		error = netmap_init_obj_allocator_bitmap(p);
		if (error)
			return error;
	}

	/*
	 * buffers 0 and 1 are reserved
	 */
	if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
		nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
		return ENOMEM;
	}

	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
		/* XXX This check is a workaround that prevents a
		 * NULL pointer crash which currently happens only
		 * with ptnetmap guests.
		 * Removed shared-info --> is the bug still there? */
		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
	}
	return 0;
}
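
/*
 * Note on the ~3U above (illustrative): ~3U is 0xfffffffc, i.e. every
 * bit set except bits 0 and 1, so the assignment leaves the two
 * reserved buffers marked busy while buffers 2..31 of the first
 * bitmap word stay free:
 *
 *	bitmap[0] = ~3U;	// binary 1111...1100: buf 0 and 1 busy
 */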

int
netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int last_user = 0;
	NMA_LOCK(nmd);
	if (na->active_fds <= 0 && !(nmd->flags & NETMAP_MEM_NOMAP))
		netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
	if (nmd->active == 1) {
		last_user = 1;
		/*
		 * Reset the allocator when it falls out of use so that any
		 * pool resources leaked by unclean application exits are
		 * reclaimed.
		 */
		netmap_mem_init_bitmaps(nmd);
	}
	nmd->ops->nmd_deref(nmd, na);

	nmd->active--;
	if (last_user) {
		nmd->lasterr = 0;
	}

	NMA_UNLOCK(nmd);
	return last_user;
}


/* accessor functions */
static int
netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
#ifdef __FreeBSD__
	lut->plut = lut->lut;
#endif
	lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;

	return 0;
}

static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 2,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num  = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = 4098,
	},
};

/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports each have their own allocator.
 */
extern const struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.params = {
		[NETMAP_IF_POOL] = {
			.size = 1024,
			.num  = 100,
		},
		[NETMAP_RING_POOL] = {
			.size = 9*PAGE_SIZE,
			.num  = 200,
		},
		[NETMAP_BUF_POOL] = {
			.size = 2048,
			.num  = NETMAP_BUF_MAX_NUM,
		},
	},

	.nm_id = 1,
	.nm_grp = -1,
	.nm_numa_domain = -1,

	.prev = &nm_mem,
	.next = &nm_mem,

	.ops = &netmap_mem_global_ops,

	.name = "1"
};

static struct netmap_mem_d nm_mem_blueprint;

/* blueprint for the private memory allocators */
/* XXX clang is not happy about using name as a print format */
static const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.nm_grp = -1,
	.nm_numa_domain = -1,

	.flags = NETMAP_MEM_PRIVATE,

	.ops = &netmap_mem_global_ops,
};

/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	SYSBEGIN(mem2_ ## name); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s"); \
	SYSEND

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
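
/*
 * For reference, each DECLARE_SYSCTLS() invocation above creates six
 * knobs under dev.netmap; e.g. the buf pool gets buf_size/buf_num
 * (requested, RW), buf_curr_size/buf_curr_num (current, RO) and
 * priv_buf_size/priv_buf_num (defaults for private allocators).
 * A hypothetical tuning session from userland (value is only an
 * example; 163840 is the NETMAP_BUF_MAX_NUM default):
 *
 *	# sysctl dev.netmap.buf_num=163840
 *	# sysctl dev.netmap.buf_curr_num
 */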

int netmap_port_numa_affinity = 0;
SYSCTL_INT(_dev_netmap, OID_AUTO, port_numa_affinity,
    CTLFLAG_RDTUN, &netmap_port_numa_affinity, 0,
    "Use NUMA-local memory for memory pools when possible");

/* call with nm_mem_list_lock held */
static int
nm_mem_assign_id_locked(struct netmap_mem_d *nmd, int grp_id, int domain)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			nmd->nm_id = id;
			nmd->nm_grp = grp_id;
			nmd->nm_numa_domain = domain;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			nmd->refcount = 1;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	return error;
}

/* call with nm_mem_list_lock *not* held */
static int
nm_mem_assign_id(struct netmap_mem_d *nmd, int grp_id)
{
	int ret;

	NM_MTX_LOCK(nm_mem_list_lock);
	ret = nm_mem_assign_id_locked(nmd, grp_id, -1);
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return ret;
}

/* call with nm_mem_list_lock held */
static void
nm_mem_release_id(struct netmap_mem_d *nmd)
{
	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;

	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;

	nmd->prev = nmd->next = NULL;
}

struct netmap_mem_d *
netmap_mem_find(nm_memid_t id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_last_mem_d;
	do {
		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
			nmd->refcount++;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			NM_MTX_UNLOCK(nm_mem_list_lock);
			return nmd;
		}
		nmd = nmd->next;
	} while (nmd != netmap_last_mem_d);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return NULL;
}

static int
nm_mem_check_group(struct netmap_mem_d *nmd, void *dev)
{
	int err = 0, id;

	/* Skip non-hw adapters.
	 * VALE ports can use a particular allocator through the
	 * vale-ctl -m option.
	 */
	if (!dev)
		return 0;
	id = nm_iommu_group_id(dev);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("iommu_group %d", id);

	NMA_LOCK(nmd);

	if (nmd->nm_grp != id) {
		if (netmap_verbose)
			nm_prerr("iommu group mismatch: %d vs %d",
			    nmd->nm_grp, id);
		nmd->lasterr = err = ENOMEM;
	}

	NMA_UNLOCK(nmd);
	return err;
}

static struct lut_entry *
nm_alloc_lut(u_int nobj)
{
	size_t n = sizeof(struct lut_entry) * nobj;
	struct lut_entry *lut;
#ifdef linux
	lut = vmalloc(n);
#else
	lut = nm_os_malloc(n);
#endif
	return lut;
}

static void
nm_free_lut(struct lut_entry *lut, u_int objtotal)
{
	bzero(lut, sizeof(struct lut_entry) * objtotal);
#ifdef linux
	vfree(lut);
#else
	nm_os_free(lut);
#endif
}

#if defined(linux) || defined(_WIN32)
static struct plut_entry *
nm_alloc_plut(u_int nobj)
{
	size_t n = sizeof(struct plut_entry) * nobj;
	struct plut_entry *lut;
	lut = vmalloc(n);
	return lut;
}

static void
nm_free_plut(struct plut_entry *lut)
{
	vfree(lut);
}
#endif /* linux or _WIN32 */


/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
static vm_paddr_t
netmap_mem2_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
#ifndef _WIN32
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
#else
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
		pa.QuadPart += offset % p[i]._objsize;
#endif
		return pa;
	}
	/* this is only in case of errors */
	nm_prerr("invalid ofs 0x%x out of 0x%zx 0x%zx 0x%zx", (u_int)o,
	    p[NETMAP_IF_POOL].memtotal,
	    p[NETMAP_IF_POOL].memtotal
		+ p[NETMAP_RING_POOL].memtotal,
	    p[NETMAP_IF_POOL].memtotal
		+ p[NETMAP_RING_POOL].memtotal
		+ p[NETMAP_BUF_POOL].memtotal);
#ifndef _WIN32
	return 0;	/* bad address */
#else
	vm_paddr_t res;
	res.QuadPart = 0;
	return res;
#endif
}
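
/*
 * Worked example of the lookup above, with made-up pool sizes: say
 * the if, ring and buf pools take 0x1000, 0x10000 and 0x400000 bytes
 * and buffers are 2048 bytes. Resolving offset 0x11c00:
 *
 *	0x11c00 >= 0x1000  -> skip if pool,   0x10c00 left
 *	0x10c00 >= 0x10000 -> skip ring pool, 0xc00 left
 *	0xc00 < 0x400000   -> buf pool: lut[0xc00 / 2048] is buffer 1,
 *	pa = vtophys(lut[1].vaddr) + 0xc00 % 2048 (1024 bytes in)
 */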

#ifdef _WIN32

/*
 * win32_build_virtual_memory_for_userspace
 *
 * This function gathers all the objects that make up the pools and
 * maps a contiguous virtual memory space for userspace.
 * It works this way:
 * 1 - allocate a Memory Descriptor List (MDL) as wide as the sum
 *     of the memory needed for the pools
 * 2 - cycle through all the objects in every pool, and for every object:
 *
 *     2a - get the list of the physical address descriptors
 *     2b - calculate the offset into the array of page descriptors
 *          in the main MDL
 *     2c - copy the descriptors of the object into the main MDL
 *
 * 3 - return the resulting MDL, which needs to be mapped in userland
 *
 * In this way we end up with a single MDL that describes all the
 * memory for the objects.
 */

PMDL
win32_build_user_vm_map(struct netmap_mem_d *nmd)
{
	u_int memflags, ofs = 0;
	PMDL mainMdl, tempMdl;
	uint64_t memsize;
	int i, j;

	if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
		nm_prerr("memory not finalised yet");
		return NULL;
	}

	mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
	if (mainMdl == NULL) {
		nm_prerr("failed to allocate mdl");
		return NULL;
	}

	NMA_LOCK(nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];
		int clsz = p->_clustsize;
		int clobjs = p->_clustentries; /* objects per cluster */
		int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
		PPFN_NUMBER pSrc, pDst;

		/* each pool has a different cluster size so we need to reallocate */
		tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
		if (tempMdl == NULL) {
			NMA_UNLOCK(nmd);
			nm_prerr("fail to allocate tempMdl");
			IoFreeMdl(mainMdl);
			return NULL;
		}
		pSrc = MmGetMdlPfnArray(tempMdl);
		/* create one entry per cluster, the lut[] has one entry per object */
		for (j = 0; j < p->numclusters; j++, ofs += clsz) {
			pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
			MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
			MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
			RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
			mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
		}
		IoFreeMdl(tempMdl);
	}
	NMA_UNLOCK(nmd);
	return mainMdl;
}

#endif /* _WIN32 */

/*
 * helper function for OS-specific mmap routines (currently only windows).
 * Given an nmd and a pool index, returns the cluster size and number of clusters.
 * Returns 0 if memory is finalised and the pool is valid, otherwise 1.
 * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change.
 */

int
netmap_mem2_get_pool_info(struct netmap_mem_d *nmd, u_int pool, u_int *clustsize, u_int *numclusters)
{
	if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
		return 1; /* invalid arguments */
	// NMA_LOCK_ASSERT(nmd);
	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		*clustsize = *numclusters = 0;
		return 1; /* not ready yet */
	}
	*clustsize = nmd->pools[pool]._clustsize;
	*numclusters = nmd->pools[pool].numclusters;
	return 0; /* success */
}

static int
netmap_mem2_get_info(struct netmap_mem_d *nmd, uint64_t *size,
    u_int *memflags, nm_memid_t *id)
{
	int error = 0;
	error = netmap_mem_config(nmd);
	if (error)
		goto out;
	if (size) {
		if (nmd->flags & NETMAP_MEM_FINALIZED) {
			*size = nmd->nm_totalsize;
		} else {
			int i;
			*size = 0;
			for (i = 0; i < NETMAP_POOLS_NR; i++) {
				struct netmap_obj_pool *p = nmd->pools + i;
				*size += ((size_t)p->_numclusters * (size_t)p->_clustsize);
			}
		}
	}
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;
out:
	return error;
}

/*
 * we store objects by kernel address, need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		nm_prdis("%s: return offset %zd (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}
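
/*
 * Worked example of the reverse mapping (hypothetical numbers): with
 * _clustsize = 4096 and _clustentries = 2, an address 512 bytes into
 * the third cluster matches at loop index i = 4, by which point ofs
 * has accumulated 2 * 4096, so the returned offset is 8192 + 512.
 */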

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

static ssize_t
netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	return netmap_if_offset(nmd, addr);
}

/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j = 0;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		nm_prerr("%s request size %d too large", p->name, len);
		return NULL;
	}

	if (p->objfree == 0) {
		nm_prerr("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots)  {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}
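
/*
 * Usage sketch (mirrors what netmap_new_bufs() below does): callers
 * keep 'pos' as an opaque scan hint so that consecutive allocations
 * resume from the last visited bitmap word instead of rescanning
 * from word 0 every time.
 *
 *	uint32_t pos = 0, index;
 *	void *va = netmap_obj_malloc(p, p->_objsize, &pos, &index);
 *	if (va != NULL)
 *		netmap_obj_free(p, index);	// release by index
 */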

/*
 * free by index, not by address.
 * XXX should we also cleanup the content ?
 */
static int
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	uint32_t *ptr, mask;

	if (j >= p->objtotal) {
		nm_prerr("invalid index %u, max %u", j, p->objtotal);
		return 1;
	}
	ptr = &p->bitmap[j / 32];
	mask = (1 << (j % 32));
	if (*ptr & mask) {
		nm_prerr("ouch, double free on buffer %d", j);
		return 1;
	} else {
		*ptr |= mask;
		p->objfree++;
		return 0;
	}
}

/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* the given address is out of the scope of the current cluster */
		if (base == NULL || vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

unsigned
netmap_mem_bufsize(struct netmap_mem_d *nmd)
{
	return nmd->pools[NETMAP_BUF_POOL]._objsize;
}

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)


#if 0 /* currently unused */
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
#endif

/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
 */
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default, 'null' index ie empty list */
	for (i = 0 ; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}
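
/*
 * Resulting layout, illustrated with made-up indexes: the list is
 * threaded through the buffers themselves, each buffer's first
 * uint32_t holding the index of the next buffer and 0 terminating
 * the chain. If three allocations return indexes 37, 12 and 9:
 *
 *	*head = 9;  buf[9][0] = 12;  buf[12][0] = 37;  buf[37][0] = 0;
 */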

static void
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
{
	struct lut_entry *lut = na->na_lut.lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;

	nm_prdis("freeing the extra list");
	for (i = 0; head >= 2 && head < p->objtotal; i++) {
		cur = head;
		buf = lut[head].vaddr;
		head = *buf;
		*buf = 0;
		if (netmap_obj_free(p, cur))
			break;
	}
	if (head != 0)
		nm_prerr("breaking with head %d", head);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("freed %d buffers", i);
}


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;	/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
		slot[i].ptr = 0;
	}

	nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}

static void
netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i;

	for (i = 0; i < n; i++) {
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}
}


static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}


static void
netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	u_int i;

	for (i = 0; i < n; i++) {
		if (slot[i].buf_idx > 1)
			netmap_free_buf(nmd, slot[i].buf_idx);
	}
	nm_prdis("%s: released some buffers, available: %u",
	    nmd->pools[NETMAP_BUF_POOL].name, nmd->pools[NETMAP_BUF_POOL].objfree);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		nm_os_free(p->bitmap);
	p->bitmap = NULL;
	if (p->invalid_bitmap)
		nm_os_free(p->invalid_bitmap);
	p->invalid_bitmap = NULL;
	if (!p->alloc_done) {
		/* allocation was done by somebody else.
		 * Let them clean up after themselves.
		 */
		return;
	}
	if (p->lut) {
		u_int i;

		/*
		 * Free each cluster allocated in
		 * netmap_finalize_obj_allocator(). The cluster start
		 * addresses are stored at multiples of p->_clustentries
		 * in the lut.
		 */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		nm_free_lut(p->lut, p->objtotal);
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
	p->alloc_done = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per entry */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
#define LINE_ROUND	NM_BUF_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		nm_prerr("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		nm_prinf("aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		nm_prerr("requested objsize %d out of range [%d, %d]",
		    objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		nm_prerr("requested objtotal %d out of range [%d, %d]",
		    objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
	}
	/* exact solution not found */
	if (clustentries == 0) {
		nm_prerr("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* compute clustsize */
	clustsize = clustentries * objsize;
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("objsize %d clustsize %d objects %d",
		    objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}
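
/*
 * Worked example of the search above: with objsize = 2048 and
 * PAGE_SIZE = 4096, i = 1 leaves delta = 2048 while i = 2 gives
 * used = 4096 and delta = 0, so clustentries = 2 and clustsize =
 * 4096. A request for objtotal = 100000 then yields _numclusters =
 * 50000 and _objtotal = 100000. For objsize = 3072 the first exact
 * fit is i = 4 (12288 bytes, i.e. 3 pages per cluster).
 */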

/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_mem_d *nmd, struct netmap_obj_pool *p)
{
	int i;	/* must be signed */
	size_t n;

	if (p->lut) {
		/* if the lut is already there we assume that also all the
		 * clusters have already been allocated, possibly by somebody
		 * else (e.g., extmem). In the latter case, the alloc_done flag
		 * will remain at zero, so that we will not attempt to
		 * deallocate the clusters by ourselves in
		 * netmap_reset_obj_allocator.
		 */
		return 0;
	}

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;
	p->alloc_done = 1;

	p->lut = nm_alloc_lut(p->objtotal);
	if (p->lut == NULL) {
		nm_prerr("Unable to create lookup table for '%s'", p->name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers
	 */

	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		/*
		 * XXX Note, we only need contigmalloc() for buffers attached
		 * to native interfaces. In all other cases (nifp, netmap rings
		 * and even buffers for VALE ports or emulated interfaces) we
		 * can live with standard malloc, because the hardware will not
		 * access the pages directly.
		 */
		if (nmd->nm_numa_domain == -1) {
			clust = contigmalloc(n, M_NETMAP,
			    M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
		} else {
			struct domainset *ds;

			ds = DOMAINSET_PREF(nmd->nm_numa_domain);
			clust = contigmalloc_domainset(n, M_NETMAP,
			    ds, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
		}
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			nm_prerr("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
					    n, M_NETMAP);
				p->lut[i].vaddr = NULL;
			}
out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		/*
		 * Set lut state for all buffers in the current cluster.
		 *
		 * [i, lim) is the set of buffer indexes that cover the
		 * current cluster.
		 *
		 * 'clust' is really the address of the current buffer in
		 * the current cluster as we index through it with a stride
		 * of p->_objsize.
		 */
		for (; i < lim; i++, clust += p->_objsize) {
			p->lut[i].vaddr = clust;
#if !defined(linux) && !defined(_WIN32)
			p->lut[i].paddr = vtophys(clust);
#endif
		}
	}
	p->memtotal = (size_t)p->numclusters * (size_t)p->_clustsize;
	if (netmap_verbose)
		nm_prinf("Pre-allocated %d clusters (%d/%zuKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_mem_params_changed(struct netmap_obj_params *p)
{
	int i, rv = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) {
			p[i].last_size = p[i].size;
			p[i].last_num = p[i].num;
			rv = 1;
		}
	}
	return rv;
}

static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;

	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
}

static int
netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int i, lim = p->objtotal;
	struct netmap_lut *lut;
	if (na == NULL || na->pdev == NULL)
		return 0;

	lut = &na->na_lut;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */
	nm_prdis("unmapping and freeing plut for %s", na->name);
	if (lut->plut == NULL || na->pdev == NULL)
		return 0;
	for (i = 0; i < lim; i += p->_clustentries) {
		if (lut->plut[i].paddr)
			netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
	}
	nm_free_plut(lut->plut);
	lut->plut = NULL;
#endif /* linux */

	return 0;
}

static int
netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int error = 0;
	int i, lim = p->objtotal;
	struct netmap_lut *lut = &na->na_lut;

	if (na->pdev == NULL)
		return 0;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */

	if (lut->plut != NULL) {
		nm_prdis("plut already allocated for %s", na->name);
		return 0;
	}

	nm_prdis("allocating physical lut for %s", na->name);
	lut->plut = nm_alloc_plut(lim);
	if (lut->plut == NULL) {
		nm_prerr("Failed to allocate physical lut for %s", na->name);
		return ENOMEM;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		lut->plut[i].paddr = 0;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		int j;

		if (p->lut[i].vaddr == NULL)
			continue;

		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
		    p->lut[i].vaddr, p->_clustsize);
		if (error) {
			nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
			break;
		}

		for (j = 1; j < p->_clustentries; j++) {
			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
		}
	}

	if (error)
		netmap_mem_unmap(p, na);

#endif /* linux */

	return error;
}
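
/*
 * Sketch of the plut fill pattern above (Linux path): only the first
 * entry of each cluster is DMA-mapped; the other entries are derived
 * arithmetically, since a cluster is physically contiguous. With
 * _clustentries = 2 and _objsize = 2048 the array ends up as:
 *
 *	plut[0].paddr = <mapped>;  plut[1].paddr = plut[0].paddr + 2048;
 *	plut[2].paddr = <mapped>;  plut[3].paddr = plut[2].paddr + 2048;
 */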

static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(nmd, &nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	nmd->nm_totalsize = (nmd->nm_totalsize + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
	if (nmd->lasterr)
		goto error;

	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		nm_prinf("interfaces %zd KB, rings %zd KB, buffers %zd MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);

	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}

/*
 * allocator for private memory
 */
static void *
_netmap_mem_private_new(size_t size, struct netmap_obj_params *p, int grp_id,
    const struct netmap_mem_ops *ops, uint64_t memtotal, int *perr)
{
	struct netmap_mem_d *d = NULL;
	int i, err = 0;
	int checksz = 0;

	/* if memtotal is != 0 we check that the request fits the available
	 * memory. Moreover, any surplus memory is assigned to buffers.
	 */
	checksz = (memtotal > 0);

	d = nm_os_malloc(size);
	if (d == NULL) {
		err = ENOMEM;
		goto error;
	}

	*d = nm_blueprint;
	d->ops = ops;

	err = nm_mem_assign_id(d, grp_id);
	if (err)
		goto error_free;
	snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
		    nm_blueprint.pools[i].name,
		    d->name);
		if (checksz) {
			uint64_t poolsz = (uint64_t)p[i].num * p[i].size;
			if (memtotal < poolsz) {
				nm_prerr("%s: request too large", d->pools[i].name);
				err = ENOMEM;
				goto error_rel_id;
			}
			memtotal -= poolsz;
		}
		d->params[i].num = p[i].num;
		d->params[i].size = p[i].size;
	}
	if (checksz && memtotal > 0) {
		uint64_t sz = d->params[NETMAP_BUF_POOL].size;
		uint64_t n = (memtotal + sz - 1) / sz;

		if (n) {
			if (netmap_verbose) {
				nm_prinf("%s: adding %llu more buffers",
				    d->pools[NETMAP_BUF_POOL].name,
				    (unsigned long long)n);
			}
			d->params[NETMAP_BUF_POOL].num += n;
		}
	}

	NMA_LOCK_INIT(d);

	err = netmap_mem_config(d);
	if (err)
		goto error_destroy_lock;

	d->flags &= ~NETMAP_MEM_FINALIZED;

	return d;

error_destroy_lock:
	NMA_LOCK_DESTROY(d);
error_rel_id:
	nm_mem_release_id(d);
error_free:
	nm_os_free(d);
error:
	if (perr)
		*perr = err;
	return NULL;
}

struct netmap_mem_d *
netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
    u_int extra_bufs, u_int npipes, int *perr)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i;
	u_int v, maxd;
	/* account for the fake host rings */
	txr++;
	rxr++;

	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	}

	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	v = 2 + 4 * npipes;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	 */
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other end, the pipe ring dimension may be different from
	 * the parent port ring dimension. As a compromise, we allocate twice the
	 * space actually needed if the pipe rings were the same size as the parent rings
	 */
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;

	if (netmap_verbose)
		nm_prinf("req if %d*%d ring %d*%d buf %d*%d",
		    p[NETMAP_IF_POOL].num,
		    p[NETMAP_IF_POOL].size,
		    p[NETMAP_RING_POOL].num,
		    p[NETMAP_RING_POOL].size,
		    p[NETMAP_BUF_POOL].num,
		    p[NETMAP_BUF_POOL].size);

	d = _netmap_mem_private_new(sizeof(*d), p, -1, &netmap_mem_global_ops, 0, perr);

	return d;
}
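
/*
 * Worked example of the sizing above (hypothetical request): txr =
 * rxr = 1 and txd = rxd = 1024, with npipes = extra_bufs = 0. After
 * accounting for the host rings, txr = rxr = 2, so the ring pool
 * needs at least 4 rings and the buffer pool at least
 * 2*1024 + 2*1024 + 2 = 4098 buffers, matching the default minimum
 * in netmap_min_priv_params.
 */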

/*
 * Take a reference to an IOMMU- and NUMA-local allocator: find an
 * existing one or create a new one. For non-hw adapters, fall back
 * to the global allocator.
 */
1827 struct netmap_mem_d *
netmap_mem_get_allocator(struct netmap_adapter * na)1828 netmap_mem_get_allocator(struct netmap_adapter *na)
1829 {
1830 int i, domain, err, grp_id;
1831 struct netmap_mem_d *nmd;
1832
1833 if (na == NULL || na->pdev == NULL)
1834 return netmap_mem_get(&nm_mem);
1835
1836 domain = nm_numa_domain(na->pdev);
1837 grp_id = nm_iommu_group_id(na->pdev);
1838
1839 NM_MTX_LOCK(nm_mem_list_lock);
1840 nmd = netmap_last_mem_d;
1841 do {
1842 if (!(nmd->flags & NETMAP_MEM_HIDDEN) &&
1843 nmd->nm_grp == grp_id && nmd->nm_numa_domain == domain) {
1844 nmd->refcount++;
1845 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
1846 NM_MTX_UNLOCK(nm_mem_list_lock);
1847 return nmd;
1848 }
1849 nmd = nmd->next;
1850 } while (nmd != netmap_last_mem_d);
1851
1852 nmd = nm_os_malloc(sizeof(*nmd));
1853 if (nmd == NULL)
1854 goto error;
1855
1856 *nmd = nm_mem_blueprint;
1857
1858 err = nm_mem_assign_id_locked(nmd, grp_id, domain);
1859 if (err)
1860 goto error_free;
1861
1862 snprintf(nmd->name, sizeof(nmd->name), "%d", nmd->nm_id);
1863
1864 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1865 snprintf(nmd->pools[i].name, NETMAP_POOL_MAX_NAMSZ, "%s-%s",
1866 nm_mem_blueprint.pools[i].name, nmd->name);
1867 }
1868
1869 NMA_LOCK_INIT(nmd);
1870
1871 NM_MTX_UNLOCK(nm_mem_list_lock);
1872 return nmd;
1873
1874 error_free:
1875 nm_os_free(nmd);
1876 error:
1877 NM_MTX_UNLOCK(nm_mem_list_lock);
1878 return NULL;
1879 }

/* call with lock held */
static int
netmap_mem2_config(struct netmap_mem_d *nmd)
{
	int i;

	if (!netmap_mem_params_changed(nmd->params))
		goto out;

	nm_prdis("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				nmd->params[i].num, nmd->params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:
	return nmd->lasterr;
}

static int
netmap_mem2_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (netmap_mem_finalize_all(nmd))
		goto out;

	nmd->lasterr = 0;
out:
	return nmd->lasterr;
}

static void
netmap_mem2_delete(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nmd->pools[i]);
	}

	NMA_LOCK_DESTROY(nmd);
	if (nmd != &nm_mem)
		nm_os_free(nmd);
}

#ifdef WITH_EXTMEM
/* doubly linked list of all existing external allocators */
static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
NM_MTX_T nm_mem_ext_list_lock;
#endif /* WITH_EXTMEM */

int
netmap_mem_init(void)
{
	nm_mem_blueprint = nm_mem;
	NM_MTX_INIT(nm_mem_list_lock);
	NMA_LOCK_INIT(&nm_mem);
	netmap_mem_get(&nm_mem);
#ifdef WITH_EXTMEM
	NM_MTX_INIT(nm_mem_ext_list_lock);
#endif /* WITH_EXTMEM */
	return (0);
}

void
netmap_mem_fini(void)
{
	netmap_mem_put(&nm_mem);
}

static int
netmap_mem_ring_needed(struct netmap_kring *kring)
{
	return kring->ring == NULL &&
		(kring->users > 0 ||
		 (kring->nr_kflags & NKR_NEEDRING));
}

static int
netmap_mem_ring_todelete(struct netmap_kring *kring)
{
	return kring->ring != NULL &&
		kring->users == 0 &&
		!(kring->nr_kflags & NKR_NEEDRING);
}


/* call with NMA_LOCK held.
 *
 * Allocate netmap rings and buffers for this card.
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
 */
static int
netmap_mem2_rings_create(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;

		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;
			u_int len, ndesc;

			if (!netmap_mem_ring_needed(kring)) {
				/* unneeded, or already created by somebody else */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT creating ring %s (ring %p, users %d needring %d)",
						kring->name, ring, kring->users,
						kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("creating %s", kring->name);
			ndesc = kring->nkr_num_slots;
			len = sizeof(struct netmap_ring) +
				  ndesc * sizeof(struct netmap_slot);
			ring = netmap_ring_malloc(nmd, len);
			if (ring == NULL) {
				nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
				goto cleanup;
			}
			nm_prdis("txring at %p", ring);
			kring->ring = ring;
			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
			*(int64_t *)(uintptr_t)&ring->buf_ofs =
			    (nmd->pools[NETMAP_IF_POOL].memtotal +
			     nmd->pools[NETMAP_RING_POOL].memtotal) -
			     netmap_ring_offset(nmd, ring);

			/* copy values from kring */
			ring->head = kring->rhead;
			ring->cur = kring->rcur;
			ring->tail = kring->rtail;
			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
				netmap_mem_bufsize(nmd);
			nm_prdis("%s h %d c %d t %d", kring->name,
				ring->head, ring->cur, ring->tail);
			nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				/* this is a real ring */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("allocating buffers for %s", kring->name);
				if (netmap_new_bufs(nmd, ring->slot, ndesc)) {
					nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
					goto cleanup;
				}
			} else {
				/* this is a fake ring, set all indices to 0 */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT allocating buffers for %s", kring->name);
				netmap_mem_set_ring(nmd, ring->slot, ndesc, 0);
			}
			/* ring info */
			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
		}
	}

	return 0;

cleanup:
	/* we cannot actually cleanup here, since we don't own kring->users
	 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
	 * the first or zero-out the second, then call netmap_free_rings()
	 * to do the cleanup.
	 */

	return ENOMEM;
}
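
/*
 * Illustrative sketch: buf_ofs records the distance from each ring to
 * the start of the buffer pool, so userspace can reach buffer 'idx'
 * with pure pointer arithmetic, without knowing the pool layout. A
 * hypothetical helper (this is what the NETMAP_BUF() macro in
 * net/netmap_user.h does):
 *
 *	static inline char *
 *	buf_addr(struct netmap_ring *ring, uint32_t idx)
 *	{
 *		return (char *)ring + ring->buf_ofs +
 *		    (int64_t)idx * ring->nr_buf_size;
 *	}
 */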

static void
netmap_mem2_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;

			if (!netmap_mem_ring_todelete(kring)) {
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT deleting ring %s (ring %p, users %d needring %d)",
						kring->name, ring, kring->users,
						kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("deleting ring %s", kring->name);
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				nm_prdis("freeing bufs for %s", kring->name);
				netmap_free_bufs(nmd, ring->slot, kring->nkr_num_slots);
			} else {
				nm_prdis("NOT freeing bufs for %s", kring->name);
			}
			netmap_ring_free(nmd, ring);
			kring->ring = NULL;
		}
	}
}
2099
2100 /* call with NMA_LOCK held */
2101 /*
2102 * Allocate the per-fd structure netmap_if.
2103 *
2104 * We assume that the configuration stored in na
2105 * (number of tx/rx rings and descs) does not change while
2106 * the interface is in netmap mode.
2107 */
2108 static struct netmap_if *
netmap_mem2_if_new(struct netmap_mem_d * nmd,struct netmap_adapter * na,struct netmap_priv_d * priv)2109 netmap_mem2_if_new(struct netmap_mem_d *nmd,
2110 struct netmap_adapter *na, struct netmap_priv_d *priv)
2111 {
2112 struct netmap_if *nifp;
2113 ssize_t base; /* handy for relative offsets between rings and nifp */
2114 u_int i, len, n[NR_TXRX], ntot;
2115 enum txrx t;
2116
2117 ntot = 0;
2118 for_rx_tx(t) {
2119 /* account for the (eventually fake) host rings */
2120 n[t] = netmap_all_rings(na, t);
2121 ntot += n[t];
2122 }
2123 /*
2124 * the descriptor is followed inline by an array of offsets
2125 * to the tx and rx rings in the shared memory region.
2126 */
2127
2128 len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
2129 nifp = netmap_if_malloc(nmd, len);
2130 if (nifp == NULL) {
2131 return NULL;
2132 }
2133
2134 /* initialize base fields -- override const */
2135 *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
2136 *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
2137 *(u_int *)(uintptr_t)&nifp->ni_host_tx_rings =
2138 (na->num_host_tx_rings ? na->num_host_tx_rings : 1);
2139 *(u_int *)(uintptr_t)&nifp->ni_host_rx_rings =
2140 (na->num_host_rx_rings ? na->num_host_rx_rings : 1);
2141 strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));
2142
2143 /*
2144 * fill the slots for the rx and tx rings. They contain the offset
2145 * between the ring and nifp, so the information is usable in
2146 * userspace to reach the ring from the nifp.
2147 */
2148 base = netmap_if_offset(nmd, nifp);
2149 for (i = 0; i < n[NR_TX]; i++) {
2150 /* XXX instead of ofs == 0 maybe use the offset of an error
2151 * ring, like we do for buffers? */
2152 ssize_t ofs = 0;
2153
2154 if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
2155 && i < priv->np_qlast[NR_TX]) {
2156 ofs = netmap_ring_offset(nmd,
2157 na->tx_rings[i]->ring) - base;
2158 }
2159 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
2160 }
2161 for (i = 0; i < n[NR_RX]; i++) {
2162 /* XXX instead of ofs == 0 maybe use the offset of an error
2163 * ring, like we do for buffers? */
2164 ssize_t ofs = 0;
2165
2166 if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
2167 && i < priv->np_qlast[NR_RX]) {
2168 ofs = netmap_ring_offset(nmd,
2169 na->rx_rings[i]->ring) - base;
2170 }
2171 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
2172 }
2173
2174 return (nifp);
2175 }
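
/*
 * Illustrative sketch: the ring_ofs[] array lets userspace reach each
 * ring starting from the netmap_if returned by mmap(). A hypothetical
 * client would do:
 *
 *	struct netmap_ring *txring = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i]);		// tx ring i
 *	struct netmap_ring *rxring = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i + n_tx]);	// rx ring i
 *
 * where n_tx counts all tx rings, host rings included, matching the
 * i + n[NR_TX] indexing used above. The NETMAP_TXRING()/NETMAP_RXRING()
 * macros in net/netmap_user.h encapsulate this arithmetic.
 */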

static void
netmap_mem2_if_delete(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	if (nifp->ni_bufs_head)
		netmap_extra_free(na, nifp->ni_bufs_head);
	netmap_if_free(nmd, nifp);
}

static void
netmap_mem2_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("active = %d", nmd->active);
}

const struct netmap_mem_ops netmap_mem_global_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem2_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem2_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};
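
/*
 * Note: these handlers are not meant to be called directly; the
 * generic netmap_mem_*() wrappers dispatch through nmd->ops, e.g.
 * (sketch):
 *
 *	error = nmd->ops->nmd_config(nmd);
 *
 * so the same call sites serve the global, private, external and
 * ptnetmap-guest allocators defined in this file.
 */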

int
netmap_mem_pools_info_get(struct nmreq_pools_info *req,
				struct netmap_mem_d *nmd)
{
	int ret;

	ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
					&req->nr_mem_id);
	if (ret) {
		return ret;
	}

	NMA_LOCK(nmd);
	req->nr_if_pool_offset = 0;
	req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
	req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;

	req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
	req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
	req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;

	req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
			     nmd->pools[NETMAP_RING_POOL].memtotal;
	req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
	NMA_UNLOCK(nmd);

	return 0;
}
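
/*
 * Illustrative layout: the three pools sit back to back in the
 * mmap()ed region, which is why the offsets reported above are running
 * sums of memtotal:
 *
 *	0                                    if pool
 *	if.memtotal                          ring pool
 *	if.memtotal + ring.memtotal          buf pool
 *
 * The same sum appears in the buf_ofs computation in
 * netmap_mem2_rings_create() above.
 */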

#ifdef WITH_EXTMEM
struct netmap_mem_ext {
	struct netmap_mem_d up;

	struct nm_os_extmem *os;
	struct netmap_mem_ext *next, *prev;
};

/* call with nm_mem_list_lock held */
static void
netmap_mem_ext_register(struct netmap_mem_ext *e)
{
	NM_MTX_LOCK(nm_mem_ext_list_lock);
	if (netmap_mem_ext_list)
		netmap_mem_ext_list->prev = e;
	e->next = netmap_mem_ext_list;
	netmap_mem_ext_list = e;
	e->prev = NULL;
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
}

/* call with nm_mem_list_lock held */
static void
netmap_mem_ext_unregister(struct netmap_mem_ext *e)
{
	if (e->prev)
		e->prev->next = e->next;
	else
		netmap_mem_ext_list = e->next;
	if (e->next)
		e->next->prev = e->prev;
	e->prev = e->next = NULL;
}

static struct netmap_mem_ext *
netmap_mem_ext_search(struct nm_os_extmem *os)
{
	struct netmap_mem_ext *e;

	NM_MTX_LOCK(nm_mem_ext_list_lock);
	for (e = netmap_mem_ext_list; e; e = e->next) {
		if (nm_os_extmem_isequal(e->os, os)) {
			netmap_mem_get(&e->up);
			break;
		}
	}
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
	return e;
}


static void
netmap_mem_ext_delete(struct netmap_mem_d *d)
{
	int i;
	struct netmap_mem_ext *e =
		(struct netmap_mem_ext *)d;

	netmap_mem_ext_unregister(e);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &d->pools[i];

		if (p->lut) {
			nm_free_lut(p->lut, p->objtotal);
			p->lut = NULL;
		}
	}
	if (e->os)
		nm_os_extmem_delete(e->os);
	netmap_mem2_delete(d);
}

static int
netmap_mem_ext_config(struct netmap_mem_d *nmd)
{
	return 0;
}

struct netmap_mem_ops netmap_mem_ext_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem_ext_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem_ext_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

struct netmap_mem_d *
netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
{
	int error = 0;
	int i, j;
	struct netmap_mem_ext *nme;
	char *clust;
	size_t off;
	struct nm_os_extmem *os = NULL;
	int nr_pages;

	// XXX sanity checks
	if (pi->nr_if_pool_objtotal == 0)
		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
	if (pi->nr_if_pool_objsize == 0)
		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
	if (pi->nr_ring_pool_objtotal == 0)
		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
	if (pi->nr_ring_pool_objsize == 0)
		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
	if (pi->nr_buf_pool_objtotal == 0)
		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
	if (pi->nr_buf_pool_objsize == 0)
		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
	if (netmap_verbose & NM_DEBUG_MEM)
		nm_prinf("if %d %d ring %d %d buf %d %d",
			pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
			pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
			pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);

	os = nm_os_extmem_create(usrptr, pi, &error);
	if (os == NULL) {
		nm_prerr("os extmem creation failed");
		goto out;
	}

	nme = netmap_mem_ext_search(os);
	if (nme) {
		nm_os_extmem_delete(os);
		return &nme->up;
	}
	if (netmap_verbose & NM_DEBUG_MEM)
		nm_prinf("not found, creating new");

	nme = _netmap_mem_private_new(sizeof(*nme),
			(struct netmap_obj_params[]){
				{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
				{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
				{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
			-1,
			&netmap_mem_ext_ops,
			pi->nr_memsize,
			&error);
	if (nme == NULL)
		goto out_unmap;

	nr_pages = nm_os_extmem_nr_pages(os);

	/* from now on pages will be released by the nme destructor;
	 * we set os = NULL to prevent the release in out_unmap below
	 */
	nme->os = os;
	os = NULL; /* pass ownership */

	clust = nm_os_extmem_nextpage(nme->os);
	off = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nme->up.pools[i];
		struct netmap_obj_params *o = &nme->up.params[i];

		p->_objsize = o->size;
		p->_clustsize = o->size;
		p->_clustentries = 1;

		p->lut = nm_alloc_lut(o->num);
		if (p->lut == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
		if (p->invalid_bitmap == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		if (nr_pages == 0) {
			p->objtotal = 0;
			p->memtotal = 0;
			p->objfree = 0;
			continue;
		}

		for (j = 0; j < o->num && nr_pages > 0; j++) {
			size_t noff;

			p->lut[j].vaddr = clust + off;
#if !defined(linux) && !defined(_WIN32)
			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
#endif
			nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
			noff = off + p->_objsize;
			if (noff < PAGE_SIZE) {
				off = noff;
				continue;
			}
			nm_prdis("too big, recomputing offset...");
			while (noff >= PAGE_SIZE) {
				char *old_clust = clust;
				noff -= PAGE_SIZE;
				clust = nm_os_extmem_nextpage(nme->os);
				nr_pages--;
				nm_prdis("noff %zu page %p nr_pages %d", noff,
						clust, nr_pages);
				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
					(nr_pages == 0 ||
					 old_clust + PAGE_SIZE != clust))
				{
					/* out of space or non contiguous,
					 * drop this object
					 */
					p->invalid_bitmap[j >> 5] |= 1U << (j & 31U);
					nm_prdis("non contiguous at off %zu, drop", noff);
				}
				if (nr_pages == 0)
					break;
			}
			off = noff;
		}
		p->objtotal = j;
		p->numclusters = p->objtotal;
		p->memtotal = j * (size_t)p->_objsize;
		nm_prdis("%d memtotal %zu", j, p->memtotal);
	}

	netmap_mem_ext_register(nme);

	return &nme->up;

out_delete:
	netmap_mem_put(&nme->up);
out_unmap:
	if (os)
		nm_os_extmem_delete(os);
out:
	if (perror)
		*perror = error;
	return NULL;
}
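
/*
 * Usage sketch (hypothetical values, for illustration): userspace
 * requests an external-memory allocator by passing a pointer to its
 * own region together with the desired pool geometry:
 *
 *	struct nmreq_pools_info pi = { 0 };
 *	pi.nr_memsize = 1UL << 24;		// hypothetical 16 MiB region
 *	pi.nr_buf_pool_objsize = 2048;		// 0 selects the defaults
 *	nmd = netmap_mem_ext_create((uint64_t)usraddr, &pi, &error);
 *
 * A second request on the same region is caught by
 * netmap_mem_ext_search() and returns the existing allocator.
 */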
#endif /* WITH_EXTMEM */


#ifdef WITH_PTNETMAP
struct mem_pt_if {
	struct mem_pt_if *next;
	if_t ifp;
	unsigned int nifp_offset;
};

/* Netmap allocator for ptnetmap guests. */
struct netmap_mem_ptg {
	struct netmap_mem_d up;

	vm_paddr_t nm_paddr;		/* physical address in the guest */
	void *nm_addr;			/* virtual address in the guest */
	struct netmap_lut buf_lut;	/* lookup table for BUF pool in the guest */
	nm_memid_t host_mem_id;		/* allocator identifier in the host */
	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
};

/* Link a passthrough interface to a passthrough netmap allocator. */
static int
netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, if_t ifp,
			    unsigned int nifp_offset)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));

	if (!ptif) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	ptif->ifp = ifp;
	ptif->nifp_offset = nifp_offset;

	if (ptnmd->pt_ifs) {
		ptif->next = ptnmd->pt_ifs;
	}
	ptnmd->pt_ifs = ptif;

	NMA_UNLOCK(nmd);

	nm_prinf("ifp=%s,nifp_offset=%u",
		if_name(ptif->ifp), ptif->nifp_offset);

	return 0;
}

/* Called with NMA_LOCK(nmd) held. */
static struct mem_pt_if *
netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, if_t ifp)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *curr;

	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
			return curr;
		}
	}

	return NULL;
}

/* Unlink a passthrough interface from a passthrough netmap allocator. */
int
netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, if_t ifp)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *prev = NULL;
	struct mem_pt_if *curr;
	int ret = -1;

	NMA_LOCK(nmd);

	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
			if (prev) {
				prev->next = curr->next;
			} else {
				ptnmd->pt_ifs = curr->next;
			}
			nm_prinf("removed (ifp=%s,nifp_offset=%u)",
				if_name(curr->ifp), curr->nifp_offset);
			nm_os_free(curr);
			ret = 0;
			break;
		}
		prev = curr;
	}

	NMA_UNLOCK(nmd);

	return ret;
}

static int
netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		return EINVAL;
	}

	*lut = ptnmd->buf_lut;
	return 0;
}

static int
netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
			     u_int *memflags, uint16_t *id)
{
	int error = 0;

	error = nmd->ops->nmd_config(nmd);
	if (error)
		goto out;

	if (size)
		*size = nmd->nm_totalsize;
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;

out:
	return error;
}

static vm_paddr_t
netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	vm_paddr_t paddr;

	/* if the offset is valid, just return csb->base_addr + off */
	paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
	nm_prdis("off %lx paddr %lx", off, (unsigned long)paddr);
	return paddr;
}

static int
netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
	 */
	return 0;
}

static int
netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	uint64_t mem_size;
	uint32_t bufsize;
	uint32_t nbuffers;
	uint32_t poolofs;
	vm_paddr_t paddr;
	char *vaddr;
	int i;
	int error = 0;

	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (ptnmd->ptn_dev == NULL) {
		nm_prerr("ptnetmap memdev not attached");
		error = ENOMEM;
		goto out;
	}
	/* Map memory through ptnetmap-memdev BAR. */
	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
				      &ptnmd->nm_addr, &mem_size);
	if (error)
		goto out;

	/* Initialize the lut using the information contained in the
	 * ptnetmap memory device. */
	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OBJNUM);

	/* allocate the lut */
	if (ptnmd->buf_lut.lut == NULL) {
		nm_prinf("allocating lut");
		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
		if (ptnmd->buf_lut.lut == NULL) {
			nm_prerr("lut allocation failed");
			return ENOMEM;
		}
	}

	/* we have physically contiguous memory mapped through PCI BAR */
	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OFS);
	vaddr = (char *)(ptnmd->nm_addr) + poolofs;
	paddr = ptnmd->nm_paddr + poolofs;

	for (i = 0; i < nbuffers; i++) {
		ptnmd->buf_lut.lut[i].vaddr = vaddr;
		vaddr += bufsize;
		paddr += bufsize;
	}

	ptnmd->buf_lut.objtotal = nbuffers;
	ptnmd->buf_lut.objsize = bufsize;
	nmd->nm_totalsize = mem_size;

	/* Initialize these fields as they are needed by
	 * netmap_mem_bufsize().
	 * XXX please improve this, why do we need this
	 * replication? maybe nmd->pools[] should not be
	 * there for the guest allocator? */
	nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
	nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;

	nmd->flags |= NETMAP_MEM_FINALIZED;
out:
	return error;
}
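
/*
 * Illustrative note: after finalize, guest buffer i lives at
 *
 *	vaddr = nm_addr + poolofs + (uint64_t)i * bufsize;
 *	paddr = nm_paddr + poolofs + (uint64_t)i * bufsize;
 *
 * Only the virtual address is cached in buf_lut above; the physical
 * side is presumably left to netmap_mem_pt_guest_ofstophys(), which
 * translates offsets within the contiguous BAR mapping on demand.
 */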

static void
netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	if (nmd->active == 1 &&
	    (nmd->flags & NETMAP_MEM_FINALIZED)) {
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
		/* unmap ptnetmap-memdev memory */
		if (ptnmd->ptn_dev) {
			nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
		}
		ptnmd->nm_addr = NULL;
		ptnmd->nm_paddr = 0;
	}
}

static ssize_t
netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
}

static void
netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	if (netmap_verbose)
		nm_prinf("deleting %p", nmd);
	if (nmd->active > 0)
		nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
	if (netmap_verbose)
		nm_prinf("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	nm_os_free(nmd);
}

static struct netmap_if *
netmap_mem_pt_guest_if_new(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp = NULL;

	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
		goto out;
	}

	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
				    ptif->nifp_offset);
out:
	return nifp;
}

static void
netmap_mem_pt_guest_if_delete(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_if *nifp)
{
	struct mem_pt_if *ptif;

	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
	}
}

static int
netmap_mem_pt_guest_rings_create(struct netmap_mem_d *nmd,
				 struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp;
	int i, error = -1;

	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
		goto out;
	}

	/* point each kring to the corresponding backend ring */
	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
	for (i = 0; i < netmap_all_rings(na, NR_TX); i++) {
		struct netmap_kring *kring = na->tx_rings[i];
		if (kring->ring)
			continue;
		kring->ring = (struct netmap_ring *)
			((char *)nifp + nifp->ring_ofs[i]);
	}
	for (i = 0; i < netmap_all_rings(na, NR_RX); i++) {
		struct netmap_kring *kring = na->rx_rings[i];
		if (kring->ring)
			continue;
		kring->ring = (struct netmap_ring *)
			((char *)nifp +
			 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);
	}

	error = 0;
out:
	return error;
}

static void
netmap_mem_pt_guest_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
#if 0
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
			struct netmap_kring *kring = &NMR(na, t)[i];

			kring->ring = NULL;
		}
	}
#endif
	(void)nmd;
	(void)na;
}

static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
	.nmd_get_lut = netmap_mem_pt_guest_get_lut,
	.nmd_get_info = netmap_mem_pt_guest_get_info,
	.nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
	.nmd_config = netmap_mem_pt_guest_config,
	.nmd_finalize = netmap_mem_pt_guest_finalize,
	.nmd_deref = netmap_mem_pt_guest_deref,
	.nmd_if_offset = netmap_mem_pt_guest_if_offset,
	.nmd_delete = netmap_mem_pt_guest_delete,
	.nmd_if_new = netmap_mem_pt_guest_if_new,
	.nmd_if_delete = netmap_mem_pt_guest_if_delete,
	.nmd_rings_create = netmap_mem_pt_guest_rings_create,
	.nmd_rings_delete = netmap_mem_pt_guest_rings_delete
};

/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
{
	struct netmap_mem_d *mem = NULL;
	struct netmap_mem_d *scan = netmap_last_mem_d;

	do {
		/* find ptnetmap allocator through host ID */
		if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
			((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
			mem = scan;
			mem->refcount++;
			NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
			break;
		}
		scan = scan->next;
	} while (scan != netmap_last_mem_d);

	return mem;
}

/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_create(nm_memid_t mem_id)
{
	struct netmap_mem_ptg *ptnmd;
	int err = 0;

	ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
	if (ptnmd == NULL) {
		err = ENOMEM;
		goto error;
	}

	ptnmd->up.ops = &netmap_mem_pt_guest_ops;
	ptnmd->host_mem_id = mem_id;
	ptnmd->pt_ifs = NULL;

	/* Assign a new id in the guest (we have the lock) */
	err = nm_mem_assign_id_locked(&ptnmd->up, -1, -1);
	if (err)
		goto error;

	ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
	ptnmd->up.flags |= NETMAP_MEM_IO;

	NMA_LOCK_INIT(&ptnmd->up);

	snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);

	return &ptnmd->up;
error:
	netmap_mem_pt_guest_delete(&ptnmd->up);
	return NULL;
}

/*
 * find host id in guest allocators and create guest allocator
 * if it is not there
 */
static struct netmap_mem_d *
netmap_mem_pt_guest_get(nm_memid_t mem_id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_mem_pt_guest_find_memid(mem_id);
	if (nmd == NULL) {
		nmd = netmap_mem_pt_guest_create(mem_id);
	}
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return nmd;
}

/*
 * The guest allocator can be created by ptnetmap_memdev (during the device
 * attach) or by the ptnetmap device (ptnet), during netmap_attach.
 *
 * The order is not important (we have different orders in Linux and FreeBSD).
 * The first one creates the allocator, and the second one simply attaches
 * to it.
 */

/* Called when ptnetmap_memdev is attaching, to attach a new allocator in
 * the guest */
struct netmap_mem_d *
netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
{
	struct netmap_mem_d *nmd;
	struct netmap_mem_ptg *ptnmd;

	nmd = netmap_mem_pt_guest_get(mem_id);

	/* assign this device to the guest allocator */
	if (nmd) {
		ptnmd = (struct netmap_mem_ptg *)nmd;
		ptnmd->ptn_dev = ptn_dev;
	}

	return nmd;
}

/* Called when ptnet device is attaching */
struct netmap_mem_d *
netmap_mem_pt_guest_new(if_t ifp,
			unsigned int nifp_offset,
			unsigned int memid)
{
	struct netmap_mem_d *nmd;

	if (ifp == NULL) {
		return NULL;
	}

	nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);

	if (nmd) {
		netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
	}

	return nmd;
}
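
/*
 * Illustrative note: the two entry points above form a find-or-create
 * pair keyed on the host memory id. In whatever order they run
 * (sketch):
 *
 *	nmd = netmap_mem_pt_guest_attach(ptn_dev, id);	// memdev attach
 *	nmd = netmap_mem_pt_guest_new(ifp, ofs, id);	// ptnet attach
 *
 * both resolve to the same allocator, so the memdev BAR mapping and
 * the passthrough interface list end up on a single netmap_mem_ptg.
 */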

#endif /* WITH_PTNETMAP */