/*
 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap_mem2.c 10830 2012-03-22 18:06:01Z luigi $
 *
 * New memory allocator for netmap
 */

/*
 * The new version allocates three regions:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * All regions need to be page-sized as we export them to
 * userspace through mmap. Only the latter needs to be dma-able,
 * but for convenience we use the same type of allocator for all.
 *
 * Once mapped, the three regions are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
 *
 * Allocator for a pool of memory objects of the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * To be polite with the cache, objects are aligned to
 * the cache line, or 64 bytes. Sizes are rounded to multiples of 64.
 * For each object we have
 * one entry in the bitmap to signal the state. Allocation scans
 * the bitmap, but since this is done only on attach, we are not
 * too worried about performance.
 */
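
/*
 * Worked layout example (sizes are illustrative, not the driver's
 * actual defaults): if the three pools take 512 KiB, 8 MiB and 200 MiB
 * respectively, userspace mmap()s one contiguous ~208.5 MiB region;
 * offsets below 512 KiB fall in the if pool, offsets up to
 * 512 KiB + 8 MiB in the ring pool, and everything beyond that in the
 * buffer pool.
 */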

/*
 *	MEMORY SIZES:
 *
 * (all the parameters below will become tunables)
 *
 * struct netmap_if is variable size but small.
 * Assuming each NIC has 8+2 rings (4+1 tx, 4+1 rx), the netmap_if
 * uses 120 bytes on a 64-bit machine.
 * We allocate NETMAP_IF_MAX_SIZE (1024) which should work even for
 * cards with 48 ring pairs.
 * The total number of 'struct netmap_if' could be slightly larger
 * than the total number of rings on all interfaces on the system.
 */
#define NETMAP_IF_MAX_SIZE      1024
#define NETMAP_IF_MAX_NUM       512

/*
 * netmap rings are up to 2..4k descriptors, 8 bytes each,
 * plus some glue at the beginning (32 bytes).
 * We set the default ring size to 9 pages (36K) and enable
 * a few hundred of them.
 */
#define NETMAP_RING_MAX_SIZE    (9*PAGE_SIZE)
#define NETMAP_RING_MAX_NUM     200	/* approx 8MB */

/*
 * Buffers: the more the better. Buffer size is NETMAP_BUF_SIZE,
 * 2k or slightly less, aligned to 64 bytes.
 * A large 10G interface can have 2k*18 = 36k buffers per interface,
 * or about 72MB of memory. Up to us to use more.
 */
#ifndef CONSERVATIVE
#define NETMAP_BUF_MAX_NUM      100000  /* 200MB */
#else /* CONSERVATIVE */
#define NETMAP_BUF_MAX_NUM      20000   /* 40MB */
#endif


struct netmap_obj_pool {
	char name[16];		/* name of the allocator */
	u_int objtotal;		/* actual total number of objects */
	u_int objfree;		/* number of free objects */
	u_int clustentries;	/* actual objects per cluster */

	/* the total memory space is _numclusters*_clustsize */
	u_int _numclusters;	/* how many clusters */
	u_int _clustsize;	/* cluster size */
	u_int _objsize;		/* actual object size */

	u_int _memtotal;	/* _numclusters*_clustsize */
	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
};
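
/*
 * Illustrative sketch (compiled out; not driver code): the bitmap
 * convention used throughout this file is that object j is tracked by
 * bit (j % 32) of word bitmap[j / 32], with a set bit meaning "free".
 * A hypothetical helper would read:
 */
#if 0
static inline int
example_obj_is_free(const struct netmap_obj_pool *p, uint32_t j)
{
	return (p->bitmap[j / 32] & (1U << (j % 32))) != 0;
}
#endif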

struct netmap_mem_d {
	NM_LOCK_T nm_mtx;	/* protects the allocator */
	u_int nm_totalsize;	/* shorthand */

	/* pointers to the three allocators */
	struct netmap_obj_pool *nm_if_pool;
	struct netmap_obj_pool *nm_ring_pool;
	struct netmap_obj_pool *nm_buf_pool;
};

struct lut_entry *netmap_buffer_lut;	/* exported */


/*
 * Convert a userspace offset to a physical address.
 * XXX re-do in a simpler way.
 *
 * The idea here is to hide from userspace applications the fact that
 * pre-allocated memory is not contiguous, but fragmented across different
 * clusters and smaller memory allocators. Consequently, we first need to
 * find which allocator owns the provided offset, then find the physical
 * address associated with the target page (this is done using the
 * look-up table).
 */
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
	const struct netmap_obj_pool *p[] = {
		nm_mem->nm_if_pool,
		nm_mem->nm_ring_pool,
		nm_mem->nm_buf_pool };
	int i;
	vm_offset_t o = offset;

	for (i = 0; i < 3; offset -= p[i]->_memtotal, i++) {
		if (offset >= p[i]->_memtotal)
			continue;
		// XXX now scan the clusters
		return p[i]->lut[offset / p[i]->_objsize].paddr +
			offset % p[i]->_objsize;
	}
	D("invalid ofs 0x%jx out of 0x%x 0x%x 0x%x", (uintmax_t)o,
		p[0]->_memtotal, p[0]->_memtotal + p[1]->_memtotal,
		p[0]->_memtotal + p[1]->_memtotal + p[2]->_memtotal);
	return 0;	// XXX bad address
}
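
/*
 * Resolution example (made-up sizes): with a 512 KiB if pool and an
 * 8 MiB ring pool, offset 0x90000 (576 KiB) exceeds the if pool, so
 * the loop retries with 576 KiB - 512 KiB = 64 KiB inside the ring
 * pool and returns lut[64K / _objsize].paddr plus the remainder.
 */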

/*
 * We store objects by kernel address; we need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
 */
ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %zd (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v)					\
	netmap_obj_offset(nm_mem->nm_if_pool, (v))

#define netmap_ring_offset(v)					\
    (nm_mem->nm_if_pool->_memtotal +				\
	netmap_obj_offset(nm_mem->nm_ring_pool, (v)))

#define netmap_buf_offset(v)					\
    (nm_mem->nm_if_pool->_memtotal +				\
	nm_mem->nm_ring_pool->_memtotal +			\
	netmap_obj_offset(nm_mem->nm_buf_pool, (v)))
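
/*
 * Example (illustrative numbers): a ring placed 36 KiB into the ring
 * pool by netmap_obj_offset(), with a 512 KiB if pool in front of it,
 * is exported at global offset 512 KiB + 36 KiB = 548 KiB of the
 * mmap()ed region.
 */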


static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("%s allocator: out of memory", p->name);
		return NULL;
	}

	/* termination is guaranteed by p->objfree */
	while (vaddr == NULL) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate the first free slot in this word */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	return vaddr;
}


/*
 * free by index, not by address
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	int i, j, n = p->_memtotal / p->_clustsize;

	for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
		void *base = p->lut[i * p->clustentries].vaddr;
		ssize_t relofs = (char *) vaddr - (char *) base;

		/* the given address is out of the scope of this cluster */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		KASSERT(j != 0, ("Cannot free object 0"));
		netmap_obj_free(p, j);
		return;
	}
	ND("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(len)	netmap_obj_malloc(nm_mem->nm_if_pool, len)
#define netmap_if_free(v)	netmap_obj_free_va(nm_mem->nm_if_pool, (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(nm_mem->nm_ring_pool, len)
#define netmap_buf_malloc()			\
	netmap_obj_malloc(nm_mem->nm_buf_pool, NETMAP_BUF_SIZE)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v)						\
    (netmap_obj_offset(nm_mem->nm_buf_pool, (v)) / nm_mem->nm_buf_pool->_objsize)


static void
netmap_new_bufs(struct netmap_if *nifp __unused,
                struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
	uint32_t i = 0;	/* slot counter */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc();
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}

		slot[i].buf_idx = netmap_buf_index(vaddr);
		KASSERT(slot[i].buf_idx != 0,
		    ("Assigning buf_idx=0 to just created slot"));
		slot[i].len = p->_objsize;
		slot[i].flags = NS_BUF_CHANGED; // XXX GAETANO hack
	}

	ND("allocated %d buffers, %d available", n, p->objfree);
	return;

cleanup:
	/* i is unsigned: count down without underflowing past zero */
	while (i > 0) {
		i--;
		netmap_obj_free(nm_mem->nm_buf_pool, slot[i].buf_idx);
	}
}


static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(nm_mem->nm_buf_pool, i);
}


/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	if (p->lut) {
		int i;

		for (i = 0; i < p->objtotal; i += p->clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		free(p->lut, M_NETMAP);
	}
	bzero(p, sizeof(*p));
	free(p, M_NETMAP);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters, each a multiple of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 *	so we cannot afford gaps at the end of a cluster.
 */
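
/*
 * Rounding example (illustrative figures): a request for 100000
 * objects at 64 objects per cluster gives
 * n = ceil(100000/64) = 1563 clusters, hence objtotal' = 1563 * 64 =
 * 100032 objects.
 */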
static struct netmap_obj_pool *
netmap_new_obj_allocator(const char *name, u_int objtotal, u_int objsize)
{
	struct netmap_obj_pool *p;
	int i, n;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return NULL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	/*
	 * Compute the number of objects per cluster using a brute-force
	 * approach: given a max cluster size, we try to fill it with
	 * objects, keeping track of the wasted space to the next page
	 * boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
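	/*
	 * Worked example (illustrative): with PAGE_SIZE 4096 and
	 * objsize 2048, i = 2 gives delta == 0, so a cluster is a
	 * single page holding two objects.  With objsize 1088 the
	 * first exact fit is i = 64, i.e. a 17-page cluster with no
	 * wasted space.
	 */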
	// D("XXX --- ouch, delta %d (bad for buffers)");
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	D("objsize %d clustsize %d objects %d",
		objsize, clustsize, clustentries);

	p = malloc(sizeof(struct netmap_obj_pool), M_NETMAP,
	    M_WAITOK | M_ZERO);
	if (p == NULL) {
		D("Unable to create '%s' allocator", name);
		return NULL;
	}
	/*
	 * Allocate and initialize the lookup table.
	 *
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	strncpy(p->name, name, sizeof(p->name));
	p->name[sizeof(p->name) - 1] = '\0'; /* strncpy may not terminate */
	p->clustentries = clustentries;
	p->_clustsize = clustsize;
	n = (objtotal + clustentries - 1) / clustentries;
	p->_numclusters = n;
	p->objtotal = n * clustentries;
	p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
	p->_objsize = objsize;
	p->_memtotal = p->_numclusters * p->_clustsize;

	p->lut = malloc(sizeof(struct lut_entry) * p->objtotal,
	    M_NETMAP, M_WAITOK | M_ZERO);
	if (p->lut == NULL) {
		D("Unable to create lookup table for '%s' allocator", name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", n,
		    name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	for (i = 0; i < p->objtotal;) {
		int lim = i + clustentries;
		char *clust;

		clust = contigmalloc(clustsize, M_NETMAP, M_WAITOK | M_ZERO,
		    0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, name);
			lim = i / 2;
			for (; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
				if (i % clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						p->_clustsize, M_NETMAP);
			}
			p->objtotal = i;
			p->objfree = p->objtotal - 2;
			p->_numclusters = i / clustentries;
			p->_memtotal = p->_numclusters * p->_clustsize;
			break;
		}
		for (; i < lim; i++, clust += objsize) {
			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
	D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
	    p->_numclusters, p->_clustsize >> 10,
	    p->_memtotal >> 10, name);

	return p;

clean:
	netmap_destroy_obj_allocator(p);
	return NULL;
}

static int
netmap_memory_init(void)
{
	struct netmap_obj_pool *p;

	nm_mem = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
	    M_WAITOK | M_ZERO);
	if (nm_mem == NULL)
		goto clean;

	p = netmap_new_obj_allocator("netmap_if",
	    NETMAP_IF_MAX_NUM, NETMAP_IF_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_if_pool = p;

	p = netmap_new_obj_allocator("netmap_ring",
	    NETMAP_RING_MAX_NUM, NETMAP_RING_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_ring_pool = p;

	p = netmap_new_obj_allocator("netmap_buf",
	    NETMAP_BUF_MAX_NUM, NETMAP_BUF_SIZE);
	if (p == NULL)
		goto clean;
	netmap_total_buffers = p->objtotal;
	netmap_buffer_lut = p->lut;
	nm_mem->nm_buf_pool = p;
	netmap_buffer_base = p->lut[0].vaddr;

	mtx_init(&nm_mem->nm_mtx, "netmap memory allocator lock", NULL,
	    MTX_DEF);
	nm_mem->nm_totalsize =
	    nm_mem->nm_if_pool->_memtotal +
	    nm_mem->nm_ring_pool->_memtotal +
	    nm_mem->nm_buf_pool->_memtotal;

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nm_mem->nm_if_pool->_memtotal >> 10,
	    nm_mem->nm_ring_pool->_memtotal >> 10,
	    nm_mem->nm_buf_pool->_memtotal >> 20);
	return 0;

clean:
	if (nm_mem) {
		netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
		netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
		free(nm_mem, M_NETMAP);
	}
	return ENOMEM;
}


static void
netmap_memory_fini(void)
{
	if (!nm_mem)
		return;
	netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_buf_pool);
	mtx_destroy(&nm_mem->nm_mtx);
	free(nm_mem, M_NETMAP);
}


static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc;
	u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	struct netmap_kring *kring;

	NMA_LOCK();
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL) {
		NMA_UNLOCK();
		return NULL;
	}

	/* initialize base fields -- override const */
	*(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1) { /* already set up, we are done */
		NMA_UNLOCK();
		goto final;
	}

	/*
	 * First instance: allocate netmap rings and buffers for this card.
	 * The rings are contiguous, but have variable size.
	 */
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem->nm_if_pool->_memtotal +
			nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for txring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p", i, ring);

		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem->nm_if_pool->_memtotal +
		        nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for rxring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}
	NMA_UNLOCK();
#ifdef linux
	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->rx_si);
	init_waitqueue_head(&na->tx_si);
#endif
final:
	/*
	 * Fill the ring_ofs slots for the tx and rx rings. Each entry
	 * contains the offset of the ring relative to nifp, so the
	 * information is usable in userspace to reach the ring from
	 * the nifp.
	 */
	base = netmap_if_offset(nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->rx_rings[i].ring) - base;
	}
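	/*
	 * Userspace can then locate ring i as
	 *	(struct netmap_ring *)((char *)nifp + nifp->ring_ofs[i]);
	 * cf. the NETMAP_TXRING()/NETMAP_RXRING() macros in netmap_user.h.
	 */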
	return (nifp);
cleanup:
	// XXX missing
	NMA_UNLOCK();
	return NULL;
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	int i;

	for (i = 0; i < na->num_tx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
			na->tx_rings[i].ring);
	for (i = 0; i < na->num_rx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
			na->rx_rings[i].ring);
}