/*
 * Copyright (C) 2013-2016 Vincenzo Maffione
 * Copyright (C) 2013-2016 Luigi Rizzo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input)
 *	on the receive path and put them in the mbq from which
 *	netmap receive routines can grab them.
 *
 * TX:
 *	in the generic_txsync() routine, netmap buffers are copied
 *	(or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have the refcount=1, others have refcount=2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	intercepted mbufs are queued in the per-ring mbq by
 *	generic_rx_handler(); generic_netmap_rxsync() then copies
 *	their payload into the netmap receive ring.
 */
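
/*
 * TX-notification timeline (illustrative sketch, not compiled code).
 * A pool mbuf normally keeps an extra reference, so the driver's
 * m_freem() only drops the refcount and txsync can recycle the buffer;
 * an "event" mbuf is handed out with refcount 1, so the driver's final
 * m_freem() fires the destructor, which raises the emulated interrupt:
 *
 *	normal slot:	refcount 2 -> driver m_freem() -> refcount 1
 *			(mbuf recycled by generic_netmap_tx_clean())
 *	event slot:	refcount 1 -> driver m_freem() -> refcount 0
 *			-> generic_mbuf_destructor()
 *			-> netmap_generic_irq() -> wake up poll()ers
 */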

#ifdef __FreeBSD__

#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#define rtnl_lock()	ND("rtnl_lock called")
#define rtnl_unlock()	ND("rtnl_unlock called")
#define MBUF_RXQ(m)	((m)->m_pkthdr.flowid)
#define smp_mb()

/*
 * FreeBSD mbuf allocator/deallocator in emulation mode:
 */
#if __FreeBSD_version < 1100000

/*
 * For older versions of FreeBSD:
 *
 * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE
 * so that the destructor, if invoked, will not free the packet.
 * In principle we should set the destructor only on demand,
 * but since there might be a race it is safer to do it at
 * allocation time. As a consequence, we also need to set the
 * destructor, or we would leak buffers.
 */

/* mbuf destructor: we also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (void *)fn;	\
	(m)->m_ext.ext_type = EXT_EXTREF;	\
} while (0)

static int
void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2)
{
	/* restore original mbuf */
	m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
	m->m_ext.ext_arg1 = NULL;
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	if (MBUF_REFCNT(m) == 0)
		SET_MBUF_REFCNT(m, 1);
	uma_zfree(zone_pack, m);

	return 0;
}

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m) {
		/* m_getcl() (mb_ctor_mbuf) has an assert that checks that
		 * the M_NOFREE flag is not specified as third argument,
		 * so we have to set M_NOFREE after m_getcl(). */
		m->m_flags |= M_NOFREE;
		m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
		m->m_ext.ext_free = (void *)void_mbuf_dtor;
		m->m_ext.ext_type = EXT_EXTREF;
		ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
	}
	return m;
}
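
/*
 * Recycling sketch (illustrative, not compiled): while the refcount is
 * artificially held above zero, an m_freem() from the driver only
 * drops the count and the mbuf can be reused by txsync. When the mbuf
 * must really go away, void_mbuf_dtor() restores the original
 * EXT_PACKET state and returns it to zone_pack; this is also how
 * generic_mbuf_destructor() below finally releases the event mbuf:
 *
 *	struct mbuf *m = nm_os_get_mbuf(ifp, len);
 *	... reuse m across several transmissions ...
 *	void_mbuf_dtor(m, NULL, NULL);	// restore and uma_zfree()
 */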

#else /* __FreeBSD_version >= 1100000 */

/*
 * Newer versions of FreeBSD, using a straightforward scheme.
 *
 * We allocate mbufs with m_gethdr(), since the mbuf header is needed
 * by the driver. We also attach a custom external storage, which in
 * this case is a netmap buffer. When calling m_extadd(), however, we
 * pass a NULL address, since the real address (and length) will be
 * filled in by nm_os_generic_xmit_frame() right before calling
 * if_transmit().
 *
 * The dtor function does nothing, however we need it since mb_free_ext()
 * has a KASSERT(), checking that the mbuf dtor function is not NULL.
 */

static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { }

#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (fn != NULL) ?		\
	    (void *)fn : (void *)void_mbuf_dtor;	\
} while (0)

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	(void)len;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return m;
	}

	m_extadd(m, NULL /* buf */, 0 /* size */, void_mbuf_dtor,
		 NULL, NULL, 0, EXT_NET_DRV);

	return m;
}
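
/*
 * Intended use (a minimal sketch; the real buffer attach happens in
 * nm_os_generic_xmit_frame(), implemented elsewhere, and may differ
 * in detail; netmap_buf_address and len are placeholders):
 *
 *	m = nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
 *	// right before if_transmit(), point the external storage
 *	// at the netmap buffer and set the length:
 *	m->m_ext.ext_buf = m->m_data = netmap_buf_address;
 *	m->m_len = m->m_pkthdr.len = len;
 *	if_transmit(ifp, m);
 */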

#endif /* __FreeBSD_version >= 1100000 */

#elif defined _WIN32

#include "win_glue.h"

#define rtnl_lock()	ND("rtnl_lock called")
#define rtnl_unlock()	ND("rtnl_unlock called")
#define MBUF_TXQ(m)	0	//((m)->m_pkthdr.flowid)
#define MBUF_RXQ(m)	0	//((m)->m_pkthdr.flowid)
#define smp_mb()		//XXX: to be correctly defined

#else /* linux */

#include "bsd_glue.h"

#include <linux/rtnetlink.h>    /* rtnl_[un]lock() */
#include <linux/ethtool.h>      /* struct ethtool_ops, get_ringparam */
#include <linux/hrtimer.h>

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	return alloc_skb(ifp->needed_headroom + len +
			 ifp->needed_tailroom, GFP_ATOMIC);
}

#endif /* linux */


/* Common headers. */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


#define for_each_kring_n(_i, _k, _karr, _n) \
	for (_k=_karr, _i = 0; _i < _n; (_k)++, (_i)++)

#define for_each_tx_kring(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings)
#define for_each_tx_kring_h(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1)

#define for_each_rx_kring(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings)
#define for_each_rx_kring_h(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1)
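
/*
 * Example use of the iteration macros (a minimal sketch): the plain
 * variants walk the hardware rings only, while the _h variants also
 * visit the host ring (hence the "+ 1"):
 *
 *	struct netmap_kring *kring;
 *	int r;
 *
 *	for_each_tx_kring(r, kring, na) {
 *		kring->tx_pool = NULL;		// hardware TX rings only
 *	}
 *	for_each_rx_kring_h(r, kring, na) {
 *		...				// RX rings plus host ring
 *	}
 */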


/* ======================== PERFORMANCE STATISTICS =========================== */

#ifdef RATE_GENERIC
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long txrepl;
	unsigned long txdrop;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
	struct rate_context * ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(txrepl);
	RATE_PRINTK(txdrop);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi)
{
	if (txp) rate_ctx.new.txpkt++;
	if (txs) rate_ctx.new.txsync++;
	if (txi) rate_ctx.new.txirq++;
	if (rxp) rate_ctx.new.rxpkt++;
	if (rxs) rate_ctx.new.rxsync++;
	if (rxi) rate_ctx.new.rxirq++;
}

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* ========== GENERIC (EMULATED) NETMAP ADAPTER SUPPORT ============= */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only NAF_NETMAP_ON instead of NAF_NATIVE_ON before
 * processing the irq.
 */
void
netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
{
	if (unlikely(!nm_netmap_on(na)))
		return;

	netmap_common_irq(na, q, work_done);
#ifdef RATE_GENERIC
	if (work_done)
		rate_ctx.new.rxirq++;
	else
		rate_ctx.new.txirq++;
#endif  /* RATE_GENERIC */
}

static int
generic_netmap_unregister(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int i, r;

	if (na->active_fds == 0) {
		rtnl_lock();

		na->na_flags &= ~NAF_NETMAP_ON;

		/* Release packet steering control. */
		nm_os_catch_tx(gna, 0);

		/* Stop intercepting packets on the RX path. */
		nm_os_catch_rx(gna, 0);

		rtnl_unlock();
	}

	for_each_rx_kring_h(r, kring, na) {
		if (nm_kring_pending_off(kring)) {
			D("Emulated adapter: ring '%s' deactivated", kring->name);
			kring->nr_mode = NKR_NETMAP_OFF;
		}
	}
	for_each_tx_kring_h(r, kring, na) {
		if (nm_kring_pending_off(kring)) {
			kring->nr_mode = NKR_NETMAP_OFF;
			D("Emulated adapter: ring '%s' deactivated", kring->name);
		}
	}

	for_each_rx_kring(r, kring, na) {
		/* Free the mbufs still pending in the RX queues,
		 * which did not end up in the corresponding netmap
		 * RX rings. */
		mbq_safe_purge(&kring->rx_queue);
		nm_os_mitigation_cleanup(&gna->mit[r]);
	}

	/* Decrement the reference counter for the mbufs in the
	 * TX pools. These mbufs can still be pending in drivers
	 * (e.g. this happens with the virtio-net driver, which
	 * does lazy reclaiming of transmitted mbufs). */
	for_each_tx_kring(r, kring, na) {
		/* We must remove the destructor on the TX event,
		 * because the destructor invokes netmap code, and
		 * the netmap module may disappear before the
		 * TX event is consumed. */
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event) {
			SET_MBUF_DESTRUCTOR(kring->tx_event, NULL);
		}
		kring->tx_event = NULL;
		mtx_unlock_spin(&kring->tx_event_lock);
	}

	if (na->active_fds == 0) {
		nm_os_free(gna->mit);

		for_each_rx_kring(r, kring, na) {
			mbq_safe_fini(&kring->rx_queue);
		}

		for_each_tx_kring(r, kring, na) {
			mtx_destroy(&kring->tx_event_lock);
			if (kring->tx_pool == NULL) {
				continue;
			}

			for (i=0; i<na->num_tx_desc; i++) {
				if (kring->tx_pool[i]) {
					m_freem(kring->tx_pool[i]);
				}
			}
			nm_os_free(kring->tx_pool);
			kring->tx_pool = NULL;
		}

#ifdef RATE_GENERIC
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
		D("Emulated adapter for %s deactivated", na->name);
	}

	return 0;
}

/* Enable/disable netmap mode for a generic network interface. */
static int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int error;
	int i, r;

	if (!na) {
		return EINVAL;
	}

	if (!enable) {
		/* This is actually an unregif. */
		return generic_netmap_unregister(na);
	}

	if (na->active_fds == 0) {
		D("Emulated adapter for %s activated", na->name);
		/* Do all memory allocations when (na->active_fds == 0), to
		 * simplify error management. */

		/* Allocate memory for mitigation support on all the rx queues. */
		gna->mit = nm_os_malloc(na->num_rx_rings * sizeof(struct nm_generic_mit));
		if (!gna->mit) {
			D("mitigation allocation failed");
			error = ENOMEM;
			goto out;
		}

		for_each_rx_kring(r, kring, na) {
			/* Init mitigation support. */
			nm_os_mitigation_init(&gna->mit[r], r, na);

			/* Initialize the rx queue, as generic_rx_handler() can
			 * be called as soon as nm_os_catch_rx() returns.
			 */
			mbq_safe_init(&kring->rx_queue);
		}

		/*
		 * Prepare mbuf pools (parallel to the tx rings) for packet
		 * transmission. Don't preallocate the mbufs here, it's simpler
		 * to leave this task to txsync.
		 */
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool = NULL;
		}
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool =
				nm_os_malloc(na->num_tx_desc * sizeof(struct mbuf *));
			if (!kring->tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pools;
			}
			mtx_init(&kring->tx_event_lock, "tx_event_lock",
				 NULL, MTX_SPIN);
		}
	}

	for_each_rx_kring_h(r, kring, na) {
		if (nm_kring_pending_on(kring)) {
			D("Emulated adapter: ring '%s' activated", kring->name);
			kring->nr_mode = NKR_NETMAP_ON;
		}
	}
	for_each_tx_kring_h(r, kring, na) {
		if (nm_kring_pending_on(kring)) {
			D("Emulated adapter: ring '%s' activated", kring->name);
			kring->nr_mode = NKR_NETMAP_ON;
		}
	}

	for_each_tx_kring(r, kring, na) {
		/* Initialize tx_pool and tx_event. */
		for (i=0; i<na->num_tx_desc; i++) {
			kring->tx_pool[i] = NULL;
		}

		kring->tx_event = NULL;
	}

	if (na->active_fds == 0) {
		rtnl_lock();

		/* Prepare to intercept incoming traffic. */
		error = nm_os_catch_rx(gna, 1);
		if (error) {
			D("nm_os_catch_rx(1) failed (%d)", error);
			goto register_handler;
		}

		/* Make netmap control the packet steering. */
		error = nm_os_catch_tx(gna, 1);
		if (error) {
			D("nm_os_catch_tx(1) failed (%d)", error);
			goto catch_rx;
		}

		rtnl_unlock();

		na->na_flags |= NAF_NETMAP_ON;

#ifdef RATE_GENERIC
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */
	}

	return 0;

	/* Here (na->active_fds == 0) holds. */
catch_rx:
	nm_os_catch_rx(gna, 0);
register_handler:
	rtnl_unlock();
free_tx_pools:
	for_each_tx_kring(r, kring, na) {
		mtx_destroy(&kring->tx_event_lock);
		if (kring->tx_pool == NULL) {
			continue;
		}
		nm_os_free(kring->tx_pool);
		kring->tx_pool = NULL;
	}
	for_each_rx_kring(r, kring, na) {
		mbq_safe_fini(&kring->rx_queue);
	}
	nm_os_free(gna->mit);
out:

	return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	struct netmap_adapter *na = NA(GEN_TX_MBUF_IFP(m));
	struct netmap_kring *kring;
	unsigned int r = MBUF_TXQ(m);
	unsigned int r_orig = r;

	if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) {
		D("Error: no netmap adapter on device %p",
		  GEN_TX_MBUF_IFP(m));
		return;
	}

	/*
	 * First, clear the event mbuf.
	 * In principle, the event 'm' should match the one stored
	 * on ring 'r'. However we check it explicitly to stay
	 * safe against lower layers (qdisc, driver, etc.) changing
	 * MBUF_TXQ(m) under our feet. If the match is not found
	 * on 'r', we try to see if it belongs to some other ring.
	 */
	for (;;) {
		bool match = false;

		kring = &na->tx_rings[r];
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event == m) {
			kring->tx_event = NULL;
			match = true;
		}
		mtx_unlock_spin(&kring->tx_event_lock);

		if (match) {
			if (r != r_orig) {
				RD(1, "event %p migrated: ring %u --> %u",
				      m, r_orig, r);
			}
			break;
		}

		if (++r == na->num_tx_rings) r = 0;

		if (r == r_orig) {
			RD(1, "Cannot match event %p", m);
			return;
		}
	}

	/* Second, wake up clients. They will reclaim the event through
	 * txsync. */
	netmap_generic_irq(na, r, NULL);
#ifdef __FreeBSD__
	void_mbuf_dtor(m, NULL, NULL);
#endif
}

/* Record completed transmissions and update hwtail.
 *
 * The oldest tx buffer not yet completed is at nr_hwtail + 1,
 * nr_hwcur is the first unsent buffer.
 */
static u_int
generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i = nm_next(kring->nr_hwtail, lim);
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	ND("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail);

	while (nm_i != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[nm_i];

		if (txqdisc) {
			if (m == NULL) {
				/* Nothing to do, this is going
				 * to be replenished. */
				RD(3, "Is this happening?");

			} else if (MBUF_QUEUED(m)) {
				break; /* Not dequeued yet. */

			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf has been dequeued but is still busy
				 * (refcount is 2).
				 * Leave it to the driver and replenish. */
				m_freem(m);
				tx_pool[nm_i] = NULL;
			}

		} else {
			if (unlikely(m == NULL)) {
				int event_consumed;

				/* This slot was used to place an event. */
				mtx_lock_spin(&kring->tx_event_lock);
				event_consumed = (kring->tx_event == NULL);
				mtx_unlock_spin(&kring->tx_event_lock);
				if (!event_consumed) {
					/* The event has not been consumed yet,
					 * still busy in the driver. */
					break;
				}
				/* The event has been consumed, we can go
				 * ahead. */

			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf is still busy: its refcnt is 2. */
				break;
			}
		}

		n++;
		nm_i = nm_next(nm_i, lim);
	}
	kring->nr_hwtail = nm_prev(nm_i, lim);
	ND("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);

	return n;
}

/* Compute a slot index in the middle between inf and sup. */
static inline u_int
ring_middle(u_int inf, u_int sup, u_int lim)
{
	u_int n = lim + 1;
	u_int e;

	if (sup >= inf) {
		e = (sup + inf) / 2;
	} else { /* wrap around */
		e = (sup + n + inf) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}
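
/*
 * Worked example: with lim = 7 (8 slots), inf = 6 and sup = 2 the
 * interval wraps around, covering slots 6, 7, 0, 1, 2. Then n = 8 and
 * e = (2 + 8 + 6) / 2 = 8, which is folded back to e = 0, the middle
 * slot of the interval.
 */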

static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	u_int lim = kring->nkr_num_slots - 1;
	struct mbuf *m;
	u_int e;
	u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */

	if (ntc == hwcur) {
		return; /* all buffers are free */
	}

	/*
	 * We have pending packets in the driver between hwtail+1
	 * and hwcur, and we have to choose one of these slots to
	 * generate a notification.
	 * There is a race but this is only called within txsync which
	 * does a double check.
	 */
#if 0
	/* Choose a slot in the middle, so that we don't risk ending
	 * up in a situation where the client continuously wakes up,
	 * fills one or a few TX slots and goes to sleep again. */
	e = ring_middle(ntc, hwcur, lim);
#else
	/* Choose the first pending slot, to be safe against driver
	 * reordering mbuf transmissions. */
	e = ntc;
#endif

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* An event is already in place. */
		return;
	}

	mtx_lock_spin(&kring->tx_event_lock);
	if (kring->tx_event) {
		/* An event is already in place. */
		mtx_unlock_spin(&kring->tx_event_lock);
		return;
	}

	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
	kring->tx_event = m;
	mtx_unlock_spin(&kring->tx_event_lock);

	kring->tx_pool[e] = NULL;

	ND(5, "Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2);

	/* Decrement the refcount. This will free it if we lose the race
	 * with the driver. */
	m_freem(m);
	smp_mb();
}
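
/*
 * Race outcome (illustrative): the selected mbuf typically has
 * refcount 2, one reference held by the pool and one by the driver,
 * and the m_freem() above drops the pool reference. If the driver has
 * already released its reference, the refcount reaches 0 and
 * generic_mbuf_destructor() runs immediately (the race mentioned
 * above, still resulting in a notification); otherwise the driver's
 * later m_freem() fires the destructor.
 */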

/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit() ).
 * On linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int ring_nr = kring->ring_id;

	IFRATE(rate_ctx.new.txsync++);

	rmb();

	/*
	 * First part: process new packets to send.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct nm_os_gen_arg a;
		u_int event = -1;

		if (gna->txqdisc && nm_kr_txempty(kring)) {
			/* In txqdisc mode, we ask for a delayed notification,
			 * but only when cur == hwtail, which means that the
			 * client is going to block. */
			event = ring_middle(nm_i, head, lim);
			ND(3, "Place txqdisc event (hwcur=%u,event=%u,"
			      "head=%u,hwtail=%u)", nm_i, event, head,
			      kring->nr_hwtail);
		}

		a.ifp = ifp;
		a.ring_nr = ring_nr;
		a.head = a.tail = NULL;

		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			void *addr = NMB(na, slot);
			/* device-specific */
			struct mbuf *m;
			int tx_ret;

			NM_CHECK_ADDR_LEN(na, addr, len);

			/* Take an mbuf from the tx pool (replenishing the pool
			 * entry if necessary) and copy in the user packet. */
			m = kring->tx_pool[nm_i];
			if (unlikely(m == NULL)) {
				kring->tx_pool[nm_i] = m =
					nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
				if (m == NULL) {
					RD(2, "Failed to replenish mbuf");
					/* Here we could schedule a timer which
					 * retries to replenish after a while,
					 * and notifies the client when it
					 * manages to replenish some slots. In
					 * any case we break early to avoid
					 * crashes. */
					break;
				}
				IFRATE(rate_ctx.new.txrepl++);
			}

			a.m = m;
			a.addr = addr;
			a.len = len;
			a.qevent = (nm_i == event);
			/* When not in txqdisc mode, we should ask for
			 * notifications when NS_REPORT is set, or roughly
			 * every half ring. To optimize this, we set a
			 * notification event when the client runs out of
			 * TX ring space, or when transmission fails. In
			 * the latter case we also break early.
			 */
			tx_ret = nm_os_generic_xmit_frame(&a);
			if (unlikely(tx_ret)) {
				if (!gna->txqdisc) {
					/*
					 * No room for this mbuf in the device driver.
					 * Request a notification FOR A PREVIOUS MBUF,
					 * then call generic_netmap_tx_clean(kring) to do the
					 * double check and see if we can free more buffers.
					 * If there is space continue, else break;
					 * NOTE: the double check is necessary if the problem
					 * occurs in the txsync call after selrecord().
					 * Also, we need some way to tell the caller that not
					 * all buffers were queued onto the device (this was
					 * not a problem with native netmap driver where space
					 * is preallocated). The bridge has a similar problem
					 * and we solve it there by dropping the excess packets.
					 */
					generic_set_tx_event(kring, nm_i);
					if (generic_netmap_tx_clean(kring, gna->txqdisc)) {
						/* space now available */
						continue;
					} else {
						break;
					}
				}

				/* In txqdisc mode, the netmap-aware qdisc
				 * queue has the same length as the number of
				 * netmap slots (N). Since tail is advanced
				 * only when packets are dequeued, qdisc
				 * queue overrun cannot happen, so
				 * nm_os_generic_xmit_frame() did not fail
				 * because of that.
				 * However, packets can be dropped because
				 * carrier is off, or because our qdisc is
				 * being deactivated, or possibly for other
				 * reasons. In these cases, we just let the
				 * packet be dropped. */
				IFRATE(rate_ctx.new.txdrop++);
			}

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			IFRATE(rate_ctx.new.txpkt++);
		}
		if (a.head != NULL) {
			a.addr = NULL;
			nm_os_generic_xmit_frame(&a);
		}
		/* Update hwcur to the next slot to transmit. Here nm_i
		 * is not necessarily head, we could break early. */
		kring->nr_hwcur = nm_i;
	}

	/*
	 * Second, reclaim completed buffers
	 */
	if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) {
		/* No more available slots? Set a notification event
		 * on a netmap slot that will be cleaned in the future.
		 * No doublecheck is performed, since txsync() will be
		 * called twice by netmap_poll().
		 */
		generic_set_tx_event(kring, nm_i);
	}

	generic_netmap_tx_clean(kring, gna->txqdisc);

	return 0;
}

/*
 * This handler is registered (through nm_os_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen before it reaches the network stack.
 * Stolen packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 * Returns 1 if the packet was stolen, 0 otherwise.
 */
int
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring;
	u_int work_done;
	u_int r = MBUF_RXQ(m); /* receive ring number */

	if (r >= na->num_rx_rings) {
		r = r % na->num_rx_rings;
	}

	kring = &na->rx_rings[r];

	if (kring->nr_mode == NKR_NETMAP_OFF) {
		/* We must not intercept this mbuf. */
		return 0;
	}

	/* limit the size of the queue */
	if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) {
		/* This may happen when GRO/LRO features are enabled for
		 * the NIC driver when the generic adapter does not
		 * support RX scatter-gather. */
		RD(2, "Warning: driver pushed up big packet "
				"(size=%d)", (int)MBUF_LEN(m));
		m_freem(m);
	} else if (unlikely(mbq_len(&kring->rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&kring->rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na, r, &work_done);
	} else {
		/* same as send combining: filter the notification if there
		 * is a pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(nm_os_mitigation_active(&gna->mit[r]))) {
			/* Record that there is some pending work. */
			gna->mit[r].mit_pending = 1;
		} else {
			netmap_generic_irq(na, r, &work_done);
			nm_os_mitigation_start(&gna->mit[r]);
		}
	}

	/* We have intercepted the mbuf. */
	return 1;
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_adapter *na = kring->na;
	u_int nm_i;	/* index into the netmap ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* Adapter-specific variables. */
	uint16_t slot_flags = kring->nkr_slot_flags;
	u_int nm_buf_len = NETMAP_BUF_SIZE(na);
	struct mbq tmpq;
	struct mbuf *m;
	int avail; /* in bytes */
	int mlen;
	int copy;

	if (head > lim)
		return netmap_ring_reinit(kring);

	IFRATE(rate_ctx.new.rxsync++);

	/*
	 * First part: skip past packets that userspace has released.
	 * This can possibly make room for the second part.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		/* Userspace has released some packets. */
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];

			slot->flags &= ~NS_BUF_CHANGED;
			nm_i = nm_next(nm_i, lim);
		}
		kring->nr_hwcur = head;
	}

	/*
	 * Second part: import newly received packets.
	 */
	if (!netmap_no_pendintr && !force_update) {
		return 0;
	}

	nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */

	/* Compute the available space (in bytes) in this netmap ring.
	 * The first slot not to be considered is the one before
	 * nr_hwcur. */

	avail = nm_prev(kring->nr_hwcur, lim) - nm_i;
	if (avail < 0)
		avail += lim + 1;
	avail *= nm_buf_len;
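
	/* Worked example: with lim = 255, nr_hwcur = 10 and
	 * nr_hwtail = 200, nm_prev(10, 255) = 9 and
	 * avail = 9 - 200 = -191, corrected to -191 + 256 = 65 free
	 * slots, i.e. 65 * NETMAP_BUF_SIZE(na) bytes of space. */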
106337e3a6d3SLuigi Rizzo 
106437e3a6d3SLuigi Rizzo 	/* First pass: While holding the lock on the RX mbuf queue,
106537e3a6d3SLuigi Rizzo 	 * extract as many mbufs as they fit the available space,
106637e3a6d3SLuigi Rizzo 	 * and put them in a temporary queue.
106737e3a6d3SLuigi Rizzo 	 * To avoid performing a per-mbuf division (mlen / nm_buf_len) to
106837e3a6d3SLuigi Rizzo 	 * to update avail, we do the update in a while loop that we
106937e3a6d3SLuigi Rizzo 	 * also use to set the RX slots, but without performing the copy. */
107037e3a6d3SLuigi Rizzo 	mbq_init(&tmpq);
107137e3a6d3SLuigi Rizzo 	mbq_lock(&kring->rx_queue);
107237e3a6d3SLuigi Rizzo 	for (n = 0;; n++) {
107337e3a6d3SLuigi Rizzo 		m = mbq_peek(&kring->rx_queue);
107437e3a6d3SLuigi Rizzo 		if (!m) {
107537e3a6d3SLuigi Rizzo 			/* No more packets from the driver. */
107637e3a6d3SLuigi Rizzo 			break;
107737e3a6d3SLuigi Rizzo 		}
107837e3a6d3SLuigi Rizzo 
107937e3a6d3SLuigi Rizzo 		mlen = MBUF_LEN(m);
108037e3a6d3SLuigi Rizzo 		if (mlen > avail) {
108137e3a6d3SLuigi Rizzo 			/* No more space in the ring. */
108237e3a6d3SLuigi Rizzo 			break;
108337e3a6d3SLuigi Rizzo 		}
108437e3a6d3SLuigi Rizzo 
108537e3a6d3SLuigi Rizzo 		mbq_dequeue(&kring->rx_queue);
108637e3a6d3SLuigi Rizzo 
108737e3a6d3SLuigi Rizzo 		while (mlen) {
108837e3a6d3SLuigi Rizzo 			copy = nm_buf_len;
108937e3a6d3SLuigi Rizzo 			if (mlen < copy) {
109037e3a6d3SLuigi Rizzo 				copy = mlen;
109137e3a6d3SLuigi Rizzo 			}
109237e3a6d3SLuigi Rizzo 			mlen -= copy;
109337e3a6d3SLuigi Rizzo 			avail -= nm_buf_len;
109437e3a6d3SLuigi Rizzo 
109537e3a6d3SLuigi Rizzo 			ring->slot[nm_i].len = copy;
109637e3a6d3SLuigi Rizzo 			ring->slot[nm_i].flags = slot_flags | (mlen ? NS_MOREFRAG : 0);
109737e3a6d3SLuigi Rizzo 			nm_i = nm_next(nm_i, lim);
109837e3a6d3SLuigi Rizzo 		}
109937e3a6d3SLuigi Rizzo 
110037e3a6d3SLuigi Rizzo 		mbq_enqueue(&tmpq, m);
110137e3a6d3SLuigi Rizzo 	}
110237e3a6d3SLuigi Rizzo 	mbq_unlock(&kring->rx_queue);
110337e3a6d3SLuigi Rizzo 
110437e3a6d3SLuigi Rizzo 	/* Second pass: Drain the temporary queue, going over the used RX slots,
110537e3a6d3SLuigi Rizzo 	 * and perform the copy out of the RX queue lock. */
110637e3a6d3SLuigi Rizzo 	nm_i = kring->nr_hwtail;
110737e3a6d3SLuigi Rizzo 
110837e3a6d3SLuigi Rizzo 	for (;;) {
110937e3a6d3SLuigi Rizzo 		void *nmaddr;
111037e3a6d3SLuigi Rizzo 		int ofs = 0;
111137e3a6d3SLuigi Rizzo 		int morefrag;
111237e3a6d3SLuigi Rizzo 
111337e3a6d3SLuigi Rizzo 		m = mbq_dequeue(&tmpq);
111437e3a6d3SLuigi Rizzo 		if (!m) {
111537e3a6d3SLuigi Rizzo 			break;
111637e3a6d3SLuigi Rizzo 		}
111737e3a6d3SLuigi Rizzo 
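		/* Copy the mbuf data into the slot(s) reserved during the
		 * first pass, following the NS_MOREFRAG chain laid out there. */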
111837e3a6d3SLuigi Rizzo 		do {
111937e3a6d3SLuigi Rizzo 			nmaddr = NMB(na, &ring->slot[nm_i]);
112037e3a6d3SLuigi Rizzo 			/* We only check the address here on generic rx rings. */
112137e3a6d3SLuigi Rizzo 			if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */
112237e3a6d3SLuigi Rizzo 				m_freem(m);
112337e3a6d3SLuigi Rizzo 				mbq_purge(&tmpq);
112437e3a6d3SLuigi Rizzo 				mbq_fini(&tmpq);
112537e3a6d3SLuigi Rizzo 				return netmap_ring_reinit(kring);
112637e3a6d3SLuigi Rizzo 			}
112737e3a6d3SLuigi Rizzo 
112837e3a6d3SLuigi Rizzo 			copy = ring->slot[nm_i].len;
112937e3a6d3SLuigi Rizzo 			m_copydata(m, ofs, copy, nmaddr);
113037e3a6d3SLuigi Rizzo 			ofs += copy;
113137e3a6d3SLuigi Rizzo 			morefrag = ring->slot[nm_i].flags & NS_MOREFRAG;
113237e3a6d3SLuigi Rizzo 			nm_i = nm_next(nm_i, lim);
113337e3a6d3SLuigi Rizzo 		} while (morefrag);
113437e3a6d3SLuigi Rizzo 
113537e3a6d3SLuigi Rizzo 		m_freem(m);
113637e3a6d3SLuigi Rizzo 	}
113737e3a6d3SLuigi Rizzo 
113837e3a6d3SLuigi Rizzo 	mbq_fini(&tmpq);
113937e3a6d3SLuigi Rizzo 
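	/* If the first pass dequeued at least one mbuf, expose the newly
	 * filled slots to userspace by advancing nr_hwtail. */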
114037e3a6d3SLuigi Rizzo 	if (n) {
114137e3a6d3SLuigi Rizzo 		kring->nr_hwtail = nm_i;
114237e3a6d3SLuigi Rizzo 		IFRATE(rate_ctx.new.rxpkt += n);
114337e3a6d3SLuigi Rizzo 	}
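	/* The pending interrupt, if any, has been serviced. */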
114437e3a6d3SLuigi Rizzo 	kring->nr_kflags &= ~NKR_PENDINTR;
1145f9790aebSLuigi Rizzo 
1146f9790aebSLuigi Rizzo 	return 0;
1147f9790aebSLuigi Rizzo }
1148f9790aebSLuigi Rizzo 
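/*
 * Destructor for the emulated adapter: release the reference(s) held
 * on the previous adapter and re-attach it to the ifp, undoing the
 * work done by generic_netmap_attach().
 */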
1149f9790aebSLuigi Rizzo static void
1150f9790aebSLuigi Rizzo generic_netmap_dtor(struct netmap_adapter *na)
1151f9790aebSLuigi Rizzo {
1152f9790aebSLuigi Rizzo 	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
1153847bf383SLuigi Rizzo 	struct ifnet *ifp = netmap_generic_getifp(gna);
1154f9790aebSLuigi Rizzo 	struct netmap_adapter *prev_na = gna->prev;
1155f9790aebSLuigi Rizzo 
1156f9790aebSLuigi Rizzo 	if (prev_na != NULL) {
1157847bf383SLuigi Rizzo 		netmap_adapter_put(prev_na);
115837e3a6d3SLuigi Rizzo 		if (nm_iszombie(na)) {
1159847bf383SLuigi Rizzo 			/*
1160847bf383SLuigi Rizzo 			 * The driver has been removed without releasing
1161847bf383SLuigi Rizzo 			 * the reference, so we need to do it here.
1162847bf383SLuigi Rizzo 			 */
1163f9790aebSLuigi Rizzo 			netmap_adapter_put(prev_na);
1164f9790aebSLuigi Rizzo 		}
1165*c3e9b4dbSLuiz Otavio O Souza 		D("Native netmap adapter %p restored", prev_na);
1166847bf383SLuigi Rizzo 	}
116737e3a6d3SLuigi Rizzo 	NM_ATTACH_NA(ifp, prev_na);
116837e3a6d3SLuigi Rizzo 	/*
116937e3a6d3SLuigi Rizzo 	 * netmap_detach_common(), which is called after this function,
117037e3a6d3SLuigi Rizzo 	 * overrides WNA(ifp) if na->ifp is not NULL.
117137e3a6d3SLuigi Rizzo 	 */
1172f9790aebSLuigi Rizzo 	na->ifp = NULL;
1173*c3e9b4dbSLuiz Otavio O Souza 	D("Emulated netmap adapter for %s destroyed", na->name);
1174*c3e9b4dbSLuiz Otavio O Souza }
1175*c3e9b4dbSLuiz Otavio O Souza 
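/* Tell whether na is an emulated (generic) adapter, recognized by its
 * nm_register callback. */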
1176*c3e9b4dbSLuiz Otavio O Souza int
1177*c3e9b4dbSLuiz Otavio O Souza na_is_generic(struct netmap_adapter *na)
1178*c3e9b4dbSLuiz Otavio O Souza {
1179*c3e9b4dbSLuiz Otavio O Souza 	return na->nm_register == generic_netmap_register;
1180f9790aebSLuigi Rizzo }
1181f9790aebSLuigi Rizzo 
1182f9790aebSLuigi Rizzo /*
1183f9790aebSLuigi Rizzo  * generic_netmap_attach() makes it possible to use netmap on
1184f9790aebSLuigi Rizzo  * a device without native netmap support.
1185f9790aebSLuigi Rizzo  * This is less performant than native support but potentially
1186f9790aebSLuigi Rizzo  * faster than raw sockets or similar schemes.
1187f9790aebSLuigi Rizzo  *
1188f9790aebSLuigi Rizzo  * In this "emulated" mode, netmap rings do not necessarily
1189f9790aebSLuigi Rizzo  * have the same size as those in the NIC. We use a default
1190f9790aebSLuigi Rizzo  * value and possibly override it if the OS has ways to fetch the
1191f9790aebSLuigi Rizzo  * actual configuration.
1192f9790aebSLuigi Rizzo  */
1193f9790aebSLuigi Rizzo int
1194f9790aebSLuigi Rizzo generic_netmap_attach(struct ifnet *ifp)
1195f9790aebSLuigi Rizzo {
1196f9790aebSLuigi Rizzo 	struct netmap_adapter *na;
1197f9790aebSLuigi Rizzo 	struct netmap_generic_adapter *gna;
1198f9790aebSLuigi Rizzo 	int retval;
1199f9790aebSLuigi Rizzo 	u_int num_tx_desc, num_rx_desc;
1200f9790aebSLuigi Rizzo 
1201f9790aebSLuigi Rizzo 	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */
1202f9790aebSLuigi Rizzo 
120337e3a6d3SLuigi Rizzo 	nm_os_generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */
1204f9790aebSLuigi Rizzo 	ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);
1205e4166283SLuigi Rizzo 	if (num_tx_desc == 0 || num_rx_desc == 0) {
1206e4166283SLuigi Rizzo 		D("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc);
1207e4166283SLuigi Rizzo 		return EINVAL;
1208e4166283SLuigi Rizzo 	}
1209f9790aebSLuigi Rizzo 
1210*c3e9b4dbSLuiz Otavio O Souza 	gna = nm_os_malloc(sizeof(*gna));
1211f9790aebSLuigi Rizzo 	if (gna == NULL) {
1212f9790aebSLuigi Rizzo 		D("no memory on attach, give up");
1213f9790aebSLuigi Rizzo 		return ENOMEM;
1214f9790aebSLuigi Rizzo 	}
1215f9790aebSLuigi Rizzo 	na = (struct netmap_adapter *)gna;
1216847bf383SLuigi Rizzo 	strlcpy(na->name, ifp->if_xname, sizeof(na->name));
1217f9790aebSLuigi Rizzo 	na->ifp = ifp;
1218f9790aebSLuigi Rizzo 	na->num_tx_desc = num_tx_desc;
1219f9790aebSLuigi Rizzo 	na->num_rx_desc = num_rx_desc;
1220f9790aebSLuigi Rizzo 	na->nm_register = &generic_netmap_register;
1221f9790aebSLuigi Rizzo 	na->nm_txsync = &generic_netmap_txsync;
1222f9790aebSLuigi Rizzo 	na->nm_rxsync = &generic_netmap_rxsync;
1223f9790aebSLuigi Rizzo 	na->nm_dtor = &generic_netmap_dtor;
12244bf50f18SLuigi Rizzo 	/* when using the generic adapter, NAF_NETMAP_ON is set, so we
1225f9790aebSLuigi Rizzo 	 * force NAF_SKIP_INTR to keep using the regular interrupt handler
1226f9790aebSLuigi Rizzo 	 */
1227f0ea3689SLuigi Rizzo 	na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;
1228f9790aebSLuigi Rizzo 
1229f9790aebSLuigi Rizzo 	ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
1230f9790aebSLuigi Rizzo 			ifp->num_tx_queues, ifp->real_num_tx_queues,
1231f9790aebSLuigi Rizzo 			ifp->tx_queue_len);
1232f9790aebSLuigi Rizzo 	ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
1233f9790aebSLuigi Rizzo 			ifp->num_rx_queues, ifp->real_num_rx_queues);
1234f9790aebSLuigi Rizzo 
123537e3a6d3SLuigi Rizzo 	nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);
1236f9790aebSLuigi Rizzo 
1237f9790aebSLuigi Rizzo 	retval = netmap_attach_common(na);
1238f9790aebSLuigi Rizzo 	if (retval) {
1239*c3e9b4dbSLuiz Otavio O Souza 		nm_os_free(gna);
124037e3a6d3SLuigi Rizzo 		return retval;
1241f9790aebSLuigi Rizzo 	}
1242f9790aebSLuigi Rizzo 
124337e3a6d3SLuigi Rizzo 	gna->prev = NA(ifp); /* save old na */
124437e3a6d3SLuigi Rizzo 	if (gna->prev != NULL) {
124537e3a6d3SLuigi Rizzo 		netmap_adapter_get(gna->prev);
124637e3a6d3SLuigi Rizzo 	}
124737e3a6d3SLuigi Rizzo 	NM_ATTACH_NA(ifp, na);
124837e3a6d3SLuigi Rizzo 
124937e3a6d3SLuigi Rizzo 	nm_os_generic_set_features(gna);
125037e3a6d3SLuigi Rizzo 
1251*c3e9b4dbSLuiz Otavio O Souza 	D("Emulated adapter for %s created (prev was %p)", na->name, gna->prev);
125237e3a6d3SLuigi Rizzo 
1253f9790aebSLuigi Rizzo 	return retval;
1254f9790aebSLuigi Rizzo }
1255