/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2013-2016 Vincenzo Maffione
 * Copyright (C) 2013-2016 Luigi Rizzo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we can use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input)
 *	on the receive path and put them in the mbq, from which
 *	netmap receive routines can grab them.
 *
 * TX:
 *	in the generic_txsync() routine, netmap buffers are copied
 *	(or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have the refcount=1, others have refcount=2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	in the generic_rxsync() routine, the mbufs queued by the
 *	rx handler are dequeued, their payload is copied into the
 *	netmap receive buffers, and the ring pointers are updated.
 */
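
/*
 * Illustrative sketch of the TX completion scheme described above
 * (not part of the driver logic; the slot index 'i' is made up):
 *
 *	txsync:	m = kring->tx_pool[i];		// refcount 1, pool owns it
 *		nm_os_generic_xmit_frame(&a);	// driver takes a reference
 *	event:	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
 *		kring->tx_pool[i] = NULL;
 *		m_freem(m);			// drop the pool reference
 *	later:	the driver completes the transmission and frees the mbuf;
 *		the refcount drops to zero, the destructor runs and calls
 *		netmap_generic_irq(), which wakes up blocked clients.
 */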

#ifdef __FreeBSD__

#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#define MBUF_RXQ(m)	((m)->m_pkthdr.flowid)
#define smp_mb()

/*
 * FreeBSD mbuf allocator/deallocator in emulation mode:
 */
#if __FreeBSD_version < 1100000

/*
 * For older versions of FreeBSD:
 *
 * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE
 * so that the destructor, if invoked, will not free the packet.
 * In principle we should set the destructor only on demand,
 * but since there might be a race, we had better do it on allocation.
 * As a consequence, we also need to set the destructor, or we
 * would leak buffers.
 */

/* mbuf destructor: we also need to change the type to EXT_EXTREF,
 * add the M_NOFREE flag, and then have the destructor clear the flag
 * and chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (void *)fn;	\
	(m)->m_ext.ext_type = EXT_EXTREF;	\
} while (0)

static int
void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2)
{
	/* restore original mbuf */
	m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
	m->m_ext.ext_arg1 = NULL;
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	if (MBUF_REFCNT(m) == 0)
		SET_MBUF_REFCNT(m, 1);
	uma_zfree(zone_pack, m);

	return 0;
}

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m) {
		/* m_getcl() (mb_ctor_mbuf) has an assert that checks that
		 * the M_NOFREE flag is not specified as third argument,
		 * so we have to set M_NOFREE after m_getcl(). */
		m->m_flags |= M_NOFREE;
		m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
		m->m_ext.ext_free = (void *)void_mbuf_dtor;
		m->m_ext.ext_type = EXT_EXTREF;
		ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
	}
	return m;
}

#else /* __FreeBSD_version >= 1100000 */

/*
 * Newer versions of FreeBSD use a straightforward scheme.
 *
 * We allocate mbufs with m_gethdr(), since the mbuf header is needed
 * by the driver. We also attach a custom external storage, which in
 * this case is a netmap buffer. When calling m_extadd(), however,
 * we pass a NULL address, since the real address (and length) will be
 * filled in by nm_os_generic_xmit_frame() right before calling
 * if_transmit().
 *
 * The dtor function does nothing, however we need it since mb_free_ext()
 * has a KASSERT(), checking that the mbuf dtor function is not NULL.
 */

#if __FreeBSD_version <= 1200050
static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { }
#else  /* __FreeBSD_version >= 1200051 */
/* The arg1 and arg2 pointer arguments were removed by r324446, which
 * is included since version 1200051. */
static void void_mbuf_dtor(struct mbuf *m) { }
#endif /* __FreeBSD_version >= 1200051 */

#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (fn != NULL) ?		\
	    (void *)fn : (void *)void_mbuf_dtor;	\
} while (0)

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	(void)len;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return m;
	}

	m_extadd(m, NULL /* buf */, 0 /* size */, void_mbuf_dtor,
		 NULL, NULL, 0, EXT_NET_DRV);

	return m;
}
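
/*
 * Note on the scheme above: the mbuf returned by nm_os_get_mbuf() has
 * no usable data storage yet. generic_netmap_txsync() below keeps it
 * in kring->tx_pool[], and nm_os_generic_xmit_frame() points the
 * external storage at the netmap buffer right before the mbuf is
 * handed to if_transmit().
 */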

#endif /* __FreeBSD_version >= 1100000 */

#elif defined _WIN32

#include "win_glue.h"

#define MBUF_TXQ(m)	0	/* XXX ((m)->m_pkthdr.flowid) */
#define MBUF_RXQ(m)	0	/* XXX ((m)->m_pkthdr.flowid) */
#define smp_mb()		/* XXX: to be correctly defined */

#else /* linux */

#include "bsd_glue.h"

#include <linux/ethtool.h>      /* struct ethtool_ops, get_ringparam */
#include <linux/hrtimer.h>

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	return alloc_skb(ifp->needed_headroom + len +
			 ifp->needed_tailroom, GFP_ATOMIC);
}

#endif /* linux */


/* Common headers. */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


#define for_each_kring_n(_i, _k, _karr, _n) \
	for ((_k)=*(_karr), (_i) = 0; (_i) < (_n); (_i)++, (_k) = (_karr)[(_i)])

#define for_each_tx_kring(_i, _k, _na) \
            for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings)
#define for_each_tx_kring_h(_i, _k, _na) \
            for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1)

#define for_each_rx_kring(_i, _k, _na) \
            for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings)
#define for_each_rx_kring_h(_i, _k, _na) \
            for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1)
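
/*
 * Usage sketch (illustrative only): the macros above expand to a plain
 * for loop over a kring array, e.g.
 *
 *	struct netmap_kring *kring;
 *	u_int r;
 *
 *	for_each_tx_kring(r, kring, na) {
 *		// here kring == na->tx_rings[r]
 *	}
 *
 * The _h variants also cover the extra host ring, at index
 * num_tx_rings (resp. num_rx_rings).
 */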


/* ======================== PERFORMANCE STATISTICS =========================== */

#ifdef RATE_GENERIC
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long txrepl;
	unsigned long txdrop;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
	struct rate_context * ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(txrepl);
	RATE_PRINTK(txdrop);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi)
{
    if (txp) rate_ctx.new.txpkt++;
    if (txs) rate_ctx.new.txsync++;
    if (txi) rate_ctx.new.txirq++;
    if (rxp) rate_ctx.new.rxpkt++;
    if (rxs) rate_ctx.new.rxsync++;
    if (rxi) rate_ctx.new.rxirq++;
}

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */

/* ========== GENERIC (EMULATED) NETMAP ADAPTER SUPPORT ============= */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only NAF_NETMAP_ON instead of NAF_NATIVE_ON to enable the irq.
 */
void
netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
{
	if (unlikely(!nm_netmap_on(na)))
		return;

	netmap_common_irq(na, q, work_done);
#ifdef RATE_GENERIC
	if (work_done)
		rate_ctx.new.rxirq++;
	else
		rate_ctx.new.txirq++;
#endif  /* RATE_GENERIC */
}

static int
generic_netmap_unregister(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int i, r;

	if (na->active_fds == 0) {
		na->na_flags &= ~NAF_NETMAP_ON;

		/* Stop intercepting packets on the RX path. */
		nm_os_catch_rx(gna, 0);

		/* Release packet steering control. */
		nm_os_catch_tx(gna, 0);
	}

	for_each_rx_kring_h(r, kring, na) {
		if (nm_kring_pending_off(kring)) {
			D("Emulated adapter: ring '%s' deactivated", kring->name);
			kring->nr_mode = NKR_NETMAP_OFF;
		}
	}
	for_each_tx_kring_h(r, kring, na) {
		if (nm_kring_pending_off(kring)) {
			kring->nr_mode = NKR_NETMAP_OFF;
			D("Emulated adapter: ring '%s' deactivated", kring->name);
		}
	}

	for_each_rx_kring(r, kring, na) {
		/* Free the mbufs still pending in the RX queues,
		 * which did not end up in the corresponding netmap
		 * RX rings. */
		mbq_safe_purge(&kring->rx_queue);
		nm_os_mitigation_cleanup(&gna->mit[r]);
	}

	/* Decrement the reference counter for the mbufs in the
	 * TX pools. These mbufs can still be pending in the drivers
	 * (e.g. this happens with the virtio-net driver, which
	 * does lazy reclaiming of transmitted mbufs). */
	for_each_tx_kring(r, kring, na) {
		/* We must remove the destructor on the TX event,
		 * because the destructor invokes netmap code, and
		 * the netmap module may disappear before the
		 * TX event is consumed. */
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event) {
			SET_MBUF_DESTRUCTOR(kring->tx_event, NULL);
		}
		kring->tx_event = NULL;
		mtx_unlock_spin(&kring->tx_event_lock);
	}

	if (na->active_fds == 0) {
		nm_os_free(gna->mit);

		for_each_rx_kring(r, kring, na) {
			mbq_safe_fini(&kring->rx_queue);
		}

		for_each_tx_kring(r, kring, na) {
			mtx_destroy(&kring->tx_event_lock);
			if (kring->tx_pool == NULL) {
				continue;
			}

			for (i=0; i<na->num_tx_desc; i++) {
				if (kring->tx_pool[i]) {
					m_freem(kring->tx_pool[i]);
				}
			}
			nm_os_free(kring->tx_pool);
			kring->tx_pool = NULL;
		}

#ifdef RATE_GENERIC
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
		D("Emulated adapter for %s deactivated", na->name);
	}

	return 0;
}

/* Enable/disable netmap mode for a generic network interface. */
static int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int error;
	int i, r;

	if (!na) {
		return EINVAL;
	}

	if (!enable) {
		/* This is actually an unregif. */
		return generic_netmap_unregister(na);
	}

	if (na->active_fds == 0) {
		D("Emulated adapter for %s activated", na->name);
		/* Do all memory allocations when (na->active_fds == 0), to
		 * simplify error management. */

		/* Allocate memory for mitigation support on all the rx queues. */
		gna->mit = nm_os_malloc(na->num_rx_rings * sizeof(struct nm_generic_mit));
		if (!gna->mit) {
			D("mitigation allocation failed");
			error = ENOMEM;
			goto out;
		}

		for_each_rx_kring(r, kring, na) {
			/* Init mitigation support. */
			nm_os_mitigation_init(&gna->mit[r], r, na);

			/* Initialize the rx queue, as generic_rx_handler() can
			 * be called as soon as nm_os_catch_rx() returns.
			 */
			mbq_safe_init(&kring->rx_queue);
		}

		/*
		 * Prepare mbuf pools (parallel to the tx rings) for packet
		 * transmission. Don't preallocate the mbufs here, it's simpler
		 * to leave this task to txsync.
		 */
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool = NULL;
		}
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool =
				nm_os_malloc(na->num_tx_desc * sizeof(struct mbuf *));
			if (!kring->tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pools;
			}
			mtx_init(&kring->tx_event_lock, "tx_event_lock",
				 NULL, MTX_SPIN);
		}
	}

	for_each_rx_kring_h(r, kring, na) {
		if (nm_kring_pending_on(kring)) {
			D("Emulated adapter: ring '%s' activated", kring->name);
			kring->nr_mode = NKR_NETMAP_ON;
		}
	}
	for_each_tx_kring_h(r, kring, na) {
		if (nm_kring_pending_on(kring)) {
			D("Emulated adapter: ring '%s' activated", kring->name);
			kring->nr_mode = NKR_NETMAP_ON;
		}
	}

	for_each_tx_kring(r, kring, na) {
		/* Initialize tx_pool and tx_event. */
		for (i=0; i<na->num_tx_desc; i++) {
			kring->tx_pool[i] = NULL;
		}

		kring->tx_event = NULL;
	}

	if (na->active_fds == 0) {
		/* Prepare to intercept incoming traffic. */
		error = nm_os_catch_rx(gna, 1);
		if (error) {
			D("nm_os_catch_rx(1) failed (%d)", error);
			goto free_tx_pools;
		}

		/* Let netmap control the packet steering. */
		error = nm_os_catch_tx(gna, 1);
		if (error) {
			D("nm_os_catch_tx(1) failed (%d)", error);
			goto catch_rx;
		}

		na->na_flags |= NAF_NETMAP_ON;

#ifdef RATE_GENERIC
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */
	}

	return 0;

	/* Here (na->active_fds == 0) holds. */
catch_rx:
	nm_os_catch_rx(gna, 0);
free_tx_pools:
	for_each_tx_kring(r, kring, na) {
		mtx_destroy(&kring->tx_event_lock);
		if (kring->tx_pool == NULL) {
			continue;
		}
		nm_os_free(kring->tx_pool);
		kring->tx_pool = NULL;
	}
	for_each_rx_kring(r, kring, na) {
		mbq_safe_fini(&kring->rx_queue);
	}
	nm_os_free(gna->mit);
out:

	return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	struct netmap_adapter *na = NA(GEN_TX_MBUF_IFP(m));
	struct netmap_kring *kring;
	unsigned int r = MBUF_TXQ(m);
	unsigned int r_orig = r;

	if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) {
		D("Error: no netmap adapter on device %p",
		  GEN_TX_MBUF_IFP(m));
		return;
	}

	/*
	 * First, clear the event mbuf.
	 * In principle, the event 'm' should match the one stored
	 * on ring 'r'. However we check it explicitly to stay
	 * safe against lower layers (qdisc, driver, etc.) changing
	 * MBUF_TXQ(m) under our feet. If the match is not found
	 * on 'r', we try to see if it belongs to some other ring.
	 */
	for (;;) {
		bool match = false;

		kring = na->tx_rings[r];
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event == m) {
			kring->tx_event = NULL;
			match = true;
		}
		mtx_unlock_spin(&kring->tx_event_lock);

		if (match) {
			if (r != r_orig) {
				RD(1, "event %p migrated: ring %u --> %u",
				      m, r_orig, r);
			}
			break;
		}

		if (++r == na->num_tx_rings) r = 0;

		if (r == r_orig) {
			RD(1, "Cannot match event %p", m);
			return;
		}
	}

	/* Second, wake up clients. They will reclaim the event through
	 * txsync. */
	netmap_generic_irq(na, r, NULL);
#ifdef __FreeBSD__
#if __FreeBSD_version <= 1200050
	void_mbuf_dtor(m, NULL, NULL);
#else  /* __FreeBSD_version >= 1200051 */
	void_mbuf_dtor(m);
#endif /* __FreeBSD_version >= 1200051 */
#endif
}

/* Record completed transmissions and update hwtail.
 *
 * The oldest tx buffer not yet completed is at nr_hwtail + 1;
 * nr_hwcur is the first unsent buffer.
 */
static u_int
generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i = nm_next(kring->nr_hwtail, lim);
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	ND("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail);

	while (nm_i != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[nm_i];

		if (txqdisc) {
			if (m == NULL) {
				/* Nothing to do, this is going
				 * to be replenished. */
				RD(3, "Is this happening?");

			} else if (MBUF_QUEUED(m)) {
				break; /* Not dequeued yet. */

			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf has been dequeued but is still busy
				 * (refcount is 2).
				 * Leave it to the driver and replenish. */
				m_freem(m);
				tx_pool[nm_i] = NULL;
			}

		} else {
			if (unlikely(m == NULL)) {
				int event_consumed;

				/* This slot was used to place an event. */
				mtx_lock_spin(&kring->tx_event_lock);
				event_consumed = (kring->tx_event == NULL);
				mtx_unlock_spin(&kring->tx_event_lock);
				if (!event_consumed) {
					/* The event has not been consumed yet,
					 * still busy in the driver. */
					break;
				}
				/* The event has been consumed, we can go
				 * ahead. */

			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf is still busy: its refcnt is 2. */
				break;
			}
		}

		n++;
		nm_i = nm_next(nm_i, lim);
	}
	kring->nr_hwtail = nm_prev(nm_i, lim);
	ND("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);

	return n;
}
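
/*
 * Worked example (illustrative only): with lim = 255, nr_hwtail = 10
 * and nr_hwcur = 20, the scan above starts at slot 11; if the mbufs in
 * slots 11..15 have completed (refcount back to 1) and slot 16 is still
 * busy, the loop stops at nm_i = 16, nr_hwtail becomes nm_prev(16) = 15,
 * and n = 5 buffers have been reclaimed.
 */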

/* Compute a slot index in the middle between inf and sup. */
static inline u_int
ring_middle(u_int inf, u_int sup, u_int lim)
{
	u_int n = lim + 1;
	u_int e;

	if (sup >= inf) {
		e = (sup + inf) / 2;
	} else { /* wrap around */
		e = (sup + n + inf) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}
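
/*
 * Worked example (illustrative only): with lim = 7 (8 slots), inf = 6
 * and sup = 2 the interval wraps around, so e = (2 + 8 + 6) / 2 = 8;
 * since e >= n we get e -= 8, i.e. e = 0, which is indeed halfway
 * between slot 6 and slot 2 going forward (6 -> 7 -> 0 -> 1 -> 2).
 */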

static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	u_int lim = kring->nkr_num_slots - 1;
	struct mbuf *m;
	u_int e;
	u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */

	if (ntc == hwcur) {
		return; /* all buffers are free */
	}

	/*
	 * We have pending packets in the driver between hwtail+1
	 * and hwcur, and we have to choose one of these slots to
	 * generate a notification.
	 * There is a race, but this is only called within txsync,
	 * which does a double check.
	 */
#if 0
	/* Choose a slot in the middle, so that we don't risk ending
	 * up in a situation where the client continuously wakes up,
	 * fills one or a few TX slots and goes to sleep again. */
	e = ring_middle(ntc, hwcur, lim);
#else
	/* Choose the first pending slot, to be safe against driver
	 * reordering mbuf transmissions. */
	e = ntc;
#endif

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* An event is already in place. */
		return;
	}

	mtx_lock_spin(&kring->tx_event_lock);
	if (kring->tx_event) {
		/* An event is already in place. */
		mtx_unlock_spin(&kring->tx_event_lock);
		return;
	}

	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
	kring->tx_event = m;
	mtx_unlock_spin(&kring->tx_event_lock);

	kring->tx_pool[e] = NULL;

	ND(5, "Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2 );

	/* Decrement the refcount. This will free it if we lose the race
	 * with the driver. */
	m_freem(m);
	smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit() ).
 * On linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int ring_nr = kring->ring_id;

	IFRATE(rate_ctx.new.txsync++);

	rmb();

	/*
	 * First part: process new packets to send.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct nm_os_gen_arg a;
		u_int event = -1;

		if (gna->txqdisc && nm_kr_txempty(kring)) {
			/* In txqdisc mode, we ask for a delayed notification,
			 * but only when cur == hwtail, which means that the
			 * client is going to block. */
			event = ring_middle(nm_i, head, lim);
			ND(3, "Place txqdisc event (hwcur=%u,event=%u,"
			      "head=%u,hwtail=%u)", nm_i, event, head,
			      kring->nr_hwtail);
		}

		a.ifp = ifp;
		a.ring_nr = ring_nr;
		a.head = a.tail = NULL;

		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			void *addr = NMB(na, slot);
			/* device-specific */
			struct mbuf *m;
			int tx_ret;

			NM_CHECK_ADDR_LEN(na, addr, len);

			/* Take an mbuf from the tx pool (replenishing the pool
			 * entry if necessary) and copy in the user packet. */
			m = kring->tx_pool[nm_i];
			if (unlikely(m == NULL)) {
				kring->tx_pool[nm_i] = m =
					nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
				if (m == NULL) {
					RD(2, "Failed to replenish mbuf");
					/* Here we could schedule a timer which
					 * retries to replenish after a while,
					 * and notifies the client when it
					 * manages to replenish some slots. In
					 * any case we break early to avoid
					 * crashes. */
					break;
				}
				IFRATE(rate_ctx.new.txrepl++);
			}

			a.m = m;
			a.addr = addr;
			a.len = len;
			a.qevent = (nm_i == event);
			/* When not in txqdisc mode, we should ask for
			 * notifications when NS_REPORT is set, or roughly
			 * every half ring. To optimize this, we set a
			 * notification event when the client runs out of
			 * TX ring space, or when transmission fails. In
			 * the latter case we also break early.
			 */
			tx_ret = nm_os_generic_xmit_frame(&a);
			if (unlikely(tx_ret)) {
				if (!gna->txqdisc) {
					/*
					 * No room for this mbuf in the device driver.
					 * Request a notification FOR A PREVIOUS MBUF,
					 * then call generic_netmap_tx_clean(kring) to do the
					 * double check and see if we can free more buffers.
					 * If there is space continue, else break;
					 * NOTE: the double check is necessary if the problem
					 * occurs in the txsync call after selrecord().
					 * Also, we need some way to tell the caller that not
					 * all buffers were queued onto the device (this was
					 * not a problem with native netmap driver where space
					 * is preallocated). The bridge has a similar problem
					 * and we solve it there by dropping the excess packets.
					 */
					generic_set_tx_event(kring, nm_i);
					if (generic_netmap_tx_clean(kring, gna->txqdisc)) {
						/* space now available */
						continue;
					} else {
						break;
					}
				}

				/* In txqdisc mode, the netmap-aware qdisc
				 * queue has the same length as the number of
				 * netmap slots (N). Since tail is advanced
				 * only when packets are dequeued, qdisc
				 * queue overrun cannot happen, so
				 * nm_os_generic_xmit_frame() did not fail
				 * because of that.
				 * However, packets can be dropped because
				 * carrier is off, or because our qdisc is
				 * being deactivated, or possibly for other
				 * reasons. In these cases, we just let the
				 * packet be dropped. */
				IFRATE(rate_ctx.new.txdrop++);
			}

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			IFRATE(rate_ctx.new.txpkt++);
		}
		if (a.head != NULL) {
			a.addr = NULL;
			nm_os_generic_xmit_frame(&a);
		}
		/* Update hwcur to the next slot to transmit. Here nm_i
		 * is not necessarily head, as we could break early. */
		kring->nr_hwcur = nm_i;
	}

	/*
	 * Second, reclaim completed buffers
	 */
	if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) {
		/* No more available slots? Set a notification event
		 * on a netmap slot that will be cleaned in the future.
		 * No doublecheck is performed, since txsync() will be
		 * called twice by netmap_poll().
		 */
		generic_set_tx_event(kring, nm_i);
	}

	generic_netmap_tx_clean(kring, gna->txqdisc);

	return 0;
}


/*
 * This handler is registered (through nm_os_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver to the network stack can be stolen.
 * Stolen packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 * Returns 1 if the packet was stolen, 0 otherwise.
 */
int
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring;
	u_int work_done;
	u_int r = MBUF_RXQ(m); /* receive ring number */

	if (r >= na->num_rx_rings) {
		r = r % na->num_rx_rings;
	}

	kring = na->rx_rings[r];

	if (kring->nr_mode == NKR_NETMAP_OFF) {
		/* We must not intercept this mbuf. */
		return 0;
	}

	/* limit the size of the queue */
	if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) {
		/* This may happen when GRO/LRO features are enabled for
		 * the NIC driver when the generic adapter does not
		 * support RX scatter-gather. */
		RD(2, "Warning: driver pushed up big packet "
				"(size=%d)", (int)MBUF_LEN(m));
		m_freem(m);
	} else if (unlikely(mbq_len(&kring->rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&kring->rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na, r, &work_done);
	} else {
		/* same as send combining: filter the notification if there is a
		 * pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(nm_os_mitigation_active(&gna->mit[r]))) {
			/* Record that there is some pending work. */
			gna->mit[r].mit_pending = 1;
		} else {
			netmap_generic_irq(na, r, &work_done);
			nm_os_mitigation_start(&gna->mit[r]);
		}
	}

	/* We have intercepted the mbuf. */
	return 1;
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_adapter *na = kring->na;
	u_int nm_i;	/* index into the netmap ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* Adapter-specific variables. */
	u_int nm_buf_len = NETMAP_BUF_SIZE(na);
	struct mbq tmpq;
	struct mbuf *m;
	int avail; /* in bytes */
	int mlen;
	int copy;

	if (head > lim)
		return netmap_ring_reinit(kring);

	IFRATE(rate_ctx.new.rxsync++);

	/*
	 * First part: skip past packets that userspace has released.
	 * This can possibly make room for the second part.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		/* Userspace has released some packets. */
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];

			slot->flags &= ~NS_BUF_CHANGED;
			nm_i = nm_next(nm_i, lim);
		}
		kring->nr_hwcur = head;
	}

	/*
	 * Second part: import newly received packets.
	 */
	if (!netmap_no_pendintr && !force_update) {
		return 0;
	}

	nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */

	/* Compute the available space (in bytes) in this netmap ring.
	 * The first slot that is not considered is the one before
	 * nr_hwcur. */

	avail = nm_prev(kring->nr_hwcur, lim) - nm_i;
	if (avail < 0)
		avail += lim + 1;
	avail *= nm_buf_len;

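	/* Worked example (illustrative only): with lim = 255 (256 slots),
	 * nr_hwcur = 10 and nm_i = nr_hwtail = 250, the fillable slots are
	 * 250..255 and 0..8 (slot 9, the one before nr_hwcur, is excluded):
	 * nm_prev(10) - 250 = 9 - 250 = -241, plus 256 gives 15 slots,
	 * hence avail = 15 * nm_buf_len bytes.
	 */
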
106137e3a6d3SLuigi Rizzo 	/* First pass: While holding the lock on the RX mbuf queue,
106237e3a6d3SLuigi Rizzo 	 * extract as many mbufs as they fit the available space,
106337e3a6d3SLuigi Rizzo 	 * and put them in a temporary queue.
106437e3a6d3SLuigi Rizzo 	 * To avoid performing a per-mbuf division (mlen / nm_buf_len) to
106537e3a6d3SLuigi Rizzo 	 * to update avail, we do the update in a while loop that we
106637e3a6d3SLuigi Rizzo 	 * also use to set the RX slots, but without performing the copy. */
106737e3a6d3SLuigi Rizzo 	mbq_init(&tmpq);
106837e3a6d3SLuigi Rizzo 	mbq_lock(&kring->rx_queue);
106937e3a6d3SLuigi Rizzo 	for (n = 0;; n++) {
107037e3a6d3SLuigi Rizzo 		m = mbq_peek(&kring->rx_queue);
107137e3a6d3SLuigi Rizzo 		if (!m) {
107237e3a6d3SLuigi Rizzo 			/* No more packets from the driver. */
107337e3a6d3SLuigi Rizzo 			break;
107437e3a6d3SLuigi Rizzo 		}
107537e3a6d3SLuigi Rizzo 
107637e3a6d3SLuigi Rizzo 		mlen = MBUF_LEN(m);
107737e3a6d3SLuigi Rizzo 		if (mlen > avail) {
107837e3a6d3SLuigi Rizzo 			/* No more space in the ring. */
107937e3a6d3SLuigi Rizzo 			break;
108037e3a6d3SLuigi Rizzo 		}
108137e3a6d3SLuigi Rizzo 
108237e3a6d3SLuigi Rizzo 		mbq_dequeue(&kring->rx_queue);
108337e3a6d3SLuigi Rizzo 
108437e3a6d3SLuigi Rizzo 		while (mlen) {
108537e3a6d3SLuigi Rizzo 			copy = nm_buf_len;
108637e3a6d3SLuigi Rizzo 			if (mlen < copy) {
108737e3a6d3SLuigi Rizzo 				copy = mlen;
108837e3a6d3SLuigi Rizzo 			}
108937e3a6d3SLuigi Rizzo 			mlen -= copy;
109037e3a6d3SLuigi Rizzo 			avail -= nm_buf_len;
109137e3a6d3SLuigi Rizzo 
109237e3a6d3SLuigi Rizzo 			ring->slot[nm_i].len = copy;
10934f80b14cSVincenzo Maffione 			ring->slot[nm_i].flags = (mlen ? NS_MOREFRAG : 0);
109437e3a6d3SLuigi Rizzo 			nm_i = nm_next(nm_i, lim);
109537e3a6d3SLuigi Rizzo 		}
109637e3a6d3SLuigi Rizzo 
109737e3a6d3SLuigi Rizzo 		mbq_enqueue(&tmpq, m);
109837e3a6d3SLuigi Rizzo 	}
109937e3a6d3SLuigi Rizzo 	mbq_unlock(&kring->rx_queue);
110037e3a6d3SLuigi Rizzo 
110137e3a6d3SLuigi Rizzo 	/* Second pass: Drain the temporary queue, going over the used RX slots,
110237e3a6d3SLuigi Rizzo 	 * and perform the copy out of the RX queue lock. */
110337e3a6d3SLuigi Rizzo 	nm_i = kring->nr_hwtail;
110437e3a6d3SLuigi Rizzo 
110537e3a6d3SLuigi Rizzo 	for (;;) {
110637e3a6d3SLuigi Rizzo 		void *nmaddr;
110737e3a6d3SLuigi Rizzo 		int ofs = 0;
110837e3a6d3SLuigi Rizzo 		int morefrag;
110937e3a6d3SLuigi Rizzo 
111037e3a6d3SLuigi Rizzo 		m = mbq_dequeue(&tmpq);
111137e3a6d3SLuigi Rizzo 		if (!m)	{
111237e3a6d3SLuigi Rizzo 			break;
111337e3a6d3SLuigi Rizzo 		}
111437e3a6d3SLuigi Rizzo 
111537e3a6d3SLuigi Rizzo 		do {
111637e3a6d3SLuigi Rizzo 			nmaddr = NMB(na, &ring->slot[nm_i]);
111737e3a6d3SLuigi Rizzo 			/* We only check the address here on generic rx rings. */
111837e3a6d3SLuigi Rizzo 			if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */
111937e3a6d3SLuigi Rizzo 				m_freem(m);
112037e3a6d3SLuigi Rizzo 				mbq_purge(&tmpq);
112137e3a6d3SLuigi Rizzo 				mbq_fini(&tmpq);
112237e3a6d3SLuigi Rizzo 				return netmap_ring_reinit(kring);
112337e3a6d3SLuigi Rizzo 			}
112437e3a6d3SLuigi Rizzo 
112537e3a6d3SLuigi Rizzo 			copy = ring->slot[nm_i].len;
112637e3a6d3SLuigi Rizzo 			m_copydata(m, ofs, copy, nmaddr);
112737e3a6d3SLuigi Rizzo 			ofs += copy;
112837e3a6d3SLuigi Rizzo 			morefrag = ring->slot[nm_i].flags & NS_MOREFRAG;
112937e3a6d3SLuigi Rizzo 			nm_i = nm_next(nm_i, lim);
113037e3a6d3SLuigi Rizzo 		} while (morefrag);
113137e3a6d3SLuigi Rizzo 
113237e3a6d3SLuigi Rizzo 		m_freem(m);
113337e3a6d3SLuigi Rizzo 	}
113437e3a6d3SLuigi Rizzo 
113537e3a6d3SLuigi Rizzo 	mbq_fini(&tmpq);
113637e3a6d3SLuigi Rizzo 
113737e3a6d3SLuigi Rizzo 	if (n) {
113837e3a6d3SLuigi Rizzo 		kring->nr_hwtail = nm_i;
113937e3a6d3SLuigi Rizzo 		IFRATE(rate_ctx.new.rxpkt += n);
114037e3a6d3SLuigi Rizzo 	}
114137e3a6d3SLuigi Rizzo 	kring->nr_kflags &= ~NKR_PENDINTR;
1142f9790aebSLuigi Rizzo 
1143f9790aebSLuigi Rizzo 	return 0;
1144f9790aebSLuigi Rizzo }
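
#if 0
/*
 * Illustration only, excluded from the build: a minimal userspace
 * consumer of the RX ring that generic_netmap_rxsync() fills above,
 * reassembling frames split across slots via NS_MOREFRAG. The helper
 * name and the caller-provided frame buffer are assumptions of this
 * sketch, not part of the module.
 */
#include <string.h>
#include <net/netmap_user.h>

static void
example_drain_rxring(struct netmap_ring *ring, char *frame, size_t cap)
{
	while (!nm_ring_empty(ring)) {
		uint32_t i = ring->cur;
		size_t len = 0;
		int morefrag;

		do {	/* collect all fragments of one frame */
			struct netmap_slot *slot = &ring->slot[i];
			char *buf = NETMAP_BUF(ring, slot->buf_idx);

			if (len + slot->len <= cap) {
				memcpy(frame + len, buf, slot->len);
				len += slot->len;
			}
			morefrag = slot->flags & NS_MOREFRAG;
			i = nm_ring_next(ring, i);
		} while (morefrag);
		/* release the consumed slots back to the kernel */
		ring->head = ring->cur = i;
		/* ... process the len bytes now in frame ... */
	}
}
#endif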
1145f9790aebSLuigi Rizzo 
1146f9790aebSLuigi Rizzo static void
1147f9790aebSLuigi Rizzo generic_netmap_dtor(struct netmap_adapter *na)
1148f9790aebSLuigi Rizzo {
1149f9790aebSLuigi Rizzo 	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
1150847bf383SLuigi Rizzo 	struct ifnet *ifp = netmap_generic_getifp(gna);
1151f9790aebSLuigi Rizzo 	struct netmap_adapter *prev_na = gna->prev;
1152f9790aebSLuigi Rizzo 
1153f9790aebSLuigi Rizzo 	if (prev_na != NULL) {
1154847bf383SLuigi Rizzo 		netmap_adapter_put(prev_na);
115537e3a6d3SLuigi Rizzo 		if (nm_iszombie(na)) {
1156847bf383SLuigi Rizzo 			/*
1157847bf383SLuigi Rizzo 			 * The driver has been removed without releasing
1158847bf383SLuigi Rizzo 			 * the reference, so we need to do it here.
1159847bf383SLuigi Rizzo 			 */
1160f9790aebSLuigi Rizzo 			netmap_adapter_put(prev_na);
1161f9790aebSLuigi Rizzo 		}
1162c3e9b4dbSLuiz Otavio O Souza 		D("Native netmap adapter %p restored", prev_na);
1163847bf383SLuigi Rizzo 	}
116437e3a6d3SLuigi Rizzo 	NM_ATTACH_NA(ifp, prev_na);
116537e3a6d3SLuigi Rizzo 	/*
116637e3a6d3SLuigi Rizzo 	 * netmap_detach_common(), which is called after this function,
116737e3a6d3SLuigi Rizzo 	 * overrides WNA(ifp) if na->ifp is not NULL.
116837e3a6d3SLuigi Rizzo 	 */
1169f9790aebSLuigi Rizzo 	na->ifp = NULL;
1170c3e9b4dbSLuiz Otavio O Souza 	D("Emulated netmap adapter for %s destroyed", na->name);
1171c3e9b4dbSLuiz Otavio O Souza }
1172c3e9b4dbSLuiz Otavio O Souza 
1173c3e9b4dbSLuiz Otavio O Souza int
1174c3e9b4dbSLuiz Otavio O Souza na_is_generic(struct netmap_adapter *na)
1175c3e9b4dbSLuiz Otavio O Souza {
1176c3e9b4dbSLuiz Otavio O Souza 	return na->nm_register == generic_netmap_register;
1177f9790aebSLuigi Rizzo }
1178f9790aebSLuigi Rizzo 
1179f9790aebSLuigi Rizzo /*
1180f9790aebSLuigi Rizzo  * generic_netmap_attach() makes it possible to use netmap on
1181f9790aebSLuigi Rizzo  * a device without native netmap support.
1182f9790aebSLuigi Rizzo  * This is slower than native support, but potentially
1183f9790aebSLuigi Rizzo  * faster than raw sockets or similar schemes.
1184f9790aebSLuigi Rizzo  *
1185f9790aebSLuigi Rizzo  * In this "emulated" mode, netmap rings do not necessarily
1186f9790aebSLuigi Rizzo  * have the same size as those in the NIC. We use a default
1187f9790aebSLuigi Rizzo  * value and possibly override it if the OS has ways to fetch the
1188f9790aebSLuigi Rizzo  * actual configuration.
1189f9790aebSLuigi Rizzo  */
1190f9790aebSLuigi Rizzo int
1191f9790aebSLuigi Rizzo generic_netmap_attach(struct ifnet *ifp)
1192f9790aebSLuigi Rizzo {
1193f9790aebSLuigi Rizzo 	struct netmap_adapter *na;
1194f9790aebSLuigi Rizzo 	struct netmap_generic_adapter *gna;
1195f9790aebSLuigi Rizzo 	int retval;
1196f9790aebSLuigi Rizzo 	u_int num_tx_desc, num_rx_desc;
1197f9790aebSLuigi Rizzo 
1198a02dbe4cSLuiz Otavio O Souza #ifdef __FreeBSD__
1199a02dbe4cSLuiz Otavio O Souza 	if (ifp->if_type == IFT_LOOP) {
1200a02dbe4cSLuiz Otavio O Souza 		D("if_loop is not supported by %s", __func__);
1201a02dbe4cSLuiz Otavio O Souza 		return EINVAL;
1202a02dbe4cSLuiz Otavio O Souza 	}
1203a02dbe4cSLuiz Otavio O Souza #endif
1204a02dbe4cSLuiz Otavio O Souza 
12054f80b14cSVincenzo Maffione 	if (NA(ifp) && !NM_NA_VALID(ifp)) {
12064f80b14cSVincenzo Maffione 		/* If NA(ifp) is not null but there is no valid netmap
12074f80b14cSVincenzo Maffione 	 * adapter, it means that someone else is using the same
12084f80b14cSVincenzo Maffione 		 * pointer (e.g. ax25_ptr on linux). This happens for
12094f80b14cSVincenzo Maffione 		 * instance when also PF_RING is in use. */
12104f80b14cSVincenzo Maffione 		D("Error: netmap adapter hook is busy");
12114f80b14cSVincenzo Maffione 		return EBUSY;
12124f80b14cSVincenzo Maffione 	}
12134f80b14cSVincenzo Maffione 
1214f9790aebSLuigi Rizzo 	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */
1215f9790aebSLuigi Rizzo 
121637e3a6d3SLuigi Rizzo 	nm_os_generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */
1217f9790aebSLuigi Rizzo 	ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);
1218e4166283SLuigi Rizzo 	if (num_tx_desc == 0 || num_rx_desc == 0) {
1219e4166283SLuigi Rizzo 		D("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc);
1220e4166283SLuigi Rizzo 		return EINVAL;
1221e4166283SLuigi Rizzo 	}
1222f9790aebSLuigi Rizzo 
1223c3e9b4dbSLuiz Otavio O Souza 	gna = nm_os_malloc(sizeof(*gna));
1224f9790aebSLuigi Rizzo 	if (gna == NULL) {
1225f9790aebSLuigi Rizzo 		D("no memory on attach, give up");
1226f9790aebSLuigi Rizzo 		return ENOMEM;
1227f9790aebSLuigi Rizzo 	}
1228f9790aebSLuigi Rizzo 	na = (struct netmap_adapter *)gna;
1229847bf383SLuigi Rizzo 	strlcpy(na->name, ifp->if_xname, sizeof(na->name)); /* always NUL-terminated */
1230f9790aebSLuigi Rizzo 	na->ifp = ifp;
1231f9790aebSLuigi Rizzo 	na->num_tx_desc = num_tx_desc;
1232f9790aebSLuigi Rizzo 	na->num_rx_desc = num_rx_desc;
1233f9790aebSLuigi Rizzo 	na->nm_register = &generic_netmap_register;
1234f9790aebSLuigi Rizzo 	na->nm_txsync = &generic_netmap_txsync;
1235f9790aebSLuigi Rizzo 	na->nm_rxsync = &generic_netmap_rxsync;
1236f9790aebSLuigi Rizzo 	na->nm_dtor = &generic_netmap_dtor;
12374bf50f18SLuigi Rizzo 	/* When the generic adapter is in use, NAF_NETMAP_ON is set, so we
1238f9790aebSLuigi Rizzo 	 * force NAF_SKIP_INTR to keep using the regular interrupt handler.
1239f9790aebSLuigi Rizzo 	 */
1240f0ea3689SLuigi Rizzo 	na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;
1241f9790aebSLuigi Rizzo 
1242f9790aebSLuigi Rizzo 	ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
1243f9790aebSLuigi Rizzo 			ifp->num_tx_queues, ifp->real_num_tx_queues,
1244f9790aebSLuigi Rizzo 			ifp->tx_queue_len);
1245f9790aebSLuigi Rizzo 	ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
1246f9790aebSLuigi Rizzo 			ifp->num_rx_queues, ifp->real_num_rx_queues);
1247f9790aebSLuigi Rizzo 
124837e3a6d3SLuigi Rizzo 	nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);
1249f9790aebSLuigi Rizzo 
1250f9790aebSLuigi Rizzo 	retval = netmap_attach_common(na);
1251f9790aebSLuigi Rizzo 	if (retval) {
1252c3e9b4dbSLuiz Otavio O Souza 		nm_os_free(gna);
125337e3a6d3SLuigi Rizzo 		return retval;
1254f9790aebSLuigi Rizzo 	}
1255f9790aebSLuigi Rizzo 
125637e3a6d3SLuigi Rizzo 	gna->prev = NA(ifp); /* save old na */
125737e3a6d3SLuigi Rizzo 	if (gna->prev != NULL) {
125837e3a6d3SLuigi Rizzo 		netmap_adapter_get(gna->prev);
125937e3a6d3SLuigi Rizzo 	}
126037e3a6d3SLuigi Rizzo 	NM_ATTACH_NA(ifp, na);
126137e3a6d3SLuigi Rizzo 
126237e3a6d3SLuigi Rizzo 	nm_os_generic_set_features(gna);
126337e3a6d3SLuigi Rizzo 
1264c3e9b4dbSLuiz Otavio O Souza 	D("Emulated adapter for %s created (prev was %p)", na->name, gna->prev);
126537e3a6d3SLuigi Rizzo 
1266f9790aebSLuigi Rizzo 	return retval;
1267f9790aebSLuigi Rizzo }
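
#if 0
/*
 * Illustration only, excluded from the build: once generic_netmap_attach()
 * has installed the emulated adapter, userspace opens the NIC exactly as
 * it would a native one; the emulation is transparent. The interface name
 * "em0" and the use of the nm_open()/nm_nextpkt() helpers from
 * netmap_user.h are assumptions of this sketch.
 */
#include <stdio.h>
#include <poll.h>
#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>

static int
example_open_emulated(void)
{
	struct nm_desc *d = nm_open("netmap:em0", NULL, 0, NULL);
	struct pollfd pfd;
	struct nm_pkthdr h;

	if (d == NULL)
		return (-1);	/* neither native nor emulated support */
	pfd.fd = NETMAP_FD(d);
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 1000) > 0) {
		while (nm_nextpkt(d, &h) != NULL)
			printf("got a %u-byte frame\n", (unsigned)h.len);
	}
	nm_close(d);
	return (0);
}
#endif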
1268