1718cf2ccSPedro F. Giffuni /*-
2718cf2ccSPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3718cf2ccSPedro F. Giffuni  *
437e3a6d3SLuigi Rizzo  * Copyright (C) 2013-2016 Vincenzo Maffione
537e3a6d3SLuigi Rizzo  * Copyright (C) 2013-2016 Luigi Rizzo
637e3a6d3SLuigi Rizzo  * All rights reserved.
7f9790aebSLuigi Rizzo  *
8f9790aebSLuigi Rizzo  * Redistribution and use in source and binary forms, with or without
9f9790aebSLuigi Rizzo  * modification, are permitted provided that the following conditions
10f9790aebSLuigi Rizzo  * are met:
11f9790aebSLuigi Rizzo  *   1. Redistributions of source code must retain the above copyright
12f9790aebSLuigi Rizzo  *      notice, this list of conditions and the following disclaimer.
13f9790aebSLuigi Rizzo  *   2. Redistributions in binary form must reproduce the above copyright
14f9790aebSLuigi Rizzo  *      notice, this list of conditions and the following disclaimer in the
15f9790aebSLuigi Rizzo  *      documentation and/or other materials provided with the distribution.
16f9790aebSLuigi Rizzo  *
17f9790aebSLuigi Rizzo  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18f9790aebSLuigi Rizzo  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19f9790aebSLuigi Rizzo  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20f9790aebSLuigi Rizzo  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21f9790aebSLuigi Rizzo  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22f9790aebSLuigi Rizzo  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23f9790aebSLuigi Rizzo  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24f9790aebSLuigi Rizzo  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25f9790aebSLuigi Rizzo  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26f9790aebSLuigi Rizzo  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27f9790aebSLuigi Rizzo  * SUCH DAMAGE.
28f9790aebSLuigi Rizzo  */
29f9790aebSLuigi Rizzo 
30f9790aebSLuigi Rizzo /*
31f9790aebSLuigi Rizzo  * This module implements netmap support on top of standard,
32f9790aebSLuigi Rizzo  * unmodified device drivers.
33f9790aebSLuigi Rizzo  *
34f9790aebSLuigi Rizzo  * A NIOCREGIF request is handled here if the device does not
35f9790aebSLuigi Rizzo  * have native support. TX and RX rings are emulated as follows:
36f9790aebSLuigi Rizzo  *
37f9790aebSLuigi Rizzo  * NIOCREGIF
38f9790aebSLuigi Rizzo  *	We maintain a per-ring pool of TX mbufs (roughly as many as
39f9790aebSLuigi Rizzo  *	tx descriptors; the number is not critical) to speed up
40f9790aebSLuigi Rizzo  *	operation during transmissions. The refcount on most of
41f9790aebSLuigi Rizzo  *	these buffers is artificially bumped up so we can recycle
42f9790aebSLuigi Rizzo  *	them more easily. Also, the destructor is intercepted
43f9790aebSLuigi Rizzo  *	so we use it as an interrupt notification to wake up
44f9790aebSLuigi Rizzo  *	processes blocked on a poll().
45f9790aebSLuigi Rizzo  *
46f9790aebSLuigi Rizzo  *	For each receive ring we allocate one "struct mbq"
47f9790aebSLuigi Rizzo  *	(an mbuf tailq plus a spinlock). We intercept packets
48f9790aebSLuigi Rizzo  *	(through if_input)
49f9790aebSLuigi Rizzo  *	on the receive path and put them in the mbq from which
50f9790aebSLuigi Rizzo  *	netmap receive routines can grab them.
51f9790aebSLuigi Rizzo  *
52f9790aebSLuigi Rizzo  * TX:
53f9790aebSLuigi Rizzo  *	in the generic_txsync() routine, netmap buffers are copied
54f9790aebSLuigi Rizzo  *	(or linked, in the future) to the preallocated mbufs
55f9790aebSLuigi Rizzo  *	and pushed to the transmit queue. Some of these mbufs
56f9790aebSLuigi Rizzo  *	(those with NS_REPORT, or otherwise every half ring)
57f9790aebSLuigi Rizzo  *	have the refcount=1, others have refcount=2.
58f9790aebSLuigi Rizzo  *	When the destructor is invoked, we take that as
59f9790aebSLuigi Rizzo  *	a notification that all mbufs up to that one in
60f9790aebSLuigi Rizzo  *	the specific ring have been completed, and generate
61f9790aebSLuigi Rizzo  *	the equivalent of a transmit interrupt.
62f9790aebSLuigi Rizzo  *
63f9790aebSLuigi Rizzo  * RX:
64f9790aebSLuigi Rizzo  *	mbufs stolen on the receive path are queued on the per-ring
 *	mbq; generic_netmap_rxsync() then copies their payload into
 *	the netmap receive ring and frees them.
65f9790aebSLuigi Rizzo  */
66f9790aebSLuigi Rizzo 
67f9790aebSLuigi Rizzo #ifdef __FreeBSD__
68f9790aebSLuigi Rizzo 
69f9790aebSLuigi Rizzo #include <sys/cdefs.h> /* prerequisite */
70f9790aebSLuigi Rizzo __FBSDID("$FreeBSD$");
71f9790aebSLuigi Rizzo 
72f9790aebSLuigi Rizzo #include <sys/types.h>
73f9790aebSLuigi Rizzo #include <sys/errno.h>
74f9790aebSLuigi Rizzo #include <sys/malloc.h>
75f9790aebSLuigi Rizzo #include <sys/lock.h>   /* PROT_EXEC */
76f9790aebSLuigi Rizzo #include <sys/rwlock.h>
77f9790aebSLuigi Rizzo #include <sys/socket.h> /* sockaddrs */
78f9790aebSLuigi Rizzo #include <sys/selinfo.h>
79f9790aebSLuigi Rizzo #include <net/if.h>
80a02dbe4cSLuiz Otavio O Souza #include <net/if_types.h>
81f9790aebSLuigi Rizzo #include <net/if_var.h>
82f9790aebSLuigi Rizzo #include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */
83f9790aebSLuigi Rizzo 
84f9790aebSLuigi Rizzo #include <net/netmap.h>
85f9790aebSLuigi Rizzo #include <dev/netmap/netmap_kern.h>
86f9790aebSLuigi Rizzo #include <dev/netmap/netmap_mem2.h>
87f9790aebSLuigi Rizzo 
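/* On FreeBSD the RX queue index of an mbuf is carried in the flowid field. */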
88f0ea3689SLuigi Rizzo #define MBUF_RXQ(m)	((m)->m_pkthdr.flowid)
89f9790aebSLuigi Rizzo #define smp_mb()
90f9790aebSLuigi Rizzo 
9137e3a6d3SLuigi Rizzo #elif defined _WIN32
9237e3a6d3SLuigi Rizzo 
9337e3a6d3SLuigi Rizzo #include "win_glue.h"
9437e3a6d3SLuigi Rizzo 
9537e3a6d3SLuigi Rizzo #define MBUF_TXQ(m) 	0//((m)->m_pkthdr.flowid)
9637e3a6d3SLuigi Rizzo #define MBUF_RXQ(m)	    0//((m)->m_pkthdr.flowid)
9737e3a6d3SLuigi Rizzo #define smp_mb()		//XXX: to be correctly defined
98f9790aebSLuigi Rizzo 
99f9790aebSLuigi Rizzo #else /* linux */
100f9790aebSLuigi Rizzo 
101f9790aebSLuigi Rizzo #include "bsd_glue.h"
102f9790aebSLuigi Rizzo 
103f9790aebSLuigi Rizzo #include <linux/ethtool.h>      /* struct ethtool_ops, get_ringparam */
104f9790aebSLuigi Rizzo #include <linux/hrtimer.h>
105f9790aebSLuigi Rizzo 
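
/* Allocate an mbuf (skb) for transmission on 'ifp', leaving room for
 * the link-layer header and for any tailroom the device requires. */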
10637e3a6d3SLuigi Rizzo static inline struct mbuf *
10737e3a6d3SLuigi Rizzo nm_os_get_mbuf(struct ifnet *ifp, int len)
10837e3a6d3SLuigi Rizzo {
109a6d768d8SVincenzo Maffione 	return alloc_skb(LL_RESERVED_SPACE(ifp) + len +
11037e3a6d3SLuigi Rizzo 			 ifp->needed_tailroom, GFP_ATOMIC);
11137e3a6d3SLuigi Rizzo }
112f9790aebSLuigi Rizzo 
113f9790aebSLuigi Rizzo #endif /* linux */
114f9790aebSLuigi Rizzo 
115f9790aebSLuigi Rizzo 
116f9790aebSLuigi Rizzo /* Common headers. */
117f9790aebSLuigi Rizzo #include <net/netmap.h>
118f9790aebSLuigi Rizzo #include <dev/netmap/netmap_kern.h>
119f9790aebSLuigi Rizzo #include <dev/netmap/netmap_mem2.h>
120f9790aebSLuigi Rizzo 
121f9790aebSLuigi Rizzo 
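/* Helpers to iterate over the krings of an adapter; the _h variants
 * also include the extra host ring. */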
12237e3a6d3SLuigi Rizzo #define for_each_kring_n(_i, _k, _karr, _n) \
1232ff91c17SVincenzo Maffione 	for ((_k)=*(_karr), (_i) = 0; (_i) < (_n); (_i)++, (_k) = (_karr)[(_i)])
124f9790aebSLuigi Rizzo 
12537e3a6d3SLuigi Rizzo #define for_each_tx_kring(_i, _k, _na) \
12637e3a6d3SLuigi Rizzo 		for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings)
12737e3a6d3SLuigi Rizzo #define for_each_tx_kring_h(_i, _k, _na) \
12837e3a6d3SLuigi Rizzo 		for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1)
12937e3a6d3SLuigi Rizzo 
13037e3a6d3SLuigi Rizzo #define for_each_rx_kring(_i, _k, _na) \
13137e3a6d3SLuigi Rizzo 		for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings)
13237e3a6d3SLuigi Rizzo #define for_each_rx_kring_h(_i, _k, _na) \
13337e3a6d3SLuigi Rizzo 		for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1)
13437e3a6d3SLuigi Rizzo 
13537e3a6d3SLuigi Rizzo 
13637e3a6d3SLuigi Rizzo /* ======================== PERFORMANCE STATISTICS =========================== */
137f9790aebSLuigi Rizzo 
1384bf50f18SLuigi Rizzo #ifdef RATE_GENERIC
139f9790aebSLuigi Rizzo #define IFRATE(x) x
140f9790aebSLuigi Rizzo struct rate_stats {
141f9790aebSLuigi Rizzo 	unsigned long txpkt;
142f9790aebSLuigi Rizzo 	unsigned long txsync;
143f9790aebSLuigi Rizzo 	unsigned long txirq;
14437e3a6d3SLuigi Rizzo 	unsigned long txrepl;
14537e3a6d3SLuigi Rizzo 	unsigned long txdrop;
146f9790aebSLuigi Rizzo 	unsigned long rxpkt;
147f9790aebSLuigi Rizzo 	unsigned long rxirq;
148f9790aebSLuigi Rizzo 	unsigned long rxsync;
149f9790aebSLuigi Rizzo };
150f9790aebSLuigi Rizzo 
151f9790aebSLuigi Rizzo struct rate_context {
152f9790aebSLuigi Rizzo 	unsigned refcount;
153f9790aebSLuigi Rizzo 	struct timer_list timer;
154f9790aebSLuigi Rizzo 	struct rate_stats new;
155f9790aebSLuigi Rizzo 	struct rate_stats old;
156f9790aebSLuigi Rizzo };
157f9790aebSLuigi Rizzo 
158f9790aebSLuigi Rizzo #define RATE_PRINTK(_NAME_) \
159f9790aebSLuigi Rizzo 	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
160f9790aebSLuigi Rizzo #define RATE_PERIOD  2
161f9790aebSLuigi Rizzo static void rate_callback(unsigned long arg)
162f9790aebSLuigi Rizzo {
163f9790aebSLuigi Rizzo 	struct rate_context * ctx = (struct rate_context *)arg;
164f9790aebSLuigi Rizzo 	struct rate_stats cur = ctx->new;
165f9790aebSLuigi Rizzo 	int r;
166f9790aebSLuigi Rizzo 
167f9790aebSLuigi Rizzo 	RATE_PRINTK(txpkt);
168f9790aebSLuigi Rizzo 	RATE_PRINTK(txsync);
169f9790aebSLuigi Rizzo 	RATE_PRINTK(txirq);
17037e3a6d3SLuigi Rizzo 	RATE_PRINTK(txrepl);
17137e3a6d3SLuigi Rizzo 	RATE_PRINTK(txdrop);
172f9790aebSLuigi Rizzo 	RATE_PRINTK(rxpkt);
173f9790aebSLuigi Rizzo 	RATE_PRINTK(rxsync);
174f9790aebSLuigi Rizzo 	RATE_PRINTK(rxirq);
175f9790aebSLuigi Rizzo 	printk("\n");
176f9790aebSLuigi Rizzo 
177f9790aebSLuigi Rizzo 	ctx->old = cur;
178f9790aebSLuigi Rizzo 	r = mod_timer(&ctx->timer, jiffies +
179f9790aebSLuigi Rizzo 			msecs_to_jiffies(RATE_PERIOD * 1000));
180f9790aebSLuigi Rizzo 	if (unlikely(r))
181b6e66be2SVincenzo Maffione 		nm_prerr("mod_timer() failed");
182f9790aebSLuigi Rizzo }
183f9790aebSLuigi Rizzo 
184f9790aebSLuigi Rizzo static struct rate_context rate_ctx;
185f9790aebSLuigi Rizzo 
1864bf50f18SLuigi Rizzo void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi)
1874bf50f18SLuigi Rizzo {
1884bf50f18SLuigi Rizzo 	if (txp) rate_ctx.new.txpkt++;
1894bf50f18SLuigi Rizzo 	if (txs) rate_ctx.new.txsync++;
1904bf50f18SLuigi Rizzo 	if (txi) rate_ctx.new.txirq++;
1914bf50f18SLuigi Rizzo 	if (rxp) rate_ctx.new.rxpkt++;
1924bf50f18SLuigi Rizzo 	if (rxs) rate_ctx.new.rxsync++;
1934bf50f18SLuigi Rizzo 	if (rxi) rate_ctx.new.rxirq++;
1944bf50f18SLuigi Rizzo }
1954bf50f18SLuigi Rizzo 
196f9790aebSLuigi Rizzo #else /* !RATE */
197f9790aebSLuigi Rizzo #define IFRATE(x)
198f9790aebSLuigi Rizzo #endif /* !RATE */
199f9790aebSLuigi Rizzo 
200f9790aebSLuigi Rizzo 
201c3e9b4dbSLuiz Otavio O Souza /* ========== GENERIC (EMULATED) NETMAP ADAPTER SUPPORT ============= */
202f9790aebSLuigi Rizzo 
203f9790aebSLuigi Rizzo /*
204f9790aebSLuigi Rizzo  * Wrapper used by the generic adapter layer to notify
205f9790aebSLuigi Rizzo  * the poller threads. Unlike netmap_rx_irq(), we check
2064bf50f18SLuigi Rizzo  * only NAF_NETMAP_ON, instead of NAF_NATIVE_ON, before delivering the irq.
207f9790aebSLuigi Rizzo  */
20837e3a6d3SLuigi Rizzo void
20937e3a6d3SLuigi Rizzo netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
210f9790aebSLuigi Rizzo {
2114bf50f18SLuigi Rizzo 	if (unlikely(!nm_netmap_on(na)))
212f9790aebSLuigi Rizzo 		return;
213f9790aebSLuigi Rizzo 
21437e3a6d3SLuigi Rizzo 	netmap_common_irq(na, q, work_done);
21537e3a6d3SLuigi Rizzo #ifdef RATE_GENERIC
21637e3a6d3SLuigi Rizzo 	if (work_done)
21737e3a6d3SLuigi Rizzo 		rate_ctx.new.rxirq++;
21837e3a6d3SLuigi Rizzo 	else
21937e3a6d3SLuigi Rizzo 		rate_ctx.new.txirq++;
22037e3a6d3SLuigi Rizzo #endif  /* RATE_GENERIC */
221f9790aebSLuigi Rizzo }
222f9790aebSLuigi Rizzo 
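/*
 * Undo the work done by generic_netmap_register(): when the last user
 * closes the port, stop intercepting RX traffic and release control of
 * packet steering, then switch the krings off and free the RX mbuf
 * queues, the TX pools and the mitigation state.
 */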
22337e3a6d3SLuigi Rizzo static int
22437e3a6d3SLuigi Rizzo generic_netmap_unregister(struct netmap_adapter *na)
22537e3a6d3SLuigi Rizzo {
22637e3a6d3SLuigi Rizzo 	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
22737e3a6d3SLuigi Rizzo 	struct netmap_kring *kring = NULL;
22837e3a6d3SLuigi Rizzo 	int i, r;
22937e3a6d3SLuigi Rizzo 
23037e3a6d3SLuigi Rizzo 	if (na->active_fds == 0) {
23137e3a6d3SLuigi Rizzo 		na->na_flags &= ~NAF_NETMAP_ON;
23237e3a6d3SLuigi Rizzo 
23337e3a6d3SLuigi Rizzo 		/* Stop intercepting packets on the RX path. */
23437e3a6d3SLuigi Rizzo 		nm_os_catch_rx(gna, 0);
23537e3a6d3SLuigi Rizzo 
2364f80b14cSVincenzo Maffione 		/* Release packet steering control. */
2374f80b14cSVincenzo Maffione 		nm_os_catch_tx(gna, 0);
23837e3a6d3SLuigi Rizzo 	}
23937e3a6d3SLuigi Rizzo 
24075f4f3edSVincenzo Maffione 	netmap_krings_mode_commit(na, /*onoff=*/0);
24137e3a6d3SLuigi Rizzo 
24237e3a6d3SLuigi Rizzo 	for_each_rx_kring(r, kring, na) {
24337e3a6d3SLuigi Rizzo 		/* Free the mbufs still pending in the RX queues
24437e3a6d3SLuigi Rizzo 		 * that did not end up in the corresponding netmap
24537e3a6d3SLuigi Rizzo 		 * RX rings. */
24637e3a6d3SLuigi Rizzo 		mbq_safe_purge(&kring->rx_queue);
24737e3a6d3SLuigi Rizzo 		nm_os_mitigation_cleanup(&gna->mit[r]);
24837e3a6d3SLuigi Rizzo 	}
24937e3a6d3SLuigi Rizzo 
25037e3a6d3SLuigi Rizzo 	/* Decrement the reference counter for the mbufs in the
25137e3a6d3SLuigi Rizzo 	 * TX pools. These mbufs may still be pending in the drivers
25237e3a6d3SLuigi Rizzo 	 * (e.g. this happens with the virtio-net driver, which
25337e3a6d3SLuigi Rizzo 	 * does lazy reclamation of transmitted mbufs). */
25437e3a6d3SLuigi Rizzo 	for_each_tx_kring(r, kring, na) {
25537e3a6d3SLuigi Rizzo 		/* We must remove the destructor on the TX event,
25637e3a6d3SLuigi Rizzo 		 * because the destructor invokes netmap code, and
25737e3a6d3SLuigi Rizzo 		 * the netmap module may disappear before the
25837e3a6d3SLuigi Rizzo 		 * TX event is consumed. */
25937e3a6d3SLuigi Rizzo 		mtx_lock_spin(&kring->tx_event_lock);
26037e3a6d3SLuigi Rizzo 		if (kring->tx_event) {
261c3e9b4dbSLuiz Otavio O Souza 			SET_MBUF_DESTRUCTOR(kring->tx_event, NULL);
26237e3a6d3SLuigi Rizzo 		}
26337e3a6d3SLuigi Rizzo 		kring->tx_event = NULL;
26437e3a6d3SLuigi Rizzo 		mtx_unlock_spin(&kring->tx_event_lock);
26537e3a6d3SLuigi Rizzo 	}
26637e3a6d3SLuigi Rizzo 
26737e3a6d3SLuigi Rizzo 	if (na->active_fds == 0) {
268c3e9b4dbSLuiz Otavio O Souza 		nm_os_free(gna->mit);
26937e3a6d3SLuigi Rizzo 
27037e3a6d3SLuigi Rizzo 		for_each_rx_kring(r, kring, na) {
27137e3a6d3SLuigi Rizzo 			mbq_safe_fini(&kring->rx_queue);
27237e3a6d3SLuigi Rizzo 		}
27337e3a6d3SLuigi Rizzo 
27437e3a6d3SLuigi Rizzo 		for_each_tx_kring(r, kring, na) {
275*ce12afaaSMark Johnston 			callout_drain(&kring->tx_event_callout);
27637e3a6d3SLuigi Rizzo 			mtx_destroy(&kring->tx_event_lock);
27737e3a6d3SLuigi Rizzo 			if (kring->tx_pool == NULL) {
27837e3a6d3SLuigi Rizzo 				continue;
27937e3a6d3SLuigi Rizzo 			}
28037e3a6d3SLuigi Rizzo 
28137e3a6d3SLuigi Rizzo 			for (i=0; i<na->num_tx_desc; i++) {
28237e3a6d3SLuigi Rizzo 				if (kring->tx_pool[i]) {
28337e3a6d3SLuigi Rizzo 					m_freem(kring->tx_pool[i]);
28437e3a6d3SLuigi Rizzo 				}
28537e3a6d3SLuigi Rizzo 			}
286c3e9b4dbSLuiz Otavio O Souza 			nm_os_free(kring->tx_pool);
28737e3a6d3SLuigi Rizzo 			kring->tx_pool = NULL;
28837e3a6d3SLuigi Rizzo 		}
28937e3a6d3SLuigi Rizzo 
29037e3a6d3SLuigi Rizzo #ifdef RATE_GENERIC
29137e3a6d3SLuigi Rizzo 		if (--rate_ctx.refcount == 0) {
292b6e66be2SVincenzo Maffione 			nm_prinf("del_timer()");
29337e3a6d3SLuigi Rizzo 			del_timer(&rate_ctx.timer);
29437e3a6d3SLuigi Rizzo 		}
29537e3a6d3SLuigi Rizzo #endif
296b6e66be2SVincenzo Maffione 		nm_prinf("Emulated adapter for %s deactivated", na->name);
29737e3a6d3SLuigi Rizzo 	}
29837e3a6d3SLuigi Rizzo 
29937e3a6d3SLuigi Rizzo 	return 0;
30037e3a6d3SLuigi Rizzo }
301f9790aebSLuigi Rizzo 
302f9790aebSLuigi Rizzo /* Enable/disable netmap mode for a generic network interface. */
30317885a7bSLuigi Rizzo static int
30417885a7bSLuigi Rizzo generic_netmap_register(struct netmap_adapter *na, int enable)
305f9790aebSLuigi Rizzo {
306f9790aebSLuigi Rizzo 	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
30737e3a6d3SLuigi Rizzo 	struct netmap_kring *kring = NULL;
308f9790aebSLuigi Rizzo 	int error;
309f9790aebSLuigi Rizzo 	int i, r;
310f9790aebSLuigi Rizzo 
31137e3a6d3SLuigi Rizzo 	if (!na) {
312f9790aebSLuigi Rizzo 		return EINVAL;
313f9790aebSLuigi Rizzo 	}
314f9790aebSLuigi Rizzo 
31537e3a6d3SLuigi Rizzo 	if (!enable) {
31637e3a6d3SLuigi Rizzo 		/* This is actually an unregif. */
31737e3a6d3SLuigi Rizzo 		return generic_netmap_unregister(na);
31837e3a6d3SLuigi Rizzo 	}
31937e3a6d3SLuigi Rizzo 
32037e3a6d3SLuigi Rizzo 	if (na->active_fds == 0) {
321b6e66be2SVincenzo Maffione 		nm_prinf("Emulated adapter for %s activated", na->name);
32237e3a6d3SLuigi Rizzo 		/* Do all memory allocations when (na->active_fds == 0), to
32337e3a6d3SLuigi Rizzo 		 * simplify error management. */
32437e3a6d3SLuigi Rizzo 
32537e3a6d3SLuigi Rizzo 		/* Allocate memory for mitigation support on all the rx queues. */
326c3e9b4dbSLuiz Otavio O Souza 		gna->mit = nm_os_malloc(na->num_rx_rings * sizeof(struct nm_generic_mit));
327f0ea3689SLuigi Rizzo 		if (!gna->mit) {
328b6e66be2SVincenzo Maffione 			nm_prerr("mitigation allocation failed");
329f0ea3689SLuigi Rizzo 			error = ENOMEM;
330f0ea3689SLuigi Rizzo 			goto out;
331f0ea3689SLuigi Rizzo 		}
33237e3a6d3SLuigi Rizzo 
33337e3a6d3SLuigi Rizzo 		for_each_rx_kring(r, kring, na) {
33437e3a6d3SLuigi Rizzo 			/* Init mitigation support. */
33537e3a6d3SLuigi Rizzo 			nm_os_mitigation_init(&gna->mit[r], r, na);
336f0ea3689SLuigi Rizzo 
337f9790aebSLuigi Rizzo 			/* Initialize the rx queue, as generic_rx_handler() can
33837e3a6d3SLuigi Rizzo 			 * be called as soon as nm_os_catch_rx() returns.
339f9790aebSLuigi Rizzo 			 */
34037e3a6d3SLuigi Rizzo 			mbq_safe_init(&kring->rx_queue);
341f9790aebSLuigi Rizzo 		}
342f9790aebSLuigi Rizzo 
343f9790aebSLuigi Rizzo 		/*
34437e3a6d3SLuigi Rizzo 		 * Prepare mbuf pools (parallel to the tx rings) for packet
34537e3a6d3SLuigi Rizzo 		 * transmission. Don't preallocate the mbufs here; it's simpler
34637e3a6d3SLuigi Rizzo 		 * to leave this task to txsync.
347f9790aebSLuigi Rizzo 		 */
34837e3a6d3SLuigi Rizzo 		for_each_tx_kring(r, kring, na) {
34937e3a6d3SLuigi Rizzo 			kring->tx_pool = NULL;
35037e3a6d3SLuigi Rizzo 		}
35137e3a6d3SLuigi Rizzo 		for_each_tx_kring(r, kring, na) {
35237e3a6d3SLuigi Rizzo 			kring->tx_pool =
353c3e9b4dbSLuiz Otavio O Souza 				nm_os_malloc(na->num_tx_desc * sizeof(struct mbuf *));
35437e3a6d3SLuigi Rizzo 			if (!kring->tx_pool) {
355b6e66be2SVincenzo Maffione 				nm_prerr("tx_pool allocation failed");
356f9790aebSLuigi Rizzo 				error = ENOMEM;
35717885a7bSLuigi Rizzo 				goto free_tx_pools;
358f9790aebSLuigi Rizzo 			}
35937e3a6d3SLuigi Rizzo 			mtx_init(&kring->tx_event_lock, "tx_event_lock",
36037e3a6d3SLuigi Rizzo 				 NULL, MTX_SPIN);
361*ce12afaaSMark Johnston 			callout_init_mtx(&kring->tx_event_callout,
362*ce12afaaSMark Johnston 					 &kring->tx_event_lock,
363*ce12afaaSMark Johnston 					 CALLOUT_RETURNUNLOCKED);
36437e3a6d3SLuigi Rizzo 		}
36537e3a6d3SLuigi Rizzo 	}
36637e3a6d3SLuigi Rizzo 
36775f4f3edSVincenzo Maffione 	netmap_krings_mode_commit(na, /*onoff=*/1);
36837e3a6d3SLuigi Rizzo 
36937e3a6d3SLuigi Rizzo 	for_each_tx_kring(r, kring, na) {
37037e3a6d3SLuigi Rizzo 		/* Initialize tx_pool and tx_event. */
371f9790aebSLuigi Rizzo 		for (i=0; i<na->num_tx_desc; i++) {
37237e3a6d3SLuigi Rizzo 			kring->tx_pool[i] = NULL;
373f9790aebSLuigi Rizzo 		}
37437e3a6d3SLuigi Rizzo 
37537e3a6d3SLuigi Rizzo 		kring->tx_event = NULL;
376f9790aebSLuigi Rizzo 	}
37737e3a6d3SLuigi Rizzo 
37837e3a6d3SLuigi Rizzo 	if (na->active_fds == 0) {
379f9790aebSLuigi Rizzo 		/* Prepare to intercept incoming traffic. */
38037e3a6d3SLuigi Rizzo 		error = nm_os_catch_rx(gna, 1);
381f9790aebSLuigi Rizzo 		if (error) {
382b6e66be2SVincenzo Maffione 			nm_prerr("nm_os_catch_rx(1) failed (%d)", error);
3834f80b14cSVincenzo Maffione 			goto free_tx_pools;
384f9790aebSLuigi Rizzo 		}
385f9790aebSLuigi Rizzo 
3864f80b14cSVincenzo Maffione 		/* Let netmap control the packet steering. */
38737e3a6d3SLuigi Rizzo 		error = nm_os_catch_tx(gna, 1);
38837e3a6d3SLuigi Rizzo 		if (error) {
389b6e66be2SVincenzo Maffione 			nm_prerr("nm_os_catch_tx(1) failed (%d)", error);
39037e3a6d3SLuigi Rizzo 			goto catch_rx;
39137e3a6d3SLuigi Rizzo 		}
392f9790aebSLuigi Rizzo 
39337e3a6d3SLuigi Rizzo 		na->na_flags |= NAF_NETMAP_ON;
39437e3a6d3SLuigi Rizzo 
3954bf50f18SLuigi Rizzo #ifdef RATE_GENERIC
396f9790aebSLuigi Rizzo 		if (rate_ctx.refcount == 0) {
397b6e66be2SVincenzo Maffione 			nm_prinf("setup_timer()");
398f9790aebSLuigi Rizzo 			memset(&rate_ctx, 0, sizeof(rate_ctx));
399f9790aebSLuigi Rizzo 			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
400f9790aebSLuigi Rizzo 			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
401b6e66be2SVincenzo Maffione 				nm_prerr("Error: mod_timer()");
402f9790aebSLuigi Rizzo 			}
403f9790aebSLuigi Rizzo 		}
404f9790aebSLuigi Rizzo 		rate_ctx.refcount++;
405f9790aebSLuigi Rizzo #endif /* RATE */
406f9790aebSLuigi Rizzo 	}
407f9790aebSLuigi Rizzo 
408f9790aebSLuigi Rizzo 	return 0;
409f9790aebSLuigi Rizzo 
41037e3a6d3SLuigi Rizzo 	/* Here (na->active_fds == 0) holds. */
41137e3a6d3SLuigi Rizzo catch_rx:
41237e3a6d3SLuigi Rizzo 	nm_os_catch_rx(gna, 0);
41317885a7bSLuigi Rizzo free_tx_pools:
41437e3a6d3SLuigi Rizzo 	for_each_tx_kring(r, kring, na) {
41537e3a6d3SLuigi Rizzo 		mtx_destroy(&kring->tx_event_lock);
41637e3a6d3SLuigi Rizzo 		if (kring->tx_pool == NULL) {
41717885a7bSLuigi Rizzo 			continue;
418f2637526SLuigi Rizzo 		}
419c3e9b4dbSLuiz Otavio O Souza 		nm_os_free(kring->tx_pool);
42037e3a6d3SLuigi Rizzo 		kring->tx_pool = NULL;
42137e3a6d3SLuigi Rizzo 	}
42237e3a6d3SLuigi Rizzo 	for_each_rx_kring(r, kring, na) {
42337e3a6d3SLuigi Rizzo 		mbq_safe_fini(&kring->rx_queue);
424f9790aebSLuigi Rizzo 	}
425c3e9b4dbSLuiz Otavio O Souza 	nm_os_free(gna->mit);
426f0ea3689SLuigi Rizzo out:
427f9790aebSLuigi Rizzo 
428f9790aebSLuigi Rizzo 	return error;
429f9790aebSLuigi Rizzo }
430f9790aebSLuigi Rizzo 
431f9790aebSLuigi Rizzo /*
432f9790aebSLuigi Rizzo  * Callback invoked when the device driver frees an mbuf used
433f9790aebSLuigi Rizzo  * by netmap to transmit a packet. This usually happens when
434f9790aebSLuigi Rizzo  * the NIC notifies the driver that transmission is completed.
435f9790aebSLuigi Rizzo  */
436f9790aebSLuigi Rizzo static void
437*ce12afaaSMark Johnston generic_mbuf_dtor(struct mbuf *m)
438f9790aebSLuigi Rizzo {
43937e3a6d3SLuigi Rizzo 	struct netmap_adapter *na = NA(GEN_TX_MBUF_IFP(m));
44037e3a6d3SLuigi Rizzo 	struct netmap_kring *kring;
44137e3a6d3SLuigi Rizzo 	unsigned int r = MBUF_TXQ(m);
44237e3a6d3SLuigi Rizzo 	unsigned int r_orig = r;
44337e3a6d3SLuigi Rizzo 
44437e3a6d3SLuigi Rizzo 	if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) {
445b6e66be2SVincenzo Maffione 		nm_prerr("Error: no netmap adapter on device %p",
44637e3a6d3SLuigi Rizzo 		  GEN_TX_MBUF_IFP(m));
44737e3a6d3SLuigi Rizzo 		return;
44837e3a6d3SLuigi Rizzo 	}
44937e3a6d3SLuigi Rizzo 
45037e3a6d3SLuigi Rizzo 	/*
45137e3a6d3SLuigi Rizzo 	 * First, clear the event mbuf.
45237e3a6d3SLuigi Rizzo 	 * In principle, the event 'm' should match the one stored
45345c67e8fSVincenzo Maffione 	 * on ring 'r'. However, we check it explicitly to stay
45437e3a6d3SLuigi Rizzo 	 * safe against lower layers (qdisc, driver, etc.) changing
45537e3a6d3SLuigi Rizzo 	 * MBUF_TXQ(m) under our feet. If the match is not found
45637e3a6d3SLuigi Rizzo 	 * on 'r', we try to see if it belongs to some other ring.
45737e3a6d3SLuigi Rizzo 	 */
45837e3a6d3SLuigi Rizzo 	for (;;) {
45937e3a6d3SLuigi Rizzo 		bool match = false;
46037e3a6d3SLuigi Rizzo 
4612ff91c17SVincenzo Maffione 		kring = na->tx_rings[r];
46237e3a6d3SLuigi Rizzo 		mtx_lock_spin(&kring->tx_event_lock);
46337e3a6d3SLuigi Rizzo 		if (kring->tx_event == m) {
46437e3a6d3SLuigi Rizzo 			kring->tx_event = NULL;
46537e3a6d3SLuigi Rizzo 			match = true;
46637e3a6d3SLuigi Rizzo 		}
46737e3a6d3SLuigi Rizzo 		mtx_unlock_spin(&kring->tx_event_lock);
46837e3a6d3SLuigi Rizzo 
46937e3a6d3SLuigi Rizzo 		if (match) {
47037e3a6d3SLuigi Rizzo 			if (r != r_orig) {
471b6e66be2SVincenzo Maffione 				nm_prlim(1, "event %p migrated: ring %u --> %u",
47237e3a6d3SLuigi Rizzo 				      m, r_orig, r);
47337e3a6d3SLuigi Rizzo 			}
47437e3a6d3SLuigi Rizzo 			break;
47537e3a6d3SLuigi Rizzo 		}
47637e3a6d3SLuigi Rizzo 
47737e3a6d3SLuigi Rizzo 		if (++r == na->num_tx_rings) r = 0;
47837e3a6d3SLuigi Rizzo 
47937e3a6d3SLuigi Rizzo 		if (r == r_orig) {
480*ce12afaaSMark Johnston #ifndef __FreeBSD__
481*ce12afaaSMark Johnston 			/*
482*ce12afaaSMark Johnston 			 * On FreeBSD this situation can arise if the tx_event
483*ce12afaaSMark Johnston 			 * callout handler cleared a stuck packet.
484*ce12afaaSMark Johnston 			 */
485b6e66be2SVincenzo Maffione 			nm_prlim(1, "Cannot match event %p", m);
486*ce12afaaSMark Johnston #endif
487*ce12afaaSMark Johnston 			nm_generic_mbuf_dtor(m);
48837e3a6d3SLuigi Rizzo 			return;
48937e3a6d3SLuigi Rizzo 		}
49037e3a6d3SLuigi Rizzo 	}
49137e3a6d3SLuigi Rizzo 
49237e3a6d3SLuigi Rizzo 	/* Second, wake up clients. They will reclaim the event through
49337e3a6d3SLuigi Rizzo 	 * txsync. */
49437e3a6d3SLuigi Rizzo 	netmap_generic_irq(na, r, NULL);
495*ce12afaaSMark Johnston 	nm_generic_mbuf_dtor(m);
496f9790aebSLuigi Rizzo }
497f9790aebSLuigi Rizzo 
49817885a7bSLuigi Rizzo /* Record completed transmissions and update hwtail.
499f9790aebSLuigi Rizzo  *
50017885a7bSLuigi Rizzo  * The oldest tx buffer not yet completed is at nr_hwtail + 1;
501f9790aebSLuigi Rizzo  * nr_hwcur is the first unsent buffer.
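 *
 * In txqdisc mode a slot is considered complete once its mbuf has been
 * dequeued by the qdisc (replenishing it if the driver still holds a
 * reference); otherwise completion is detected through the mbuf
 * refcount and the tx_event consumed by generic_mbuf_dtor().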
502f9790aebSLuigi Rizzo  */
50317885a7bSLuigi Rizzo static u_int
50437e3a6d3SLuigi Rizzo generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc)
505f9790aebSLuigi Rizzo {
50617885a7bSLuigi Rizzo 	u_int const lim = kring->nkr_num_slots - 1;
50717885a7bSLuigi Rizzo 	u_int nm_i = nm_next(kring->nr_hwtail, lim);
508f9790aebSLuigi Rizzo 	u_int hwcur = kring->nr_hwcur;
509f9790aebSLuigi Rizzo 	u_int n = 0;
510f9790aebSLuigi Rizzo 	struct mbuf **tx_pool = kring->tx_pool;
511f9790aebSLuigi Rizzo 
512b6e66be2SVincenzo Maffione 	nm_prdis("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail);
51337e3a6d3SLuigi Rizzo 
51417885a7bSLuigi Rizzo 	while (nm_i != hwcur) { /* buffers not completed */
51517885a7bSLuigi Rizzo 		struct mbuf *m = tx_pool[nm_i];
516f9790aebSLuigi Rizzo 
51737e3a6d3SLuigi Rizzo 		if (txqdisc) {
51837e3a6d3SLuigi Rizzo 			if (m == NULL) {
51937e3a6d3SLuigi Rizzo 				/* Nothing to do; this slot is going
52037e3a6d3SLuigi Rizzo 				 * to be replenished. */
521b6e66be2SVincenzo Maffione 				nm_prlim(3, "Is this happening?");
52237e3a6d3SLuigi Rizzo 
52337e3a6d3SLuigi Rizzo 			} else if (MBUF_QUEUED(m)) {
52437e3a6d3SLuigi Rizzo 				break; /* Not dequeued yet. */
52537e3a6d3SLuigi Rizzo 
52637e3a6d3SLuigi Rizzo 			} else if (MBUF_REFCNT(m) != 1) {
52737e3a6d3SLuigi Rizzo 				/* This mbuf has been dequeued but is still busy
52837e3a6d3SLuigi Rizzo 				 * (refcount is 2).
52937e3a6d3SLuigi Rizzo 				 * Leave it to the driver and replenish. */
53037e3a6d3SLuigi Rizzo 				m_freem(m);
53137e3a6d3SLuigi Rizzo 				tx_pool[nm_i] = NULL;
532f9790aebSLuigi Rizzo 			}
53337e3a6d3SLuigi Rizzo 
53437e3a6d3SLuigi Rizzo 		} else {
53537e3a6d3SLuigi Rizzo 			if (unlikely(m == NULL)) {
53637e3a6d3SLuigi Rizzo 				int event_consumed;
53737e3a6d3SLuigi Rizzo 
53837e3a6d3SLuigi Rizzo 				/* This slot was used to place an event. */
53937e3a6d3SLuigi Rizzo 				mtx_lock_spin(&kring->tx_event_lock);
54037e3a6d3SLuigi Rizzo 				event_consumed = (kring->tx_event == NULL);
54137e3a6d3SLuigi Rizzo 				mtx_unlock_spin(&kring->tx_event_lock);
54237e3a6d3SLuigi Rizzo 				if (!event_consumed) {
54337e3a6d3SLuigi Rizzo 					/* The event has not been consumed yet,
54437e3a6d3SLuigi Rizzo 					 * still busy in the driver. */
54537e3a6d3SLuigi Rizzo 					break;
546f9790aebSLuigi Rizzo 				}
54737e3a6d3SLuigi Rizzo 				/* The event has been consumed, we can go
54837e3a6d3SLuigi Rizzo 				 * ahead. */
54937e3a6d3SLuigi Rizzo 			} else if (MBUF_REFCNT(m) != 1) {
55037e3a6d3SLuigi Rizzo 				/* This mbuf is still busy: its refcnt is 2. */
55137e3a6d3SLuigi Rizzo 				break;
55237e3a6d3SLuigi Rizzo 			}
55337e3a6d3SLuigi Rizzo 		}
55437e3a6d3SLuigi Rizzo 
555f9790aebSLuigi Rizzo 		n++;
55617885a7bSLuigi Rizzo 		nm_i = nm_next(nm_i, lim);
557f9790aebSLuigi Rizzo 	}
55817885a7bSLuigi Rizzo 	kring->nr_hwtail = nm_prev(nm_i, lim);
559b6e66be2SVincenzo Maffione 	nm_prdis("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);
560f9790aebSLuigi Rizzo 
561f9790aebSLuigi Rizzo 	return n;
562f9790aebSLuigi Rizzo }
563f9790aebSLuigi Rizzo 
56437e3a6d3SLuigi Rizzo /* Compute a slot index midway between inf and sup. */
565f9790aebSLuigi Rizzo static inline u_int
56637e3a6d3SLuigi Rizzo ring_middle(u_int inf, u_int sup, u_int lim)
567f9790aebSLuigi Rizzo {
56837e3a6d3SLuigi Rizzo 	u_int n = lim + 1;
569f9790aebSLuigi Rizzo 	u_int e;
570f9790aebSLuigi Rizzo 
57137e3a6d3SLuigi Rizzo 	if (sup >= inf) {
57237e3a6d3SLuigi Rizzo 		e = (sup + inf) / 2;
573f9790aebSLuigi Rizzo 	} else { /* wrap around */
57437e3a6d3SLuigi Rizzo 		e = (sup + n + inf) / 2;
575f9790aebSLuigi Rizzo 		if (e >= n) {
576f9790aebSLuigi Rizzo 			e -= n;
577f9790aebSLuigi Rizzo 		}
578f9790aebSLuigi Rizzo 	}
579f9790aebSLuigi Rizzo 
580f9790aebSLuigi Rizzo 	if (unlikely(e >= n)) {
581b6e66be2SVincenzo Maffione 		nm_prerr("This cannot happen");
582f9790aebSLuigi Rizzo 		e = 0;
583f9790aebSLuigi Rizzo 	}
584f9790aebSLuigi Rizzo 
585f9790aebSLuigi Rizzo 	return e;
586f9790aebSLuigi Rizzo }
587f9790aebSLuigi Rizzo 
588*ce12afaaSMark Johnston #ifdef __FreeBSD__
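/*
 * Callout handler armed by generic_set_tx_event(): if the event mbuf is
 * not freed by the driver within a bounded time, drop the event and wake
 * up the clients anyway, so that the TX ring cannot remain stuck.
 */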
589*ce12afaaSMark Johnston static void
590*ce12afaaSMark Johnston generic_tx_callout(void *arg)
591*ce12afaaSMark Johnston {
592*ce12afaaSMark Johnston 	struct netmap_kring *kring = arg;
593*ce12afaaSMark Johnston 
594*ce12afaaSMark Johnston 	kring->tx_event = NULL;
595*ce12afaaSMark Johnston 	mtx_unlock_spin(&kring->tx_event_lock);
596*ce12afaaSMark Johnston 	netmap_generic_irq(kring->na, kring->ring_id, NULL);
597*ce12afaaSMark Johnston }
598*ce12afaaSMark Johnston #endif
599*ce12afaaSMark Johnston 
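/*
 * Request a TX completion notification: pick one of the mbufs still
 * pending in the driver, install generic_mbuf_dtor() as its destructor
 * and remove it from the tx_pool, so that when the driver eventually
 * frees it we receive the equivalent of a transmit interrupt.
 * 'hwcur' is the first slot that has not been passed to the driver.
 */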
600f9790aebSLuigi Rizzo static void
601f9790aebSLuigi Rizzo generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
602f9790aebSLuigi Rizzo {
60337e3a6d3SLuigi Rizzo 	u_int lim = kring->nkr_num_slots - 1;
604f9790aebSLuigi Rizzo 	struct mbuf *m;
605f9790aebSLuigi Rizzo 	u_int e;
60637e3a6d3SLuigi Rizzo 	u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */
607f9790aebSLuigi Rizzo 
60837e3a6d3SLuigi Rizzo 	if (ntc == hwcur) {
60917885a7bSLuigi Rizzo 		return; /* all buffers are free */
610f9790aebSLuigi Rizzo 	}
61137e3a6d3SLuigi Rizzo 
61237e3a6d3SLuigi Rizzo 	/*
61337e3a6d3SLuigi Rizzo 	 * We have pending packets in the driver between hwtail+1
61437e3a6d3SLuigi Rizzo 	 * and hwcur, and we have to choose one of these slots to
61537e3a6d3SLuigi Rizzo 	 * generate a notification.
61637e3a6d3SLuigi Rizzo 	 * There is a race but this is only called within txsync which
61737e3a6d3SLuigi Rizzo 	 * does a double check.
61837e3a6d3SLuigi Rizzo 	 */
61937e3a6d3SLuigi Rizzo #if 0
62037e3a6d3SLuigi Rizzo 	/* Choose a slot in the middle, so that we don't risk ending
62137e3a6d3SLuigi Rizzo 	 * up in a situation where the client continuously wakes up,
62237e3a6d3SLuigi Rizzo 	 * fills one or a few TX slots and goes to sleep again. */
62337e3a6d3SLuigi Rizzo 	e = ring_middle(ntc, hwcur, lim);
62437e3a6d3SLuigi Rizzo #else
62537e3a6d3SLuigi Rizzo 	/* Choose the first pending slot, to be safe against driver
62637e3a6d3SLuigi Rizzo 	 * reordering mbuf transmissions. */
62737e3a6d3SLuigi Rizzo 	e = ntc;
62837e3a6d3SLuigi Rizzo #endif
629f9790aebSLuigi Rizzo 
630f9790aebSLuigi Rizzo 	m = kring->tx_pool[e];
631f9790aebSLuigi Rizzo 	if (m == NULL) {
63237e3a6d3SLuigi Rizzo 		/* An event is already in place. */
633f9790aebSLuigi Rizzo 		return;
634f9790aebSLuigi Rizzo 	}
635f9790aebSLuigi Rizzo 
63637e3a6d3SLuigi Rizzo 	mtx_lock_spin(&kring->tx_event_lock);
63737e3a6d3SLuigi Rizzo 	if (kring->tx_event) {
63837e3a6d3SLuigi Rizzo 		/* An event is already in place. */
63937e3a6d3SLuigi Rizzo 		mtx_unlock_spin(&kring->tx_event_lock);
64037e3a6d3SLuigi Rizzo 		return;
64137e3a6d3SLuigi Rizzo 	}
64237e3a6d3SLuigi Rizzo 
643*ce12afaaSMark Johnston 	SET_MBUF_DESTRUCTOR(m, generic_mbuf_dtor);
64437e3a6d3SLuigi Rizzo 	kring->tx_event = m;
645*ce12afaaSMark Johnston #ifdef __FreeBSD__
646*ce12afaaSMark Johnston 	/*
647*ce12afaaSMark Johnston 	 * Handle the possibility that the transmitted buffer isn't reclaimed
648*ce12afaaSMark Johnston 	 * within a bounded period of time.  This can arise when transmitting
649*ce12afaaSMark Johnston 	 * out of multiple ports via a lagg or bridge interface, since the
650*ce12afaaSMark Johnston 	 * member ports may legitimately only free transmitted buffers in
651*ce12afaaSMark Johnston 	 * batches.
652*ce12afaaSMark Johnston 	 *
653*ce12afaaSMark Johnston 	 * The callout handler clears the stuck packet from the ring, allowing
654*ce12afaaSMark Johnston 	 * transmission to proceed.  In the common case we let
655*ce12afaaSMark Johnston 	 * generic_mbuf_dtor() unstick the ring, allowing mbufs to be
656*ce12afaaSMark Johnston 	 * reused most of the time.
657*ce12afaaSMark Johnston 	 */
658*ce12afaaSMark Johnston 	callout_reset_sbt_curcpu(&kring->tx_event_callout, SBT_1MS, 0,
659*ce12afaaSMark Johnston 	    generic_tx_callout, kring, 0);
660*ce12afaaSMark Johnston #endif
66137e3a6d3SLuigi Rizzo 	mtx_unlock_spin(&kring->tx_event_lock);
66237e3a6d3SLuigi Rizzo 
66337e3a6d3SLuigi Rizzo 	kring->tx_pool[e] = NULL;
66437e3a6d3SLuigi Rizzo 
665b6e66be2SVincenzo Maffione 	nm_prdis("Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2 );
66637e3a6d3SLuigi Rizzo 
66737e3a6d3SLuigi Rizzo 	/* Decrement the refcount. This will free it if we lose the race
66837e3a6d3SLuigi Rizzo 	 * with the driver. */
669f9790aebSLuigi Rizzo 	m_freem(m);
670f9790aebSLuigi Rizzo }
671f9790aebSLuigi Rizzo 
672f9790aebSLuigi Rizzo /*
673f9790aebSLuigi Rizzo  * generic_netmap_txsync() transforms netmap buffers into mbufs
674f9790aebSLuigi Rizzo  * and passes them to the standard device driver
675f9790aebSLuigi Rizzo  * (ndo_start_xmit() or ifp->if_transmit()).
676f9790aebSLuigi Rizzo  * On Linux this is not done directly, but through dev_queue_xmit(),
677f9790aebSLuigi Rizzo  * since it implements the TX flow control (and takes some locks).
678f9790aebSLuigi Rizzo  */
679f9790aebSLuigi Rizzo static int
6804bf50f18SLuigi Rizzo generic_netmap_txsync(struct netmap_kring *kring, int flags)
681f9790aebSLuigi Rizzo {
6824bf50f18SLuigi Rizzo 	struct netmap_adapter *na = kring->na;
68337e3a6d3SLuigi Rizzo 	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
684e330262fSJustin Hibbits 	if_t ifp = na->ifp;
685f9790aebSLuigi Rizzo 	struct netmap_ring *ring = kring->ring;
68617885a7bSLuigi Rizzo 	u_int nm_i;	/* index into the netmap ring */ // j
68717885a7bSLuigi Rizzo 	u_int const lim = kring->nkr_num_slots - 1;
68817885a7bSLuigi Rizzo 	u_int const head = kring->rhead;
6894bf50f18SLuigi Rizzo 	u_int ring_nr = kring->ring_id;
690f9790aebSLuigi Rizzo 
691f9790aebSLuigi Rizzo 	IFRATE(rate_ctx.new.txsync++);
692f9790aebSLuigi Rizzo 
693f9790aebSLuigi Rizzo 	rmb();
69417885a7bSLuigi Rizzo 
695f9790aebSLuigi Rizzo 	/*
69617885a7bSLuigi Rizzo 	 * First part: process new packets to send.
697f9790aebSLuigi Rizzo 	 */
69817885a7bSLuigi Rizzo 	nm_i = kring->nr_hwcur;
69917885a7bSLuigi Rizzo 	if (nm_i != head) {	/* we have new packets to send */
70037e3a6d3SLuigi Rizzo 		struct nm_os_gen_arg a;
70137e3a6d3SLuigi Rizzo 		u_int event = -1;
702484456b2SVincenzo Maffione #ifdef __FreeBSD__
703484456b2SVincenzo Maffione 		struct epoch_tracker et;
704484456b2SVincenzo Maffione 
705484456b2SVincenzo Maffione 		NET_EPOCH_ENTER(et);
706484456b2SVincenzo Maffione #endif
70737e3a6d3SLuigi Rizzo 
70837e3a6d3SLuigi Rizzo 		if (gna->txqdisc && nm_kr_txempty(kring)) {
70937e3a6d3SLuigi Rizzo 			/* In txqdisc mode, we ask for a delayed notification,
71037e3a6d3SLuigi Rizzo 			 * but only when cur == hwtail, which means that the
71137e3a6d3SLuigi Rizzo 			 * client is going to block. */
71237e3a6d3SLuigi Rizzo 			event = ring_middle(nm_i, head, lim);
713b6e66be2SVincenzo Maffione 			nm_prdis("Place txqdisc event (hwcur=%u,event=%u,"
71437e3a6d3SLuigi Rizzo 			      "head=%u,hwtail=%u)", nm_i, event, head,
71537e3a6d3SLuigi Rizzo 			      kring->nr_hwtail);
71637e3a6d3SLuigi Rizzo 		}
71737e3a6d3SLuigi Rizzo 
71837e3a6d3SLuigi Rizzo 		a.ifp = ifp;
71937e3a6d3SLuigi Rizzo 		a.ring_nr = ring_nr;
72037e3a6d3SLuigi Rizzo 		a.head = a.tail = NULL;
72137e3a6d3SLuigi Rizzo 
72217885a7bSLuigi Rizzo 		while (nm_i != head) {
72317885a7bSLuigi Rizzo 			struct netmap_slot *slot = &ring->slot[nm_i];
724f9790aebSLuigi Rizzo 			u_int len = slot->len;
7254bf50f18SLuigi Rizzo 			void *addr = NMB(na, slot);
72617885a7bSLuigi Rizzo 			/* device-specific */
727f9790aebSLuigi Rizzo 			struct mbuf *m;
728f9790aebSLuigi Rizzo 			int tx_ret;
729f9790aebSLuigi Rizzo 
7304bf50f18SLuigi Rizzo 			NM_CHECK_ADDR_LEN(na, addr, len);
73117885a7bSLuigi Rizzo 
73237e3a6d3SLuigi Rizzo 			/* Take an mbuf from the tx pool (replenishing the pool
73337e3a6d3SLuigi Rizzo 			 * entry if necessary) and copy in the user packet. */
73417885a7bSLuigi Rizzo 			m = kring->tx_pool[nm_i];
735f9790aebSLuigi Rizzo 			if (unlikely(m == NULL)) {
73637e3a6d3SLuigi Rizzo 				kring->tx_pool[nm_i] = m =
73737e3a6d3SLuigi Rizzo 					nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
73837e3a6d3SLuigi Rizzo 				if (m == NULL) {
739b6e66be2SVincenzo Maffione 					nm_prlim(2, "Failed to replenish mbuf");
74037e3a6d3SLuigi Rizzo 					/* Here we could schedule a timer which
74137e3a6d3SLuigi Rizzo 					 * retries to replenish after a while,
74237e3a6d3SLuigi Rizzo 					 * and notifies the client when it
74337e3a6d3SLuigi Rizzo 					 * manages to replenish some slots. In
74437e3a6d3SLuigi Rizzo 					 * any case we break early to avoid
74537e3a6d3SLuigi Rizzo 					 * crashes. */
746f9790aebSLuigi Rizzo 					break;
747f9790aebSLuigi Rizzo 				}
74837e3a6d3SLuigi Rizzo 				IFRATE(rate_ctx.new.txrepl++);
749*ce12afaaSMark Johnston 			} else {
750*ce12afaaSMark Johnston 				nm_os_mbuf_reinit(m);
751f9790aebSLuigi Rizzo 			}
75237e3a6d3SLuigi Rizzo 
75337e3a6d3SLuigi Rizzo 			a.m = m;
75437e3a6d3SLuigi Rizzo 			a.addr = addr;
75537e3a6d3SLuigi Rizzo 			a.len = len;
75637e3a6d3SLuigi Rizzo 			a.qevent = (nm_i == event);
75737e3a6d3SLuigi Rizzo 			/* When not in txqdisc mode, we should ask for
75837e3a6d3SLuigi Rizzo 			 * notifications when NS_REPORT is set, or roughly
75937e3a6d3SLuigi Rizzo 			 * every half ring. To optimize this, we set a
76037e3a6d3SLuigi Rizzo 			 * notification event when the client runs out of
76137e3a6d3SLuigi Rizzo 			 * TX ring space, or when transmission fails. In
76237e3a6d3SLuigi Rizzo 			 * the latter case we also break early.
763f9790aebSLuigi Rizzo 			 */
76437e3a6d3SLuigi Rizzo 			tx_ret = nm_os_generic_xmit_frame(&a);
765f9790aebSLuigi Rizzo 			if (unlikely(tx_ret)) {
76637e3a6d3SLuigi Rizzo 				if (!gna->txqdisc) {
767f9790aebSLuigi Rizzo 					/*
768f9790aebSLuigi Rizzo 					 * No room for this mbuf in the device driver.
769f9790aebSLuigi Rizzo 					 * Request a notification FOR A PREVIOUS MBUF,
770f9790aebSLuigi Rizzo 					 * then call generic_netmap_tx_clean(kring) to do the
771f9790aebSLuigi Rizzo 					 * double check and see if we can free more buffers.
772f9790aebSLuigi Rizzo 					 * If there is space continue, else break;
773f9790aebSLuigi Rizzo 					 * NOTE: the double check is necessary if the problem
774f9790aebSLuigi Rizzo 					 * occurs in the txsync call after selrecord().
775f9790aebSLuigi Rizzo 					 * Also, we need some way to tell the caller that not
776f9790aebSLuigi Rizzo 					 * all buffers were queued onto the device (this was
777f9790aebSLuigi Rizzo 					 * not a problem with native netmap driver where space
778f9790aebSLuigi Rizzo 					 * is preallocated). The bridge has a similar problem
779f9790aebSLuigi Rizzo 					 * and we solve it there by dropping the excess packets.
780f9790aebSLuigi Rizzo 					 */
78117885a7bSLuigi Rizzo 					generic_set_tx_event(kring, nm_i);
78237e3a6d3SLuigi Rizzo 					if (generic_netmap_tx_clean(kring, gna->txqdisc)) {
78337e3a6d3SLuigi Rizzo 						/* space now available */
784f9790aebSLuigi Rizzo 						continue;
785f9790aebSLuigi Rizzo 					} else {
786f9790aebSLuigi Rizzo 						break;
787f9790aebSLuigi Rizzo 					}
788f9790aebSLuigi Rizzo 				}
78937e3a6d3SLuigi Rizzo 
79037e3a6d3SLuigi Rizzo 				/* In txqdisc mode, the netmap-aware qdisc
79137e3a6d3SLuigi Rizzo 				 * queue has the same length as the number of
79237e3a6d3SLuigi Rizzo 				 * netmap slots (N). Since tail is advanced
79337e3a6d3SLuigi Rizzo 				 * only when packets are dequeued, qdisc
79437e3a6d3SLuigi Rizzo 				 * queue overrun cannot happen, so
79537e3a6d3SLuigi Rizzo 				 * nm_os_generic_xmit_frame() did not fail
79637e3a6d3SLuigi Rizzo 				 * because of that.
79737e3a6d3SLuigi Rizzo 				 * However, packets can be dropped because
79837e3a6d3SLuigi Rizzo 				 * carrier is off, or because our qdisc is
79937e3a6d3SLuigi Rizzo 				 * being deactivated, or possibly for other
80037e3a6d3SLuigi Rizzo 				 * reasons. In these cases, we just let the
80137e3a6d3SLuigi Rizzo 				 * packet be dropped. */
80237e3a6d3SLuigi Rizzo 				IFRATE(rate_ctx.new.txdrop++);
80337e3a6d3SLuigi Rizzo 			}
80437e3a6d3SLuigi Rizzo 
805f9790aebSLuigi Rizzo 			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
80617885a7bSLuigi Rizzo 			nm_i = nm_next(nm_i, lim);
807f0ea3689SLuigi Rizzo 			IFRATE(rate_ctx.new.txpkt++);
808f9790aebSLuigi Rizzo 		}
80937e3a6d3SLuigi Rizzo 		if (a.head != NULL) {
81037e3a6d3SLuigi Rizzo 			a.addr = NULL;
81137e3a6d3SLuigi Rizzo 			nm_os_generic_xmit_frame(&a);
81237e3a6d3SLuigi Rizzo 		}
81337e3a6d3SLuigi Rizzo 		/* Update hwcur to the next slot to transmit. Here nm_i
81437e3a6d3SLuigi Rizzo 		 * is not necessarily head, since we may have broken out early. */
81537e3a6d3SLuigi Rizzo 		kring->nr_hwcur = nm_i;
816484456b2SVincenzo Maffione 
817484456b2SVincenzo Maffione #ifdef __FreeBSD__
818484456b2SVincenzo Maffione 		NET_EPOCH_EXIT(et);
819484456b2SVincenzo Maffione #endif
82017885a7bSLuigi Rizzo 	}
821f9790aebSLuigi Rizzo 
82237e3a6d3SLuigi Rizzo 	if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) {
823f9790aebSLuigi Rizzo 		/* No more available slots? Set a notification event
824f9790aebSLuigi Rizzo 		 * on a netmap slot that will be cleaned in the future.
825f9790aebSLuigi Rizzo 		 * No double check is performed, since txsync() will be
826f9790aebSLuigi Rizzo 		 * called twice by netmap_poll().
827f9790aebSLuigi Rizzo 		 */
82817885a7bSLuigi Rizzo 		generic_set_tx_event(kring, nm_i);
829f9790aebSLuigi Rizzo 	}
830f9790aebSLuigi Rizzo 
831*ce12afaaSMark Johnston 	/*
832*ce12afaaSMark Johnston 	 * Second, reclaim completed buffers
833*ce12afaaSMark Johnston 	 */
83437e3a6d3SLuigi Rizzo 	generic_netmap_tx_clean(kring, gna->txqdisc);
83517885a7bSLuigi Rizzo 
836f9790aebSLuigi Rizzo 	return 0;
837f9790aebSLuigi Rizzo }
838f9790aebSLuigi Rizzo 
83917885a7bSLuigi Rizzo 
840f9790aebSLuigi Rizzo /*
84137e3a6d3SLuigi Rizzo  * This handler is registered (through nm_os_catch_rx())
842f9790aebSLuigi Rizzo  * on the RX path of the attached network interface,
843f9790aebSLuigi Rizzo  * so that every mbuf passed up by the driver can be
844f9790aebSLuigi Rizzo  * stolen before it reaches the network stack.
845f9790aebSLuigi Rizzo  * Stolen packets are put in a queue from which the
846f9790aebSLuigi Rizzo  * generic_netmap_rxsync() callback can extract them.
84737e3a6d3SLuigi Rizzo  * Returns 1 if the packet was stolen, 0 otherwise.
848f9790aebSLuigi Rizzo  */
84937e3a6d3SLuigi Rizzo int
850e330262fSJustin Hibbits generic_rx_handler(if_t ifp, struct mbuf *m)
851f9790aebSLuigi Rizzo {
852f9790aebSLuigi Rizzo 	struct netmap_adapter *na = NA(ifp);
853f9790aebSLuigi Rizzo 	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
85437e3a6d3SLuigi Rizzo 	struct netmap_kring *kring;
855f9790aebSLuigi Rizzo 	u_int work_done;
85637e3a6d3SLuigi Rizzo 	u_int r = MBUF_RXQ(m); /* receive ring number */
857f0ea3689SLuigi Rizzo 
85837e3a6d3SLuigi Rizzo 	if (r >= na->num_rx_rings) {
85937e3a6d3SLuigi Rizzo 		r = r % na->num_rx_rings;
86037e3a6d3SLuigi Rizzo 	}
86137e3a6d3SLuigi Rizzo 
8622ff91c17SVincenzo Maffione 	kring = na->rx_rings[r];
86337e3a6d3SLuigi Rizzo 
86437e3a6d3SLuigi Rizzo 	if (kring->nr_mode == NKR_NETMAP_OFF) {
86537e3a6d3SLuigi Rizzo 		/* We must not intercept this mbuf. */
86637e3a6d3SLuigi Rizzo 		return 0;
867f0ea3689SLuigi Rizzo 	}
868f9790aebSLuigi Rizzo 
869f9790aebSLuigi Rizzo 	/* Drop oversize packets and limit the length of the RX queue. */
87037e3a6d3SLuigi Rizzo 	if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) {
87137e3a6d3SLuigi Rizzo 		/* This may happen when GRO/LRO features are enabled on
87237e3a6d3SLuigi Rizzo 		 * the NIC driver while the generic adapter does not
87337e3a6d3SLuigi Rizzo 		 * support RX scatter-gather. */
874b6e66be2SVincenzo Maffione 		nm_prlim(2, "Warning: driver pushed up big packet "
87537e3a6d3SLuigi Rizzo 				"(size=%d)", (int)MBUF_LEN(m));
876df40e30cSMark Johnston 		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
87737e3a6d3SLuigi Rizzo 		m_freem(m);
878539437c8SMark Johnston 	} else if (unlikely(mbq_len(&kring->rx_queue) > na->num_rx_desc)) {
879df40e30cSMark Johnston 		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
880f9790aebSLuigi Rizzo 		m_freem(m);
881f9790aebSLuigi Rizzo 	} else {
88237e3a6d3SLuigi Rizzo 		mbq_safe_enqueue(&kring->rx_queue, m);
883f9790aebSLuigi Rizzo 	}
884f9790aebSLuigi Rizzo 
885f9790aebSLuigi Rizzo 	if (netmap_generic_mit < 32768) {
886f9790aebSLuigi Rizzo 		/* no rx mitigation, pass notification up */
88737e3a6d3SLuigi Rizzo 		netmap_generic_irq(na, r, &work_done);
888f9790aebSLuigi Rizzo 	} else {
889f9790aebSLuigi Rizzo 		/* Same as send combining: filter the notification if there is a
890f9790aebSLuigi Rizzo 		 * pending timer, otherwise pass it up and start a timer.
891f9790aebSLuigi Rizzo 		 */
89237e3a6d3SLuigi Rizzo 		if (likely(nm_os_mitigation_active(&gna->mit[r]))) {
893f9790aebSLuigi Rizzo 			/* Record that there is some pending work. */
89437e3a6d3SLuigi Rizzo 			gna->mit[r].mit_pending = 1;
895f9790aebSLuigi Rizzo 		} else {
89637e3a6d3SLuigi Rizzo 			netmap_generic_irq(na, r, &work_done);
89737e3a6d3SLuigi Rizzo 			nm_os_mitigation_start(&gna->mit[r]);
898f9790aebSLuigi Rizzo 		}
899f9790aebSLuigi Rizzo 	}
90037e3a6d3SLuigi Rizzo 
90137e3a6d3SLuigi Rizzo 	/* We have intercepted the mbuf. */
90237e3a6d3SLuigi Rizzo 	return 1;
903f9790aebSLuigi Rizzo }
904f9790aebSLuigi Rizzo 
905f9790aebSLuigi Rizzo /*
906f9790aebSLuigi Rizzo  * generic_netmap_rxsync() extracts mbufs from the queue filled by
907f9790aebSLuigi Rizzo  * generic_rx_handler() and puts their content in the netmap
908f9790aebSLuigi Rizzo  * receive ring.
909f9790aebSLuigi Rizzo  * Access must be protected because the rx handler is asynchronous.
910f9790aebSLuigi Rizzo  */
911f9790aebSLuigi Rizzo static int
9124bf50f18SLuigi Rizzo generic_netmap_rxsync(struct netmap_kring *kring, int flags)
913f9790aebSLuigi Rizzo {
914f9790aebSLuigi Rizzo 	struct netmap_ring *ring = kring->ring;
9154bf50f18SLuigi Rizzo 	struct netmap_adapter *na = kring->na;
91617885a7bSLuigi Rizzo 	u_int nm_i;	/* index into the netmap ring */ //j,
91717885a7bSLuigi Rizzo 	u_int n;
91817885a7bSLuigi Rizzo 	u_int const lim = kring->nkr_num_slots - 1;
919847bf383SLuigi Rizzo 	u_int const head = kring->rhead;
920f9790aebSLuigi Rizzo 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
921f9790aebSLuigi Rizzo 
92237e3a6d3SLuigi Rizzo 	/* Adapter-specific variables. */
92337e3a6d3SLuigi Rizzo 	u_int nm_buf_len = NETMAP_BUF_SIZE(na);
92437e3a6d3SLuigi Rizzo 	struct mbq tmpq;
92537e3a6d3SLuigi Rizzo 	struct mbuf *m;
92637e3a6d3SLuigi Rizzo 	int avail; /* in bytes */
92737e3a6d3SLuigi Rizzo 	int mlen;
92837e3a6d3SLuigi Rizzo 	int copy;
92937e3a6d3SLuigi Rizzo 
93017885a7bSLuigi Rizzo 	if (head > lim)
931f9790aebSLuigi Rizzo 		return netmap_ring_reinit(kring);
932f9790aebSLuigi Rizzo 
93337e3a6d3SLuigi Rizzo 	IFRATE(rate_ctx.new.rxsync++);
93417885a7bSLuigi Rizzo 
935f9790aebSLuigi Rizzo 	/*
93637e3a6d3SLuigi Rizzo 	 * First part: skip past packets that userspace has released.
93737e3a6d3SLuigi Rizzo 	 * This can possibly make room for the second part.
93817885a7bSLuigi Rizzo 	 */
93917885a7bSLuigi Rizzo 	nm_i = kring->nr_hwcur;
94017885a7bSLuigi Rizzo 	if (nm_i != head) {
941f9790aebSLuigi Rizzo 		/* Userspace has released some packets. */
94217885a7bSLuigi Rizzo 		for (n = 0; nm_i != head; n++) {
94317885a7bSLuigi Rizzo 			struct netmap_slot *slot = &ring->slot[nm_i];
944f9790aebSLuigi Rizzo 
945f9790aebSLuigi Rizzo 			slot->flags &= ~NS_BUF_CHANGED;
94617885a7bSLuigi Rizzo 			nm_i = nm_next(nm_i, lim);
947f9790aebSLuigi Rizzo 		}
94817885a7bSLuigi Rizzo 		kring->nr_hwcur = head;
949f9790aebSLuigi Rizzo 	}
95037e3a6d3SLuigi Rizzo 
95137e3a6d3SLuigi Rizzo 	/*
95237e3a6d3SLuigi Rizzo 	 * Second part: import newly received packets.
95337e3a6d3SLuigi Rizzo 	 */
95437e3a6d3SLuigi Rizzo 	if (!netmap_no_pendintr && !force_update) {
95537e3a6d3SLuigi Rizzo 		return 0;
95637e3a6d3SLuigi Rizzo 	}
95737e3a6d3SLuigi Rizzo 
95837e3a6d3SLuigi Rizzo 	nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */
95937e3a6d3SLuigi Rizzo 
96037e3a6d3SLuigi Rizzo 	/* Compute the available space (in bytes) in this netmap ring.
96137e3a6d3SLuigi Rizzo 	 * The first slot that is not considered is the one before
96237e3a6d3SLuigi Rizzo 	 * nr_hwcur. */
96337e3a6d3SLuigi Rizzo 
96437e3a6d3SLuigi Rizzo 	avail = nm_prev(kring->nr_hwcur, lim) - nm_i;
96537e3a6d3SLuigi Rizzo 	if (avail < 0)
96637e3a6d3SLuigi Rizzo 		avail += lim + 1;
96737e3a6d3SLuigi Rizzo 	avail *= nm_buf_len;
96837e3a6d3SLuigi Rizzo 
96937e3a6d3SLuigi Rizzo 	/* First pass: While holding the lock on the RX mbuf queue,
97037e3a6d3SLuigi Rizzo 	 * extract as many mbufs as fit in the available space,
97137e3a6d3SLuigi Rizzo 	 * and put them in a temporary queue.
97237e3a6d3SLuigi Rizzo 	 * To avoid performing a per-mbuf division (mlen / nm_buf_len)
97337e3a6d3SLuigi Rizzo 	 * to update avail, we do the update in a while loop that we
97437e3a6d3SLuigi Rizzo 	 * also use to set the RX slots, but without performing the copy. */
97537e3a6d3SLuigi Rizzo 	mbq_init(&tmpq);
97637e3a6d3SLuigi Rizzo 	mbq_lock(&kring->rx_queue);
97737e3a6d3SLuigi Rizzo 	for (n = 0;; n++) {
97837e3a6d3SLuigi Rizzo 		m = mbq_peek(&kring->rx_queue);
97937e3a6d3SLuigi Rizzo 		if (!m) {
98037e3a6d3SLuigi Rizzo 			/* No more packets from the driver. */
98137e3a6d3SLuigi Rizzo 			break;
98237e3a6d3SLuigi Rizzo 		}
98337e3a6d3SLuigi Rizzo 
98437e3a6d3SLuigi Rizzo 		mlen = MBUF_LEN(m);
98537e3a6d3SLuigi Rizzo 		if (mlen > avail) {
98637e3a6d3SLuigi Rizzo 			/* No more space in the ring. */
98737e3a6d3SLuigi Rizzo 			break;
98837e3a6d3SLuigi Rizzo 		}
98937e3a6d3SLuigi Rizzo 
99037e3a6d3SLuigi Rizzo 		mbq_dequeue(&kring->rx_queue);
99137e3a6d3SLuigi Rizzo 
99237e3a6d3SLuigi Rizzo 		while (mlen) {
99337e3a6d3SLuigi Rizzo 			copy = nm_buf_len;
99437e3a6d3SLuigi Rizzo 			if (mlen < copy) {
99537e3a6d3SLuigi Rizzo 				copy = mlen;
99637e3a6d3SLuigi Rizzo 			}
99737e3a6d3SLuigi Rizzo 			mlen -= copy;
99837e3a6d3SLuigi Rizzo 			avail -= nm_buf_len;
99937e3a6d3SLuigi Rizzo 
100037e3a6d3SLuigi Rizzo 			ring->slot[nm_i].len = copy;
10014f80b14cSVincenzo Maffione 			ring->slot[nm_i].flags = (mlen ? NS_MOREFRAG : 0);
100237e3a6d3SLuigi Rizzo 			nm_i = nm_next(nm_i, lim);
100337e3a6d3SLuigi Rizzo 		}
100437e3a6d3SLuigi Rizzo 
100537e3a6d3SLuigi Rizzo 		mbq_enqueue(&tmpq, m);
100637e3a6d3SLuigi Rizzo 	}
100737e3a6d3SLuigi Rizzo 	mbq_unlock(&kring->rx_queue);
100837e3a6d3SLuigi Rizzo 
100937e3a6d3SLuigi Rizzo 	/* Second pass: Drain the temporary queue, going over the used RX slots,
101037e3a6d3SLuigi Rizzo 	 * and perform the copy outside of the RX queue lock. */
101137e3a6d3SLuigi Rizzo 	nm_i = kring->nr_hwtail;
101237e3a6d3SLuigi Rizzo 
101337e3a6d3SLuigi Rizzo 	for (;;) {
101437e3a6d3SLuigi Rizzo 		void *nmaddr;
101537e3a6d3SLuigi Rizzo 		int ofs = 0;
101637e3a6d3SLuigi Rizzo 		int morefrag;
101737e3a6d3SLuigi Rizzo 
101837e3a6d3SLuigi Rizzo 		m = mbq_dequeue(&tmpq);
101937e3a6d3SLuigi Rizzo 		if (!m)	{
102037e3a6d3SLuigi Rizzo 			break;
102137e3a6d3SLuigi Rizzo 		}
102237e3a6d3SLuigi Rizzo 
102337e3a6d3SLuigi Rizzo 		do {
102437e3a6d3SLuigi Rizzo 			nmaddr = NMB(na, &ring->slot[nm_i]);
102537e3a6d3SLuigi Rizzo 			/* We only check the address here on generic rx rings. */
102637e3a6d3SLuigi Rizzo 			if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */
102737e3a6d3SLuigi Rizzo 				m_freem(m);
102837e3a6d3SLuigi Rizzo 				mbq_purge(&tmpq);
102937e3a6d3SLuigi Rizzo 				mbq_fini(&tmpq);
103037e3a6d3SLuigi Rizzo 				return netmap_ring_reinit(kring);
103137e3a6d3SLuigi Rizzo 			}
103237e3a6d3SLuigi Rizzo 
103337e3a6d3SLuigi Rizzo 			copy = ring->slot[nm_i].len;
103437e3a6d3SLuigi Rizzo 			m_copydata(m, ofs, copy, nmaddr);
103537e3a6d3SLuigi Rizzo 			ofs += copy;
103637e3a6d3SLuigi Rizzo 			morefrag = ring->slot[nm_i].flags & NS_MOREFRAG;
103737e3a6d3SLuigi Rizzo 			nm_i = nm_next(nm_i, lim);
103837e3a6d3SLuigi Rizzo 		} while (morefrag);
103937e3a6d3SLuigi Rizzo 
104037e3a6d3SLuigi Rizzo 		m_freem(m);
104137e3a6d3SLuigi Rizzo 	}
104237e3a6d3SLuigi Rizzo 
104337e3a6d3SLuigi Rizzo 	mbq_fini(&tmpq);
104437e3a6d3SLuigi Rizzo 
104537e3a6d3SLuigi Rizzo 	if (n) {
104637e3a6d3SLuigi Rizzo 		kring->nr_hwtail = nm_i;
104737e3a6d3SLuigi Rizzo 		IFRATE(rate_ctx.new.rxpkt += n);
104837e3a6d3SLuigi Rizzo 	}
104937e3a6d3SLuigi Rizzo 	kring->nr_kflags &= ~NKR_PENDINTR;
1050f9790aebSLuigi Rizzo 
1051f9790aebSLuigi Rizzo 	return 0;
1052f9790aebSLuigi Rizzo }
1053f9790aebSLuigi Rizzo 
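/*
 * Destructor for the emulated adapter: restore the adapter (if any) that
 * was attached to the interface before the emulated one was created, and
 * drop the references taken on it at attach time.
 */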
1054f9790aebSLuigi Rizzo static void
1055f9790aebSLuigi Rizzo generic_netmap_dtor(struct netmap_adapter *na)
1056f9790aebSLuigi Rizzo {
1057f9790aebSLuigi Rizzo 	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
1058e330262fSJustin Hibbits 	if_t ifp = netmap_generic_getifp(gna);
1059f9790aebSLuigi Rizzo 	struct netmap_adapter *prev_na = gna->prev;
1060f9790aebSLuigi Rizzo 
1061f9790aebSLuigi Rizzo 	if (prev_na != NULL) {
1062847bf383SLuigi Rizzo 		netmap_adapter_put(prev_na);
106337e3a6d3SLuigi Rizzo 		if (nm_iszombie(na)) {
1064847bf383SLuigi Rizzo 		        /*
1065847bf383SLuigi Rizzo 		         * The driver has been removed without releasing
1066847bf383SLuigi Rizzo 		         * the reference so we need to do it here.
1067847bf383SLuigi Rizzo 		         */
1068f9790aebSLuigi Rizzo 		        netmap_adapter_put(prev_na);
1069f9790aebSLuigi Rizzo 		}
10705fe59a51SVincenzo Maffione 		nm_prinf("Native netmap adapter for %s restored", prev_na->name);
1071847bf383SLuigi Rizzo 	}
10722a7db7a6SVincenzo Maffione 	NM_RESTORE_NA(ifp, prev_na);
1073f9790aebSLuigi Rizzo 	na->ifp = NULL;
1074b6e66be2SVincenzo Maffione 	nm_prinf("Emulated netmap adapter for %s destroyed", na->name);
1075c3e9b4dbSLuiz Otavio O Souza }
1076c3e9b4dbSLuiz Otavio O Souza 
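/* Tell whether 'na' is an emulated (generic) adapter. */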
1077c3e9b4dbSLuiz Otavio O Souza int
1078c3e9b4dbSLuiz Otavio O Souza na_is_generic(struct netmap_adapter *na)
1079c3e9b4dbSLuiz Otavio O Souza {
1080c3e9b4dbSLuiz Otavio O Souza 	return na->nm_register == generic_netmap_register;
1081f9790aebSLuigi Rizzo }
1082f9790aebSLuigi Rizzo 
1083f9790aebSLuigi Rizzo /*
1084f9790aebSLuigi Rizzo  * generic_netmap_attach() makes it possible to use netmap on
1085f9790aebSLuigi Rizzo  * a device without native netmap support.
1086f9790aebSLuigi Rizzo  * This is less performant than native support but potentially
1087f9790aebSLuigi Rizzo  * faster than raw sockets or similar schemes.
1088f9790aebSLuigi Rizzo  *
1089f9790aebSLuigi Rizzo  * In this "emulated" mode, netmap rings do not necessarily
1090f9790aebSLuigi Rizzo  * have the same size as those in the NIC. We use a default
1091f9790aebSLuigi Rizzo  * value and possibly override it if the OS has ways to fetch the
1092f9790aebSLuigi Rizzo  * actual configuration.
1093f9790aebSLuigi Rizzo  */
1094f9790aebSLuigi Rizzo int
1095e330262fSJustin Hibbits generic_netmap_attach(if_t ifp)
1096f9790aebSLuigi Rizzo {
1097f9790aebSLuigi Rizzo 	struct netmap_adapter *na;
1098f9790aebSLuigi Rizzo 	struct netmap_generic_adapter *gna;
1099f9790aebSLuigi Rizzo 	int retval;
1100f9790aebSLuigi Rizzo 	u_int num_tx_desc, num_rx_desc;
1101f9790aebSLuigi Rizzo 
1102a02dbe4cSLuiz Otavio O Souza #ifdef __FreeBSD__
1103e330262fSJustin Hibbits 	if (if_gettype(ifp) == IFT_LOOP) {
1104b6e66be2SVincenzo Maffione 		nm_prerr("if_loop is not supported by %s", __func__);
1105a02dbe4cSLuiz Otavio O Souza 		return EINVAL;
1106a02dbe4cSLuiz Otavio O Souza 	}
1107a02dbe4cSLuiz Otavio O Souza #endif
1108a02dbe4cSLuiz Otavio O Souza 
11092a7db7a6SVincenzo Maffione 	if (NM_NA_CLASH(ifp)) {
11104f80b14cSVincenzo Maffione 		/* If NA(ifp) is not null but there is no valid netmap
11114f80b14cSVincenzo Maffione 		 * adapter it means that someone else is using the same
11124f80b14cSVincenzo Maffione 		 * pointer (e.g. ax25_ptr on linux). This happens for
11134f80b14cSVincenzo Maffione 		 * instance when PF_RING is also in use. */
1114b6e66be2SVincenzo Maffione 		nm_prerr("Error: netmap adapter hook is busy");
11154f80b14cSVincenzo Maffione 		return EBUSY;
11164f80b14cSVincenzo Maffione 	}
11174f80b14cSVincenzo Maffione 
1118f9790aebSLuigi Rizzo 	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */
1119f9790aebSLuigi Rizzo 
112037e3a6d3SLuigi Rizzo 	nm_os_generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */
1121e4166283SLuigi Rizzo 	if (num_tx_desc == 0 || num_rx_desc == 0) {
1122b6e66be2SVincenzo Maffione 		nm_prerr("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc);
1123e4166283SLuigi Rizzo 		return EINVAL;
1124e4166283SLuigi Rizzo 	}
1125f9790aebSLuigi Rizzo 
1126c3e9b4dbSLuiz Otavio O Souza 	gna = nm_os_malloc(sizeof(*gna));
1127f9790aebSLuigi Rizzo 	if (gna == NULL) {
1128b6e66be2SVincenzo Maffione 		nm_prerr("no memory on attach, give up");
1129f9790aebSLuigi Rizzo 		return ENOMEM;
1130f9790aebSLuigi Rizzo 	}
1131f9790aebSLuigi Rizzo 	na = (struct netmap_adapter *)gna;
1132e330262fSJustin Hibbits 	strlcpy(na->name, if_name(ifp), sizeof(na->name));
1133f9790aebSLuigi Rizzo 	na->ifp = ifp;
1134f9790aebSLuigi Rizzo 	na->num_tx_desc = num_tx_desc;
1135f9790aebSLuigi Rizzo 	na->num_rx_desc = num_rx_desc;
11362a7db7a6SVincenzo Maffione 	na->rx_buf_maxsize = 32768;
1137f9790aebSLuigi Rizzo 	na->nm_register = &generic_netmap_register;
1138f9790aebSLuigi Rizzo 	na->nm_txsync = &generic_netmap_txsync;
1139f9790aebSLuigi Rizzo 	na->nm_rxsync = &generic_netmap_rxsync;
1140f9790aebSLuigi Rizzo 	na->nm_dtor = &generic_netmap_dtor;
11414bf50f18SLuigi Rizzo 	/* when using generic, NAF_NETMAP_ON is set so we force
1142f9790aebSLuigi Rizzo 	 * NAF_SKIP_INTR to use the regular interrupt handler
1143f9790aebSLuigi Rizzo 	 */
1144f0ea3689SLuigi Rizzo 	na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;
1145f9790aebSLuigi Rizzo 
1146b6e66be2SVincenzo Maffione 	nm_prdis("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
1147f9790aebSLuigi Rizzo 			ifp->num_tx_queues, ifp->real_num_tx_queues,
1148f9790aebSLuigi Rizzo 			ifp->tx_queue_len);
1149b6e66be2SVincenzo Maffione 	nm_prdis("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
1150f9790aebSLuigi Rizzo 			ifp->num_rx_queues, ifp->real_num_rx_queues);
1151f9790aebSLuigi Rizzo 
115237e3a6d3SLuigi Rizzo 	nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);
1153f9790aebSLuigi Rizzo 
1154f9790aebSLuigi Rizzo 	retval = netmap_attach_common(na);
1155f9790aebSLuigi Rizzo 	if (retval) {
1156c3e9b4dbSLuiz Otavio O Souza 		nm_os_free(gna);
115737e3a6d3SLuigi Rizzo 		return retval;
1158f9790aebSLuigi Rizzo 	}
1159f9790aebSLuigi Rizzo 
11602a7db7a6SVincenzo Maffione 	if (NM_NA_VALID(ifp)) {
116137e3a6d3SLuigi Rizzo 		gna->prev = NA(ifp); /* save old na */
116237e3a6d3SLuigi Rizzo 		netmap_adapter_get(gna->prev);
116337e3a6d3SLuigi Rizzo 	}
116437e3a6d3SLuigi Rizzo 	NM_ATTACH_NA(ifp, na);
116537e3a6d3SLuigi Rizzo 
116637e3a6d3SLuigi Rizzo 	nm_os_generic_set_features(gna);
116737e3a6d3SLuigi Rizzo 
1168d7143780SVincenzo Maffione 	nm_prinf("Emulated adapter for %s created (prev was %s)", na->name,
1169d7143780SVincenzo Maffione 	    gna->prev ? gna->prev->name : "NULL");
117037e3a6d3SLuigi Rizzo 
1171f9790aebSLuigi Rizzo 	return retval;
1172f9790aebSLuigi Rizzo }
1173