/*-
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/netdevice.h>

MALLOC_DEFINE(M_NETDEV, "lkpindev", "Linux KPI netdevice compat");

#define	NAPI_LOCK_INIT(_ndev)		\
    mtx_init(&(_ndev)->napi_mtx, "napi_mtx", NULL, MTX_DEF)
#define	NAPI_LOCK_DESTROY(_ndev)	mtx_destroy(&(_ndev)->napi_mtx)
#define	NAPI_LOCK_ASSERT(_ndev)		mtx_assert(&(_ndev)->napi_mtx, MA_OWNED)
#define	NAPI_LOCK(_ndev)		mtx_lock(&(_ndev)->napi_mtx)
#define	NAPI_UNLOCK(_ndev)		mtx_unlock(&(_ndev)->napi_mtx)

/* -------------------------------------------------------------------------- */
/* Do not schedule new things while we are waiting to clear things. */
#define	LKPI_NAPI_FLAG_DISABLE_PENDING				0
/* To ensure that only one poll is ever running at a time. */
#define	LKPI_NAPI_FLAG_IS_SCHEDULED				1
/* Set if we tried to schedule while a poll was running; need to re-schedule. */
#define	LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN			2
/* When shutting down, forcefully prevent the task/poll from running. */
#define	LKPI_NAPI_FLAG_SHUTDOWN					3

#define	LKPI_NAPI_FLAGS \
	"\20\1DISABLE_PENDING\2IS_SCHEDULED\3LOST_RACE_TRY_AGAIN\4SHUTDOWN"
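
/*
 * LKPI_NAPI_FLAGS is a printf(9) "%b" decoding template: the leading \20
 * (octal for 16) selects hexadecimal output and each \<n>NAME entry names
 * bit n-1.  A minimal sketch of how the trace macros below use it:
 *
 *	unsigned long flags = BIT(LKPI_NAPI_FLAG_IS_SCHEDULED) |
 *	    BIT(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN);
 *	printf("%b\n", (int)flags, LKPI_NAPI_FLAGS);
 *
 * This would print "6<IS_SCHEDULED,LOST_RACE_TRY_AGAIN>".
 */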

/* #define	NAPI_DEBUG */
#ifdef NAPI_DEBUG
static int debug_napi;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug_napi, CTLFLAG_RWTUN,
    &debug_napi, 0, "NAPI debug level");

#define	DNAPI_TODO		0x01
#define	DNAPI_IMPROVE		0x02
#define	DNAPI_TRACE		0x10
#define	DNAPI_TRACE_TASK	0x20
#define	DNAPI_DIRECT_DISPATCH	0x1000

#define	NAPI_TRACE(_n)		if (debug_napi & DNAPI_TRACE)		\
    printf("NAPI_TRACE %s:%d %u %p (%#jx %b)\n", __func__, __LINE__,	\
	(unsigned int)ticks, _n, (uintmax_t)(_n)->_flags,		\
	(int)(_n)->_flags, LKPI_NAPI_FLAGS)
#define	NAPI_TRACE2D(_n, _d)	if (debug_napi & DNAPI_TRACE)		\
    printf("NAPI_TRACE %s:%d %u %p (%#jx %b) %d\n", __func__, __LINE__, \
	(unsigned int)ticks, _n, (uintmax_t)(_n)->_flags,		\
	(int)(_n)->_flags, LKPI_NAPI_FLAGS, _d)
#define	NAPI_TRACE_TASK(_n, _p, _c) if (debug_napi & DNAPI_TRACE_TASK)	\
    printf("NAPI_TRACE %s:%d %u %p (%#jx %b) pending %d count %d "	\
	"rx_count %d\n", __func__, __LINE__,				\
	(unsigned int)ticks, _n, (uintmax_t)(_n)->_flags,		\
	(int)(_n)->_flags, LKPI_NAPI_FLAGS, _p, _c, (_n)->rx_count)
#define	NAPI_TODO()		if (debug_napi & DNAPI_TODO)		\
    printf("NAPI_TODO %s:%d %d\n", __func__, __LINE__, ticks)
#define	NAPI_IMPROVE()		if (debug_napi & DNAPI_IMPROVE)		\
    printf("NAPI_IMPROVE %s:%d %d\n", __func__, __LINE__, ticks)

#define	NAPI_DIRECT_DISPATCH()	((debug_napi & DNAPI_DIRECT_DISPATCH) != 0)
#else
#define	NAPI_TRACE(_n)			do { } while (0)
#define	NAPI_TRACE2D(_n, _d)		do { } while (0)
#define	NAPI_TRACE_TASK(_n, _p, _c)	do { } while (0)
#define	NAPI_TODO()			do { } while (0)
#define	NAPI_IMPROVE()			do { } while (0)

#define	NAPI_DIRECT_DISPATCH()		(0)
#endif

/* -------------------------------------------------------------------------- */

/*
 * Check if a poll is running or can run, and if the latter,
 * mark us as running.  That way we ensure that only one poll
 * can ever run at the same time.  Returns true if no poll
 * was scheduled yet.
 */
bool
linuxkpi_napi_schedule_prep(struct napi_struct *napi)
{
	unsigned long old, new;

	NAPI_TRACE(napi);

	/* We can only update/return if all flags agree. */
	do {
		old = READ_ONCE(napi->_flags);

		/* If we are stopping, cannot run again. */
		if ((old & BIT(LKPI_NAPI_FLAG_DISABLE_PENDING)) != 0) {
			NAPI_TRACE(napi);
			return (false);
		}

		new = old;
		/* We were already scheduled. Need to try again? */
		if ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) != 0)
			new |= BIT(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN);
		new |= BIT(LKPI_NAPI_FLAG_IS_SCHEDULED);

	} while (atomic_cmpset_acq_long(&napi->_flags, old, new) == 0);

	NAPI_TRACE(napi);
	return ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) == 0);
}
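
/*
 * A minimal sketch of the interrupt-handler pattern the above enables
 * (sc and the helper names are hypothetical, not part of this file):
 * mask device interrupts only once we have won the right to poll, then
 * hand off to __napi_schedule().  linuxkpi_napi_schedule() below wraps
 * the same sequence.
 *
 *	if (napi_schedule_prep(&sc->napi)) {
 *		hypothetical_disable_intr(sc);
 *		__napi_schedule(&sc->napi);
 *	}
 */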

static void
lkpi___napi_schedule_dd(struct napi_struct *napi)
{
	unsigned long old, new;
	int rc;

	rc = 0;
again:
	NAPI_TRACE2D(napi, rc);
	if (napi->poll != NULL)
		rc = napi->poll(napi, napi->budget);
	napi->rx_count += rc;

	/* Check if interrupts are still disabled, more work to do. */
	/* Bandaid for now. */
	if (rc >= napi->budget)
		goto again;

	/* Bandaid for now. */
	if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->_flags))
		goto again;

	do {
		new = old = READ_ONCE(napi->_flags);
		clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new);
		clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new);
	} while (atomic_cmpset_acq_long(&napi->_flags, old, new) == 0);

	NAPI_TRACE2D(napi, rc);
}

void
linuxkpi___napi_schedule(struct napi_struct *napi)
{
	int rc;

	NAPI_TRACE(napi);
	if (test_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->_flags)) {
		clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->_flags);
		clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->_flags);
		NAPI_TRACE(napi);
		return;
	}

	if (NAPI_DIRECT_DISPATCH()) {
		lkpi___napi_schedule_dd(napi);
	} else {
		rc = taskqueue_enqueue(napi->dev->napi_tq, &napi->napi_task);
		NAPI_TRACE2D(napi, rc);
		if (rc != 0) {
			/* Should we assert EPIPE? */
			return;
		}
	}
}

void
linuxkpi_napi_schedule(struct napi_struct *napi)
{

	NAPI_TRACE(napi);

	/*
	 * iwlwifi calls this sequence instead of napi_schedule()
	 * to be able to test the prep result.
	 */
	if (napi_schedule_prep(napi))
		__napi_schedule(napi);
}

void
linuxkpi_napi_reschedule(struct napi_struct *napi)
{

	NAPI_TRACE(napi);

	/* Not sure yet how this differs from napi_schedule. */
	if (napi_schedule_prep(napi))
		__napi_schedule(napi);
}

bool
linuxkpi_napi_complete_done(struct napi_struct *napi, int ret)
{
	unsigned long old, new;

	NAPI_TRACE(napi);
	if (NAPI_DIRECT_DISPATCH())
		return (true);

	do {
		new = old = READ_ONCE(napi->_flags);

		/*
		 * If we lost a race before, we need to re-schedule.
		 * Leave IS_SCHEDULED set, essentially doing "_prep".
		 */
		if (!test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old))
			clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new);
		clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new);
	} while (atomic_cmpset_acq_long(&napi->_flags, old, new) == 0);

	NAPI_TRACE(napi);

	/* Someone tried to schedule while poll was running. Re-sched. */
	if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old)) {
		__napi_schedule(napi);
		return (false);
	}

	return (true);
}

bool
linuxkpi_napi_complete(struct napi_struct *napi)
{

	NAPI_TRACE(napi);
	return (napi_complete_done(napi, 0));
}

void
linuxkpi_napi_disable(struct napi_struct *napi)
{
	NAPI_TRACE(napi);
	set_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->_flags);
	while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->_flags))
		pause_sbt("napidslp", SBT_1MS, 0, C_HARDCLOCK);
	clear_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->_flags);
}

void
linuxkpi_napi_enable(struct napi_struct *napi)
{

	NAPI_TRACE(napi);
	KASSERT(!test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->_flags),
	    ("%s: enabling napi %p already scheduled\n", __func__, napi));
	mb();
	/* Let us be scheduled. */
	clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->_flags);
}

void
linuxkpi_napi_synchronize(struct napi_struct *napi)
{
	NAPI_TRACE(napi);
#if defined(SMP)
	/* Check & sleep while a napi is scheduled. */
	while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->_flags))
		pause_sbt("napisslp", SBT_1MS, 0, C_HARDCLOCK);
#else
	mb();
#endif
}

/* -------------------------------------------------------------------------- */

static void
lkpi_napi_task(void *ctx, int pending)
{
	struct napi_struct *napi;
	int count;

	KASSERT(ctx != NULL, ("%s: napi %p, pending %d\n",
	    __func__, ctx, pending));
	napi = ctx;
	KASSERT(napi->poll != NULL, ("%s: napi %p poll is NULL\n",
	    __func__, napi));

	NAPI_TRACE_TASK(napi, pending, napi->budget);
	count = napi->poll(napi, napi->budget);
	napi->rx_count += count;
	NAPI_TRACE_TASK(napi, pending, count);

	/*
	 * We must not check against count < pending here.  There are
	 * situations when a driver may "poll" and we may not have any work
	 * to do; that would make us re-schedule ourselves forever.
	 */
	if (count >= napi->budget) {
		/*
		 * Have to re-schedule ourselves.  napi_complete() was not run
		 * in this case which means we are still SCHEDULED.
		 * In order to queue another task we have to directly call
		 * __napi_schedule() without _prep() in the way.
		 */
		__napi_schedule(napi);
	}
}
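
/*
 * A minimal sketch of the poll callback contract lkpi_napi_task() relies
 * on (all driver names are hypothetical): consume up to budget items; if
 * less than the budget was used, complete and re-enable device interrupts,
 * otherwise stay SCHEDULED so the task re-queues itself above.
 *
 *	static int
 *	hypothetical_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct hypothetical_sc *sc;
 *		int done;
 *
 *		sc = container_of(napi, struct hypothetical_sc, napi);
 *		done = hypothetical_rx_harvest(sc, budget);
 *		if (done < budget && napi_complete_done(napi, done))
 *			hypothetical_enable_intr(sc);
 *		return (done);
 *	}
 */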

/* -------------------------------------------------------------------------- */

void
linuxkpi_netif_napi_add(struct net_device *ndev, struct napi_struct *napi,
    int (*napi_poll)(struct napi_struct *, int), int budget)
{

	napi->dev = ndev;
	napi->poll = napi_poll;
	napi->budget = budget;

	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;

	TASK_INIT(&napi->napi_task, 0, lkpi_napi_task, napi);

	NAPI_LOCK(ndev);
	TAILQ_INSERT_TAIL(&ndev->napi_head, napi, entry);
	NAPI_UNLOCK(ndev);

	/* Anything else to do on the ndev? */
	clear_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->_flags);
}

static void
lkpi_netif_napi_del_locked(struct napi_struct *napi)
{
	struct net_device *ndev;

	ndev = napi->dev;
	NAPI_LOCK_ASSERT(ndev);

	set_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->_flags);
	TAILQ_REMOVE(&ndev->napi_head, napi, entry);
	while (taskqueue_cancel(ndev->napi_tq, &napi->napi_task, NULL) != 0)
		taskqueue_drain(ndev->napi_tq, &napi->napi_task);
}

void
linuxkpi_netif_napi_del(struct napi_struct *napi)
{
	struct net_device *ndev;

	ndev = napi->dev;
	NAPI_LOCK(ndev);
	lkpi_netif_napi_del_locked(napi);
	NAPI_UNLOCK(ndev);
}

/* -------------------------------------------------------------------------- */

void
linuxkpi_init_dummy_netdev(struct net_device *ndev)
{

	memset(ndev, 0, sizeof(*ndev));

	ndev->reg_state = NETREG_DUMMY;
	NAPI_LOCK_INIT(ndev);
	TAILQ_INIT(&ndev->napi_head);
	/* Anything else? */

	ndev->napi_tq = taskqueue_create("tq_ndev_napi", M_WAITOK,
	    taskqueue_thread_enqueue, &ndev->napi_tq);
	/* One thread for now. */
	(void) taskqueue_start_threads(&ndev->napi_tq, 1, PWAIT,
	    "ndev napi taskq");
}
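
/*
 * A minimal sketch (names hypothetical) of how a driver would wire the
 * pieces above together, assuming netif_napi_add() maps to
 * linuxkpi_netif_napi_add() with a budget argument: initialize the dummy
 * netdev first (it zeros the structure and sets up the taskqueue),
 * register the poll callback, then allow scheduling.
 *
 *	struct hypothetical_sc {
 *		struct net_device ndev;
 *		struct napi_struct napi;
 *	};
 *
 *	init_dummy_netdev(&sc->ndev);
 *	netif_napi_add(&sc->ndev, &sc->napi, hypothetical_poll, 64);
 *	napi_enable(&sc->napi);
 */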

struct net_device *
linuxkpi_alloc_netdev(size_t len, const char *name, uint32_t flags,
    void (*setup_func)(struct net_device *))
{
	struct net_device *ndev;

	ndev = malloc(sizeof(*ndev) + len, M_NETDEV, M_NOWAIT);
	if (ndev == NULL)
		return (ndev);

	/* Always first as it zeros! */
	linuxkpi_init_dummy_netdev(ndev);

	strlcpy(ndev->name, name, sizeof(ndev->name));

	/* This needs extending as we support more. */

	setup_func(ndev);

	return (ndev);
}

void
linuxkpi_free_netdev(struct net_device *ndev)
{
	struct napi_struct *napi, *temp;

	NAPI_LOCK(ndev);
	TAILQ_FOREACH_SAFE(napi, &ndev->napi_head, entry, temp) {
		lkpi_netif_napi_del_locked(napi);
	}
	NAPI_UNLOCK(ndev);

	taskqueue_free(ndev->napi_tq);
	ndev->napi_tq = NULL;
	NAPI_LOCK_DESTROY(ndev);

	/* This needs extending as we support more. */

	free(ndev, M_NETDEV);
}