/*-
 * Copyright (c) 2021 The FreeBSD Foundation
 * Copyright (c) 2022 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/netdevice.h>

MALLOC_DEFINE(M_NETDEV, "lkpindev", "Linux KPI netdevice compat");

#define	NAPI_LOCK_INIT(_ndev)		\
	mtx_init(&(_ndev)->napi_mtx, "napi_mtx", NULL, MTX_DEF)
#define	NAPI_LOCK_DESTROY(_ndev)	mtx_destroy(&(_ndev)->napi_mtx)
#define	NAPI_LOCK_ASSERT(_ndev)		mtx_assert(&(_ndev)->napi_mtx, MA_OWNED)
#define	NAPI_LOCK(_ndev)		mtx_lock(&(_ndev)->napi_mtx)
#define	NAPI_UNLOCK(_ndev)		mtx_unlock(&(_ndev)->napi_mtx)

/* -------------------------------------------------------------------------- */

#define	LKPI_NAPI_FLAGS \
    "\20\1DISABLE_PENDING\2IS_SCHEDULED\3LOST_RACE_TRY_AGAIN"
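
/*
 * Bit-name string for the printf(9) "%b" conversions in NAPI_TRACE below:
 * \20 (octal 20 = 16) selects hexadecimal output for the value, and
 * \1..\3 name state bits 1..3 (counting from 1 at the LSB), matching the
 * LKPI_NAPI_FLAG_* bit positions used throughout this file.
 */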
/* #define NAPI_DEBUG */
#ifdef NAPI_DEBUG
static int debug_napi;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug_napi, CTLFLAG_RWTUN,
    &debug_napi, 0, "NAPI debug level");

#define	DNAPI_TODO		0x01
#define	DNAPI_IMPROVE		0x02
#define	DNAPI_TRACE		0x10
#define	DNAPI_TRACE_TASK	0x20
#define	DNAPI_DIRECT_DISPATCH	0x1000

#define	NAPI_TRACE(_n)		if (debug_napi & DNAPI_TRACE)		\
    printf("NAPI_TRACE %s:%d %u %p (%#jx %b)\n", __func__, __LINE__,	\
	(unsigned int)ticks, _n, (uintmax_t)(_n)->state,		\
	(int)(_n)->state, LKPI_NAPI_FLAGS)
#define	NAPI_TRACE2D(_n, _d)	if (debug_napi & DNAPI_TRACE)		\
    printf("NAPI_TRACE %s:%d %u %p (%#jx %b) %d\n", __func__, __LINE__, \
	(unsigned int)ticks, _n, (uintmax_t)(_n)->state,		\
	(int)(_n)->state, LKPI_NAPI_FLAGS, _d)
#define	NAPI_TRACE_TASK(_n, _p, _c) if (debug_napi & DNAPI_TRACE_TASK)	\
    printf("NAPI_TRACE %s:%d %u %p (%#jx %b) pending %d count %d "	\
	"rx_count %d\n", __func__, __LINE__,				\
	(unsigned int)ticks, _n, (uintmax_t)(_n)->state,		\
	(int)(_n)->state, LKPI_NAPI_FLAGS, _p, _c, (_n)->rx_count)
#define	NAPI_TODO()		if (debug_napi & DNAPI_TODO)		\
    printf("NAPI_TODO %s:%d %d\n", __func__, __LINE__, ticks)
#define	NAPI_IMPROVE()		if (debug_napi & DNAPI_IMPROVE)		\
    printf("NAPI_IMPROVE %s:%d %d\n", __func__, __LINE__, ticks)

#define	NAPI_DIRECT_DISPATCH()	((debug_napi & DNAPI_DIRECT_DISPATCH) != 0)
#else
#define	NAPI_TRACE(_n)			do { } while(0)
#define	NAPI_TRACE2D(_n, _d)		do { } while(0)
#define	NAPI_TRACE_TASK(_n, _p, _c)	do { } while(0)
#define	NAPI_TODO()			do { } while(0)
#define	NAPI_IMPROVE()			do { } while(0)

#define	NAPI_DIRECT_DISPATCH()		(0)
#endif
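
/*
 * Usage example (assuming a kernel compiled with NAPI_DEBUG defined above):
 * "sysctl compat.linuxkpi.debug_napi=0x30" enables both the per-call trace
 * (DNAPI_TRACE, 0x10) and the task trace (DNAPI_TRACE_TASK, 0x20); being a
 * CTLFLAG_RWTUN sysctl it can also be set as a loader tunable.
 */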

/* -------------------------------------------------------------------------- */

/*
 * Check if a poll is running or can run, and if the latter
 * mark us as running.  That way we ensure that only one poll
 * can ever run at the same time.  Returns true if no poll
 * was scheduled yet.
 */
bool
linuxkpi_napi_schedule_prep(struct napi_struct *napi)
{
	unsigned long old, new;

	NAPI_TRACE(napi);

	/* We can only update/return if all flags agree. */
	do {
		old = READ_ONCE(napi->state);

		/* If we are stopping, cannot run again. */
		if ((old & BIT(LKPI_NAPI_FLAG_DISABLE_PENDING)) != 0) {
			NAPI_TRACE(napi);
			return (false);
		}

		new = old;
		/* We were already scheduled. Need to try again? */
		if ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) != 0)
			new |= BIT(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN);
		new |= BIT(LKPI_NAPI_FLAG_IS_SCHEDULED);

	} while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);

	NAPI_TRACE(napi);
	return ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) == 0);
}

static void
lkpi___napi_schedule_dd(struct napi_struct *napi)
{
	unsigned long old, new;
	int rc;

	rc = 0;
again:
	NAPI_TRACE2D(napi, rc);
	if (napi->poll != NULL)
		rc = napi->poll(napi, napi->budget);
	napi->rx_count += rc;

	/* Check if interrupts are still disabled, more work to do. */
	/* Bandaid for now. */
	if (rc >= napi->budget)
		goto again;

	/* Bandaid for now. */
	if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state))
		goto again;

	do {
		new = old = READ_ONCE(napi->state);
		clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new);
		clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new);
	} while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);

	NAPI_TRACE2D(napi, rc);
}

void
linuxkpi___napi_schedule(struct napi_struct *napi)
{
	int rc;

	NAPI_TRACE(napi);
	if (test_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state)) {
		clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state);
		clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state);
		NAPI_TRACE(napi);
		return;
	}

	if (NAPI_DIRECT_DISPATCH()) {
		lkpi___napi_schedule_dd(napi);
	} else {
		rc = taskqueue_enqueue(napi->dev->napi_tq, &napi->napi_task);
		NAPI_TRACE2D(napi, rc);
		if (rc != 0) {
			/* Should we assert EPIPE? */
			return;
		}
	}
}

bool
linuxkpi_napi_schedule(struct napi_struct *napi)
{

	NAPI_TRACE(napi);

	/*
	 * iwlwifi calls this sequence instead of napi_schedule()
	 * to be able to test the prep result.
	 */
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return (true);
	}

	return (false);
}

void
linuxkpi_napi_reschedule(struct napi_struct *napi)
{

	NAPI_TRACE(napi);

	/* Not sure yet how this differs from napi_schedule. */
	if (napi_schedule_prep(napi))
		__napi_schedule(napi);
}

bool
linuxkpi_napi_complete_done(struct napi_struct *napi, int ret)
{
	unsigned long old, new;

	NAPI_TRACE(napi);
	if (NAPI_DIRECT_DISPATCH())
		return (true);

	do {
		new = old = READ_ONCE(napi->state);

		/*
		 * If we lost a race before, we need to re-schedule.
		 * Leave IS_SCHEDULED set essentially doing "_prep".
		 */
		if (!test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old))
			clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new);
		clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new);
	} while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);

	NAPI_TRACE(napi);

	/* Someone tried to schedule while poll was running. Re-sched. */
	if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old)) {
		__napi_schedule(napi);
		return (false);
	}

	return (true);
}
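
/*
 * A minimal sketch of the poll contract the functions above implement,
 * for a hypothetical driver (drv_poll and drv_rx are illustrative names,
 * not part of this file):
 *
 *	static int
 *	drv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work;
 *
 *		work = drv_rx(napi, budget);	// Process at most budget items.
 *		if (work < budget)
 *			napi_complete_done(napi, work);	// Done; may re-arm IRQ.
 *		return (work);
 *	}
 *
 * Returning work == budget skips napi_complete_done(), leaving IS_SCHEDULED
 * set so the poll gets re-run (see lkpi_napi_task below).
 */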

bool
linuxkpi_napi_complete(struct napi_struct *napi)
{

	NAPI_TRACE(napi);
	return (napi_complete_done(napi, 0));
}

void
linuxkpi_napi_disable(struct napi_struct *napi)
{
	NAPI_TRACE(napi);
	set_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state);
	while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state))
		pause_sbt("napidslp", SBT_1MS, 0, C_HARDCLOCK);
	clear_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state);
}

void
linuxkpi_napi_enable(struct napi_struct *napi)
{

	NAPI_TRACE(napi);
	KASSERT(!test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state),
	    ("%s: enabling napi %p already scheduled\n", __func__, napi));
	mb();
	/* Let us be scheduled. */
	clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state);
}

void
linuxkpi_napi_synchronize(struct napi_struct *napi)
{
	NAPI_TRACE(napi);
#if defined(SMP)
	/* Check & sleep while a napi is scheduled. */
	while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state))
		pause_sbt("napisslp", SBT_1MS, 0, C_HARDCLOCK);
#else
	mb();
#endif
}

/* -------------------------------------------------------------------------- */

static void
lkpi_napi_task(void *ctx, int pending)
{
	struct napi_struct *napi;
	int count;

	KASSERT(ctx != NULL, ("%s: napi %p, pending %d\n",
	    __func__, ctx, pending));
	napi = ctx;
	KASSERT(napi->poll != NULL, ("%s: napi %p poll is NULL\n",
	    __func__, napi));

	NAPI_TRACE_TASK(napi, pending, napi->budget);
	count = napi->poll(napi, napi->budget);
	napi->rx_count += count;
	NAPI_TRACE_TASK(napi, pending, count);

	/*
	 * We must not check count < pending here.  There are situations
	 * when a driver may "poll" while we have no work to do, and that
	 * would make us re-schedule ourselves forever.
	 */
	if (count >= napi->budget) {
		/*
		 * Have to re-schedule ourselves.  napi_complete() was not
		 * run in this case, which means we are still SCHEDULED.
		 * In order to queue another task we have to directly call
		 * __napi_schedule() without _prep() in the way.
		 */
		__napi_schedule(napi);
	}
}

/* -------------------------------------------------------------------------- */

void
linuxkpi_netif_napi_add(struct net_device *ndev, struct napi_struct *napi,
    int (*napi_poll)(struct napi_struct *, int))
{

	napi->dev = ndev;
	napi->poll = napi_poll;
	napi->budget = NAPI_POLL_WEIGHT;

	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;

	TASK_INIT(&napi->napi_task, 0, lkpi_napi_task, napi);

	NAPI_LOCK(ndev);
	TAILQ_INSERT_TAIL(&ndev->napi_head, napi, entry);
	NAPI_UNLOCK(ndev);

	/* Anything else to do on the ndev? */
	clear_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state);
}
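
/*
 * Sketch of the expected driver lifecycle around the functions in this file
 * (sc and drv_poll are hypothetical driver names):
 *
 *	netif_napi_add(ndev, &sc->napi, drv_poll);
 *	napi_enable(&sc->napi);
 *	... interrupt handlers call napi_schedule(&sc->napi) ...
 *	napi_disable(&sc->napi);
 *	netif_napi_del(&sc->napi);
 */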

static void
lkpi_netif_napi_del_locked(struct napi_struct *napi)
{
	struct net_device *ndev;

	ndev = napi->dev;
	NAPI_LOCK_ASSERT(ndev);

	set_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state);
	TAILQ_REMOVE(&ndev->napi_head, napi, entry);
	while (taskqueue_cancel(ndev->napi_tq, &napi->napi_task, NULL) != 0)
		taskqueue_drain(ndev->napi_tq, &napi->napi_task);
}

void
linuxkpi_netif_napi_del(struct napi_struct *napi)
{
	struct net_device *ndev;

	ndev = napi->dev;
	NAPI_LOCK(ndev);
	lkpi_netif_napi_del_locked(napi);
	NAPI_UNLOCK(ndev);
}

/* -------------------------------------------------------------------------- */

void
linuxkpi_init_dummy_netdev(struct net_device *ndev)
{

	memset(ndev, 0, sizeof(*ndev));

	ndev->reg_state = NETREG_DUMMY;
	NAPI_LOCK_INIT(ndev);
	TAILQ_INIT(&ndev->napi_head);
	/* Anything else? */

	ndev->napi_tq = taskqueue_create("tq_ndev_napi", M_WAITOK,
	    taskqueue_thread_enqueue, &ndev->napi_tq);
	/* One thread for now. */
	(void) taskqueue_start_threads(&ndev->napi_tq, 1, PWAIT,
	    "ndev napi taskq");
}

struct net_device *
linuxkpi_alloc_netdev(size_t len, const char *name, uint32_t flags,
    void (*setup_func)(struct net_device *))
{
	struct net_device *ndev;

	ndev = malloc(sizeof(*ndev) + len, M_NETDEV, M_NOWAIT);
	if (ndev == NULL)
		return (ndev);

	/* Always first as it zeros! */
	linuxkpi_init_dummy_netdev(ndev);

	strlcpy(ndev->name, name, sizeof(ndev->name));

	/* This needs extending as we support more. */

	if (setup_func != NULL)
		setup_func(ndev);

	return (ndev);
}
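
/*
 * Minimal usage sketch for the allocator above (struct drv_priv and
 * drv_setup are hypothetical caller-side names):
 *
 *	ndev = linuxkpi_alloc_netdev(sizeof(struct drv_priv), "drv0", 0,
 *	    drv_setup);
 *	if (ndev == NULL)
 *		return (ENOMEM);
 *	...
 *	linuxkpi_free_netdev(ndev);
 *
 * Note the M_NOWAIT allocation: callers must handle a NULL return.
 */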

void
linuxkpi_free_netdev(struct net_device *ndev)
{
	struct napi_struct *napi, *temp;

	NAPI_LOCK(ndev);
	TAILQ_FOREACH_SAFE(napi, &ndev->napi_head, entry, temp) {
		lkpi_netif_napi_del_locked(napi);
	}
	NAPI_UNLOCK(ndev);

	taskqueue_free(ndev->napi_tq);
	ndev->napi_tq = NULL;
	NAPI_LOCK_DESTROY(ndev);

	/* This needs extending as we support more. */

	free(ndev, M_NETDEV);
}