/*
 * Copyright (C) 2016-2018 Vincenzo Maffione
 * Copyright (C) 2015 Stefano Garzarella
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * common headers
 */
#if defined(__FreeBSD__)
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>

#define usleep_range(_1, _2) \
        pause_sbt("sync-kloop-sleep", SBT_1US * _1, SBT_1US * 1, C_ABSOLUTE)

#elif defined(linux)
#include <bsd_glue.h>
#include <linux/file.h>
#include <linux/eventfd.h>
#endif

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>

/* Support for eventfd-based notifications. */
#if defined(linux)
#define SYNC_KLOOP_POLL
#endif

/* Write kring pointers (hwcur, hwtail) to the CSB.
 * This routine is coupled with ptnetmap_guest_read_kring_csb(). */
static inline void
sync_kloop_kernel_write(struct nm_csb_ktoa __user *ptr, uint32_t hwcur,
        uint32_t hwtail)
{
        /* Issue a first store-store barrier to make sure that the writes to
         * the netmap ring become visible before the updates of ktoa->hwcur
         * and ktoa->hwtail. */
        nm_stst_barrier();

        /*
         * The same scheme used in nm_sync_kloop_appl_write() applies here.
         * We allow the application to read a value of hwcur more recent than
         * the value of hwtail, since this still results in a consistent view
         * of the ring state (and hwcur can never wrap around hwtail, because
         * hwcur must be behind head).
         *
         * The following memory barrier scheme is used to make this happen:
         *
         *          Application            Kernel
         *
         *          STORE(hwcur)           LOAD(hwtail)
         *          wmb() <------------->  rmb()
         *          STORE(hwtail)          LOAD(hwcur)
         */
        CSB_WRITE(ptr, hwcur, hwcur);
        nm_stst_barrier();
        CSB_WRITE(ptr, hwtail, hwtail);
}

/* Read kring pointers (head, cur, sync_flags) from the CSB.
 * This routine is coupled with ptnetmap_guest_write_kring_csb(). */
static inline void
sync_kloop_kernel_read(struct nm_csb_atok __user *ptr,
        struct netmap_ring *shadow_ring,
        uint32_t num_slots)
{
        /*
         * We place a memory barrier to make sure that the update of head never
         * overtakes the update of cur.
         * (see explanation in sync_kloop_kernel_write).
         */
        CSB_READ(ptr, head, shadow_ring->head);
        nm_ldld_barrier();
        CSB_READ(ptr, cur, shadow_ring->cur);
        CSB_READ(ptr, sync_flags, shadow_ring->flags);

        /* Make sure that loads from atok->head and atok->cur are not delayed
         * after the loads from the netmap ring. */
        nm_ldld_barrier();
}

/* Enable or disable application --> kernel kicks. */
static inline void
csb_ktoa_kick_enable(struct nm_csb_ktoa __user *csb_ktoa, uint32_t val)
{
        CSB_WRITE(csb_ktoa, kern_need_kick, val);
}

#ifdef SYNC_KLOOP_POLL
/* Are application interrupts enabled or disabled? */
static inline uint32_t
csb_atok_intr_enabled(struct nm_csb_atok __user *csb_atok)
{
        uint32_t v;

        CSB_READ(csb_atok, appl_need_kick, v);

        return v;
}
#endif /* SYNC_KLOOP_POLL */

static inline void
sync_kloop_kring_dump(const char *title, const struct netmap_kring *kring)
{
        nm_prinf("%s, kring %s, hwcur %d, rhead %d, "
                "rcur %d, rtail %d, hwtail %d",
                title, kring->name, kring->nr_hwcur, kring->rhead,
                kring->rcur, kring->rtail, kring->nr_hwtail);
}

/* Arguments for netmap_sync_kloop_tx_ring() and
 * netmap_sync_kloop_rx_ring().
 */
struct sync_kloop_ring_args {
        struct netmap_kring *kring;
        struct nm_csb_atok *csb_atok;
        struct nm_csb_ktoa *csb_ktoa;
#ifdef SYNC_KLOOP_POLL
        struct eventfd_ctx *irq_ctx;
#endif /* SYNC_KLOOP_POLL */
        /* Are we busy waiting rather than using a schedule() loop? */
        bool busy_wait;
        /* Are we processing in the context of a VM exit? */
        bool direct;
};

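/* Process a TX ring on behalf of the application: disable the
 * application --> kernel kicks (unless in direct mode), run txsync until
 * there is no more work to do, publish hwcur/hwtail to the CSB and, if
 * new TX space has become available, signal the application through the
 * irq eventfd. */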
static void
netmap_sync_kloop_tx_ring(const struct sync_kloop_ring_args *a)
{
        struct netmap_kring *kring = a->kring;
        struct nm_csb_atok *csb_atok = a->csb_atok;
        struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa;
        struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
        bool more_txspace = false;
        uint32_t num_slots;
        int batch;

        if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
                return;
        }

        num_slots = kring->nkr_num_slots;

        /* Disable application --> kernel notifications. */
        if (!a->direct) {
                csb_ktoa_kick_enable(csb_ktoa, 0);
        }
        /* Copy the application kring pointers from the CSB. */
        sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);

        for (;;) {
                batch = shadow_ring.head - kring->nr_hwcur;
                if (batch < 0)
                        batch += num_slots;

#ifdef PTN_TX_BATCH_LIM
                if (batch > PTN_TX_BATCH_LIM(num_slots)) {
                        /* If the application moves ahead too fast, let's cut
                         * the move so that we don't exceed our batch limit. */
                        uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots);

                        if (head_lim >= num_slots)
                                head_lim -= num_slots;
                        nm_prdis(1, "batch: %d head: %d head_lim: %d", batch, shadow_ring.head,
                                        head_lim);
                        shadow_ring.head = head_lim;
                        batch = PTN_TX_BATCH_LIM(num_slots);
                }
#endif /* PTN_TX_BATCH_LIM */

                if (nm_kr_txspace(kring) <= (num_slots >> 1)) {
                        shadow_ring.flags |= NAF_FORCE_RECLAIM;
                }

                /* Netmap prologue */
                shadow_ring.tail = kring->rtail;
                if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) {
                        /* Reinit ring and enable notifications. */
                        netmap_ring_reinit(kring);
                        if (!a->busy_wait) {
                                csb_ktoa_kick_enable(csb_ktoa, 1);
                        }
                        break;
                }

                if (unlikely(netmap_debug & NM_DEBUG_TXSYNC)) {
                        sync_kloop_kring_dump("pre txsync", kring);
                }

                if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
                        if (!a->busy_wait) {
                                /* Reenable notifications. */
                                csb_ktoa_kick_enable(csb_ktoa, 1);
                        }
                        nm_prerr("txsync() failed");
                        break;
                }

                /*
                 * Finalize.
                 * Copy kernel hwcur and hwtail into the CSB for the
                 * application sync(), and do the nm_sync_finalize.
                 */
                sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur,
                                kring->nr_hwtail);
                if (kring->rtail != kring->nr_hwtail) {
                        /* Some more room available in the parent adapter. */
                        kring->rtail = kring->nr_hwtail;
                        more_txspace = true;
                }

                if (unlikely(netmap_debug & NM_DEBUG_TXSYNC)) {
                        sync_kloop_kring_dump("post txsync", kring);
                }

                /* Interrupt the application if needed. */
#ifdef SYNC_KLOOP_POLL
                if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) {
                        /* We could disable kernel --> application kicks here,
                         * to avoid spurious interrupts. */
                        eventfd_signal(a->irq_ctx, 1);
                        more_txspace = false;
                }
#endif /* SYNC_KLOOP_POLL */

                /* Read the CSB to see if there is more work to do. */
                sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
                if (shadow_ring.head == kring->rhead) {
                        if (a->busy_wait) {
                                break;
                        }
                        /*
                         * No more packets to transmit. We enable notifications
                         * and go to sleep, waiting for a kick from the
                         * application when new slots are ready for
                         * transmission.
                         */
                        /* Reenable notifications. */
                        csb_ktoa_kick_enable(csb_ktoa, 1);
                        /* Double check, with store-load memory barrier. */
                        nm_stld_barrier();
                        sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
                        if (shadow_ring.head != kring->rhead) {
                                /* We won the race condition, there are more
                                 * packets to transmit. Disable notifications
                                 * and do another cycle. */
                                csb_ktoa_kick_enable(csb_ktoa, 0);
                                continue;
                        }
                        break;
                }

                if (nm_kr_txempty(kring)) {
                        /* No more available TX slots. We stop waiting for a
                         * notification from the backend (netmap_tx_irq). */
                        nm_prdis(1, "TX ring");
                        break;
                }
        }

        nm_kr_put(kring);

#ifdef SYNC_KLOOP_POLL
        if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) {
                eventfd_signal(a->irq_ctx, 1);
        }
#endif /* SYNC_KLOOP_POLL */
}

/* Maximum number of consecutive RX cycles without receiving any packet. */
#define SYNC_LOOP_RX_DRY_CYCLES_MAX	2

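/* Return true if the RX kring has no slots left for the kernel to fill,
 * i.e. hwtail is immediately behind the head published by the
 * application. */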
static inline int
sync_kloop_norxslots(struct netmap_kring *kring, uint32_t g_head)
{
        return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head,
                                kring->nkr_num_slots - 1));
}

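/* Process an RX ring on behalf of the application: run rxsync until no
 * new packets show up (or the dry-cycle limit is reached), publish
 * hwcur/hwtail to the CSB and, if packets have been received, signal the
 * application through the irq eventfd. */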
static void
netmap_sync_kloop_rx_ring(const struct sync_kloop_ring_args *a)
{
        struct netmap_kring *kring = a->kring;
        struct nm_csb_atok *csb_atok = a->csb_atok;
        struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa;
        struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
        int dry_cycles = 0;
        bool some_recvd = false;
        uint32_t num_slots;

        if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
                return;
        }

        num_slots = kring->nkr_num_slots;

        /* Disable notifications. */
        if (!a->direct) {
                csb_ktoa_kick_enable(csb_ktoa, 0);
        }
        /* Copy the application kring pointers from the CSB. */
        sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);

        for (;;) {
                uint32_t hwtail;

                /* Netmap prologue */
                shadow_ring.tail = kring->rtail;
                if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) {
                        /* Reinit ring and enable notifications. */
                        netmap_ring_reinit(kring);
                        if (!a->busy_wait) {
                                csb_ktoa_kick_enable(csb_ktoa, 1);
                        }
                        break;
                }

                if (unlikely(netmap_debug & NM_DEBUG_RXSYNC)) {
                        sync_kloop_kring_dump("pre rxsync", kring);
                }

                if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
                        if (!a->busy_wait) {
                                /* Reenable notifications. */
                                csb_ktoa_kick_enable(csb_ktoa, 1);
                        }
                        nm_prerr("rxsync() failed");
                        break;
                }

                /*
                 * Finalize.
                 * Copy kernel hwcur and hwtail into the CSB for the
                 * application sync().
                 */
                hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
                sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur, hwtail);
                if (kring->rtail != hwtail) {
                        kring->rtail = hwtail;
                        some_recvd = true;
                        dry_cycles = 0;
                } else {
                        dry_cycles++;
                }

                if (unlikely(netmap_debug & NM_DEBUG_RXSYNC)) {
                        sync_kloop_kring_dump("post rxsync", kring);
                }

#ifdef SYNC_KLOOP_POLL
                /* Interrupt the application if needed. */
                if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) {
                        /* We could disable kernel --> application kicks here,
                         * to avoid spurious interrupts. */
                        eventfd_signal(a->irq_ctx, 1);
                        some_recvd = false;
                }
#endif /* SYNC_KLOOP_POLL */

                /* Read the CSB to see if there is more work to do. */
                sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
                if (sync_kloop_norxslots(kring, shadow_ring.head)) {
                        if (a->busy_wait) {
                                break;
                        }
                        /*
                         * No more slots available for reception. We enable
                         * notifications and go to sleep, waiting for a kick
                         * from the application when new receive slots are
                         * available.
                         */
                        /* Reenable notifications. */
                        csb_ktoa_kick_enable(csb_ktoa, 1);
                        /* Double check, with store-load memory barrier. */
                        nm_stld_barrier();
                        sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
                        if (!sync_kloop_norxslots(kring, shadow_ring.head)) {
                                /* We won the race condition, more slots are
                                 * available. Disable notifications and do
                                 * another cycle. */
                                csb_ktoa_kick_enable(csb_ktoa, 0);
                                continue;
                        }
                        break;
                }

                hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
                if (unlikely(hwtail == kring->rhead ||
                                dry_cycles >= SYNC_LOOP_RX_DRY_CYCLES_MAX)) {
                        /* No more packets to be read from the backend. We stop
                         * and wait for a notification from the backend
                         * (netmap_rx_irq). */
                        nm_prdis(1, "nr_hwtail: %d rhead: %d dry_cycles: %d",
                                        hwtail, kring->rhead, dry_cycles);
                        break;
                }
        }

        nm_kr_put(kring);

#ifdef SYNC_KLOOP_POLL
        /* Interrupt the application if needed. */
        if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) {
                eventfd_signal(a->irq_ctx, 1);
        }
#endif /* SYNC_KLOOP_POLL */
}

#ifdef SYNC_KLOOP_POLL
struct sync_kloop_poll_ctx;
struct sync_kloop_poll_entry {
        /* Support for receiving notifications from
         * a netmap ring or from the application. */
        struct file *filp;
        wait_queue_t wait;
        wait_queue_head_t *wqh;

        /* Support for sending notifications to the application. */
        struct eventfd_ctx *irq_ctx;
        struct file *irq_filp;

        /* Arguments for the ring processing function. Useful
         * in case of a custom wake-up function. */
        struct sync_kloop_ring_args *args;
        struct sync_kloop_poll_ctx *parent;
};

struct sync_kloop_poll_ctx {
        poll_table wait_table;
        unsigned int next_entry;
        int (*next_wake_fun)(wait_queue_t *, unsigned, int, void *);
        unsigned int num_entries;
        unsigned int num_tx_rings;
        unsigned int num_rings;
        /* The first num_tx_rings entries are for the TX kicks.
         * Then the RX kick entries follow. The last two
         * entries are for the TX irq and the RX irq. */
        struct sync_kloop_poll_entry entries[0];
};

static void
sync_kloop_poll_table_queue_proc(struct file *file, wait_queue_head_t *wqh,
        poll_table *pt)
{
        struct sync_kloop_poll_ctx *poll_ctx =
                container_of(pt, struct sync_kloop_poll_ctx, wait_table);
        struct sync_kloop_poll_entry *entry = poll_ctx->entries +
                poll_ctx->next_entry;

        BUG_ON(poll_ctx->next_entry >= poll_ctx->num_entries);
        entry->wqh = wqh;
        entry->filp = file;
        /* Use the default wake-up function if no custom one was set. */
        if (poll_ctx->next_wake_fun == NULL) {
                init_waitqueue_entry(&entry->wait, current);
        } else {
                init_waitqueue_func_entry(&entry->wait,
                        poll_ctx->next_wake_fun);
        }
        add_wait_queue(wqh, &entry->wait);
}

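/* Custom wake-up callback for a TX kick eventfd (direct TX mode): process
 * the corresponding TX ring directly from wake-up context. */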
static int
sync_kloop_tx_kick_wake_fun(wait_queue_t *wait, unsigned mode,
        int wake_flags, void *key)
{
        struct sync_kloop_poll_entry *entry =
                container_of(wait, struct sync_kloop_poll_entry, wait);

        netmap_sync_kloop_tx_ring(entry->args);

        return 0;
}

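/* Custom wake-up callback used for TX notifications coming from the
 * netmap adapter (direct TX mode): forward the interrupt to the
 * application by signalling all the TX irqfds. */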
static int
sync_kloop_tx_irq_wake_fun(wait_queue_t *wait, unsigned mode,
        int wake_flags, void *key)
{
        struct sync_kloop_poll_entry *entry =
                container_of(wait, struct sync_kloop_poll_entry, wait);
        struct sync_kloop_poll_ctx *poll_ctx = entry->parent;
        int i;

        for (i = 0; i < poll_ctx->num_tx_rings; i++) {
                struct eventfd_ctx *irq_ctx = poll_ctx->entries[i].irq_ctx;

                if (irq_ctx) {
                        eventfd_signal(irq_ctx, 1);
                }
        }

        return 0;
}

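/* Custom wake-up callback for an RX kick eventfd (direct RX mode): process
 * the corresponding RX ring directly from wake-up context. */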
static int
sync_kloop_rx_kick_wake_fun(wait_queue_t *wait, unsigned mode,
        int wake_flags, void *key)
{
        struct sync_kloop_poll_entry *entry =
                container_of(wait, struct sync_kloop_poll_entry, wait);

        netmap_sync_kloop_rx_ring(entry->args);

        return 0;
}

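/* Custom wake-up callback used for RX notifications coming from the
 * netmap adapter (direct RX mode): forward the interrupt to the
 * application by signalling all the RX irqfds. */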
static int
sync_kloop_rx_irq_wake_fun(wait_queue_t *wait, unsigned mode,
        int wake_flags, void *key)
{
        struct sync_kloop_poll_entry *entry =
                container_of(wait, struct sync_kloop_poll_entry, wait);
        struct sync_kloop_poll_ctx *poll_ctx = entry->parent;
        int i;

        for (i = poll_ctx->num_tx_rings; i < poll_ctx->num_rings; i++) {
                struct eventfd_ctx *irq_ctx = poll_ctx->entries[i].irq_ctx;

                if (irq_ctx) {
                        eventfd_signal(irq_ctx, 1);
                }
        }

        return 0;
}
#endif /* SYNC_KLOOP_POLL */

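/* Main entry point of the sync kloop: run txsync/rxsync on behalf of the
 * application until it asks us to stop, either busy waiting (sleeping for
 * sleep_us between iterations) or blocking on the eventfds provided
 * through the request options. */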
int
netmap_sync_kloop(struct netmap_priv_d *priv, struct nmreq_header *hdr)
{
        struct nmreq_sync_kloop_start *req =
                (struct nmreq_sync_kloop_start *)(uintptr_t)hdr->nr_body;
        struct nmreq_opt_sync_kloop_eventfds *eventfds_opt = NULL;
#ifdef SYNC_KLOOP_POLL
        struct sync_kloop_poll_ctx *poll_ctx = NULL;
#endif /* SYNC_KLOOP_POLL */
        int num_rx_rings, num_tx_rings, num_rings;
        struct sync_kloop_ring_args *args = NULL;
        uint32_t sleep_us = req->sleep_us;
        struct nm_csb_atok *csb_atok_base;
        struct nm_csb_ktoa *csb_ktoa_base;
        struct netmap_adapter *na;
        struct nmreq_option *opt;
        bool na_could_sleep = false;
        bool busy_wait = true;
        bool direct_tx = false;
        bool direct_rx = false;
        int err = 0;
        int i;

        if (sleep_us > 1000000) {
                /* We do not accept sleeping for more than a second. */
                return EINVAL;
        }

        if (priv->np_nifp == NULL) {
                return ENXIO;
        }
        mb(); /* make sure following reads are not from cache */

        na = priv->np_na;
        if (!nm_netmap_on(na)) {
                return ENXIO;
        }

        NMG_LOCK();
        /* Make sure the application is working in CSB mode. */
        if (!priv->np_csb_atok_base || !priv->np_csb_ktoa_base) {
                NMG_UNLOCK();
                nm_prerr("sync-kloop on %s requires "
                        "NETMAP_REQ_OPT_CSB option", na->name);
                return EINVAL;
        }

        csb_atok_base = priv->np_csb_atok_base;
        csb_ktoa_base = priv->np_csb_ktoa_base;

        /* Make sure that no kloop is currently running. */
        if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
                err = EBUSY;
        }
        priv->np_kloop_state |= NM_SYNC_KLOOP_RUNNING;
        NMG_UNLOCK();
        if (err) {
                return err;
        }

        num_rx_rings = priv->np_qlast[NR_RX] - priv->np_qfirst[NR_RX];
        num_tx_rings = priv->np_qlast[NR_TX] - priv->np_qfirst[NR_TX];
        num_rings = num_tx_rings + num_rx_rings;

        args = nm_os_malloc(num_rings * sizeof(args[0]));
        if (!args) {
                err = ENOMEM;
                goto out;
        }

        /* Prepare the arguments for netmap_sync_kloop_tx_ring()
         * and netmap_sync_kloop_rx_ring(). */
        for (i = 0; i < num_tx_rings; i++) {
                struct sync_kloop_ring_args *a = args + i;

                a->kring = NMR(na, NR_TX)[i + priv->np_qfirst[NR_TX]];
                a->csb_atok = csb_atok_base + i;
                a->csb_ktoa = csb_ktoa_base + i;
                a->busy_wait = busy_wait;
                a->direct = direct_tx;
        }
        for (i = 0; i < num_rx_rings; i++) {
                struct sync_kloop_ring_args *a = args + num_tx_rings + i;

                a->kring = NMR(na, NR_RX)[i + priv->np_qfirst[NR_RX]];
                a->csb_atok = csb_atok_base + num_tx_rings + i;
                a->csb_ktoa = csb_ktoa_base + num_tx_rings + i;
                a->busy_wait = busy_wait;
                a->direct = direct_rx;
        }

        /* Validate notification options. */
        opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_SYNC_KLOOP_MODE);
        if (opt != NULL) {
                struct nmreq_opt_sync_kloop_mode *mode_opt =
                        (struct nmreq_opt_sync_kloop_mode *)opt;

                direct_tx = !!(mode_opt->mode & NM_OPT_SYNC_KLOOP_DIRECT_TX);
                direct_rx = !!(mode_opt->mode & NM_OPT_SYNC_KLOOP_DIRECT_RX);
                if (mode_opt->mode & ~(NM_OPT_SYNC_KLOOP_DIRECT_TX |
                                NM_OPT_SYNC_KLOOP_DIRECT_RX)) {
                        opt->nro_status = err = EINVAL;
                        goto out;
                }
                opt->nro_status = 0;
        }
        opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS);
        if (opt != NULL) {
                if (opt->nro_size != sizeof(*eventfds_opt) +
                        sizeof(eventfds_opt->eventfds[0]) * num_rings) {
                        /* Option size not consistent with the number of
                         * entries. */
                        opt->nro_status = err = EINVAL;
                        goto out;
                }
#ifdef SYNC_KLOOP_POLL
                eventfds_opt = (struct nmreq_opt_sync_kloop_eventfds *)opt;
                opt->nro_status = 0;

                /* Check if some ioeventfd entry is not defined, and force
                 * sleep synchronization in that case. */
                busy_wait = false;
                for (i = 0; i < num_rings; i++) {
                        if (eventfds_opt->eventfds[i].ioeventfd < 0) {
                                busy_wait = true;
                                break;
                        }
                }

                if (busy_wait && (direct_tx || direct_rx)) {
                        /* For direct processing we need all the
                         * ioeventfds to be valid. */
                        opt->nro_status = err = EINVAL;
                        goto out;
                }

                /* We need 2 poll entries for the TX and RX notifications
                 * coming from the netmap adapter, plus one entry per ring for
                 * the notifications coming from the application. */
                poll_ctx = nm_os_malloc(sizeof(*poll_ctx) +
                                (num_rings + 2) * sizeof(poll_ctx->entries[0]));
                init_poll_funcptr(&poll_ctx->wait_table,
                        sync_kloop_poll_table_queue_proc);
                poll_ctx->num_entries = 2 + num_rings;
                poll_ctx->num_tx_rings = num_tx_rings;
                poll_ctx->num_rings = num_rings;
                poll_ctx->next_entry = 0;
                poll_ctx->next_wake_fun = NULL;

                if (direct_tx && (na->na_flags & NAF_BDG_MAYSLEEP)) {
                        /* In direct mode, VALE txsync is called from
                         * wake-up context, where it is not possible
                         * to sleep.
                         */
                        na->na_flags &= ~NAF_BDG_MAYSLEEP;
                        na_could_sleep = true;
                }

                for (i = 0; i < num_rings + 2; i++) {
                        poll_ctx->entries[i].args = args + i;
                        poll_ctx->entries[i].parent = poll_ctx;
                }

                /* Poll for notifications coming from the applications through
                 * eventfds. */
                for (i = 0; i < num_rings; i++, poll_ctx->next_entry++) {
                        struct eventfd_ctx *irq = NULL;
                        struct file *filp = NULL;
                        unsigned long mask;
                        bool tx_ring = (i < num_tx_rings);

                        if (eventfds_opt->eventfds[i].irqfd >= 0) {
                                filp = eventfd_fget(
                                        eventfds_opt->eventfds[i].irqfd);
                                if (IS_ERR(filp)) {
                                        err = PTR_ERR(filp);
                                        goto out;
                                }
                                irq = eventfd_ctx_fileget(filp);
                                if (IS_ERR(irq)) {
                                        err = PTR_ERR(irq);
                                        goto out;
                                }
                        }
                        poll_ctx->entries[i].irq_filp = filp;
                        poll_ctx->entries[i].irq_ctx = irq;
                        poll_ctx->entries[i].args->busy_wait = busy_wait;
                        /* Don't let netmap_sync_kloop_*x_ring() use
                         * IRQs in direct mode. */
                        poll_ctx->entries[i].args->irq_ctx =
                                ((tx_ring && direct_tx) ||
                                (!tx_ring && direct_rx)) ? NULL :
                                poll_ctx->entries[i].irq_ctx;
                        poll_ctx->entries[i].args->direct =
                                (tx_ring ? direct_tx : direct_rx);

                        if (!busy_wait) {
                                filp = eventfd_fget(
                                        eventfds_opt->eventfds[i].ioeventfd);
                                if (IS_ERR(filp)) {
                                        err = PTR_ERR(filp);
                                        goto out;
                                }
                                if (tx_ring && direct_tx) {
                                        /* Override the wake-up function
                                         * so that it can directly call
                                         * netmap_sync_kloop_tx_ring().
                                         */
                                        poll_ctx->next_wake_fun =
                                                sync_kloop_tx_kick_wake_fun;
                                } else if (!tx_ring && direct_rx) {
                                        /* Same for direct RX. */
                                        poll_ctx->next_wake_fun =
                                                sync_kloop_rx_kick_wake_fun;
                                } else {
                                        poll_ctx->next_wake_fun = NULL;
                                }
                                mask = filp->f_op->poll(filp,
                                        &poll_ctx->wait_table);
                                if (mask & POLLERR) {
                                        err = EINVAL;
                                        goto out;
                                }
                        }
                }

                /* Poll for notifications coming from the netmap rings bound to
                 * this file descriptor. */
                if (!busy_wait) {
                        NMG_LOCK();
                        /* In direct mode, override the wake-up function so
                         * that it can forward the netmap_tx_irq() to the
                         * guest. */
                        poll_ctx->next_wake_fun = direct_tx ?
                                sync_kloop_tx_irq_wake_fun : NULL;
                        poll_wait(priv->np_filp, priv->np_si[NR_TX],
                                &poll_ctx->wait_table);
                        poll_ctx->next_entry++;

                        poll_ctx->next_wake_fun = direct_rx ?
                                sync_kloop_rx_irq_wake_fun : NULL;
                        poll_wait(priv->np_filp, priv->np_si[NR_RX],
                                &poll_ctx->wait_table);
                        poll_ctx->next_entry++;
                        NMG_UNLOCK();
                }
#else /* SYNC_KLOOP_POLL */
                opt->nro_status = EOPNOTSUPP;
                goto out;
#endif /* SYNC_KLOOP_POLL */
        }

        nm_prinf("kloop busy_wait %u, direct_tx %u, direct_rx %u, "
                "na_could_sleep %u", busy_wait, direct_tx, direct_rx,
                na_could_sleep);

        /* Main loop. */
        for (;;) {
                if (unlikely(NM_ACCESS_ONCE(priv->np_kloop_state) & NM_SYNC_KLOOP_STOPPING)) {
                        break;
                }

#ifdef SYNC_KLOOP_POLL
                if (!busy_wait) {
                        /* It is important to set the task state as
                         * interruptible before processing any TX/RX ring,
                         * so that if a notification on ring Y comes after
                         * we have processed ring Y, but before we call
                         * schedule(), we don't miss it. This is true because
                         * the wake-up function will change the task state,
                         * and therefore the schedule_timeout() call below
                         * will observe the change.
                         */
                        set_current_state(TASK_INTERRUPTIBLE);
                }
#endif /* SYNC_KLOOP_POLL */

                /* Process all the TX rings bound to this file descriptor. */
                for (i = 0; !direct_tx && i < num_tx_rings; i++) {
                        struct sync_kloop_ring_args *a = args + i;
                        netmap_sync_kloop_tx_ring(a);
                }

                /* Process all the RX rings bound to this file descriptor. */
                for (i = 0; !direct_rx && i < num_rx_rings; i++) {
                        struct sync_kloop_ring_args *a = args + num_tx_rings + i;
                        netmap_sync_kloop_rx_ring(a);
                }

                if (busy_wait) {
                        /* Default synchronization method: sleep for a while. */
                        usleep_range(sleep_us, sleep_us);
                }
#ifdef SYNC_KLOOP_POLL
                else {
                        /* Yield to the scheduler, waiting for a notification
                         * to come either from netmap or from the application. */
                        schedule_timeout(msecs_to_jiffies(3000));
                }
#endif /* SYNC_KLOOP_POLL */
        }
out:
#ifdef SYNC_KLOOP_POLL
        if (poll_ctx) {
                /* Stop polling from netmap and the eventfds, and deallocate
                 * the poll context. */
                if (!busy_wait) {
                        __set_current_state(TASK_RUNNING);
                }
                for (i = 0; i < poll_ctx->next_entry; i++) {
                        struct sync_kloop_poll_entry *entry =
                                poll_ctx->entries + i;

                        if (entry->wqh)
                                remove_wait_queue(entry->wqh, &entry->wait);
                        /* Release the references taken on the eventfd files,
                         * but not on the netmap file descriptor, for which
                         * no reference was taken. */
                        if (entry->filp && entry->filp != priv->np_filp)
                                fput(entry->filp);
                        if (entry->irq_ctx)
                                eventfd_ctx_put(entry->irq_ctx);
                        if (entry->irq_filp)
                                fput(entry->irq_filp);
                }
                nm_os_free(poll_ctx);
                poll_ctx = NULL;
        }
#endif /* SYNC_KLOOP_POLL */

        if (args) {
                nm_os_free(args);
                args = NULL;
        }

        /* Reset the kloop state. */
        NMG_LOCK();
        priv->np_kloop_state = 0;
        if (na_could_sleep) {
                na->na_flags |= NAF_BDG_MAYSLEEP;
        }
        NMG_UNLOCK();

        return err;
}

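/* Ask a running sync kloop to stop, wake it up in case it is blocked in
 * schedule_timeout(), and wait until it actually terminates. */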
int
netmap_sync_kloop_stop(struct netmap_priv_d *priv)
{
        struct netmap_adapter *na;
        bool running = true;
        int err = 0;

        if (priv->np_nifp == NULL) {
                return ENXIO;
        }
        mb(); /* make sure following reads are not from cache */

        na = priv->np_na;
        if (!nm_netmap_on(na)) {
                return ENXIO;
        }

        /* Set the kloop stopping flag. */
        NMG_LOCK();
        priv->np_kloop_state |= NM_SYNC_KLOOP_STOPPING;
        NMG_UNLOCK();

        /* Send a notification to the kloop, in case it is blocked in
         * schedule_timeout(). We can use either RX or TX, because the
         * kloop is waiting on both. */
        nm_os_selwakeup(priv->np_si[NR_RX]);

        /* Wait for the kloop to actually terminate. */
        while (running) {
                usleep_range(1000, 1500);
                NMG_LOCK();
                running = (NM_ACCESS_ONCE(priv->np_kloop_state)
                        & NM_SYNC_KLOOP_RUNNING);
                NMG_UNLOCK();
        }

        return err;
}

945b321acabSVincenzo Maffione #ifdef WITH_PTNETMAP
946b321acabSVincenzo Maffione /*
947b321acabSVincenzo Maffione * Guest ptnetmap txsync()/rxsync() routines, used in ptnet device drivers.
948b321acabSVincenzo Maffione * These routines are reused across the different operating systems supported
949b321acabSVincenzo Maffione * by netmap.
950b321acabSVincenzo Maffione */
951b321acabSVincenzo Maffione
952b321acabSVincenzo Maffione /*
953b321acabSVincenzo Maffione * Reconcile host and guest views of the transmit ring.
954b321acabSVincenzo Maffione *
955b321acabSVincenzo Maffione * Guest user wants to transmit packets up to the one before ring->head,
956b321acabSVincenzo Maffione * and guest kernel knows tx_ring->hwcur is the first packet unsent
957b321acabSVincenzo Maffione * by the host kernel.
958b321acabSVincenzo Maffione *
959b321acabSVincenzo Maffione * We push out as many packets as possible, and possibly
960b321acabSVincenzo Maffione * reclaim buffers from previously completed transmission.
961b321acabSVincenzo Maffione *
962b321acabSVincenzo Maffione * Notifications from the host are enabled only if the user guest would
963b321acabSVincenzo Maffione * block (no space in the ring).
964b321acabSVincenzo Maffione */
965b321acabSVincenzo Maffione bool
netmap_pt_guest_txsync(struct nm_csb_atok * atok,struct nm_csb_ktoa * ktoa,struct netmap_kring * kring,int flags)966b321acabSVincenzo Maffione netmap_pt_guest_txsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
967b321acabSVincenzo Maffione struct netmap_kring *kring, int flags)
968b321acabSVincenzo Maffione {
969b321acabSVincenzo Maffione bool notify = false;
970b321acabSVincenzo Maffione
971b321acabSVincenzo Maffione /* Disable notifications */
972b321acabSVincenzo Maffione atok->appl_need_kick = 0;
973b321acabSVincenzo Maffione
974b321acabSVincenzo Maffione /*
975fdde8f32SVincenzo Maffione * First part: tell the host to process the new packets,
976fdde8f32SVincenzo Maffione * updating the CSB.
977b321acabSVincenzo Maffione */
978b321acabSVincenzo Maffione kring->nr_hwcur = ktoa->hwcur;
97993b1d6a0SVincenzo Maffione nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);
980b321acabSVincenzo Maffione
981b321acabSVincenzo Maffione /* Ask for a kick from a guest to the host if needed. */
982fdde8f32SVincenzo Maffione if (((kring->rhead != kring->nr_hwcur || nm_kr_wouldblock(kring))
983b321acabSVincenzo Maffione && NM_ACCESS_ONCE(ktoa->kern_need_kick)) ||
984b321acabSVincenzo Maffione (flags & NAF_FORCE_RECLAIM)) {
985b321acabSVincenzo Maffione atok->sync_flags = flags;
986b321acabSVincenzo Maffione notify = true;
987b321acabSVincenzo Maffione }
988b321acabSVincenzo Maffione
989b321acabSVincenzo Maffione /*
990b321acabSVincenzo Maffione * Second part: reclaim buffers for completed transmissions.
991b321acabSVincenzo Maffione */
992fdde8f32SVincenzo Maffione if (nm_kr_wouldblock(kring) || (flags & NAF_FORCE_RECLAIM)) {
99393b1d6a0SVincenzo Maffione nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
99493b1d6a0SVincenzo Maffione &kring->nr_hwcur);
995b321acabSVincenzo Maffione }
996b321acabSVincenzo Maffione
997b321acabSVincenzo Maffione /*
998b321acabSVincenzo Maffione * No more room in the ring for new transmissions. The user thread will
999b321acabSVincenzo Maffione * go to sleep and we need to be notified by the host when more free
1000b321acabSVincenzo Maffione * space is available.
1001b321acabSVincenzo Maffione */
1002fdde8f32SVincenzo Maffione if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
1003b321acabSVincenzo Maffione /* Reenable notifications. */
1004b321acabSVincenzo Maffione atok->appl_need_kick = 1;
100593b1d6a0SVincenzo Maffione /* Double check, with store-load memory barrier. */
100693b1d6a0SVincenzo Maffione nm_stld_barrier();
100793b1d6a0SVincenzo Maffione nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
100893b1d6a0SVincenzo Maffione &kring->nr_hwcur);
1009b321acabSVincenzo Maffione /* If there is new free space, disable notifications */
1010fdde8f32SVincenzo Maffione if (unlikely(!nm_kr_wouldblock(kring))) {
1011b321acabSVincenzo Maffione atok->appl_need_kick = 0;
1012b321acabSVincenzo Maffione }
1013b321acabSVincenzo Maffione }
1014b321acabSVincenzo Maffione
1015b321acabSVincenzo Maffione nm_prdis(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
1016b321acabSVincenzo Maffione kring->name, atok->head, atok->cur, ktoa->hwtail,
1017b321acabSVincenzo Maffione kring->rhead, kring->rcur, kring->nr_hwtail);
1018b321acabSVincenzo Maffione
1019b321acabSVincenzo Maffione return notify;
1020b321acabSVincenzo Maffione }
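/*
 * Illustrative sketch (not part of this file): how a ptnet guest driver
 * could hook netmap_pt_guest_txsync() into its nm_txsync callback. The
 * queue structure and the ptnet_example_queue()/ptnet_example_kick()
 * helpers are hypothetical placeholders for the driver-specific way of
 * reaching the per-ring CSB halves and the host doorbell register.
 */
#if 0
struct ptnet_example_q {
	struct nm_csb_atok *atok;	/* guest --> host half of the CSB */
	struct nm_csb_ktoa *ktoa;	/* host --> guest half of the CSB */
};

/* Hypothetical helpers provided by the driver. */
static struct ptnet_example_q *ptnet_example_queue(struct netmap_kring *kring);
static void ptnet_example_kick(struct ptnet_example_q *q);

static int
ptnet_example_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_example_q *q = ptnet_example_queue(kring);

	/* Reconcile guest and host views of the TX ring through the CSB. */
	if (netmap_pt_guest_txsync(q->atok, q->ktoa, kring, flags)) {
		/* The host asked to be notified: ring the TX doorbell. */
		ptnet_example_kick(q);
	}

	return 0;
}
#endif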
1021b321acabSVincenzo Maffione
1022b321acabSVincenzo Maffione /*
1023b321acabSVincenzo Maffione * Reconcile host and guest view of the receive ring.
1024b321acabSVincenzo Maffione *
1025b321acabSVincenzo Maffione * Update hwcur/hwtail from host (reading from CSB).
1026b321acabSVincenzo Maffione *
1027b321acabSVincenzo Maffione * If the guest user has released buffers up to the one before ring->head, we
1028b321acabSVincenzo Maffione * also give them to the host.
1029b321acabSVincenzo Maffione *
1030b321acabSVincenzo Maffione * Notifications from the host are enabled only if the guest user would
1031b321acabSVincenzo Maffione * block (no more completed slots in the ring).
1032b321acabSVincenzo Maffione */
1033b321acabSVincenzo Maffione bool
1034b321acabSVincenzo Maffione netmap_pt_guest_rxsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
1035b321acabSVincenzo Maffione struct netmap_kring *kring, int flags)
1036b321acabSVincenzo Maffione {
1037b321acabSVincenzo Maffione bool notify = false;
1038b321acabSVincenzo Maffione
1039b321acabSVincenzo Maffione /* Disable notifications */
1040b321acabSVincenzo Maffione atok->appl_need_kick = 0;
1041b321acabSVincenzo Maffione
1042b321acabSVincenzo Maffione /*
1043b321acabSVincenzo Maffione * First part: import newly received packets, by updating the kring
1044b321acabSVincenzo Maffione * hwtail to the hwtail known from the host (read from the CSB).
1045b321acabSVincenzo Maffione * This also updates the kring hwcur.
1046b321acabSVincenzo Maffione */
104793b1d6a0SVincenzo Maffione nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, &kring->nr_hwcur);
1048b321acabSVincenzo Maffione kring->nr_kflags &= ~NKR_PENDINTR;
1049b321acabSVincenzo Maffione
1050b321acabSVincenzo Maffione /*
1051b321acabSVincenzo Maffione * Second part: tell the host about the slots that guest user has
1052b321acabSVincenzo Maffione * released, by updating cur and head in the CSB.
1053b321acabSVincenzo Maffione */
1054b321acabSVincenzo Maffione if (kring->rhead != kring->nr_hwcur) {
105593b1d6a0SVincenzo Maffione nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);
1056b321acabSVincenzo Maffione }
1057b321acabSVincenzo Maffione
1058b321acabSVincenzo Maffione /*
1059b321acabSVincenzo Maffione * No more completed RX slots. The user thread will go to sleep and
1060b321acabSVincenzo Maffione * we need to be notified by the host when more RX slots have been
1061b321acabSVincenzo Maffione * completed.
1062b321acabSVincenzo Maffione */
1063fdde8f32SVincenzo Maffione if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
1064b321acabSVincenzo Maffione /* Reenable notifications. */
1065b321acabSVincenzo Maffione atok->appl_need_kick = 1;
106693b1d6a0SVincenzo Maffione /* Double check, with store-load memory barrier. */
106793b1d6a0SVincenzo Maffione nm_stld_barrier();
106893b1d6a0SVincenzo Maffione nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
106993b1d6a0SVincenzo Maffione &kring->nr_hwcur);
1070b321acabSVincenzo Maffione /* If there are new slots, disable notifications. */
1071fdde8f32SVincenzo Maffione if (!nm_kr_wouldblock(kring)) {
1072b321acabSVincenzo Maffione atok->appl_need_kick = 0;
1073b321acabSVincenzo Maffione }
1074b321acabSVincenzo Maffione }
1075b321acabSVincenzo Maffione
1076fdde8f32SVincenzo Maffione /* Ask for a kick from the guest to the host if needed. */
1077fdde8f32SVincenzo Maffione if ((kring->rhead != kring->nr_hwcur || nm_kr_wouldblock(kring))
1078fdde8f32SVincenzo Maffione && NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
1079fdde8f32SVincenzo Maffione atok->sync_flags = flags;
1080fdde8f32SVincenzo Maffione notify = true;
1081fdde8f32SVincenzo Maffione }
1082fdde8f32SVincenzo Maffione
1083b321acabSVincenzo Maffione nm_prdis(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
1084b321acabSVincenzo Maffione kring->name, atok->head, atok->cur, ktoa->hwtail,
1085b321acabSVincenzo Maffione kring->rhead, kring->rcur, kring->nr_hwtail);
1086b321acabSVincenzo Maffione
1087b321acabSVincenzo Maffione return notify;
1088b321acabSVincenzo Maffione }
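/*
 * Illustrative sketch (not part of this file): the RX hook mirrors the TX
 * one above, reusing the hypothetical per-queue helpers from that sketch.
 */
#if 0
static int
ptnet_example_rxsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_example_q *q = ptnet_example_queue(kring);

	/* Reconcile guest and host views of the RX ring through the CSB. */
	if (netmap_pt_guest_rxsync(q->atok, q->ktoa, kring, flags)) {
		/* The host asked to be notified: ring the RX doorbell. */
		ptnet_example_kick(q);
	}

	return 0;
}
#endif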
1089b321acabSVincenzo Maffione
1090b321acabSVincenzo Maffione /*
1091b321acabSVincenzo Maffione * Callbacks for ptnet drivers: nm_krings_create, nm_krings_delete, nm_dtor.
1092b321acabSVincenzo Maffione */
1093b321acabSVincenzo Maffione int
1094b321acabSVincenzo Maffione ptnet_nm_krings_create(struct netmap_adapter *na)
1095b321acabSVincenzo Maffione {
1096b321acabSVincenzo Maffione struct netmap_pt_guest_adapter *ptna =
1097b321acabSVincenzo Maffione (struct netmap_pt_guest_adapter *)na; /* Upcast. */
1098b321acabSVincenzo Maffione struct netmap_adapter *na_nm = &ptna->hwup.up;
1099b321acabSVincenzo Maffione struct netmap_adapter *na_dr = &ptna->dr.up;
1100b321acabSVincenzo Maffione int ret;
1101b321acabSVincenzo Maffione
1102b321acabSVincenzo Maffione if (ptna->backend_users) {
1103b321acabSVincenzo Maffione return 0;
1104b321acabSVincenzo Maffione }
1105b321acabSVincenzo Maffione
1106b321acabSVincenzo Maffione /* Create krings on the public netmap adapter. */
1107b321acabSVincenzo Maffione ret = netmap_hw_krings_create(na_nm);
1108b321acabSVincenzo Maffione if (ret) {
1109b321acabSVincenzo Maffione return ret;
1110b321acabSVincenzo Maffione }
1111b321acabSVincenzo Maffione
1112b321acabSVincenzo Maffione /* Copy krings into the netmap adapter private to the driver. */
1113b321acabSVincenzo Maffione na_dr->tx_rings = na_nm->tx_rings;
1114b321acabSVincenzo Maffione na_dr->rx_rings = na_nm->rx_rings;
1115b321acabSVincenzo Maffione
1116b321acabSVincenzo Maffione return 0;
1117b321acabSVincenzo Maffione }
1118b321acabSVincenzo Maffione
1119b321acabSVincenzo Maffione void
1120b321acabSVincenzo Maffione ptnet_nm_krings_delete(struct netmap_adapter *na)
1121b321acabSVincenzo Maffione {
1122b321acabSVincenzo Maffione struct netmap_pt_guest_adapter *ptna =
1123b321acabSVincenzo Maffione (struct netmap_pt_guest_adapter *)na; /* Upcast. */
1124b321acabSVincenzo Maffione struct netmap_adapter *na_nm = &ptna->hwup.up;
1125b321acabSVincenzo Maffione struct netmap_adapter *na_dr = &ptna->dr.up;
1126b321acabSVincenzo Maffione
1127b321acabSVincenzo Maffione if (ptna->backend_users) {
1128b321acabSVincenzo Maffione return;
1129b321acabSVincenzo Maffione }
1130b321acabSVincenzo Maffione
1131b321acabSVincenzo Maffione na_dr->tx_rings = NULL;
1132b321acabSVincenzo Maffione na_dr->rx_rings = NULL;
1133b321acabSVincenzo Maffione
1134b321acabSVincenzo Maffione netmap_hw_krings_delete(na_nm);
1135b321acabSVincenzo Maffione }
1136b321acabSVincenzo Maffione
1137b321acabSVincenzo Maffione void
1138b321acabSVincenzo Maffione ptnet_nm_dtor(struct netmap_adapter *na)
1139b321acabSVincenzo Maffione {
1140b321acabSVincenzo Maffione struct netmap_pt_guest_adapter *ptna =
1141b321acabSVincenzo Maffione (struct netmap_pt_guest_adapter *)na;
1142b321acabSVincenzo Maffione
1143b321acabSVincenzo Maffione netmap_mem_put(ptna->dr.up.nm_mem);
1144b321acabSVincenzo Maffione memset(&ptna->dr, 0, sizeof(ptna->dr));
1145b321acabSVincenzo Maffione netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp);
1146b321acabSVincenzo Maffione }
1147b321acabSVincenzo Maffione
1148b321acabSVincenzo Maffione int
1149b321acabSVincenzo Maffione netmap_pt_guest_attach(struct netmap_adapter *arg,
1150b321acabSVincenzo Maffione unsigned int nifp_offset, unsigned int memid)
1151b321acabSVincenzo Maffione {
1152b321acabSVincenzo Maffione struct netmap_pt_guest_adapter *ptna;
1153b321acabSVincenzo Maffione struct ifnet *ifp = arg ? arg->ifp : NULL;
1154b321acabSVincenzo Maffione int error;
1155b321acabSVincenzo Maffione
1156b321acabSVincenzo Maffione /* get allocator */
1157b321acabSVincenzo Maffione arg->nm_mem = netmap_mem_pt_guest_new(ifp, nifp_offset, memid);
1158b321acabSVincenzo Maffione if (arg->nm_mem == NULL)
1159b321acabSVincenzo Maffione return ENOMEM;
1160b321acabSVincenzo Maffione arg->na_flags |= NAF_MEM_OWNER;
1161b321acabSVincenzo Maffione error = netmap_attach_ext(arg, sizeof(struct netmap_pt_guest_adapter), 1);
1162b321acabSVincenzo Maffione if (error)
1163b321acabSVincenzo Maffione return error;
1164b321acabSVincenzo Maffione
1165b321acabSVincenzo Maffione /* get the netmap_pt_guest_adapter */
1166b321acabSVincenzo Maffione ptna = (struct netmap_pt_guest_adapter *) NA(ifp);
1167b321acabSVincenzo Maffione
1168b321acabSVincenzo Maffione /* Initialize a separate pass-through netmap adapter that is going to
1169b321acabSVincenzo Maffione * be used by the ptnet driver only, and so never exposed to netmap
1170b321acabSVincenzo Maffione * applications. We only need a subset of the available fields. */
1171b321acabSVincenzo Maffione memset(&ptna->dr, 0, sizeof(ptna->dr));
1172b321acabSVincenzo Maffione ptna->dr.up.ifp = ifp;
1173b321acabSVincenzo Maffione ptna->dr.up.nm_mem = netmap_mem_get(ptna->hwup.up.nm_mem);
1174b321acabSVincenzo Maffione ptna->dr.up.nm_config = ptna->hwup.up.nm_config;
1175b321acabSVincenzo Maffione
1176b321acabSVincenzo Maffione ptna->backend_users = 0;
1177b321acabSVincenzo Maffione
1178b321acabSVincenzo Maffione return 0;
1179b321acabSVincenzo Maffione }
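/*
 * Illustrative sketch (not part of this file): how a ptnet guest driver
 * could wire the callbacks above into a netmap_adapter and attach it.
 * The nifp_offset/memid values come from device-specific registers, and
 * ptnet_example_nm_register()/ptnet_example_nm_config() stand in for the
 * driver's own nm_register/nm_config implementations; the numeric ring
 * parameters are likewise read from the device in a driver-specific way.
 */
#if 0
static int ptnet_example_nm_register(struct netmap_adapter *na, int onoff);
static int ptnet_example_nm_config(struct netmap_adapter *na,
		struct nm_config_info *info);

static int
ptnet_example_attach(struct ifnet *ifp, unsigned int num_desc,
		unsigned int num_rings, unsigned int nifp_offset,
		unsigned int memid)
{
	struct netmap_adapter na;

	memset(&na, 0, sizeof(na));
	na.ifp = ifp;
	na.num_tx_desc = na.num_rx_desc = num_desc;
	na.num_tx_rings = na.num_rx_rings = num_rings;
	na.nm_txsync = ptnet_example_txsync;
	na.nm_rxsync = ptnet_example_rxsync;
	na.nm_register = ptnet_example_nm_register;
	na.nm_config = ptnet_example_nm_config;
	na.nm_krings_create = ptnet_nm_krings_create;
	na.nm_krings_delete = ptnet_nm_krings_delete;
	na.nm_dtor = ptnet_nm_dtor;

	/* Plug into the host-provided memory allocator and attach. */
	return netmap_pt_guest_attach(&na, nifp_offset, memid);
}
#endif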
1180b321acabSVincenzo Maffione
1181b321acabSVincenzo Maffione #endif /* WITH_PTNETMAP */
1182