xref: /freebsd-14.2/sys/dev/netmap/netmap_kloop.c (revision a56136a1)
1b6e66be2SVincenzo Maffione /*
2b6e66be2SVincenzo Maffione  * Copyright (C) 2016-2018 Vincenzo Maffione
3b6e66be2SVincenzo Maffione  * Copyright (C) 2015 Stefano Garzarella
4b6e66be2SVincenzo Maffione  * All rights reserved.
5b6e66be2SVincenzo Maffione  *
6b6e66be2SVincenzo Maffione  * Redistribution and use in source and binary forms, with or without
7b6e66be2SVincenzo Maffione  * modification, are permitted provided that the following conditions
8b6e66be2SVincenzo Maffione  * are met:
9b6e66be2SVincenzo Maffione  *   1. Redistributions of source code must retain the above copyright
10b6e66be2SVincenzo Maffione  *      notice, this list of conditions and the following disclaimer.
11b6e66be2SVincenzo Maffione  *   2. Redistributions in binary form must reproduce the above copyright
12b6e66be2SVincenzo Maffione  *      notice, this list of conditions and the following disclaimer in the
13b6e66be2SVincenzo Maffione  *      documentation and/or other materials provided with the distribution.
14b6e66be2SVincenzo Maffione  *
15b6e66be2SVincenzo Maffione  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16b6e66be2SVincenzo Maffione  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17b6e66be2SVincenzo Maffione  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18b6e66be2SVincenzo Maffione  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19b6e66be2SVincenzo Maffione  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20b6e66be2SVincenzo Maffione  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21b6e66be2SVincenzo Maffione  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22b6e66be2SVincenzo Maffione  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23b6e66be2SVincenzo Maffione  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24b6e66be2SVincenzo Maffione  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25b6e66be2SVincenzo Maffione  * SUCH DAMAGE.
26b6e66be2SVincenzo Maffione  *
27b6e66be2SVincenzo Maffione  * $FreeBSD$
28b6e66be2SVincenzo Maffione  */
29b6e66be2SVincenzo Maffione 
30b6e66be2SVincenzo Maffione /*
31b6e66be2SVincenzo Maffione  * common headers
32b6e66be2SVincenzo Maffione  */
33b6e66be2SVincenzo Maffione #if defined(__FreeBSD__)
34b6e66be2SVincenzo Maffione #include <sys/cdefs.h>
35b6e66be2SVincenzo Maffione #include <sys/param.h>
36b6e66be2SVincenzo Maffione #include <sys/kernel.h>
37b6e66be2SVincenzo Maffione #include <sys/types.h>
38b6e66be2SVincenzo Maffione #include <sys/selinfo.h>
39b6e66be2SVincenzo Maffione #include <sys/socket.h>
40b6e66be2SVincenzo Maffione #include <net/if.h>
41b6e66be2SVincenzo Maffione #include <net/if_var.h>
42b6e66be2SVincenzo Maffione #include <machine/bus.h>
43b6e66be2SVincenzo Maffione 
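/*
 * Emulation of the Linux usleep_range(min, max) API on top of pause_sbt();
 * only the first argument is used here.
 */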
44b6e66be2SVincenzo Maffione #define usleep_range(_1, _2) \
45b6e66be2SVincenzo Maffione         pause_sbt("sync-kloop-sleep", SBT_1US * _1, SBT_1US * 1, C_ABSOLUTE)
46b6e66be2SVincenzo Maffione 
47b6e66be2SVincenzo Maffione #elif defined(linux)
48b6e66be2SVincenzo Maffione #include <bsd_glue.h>
49b6e66be2SVincenzo Maffione #include <linux/file.h>
50b6e66be2SVincenzo Maffione #include <linux/eventfd.h>
51b6e66be2SVincenzo Maffione #endif
52b6e66be2SVincenzo Maffione 
53b6e66be2SVincenzo Maffione #include <net/netmap.h>
54b6e66be2SVincenzo Maffione #include <dev/netmap/netmap_kern.h>
55b6e66be2SVincenzo Maffione #include <net/netmap_virt.h>
56b6e66be2SVincenzo Maffione #include <dev/netmap/netmap_mem2.h>
57b6e66be2SVincenzo Maffione 
58b6e66be2SVincenzo Maffione /* Support for eventfd-based notifications. */
59b6e66be2SVincenzo Maffione #if defined(linux)
60b6e66be2SVincenzo Maffione #define SYNC_KLOOP_POLL
61b6e66be2SVincenzo Maffione #endif
62b6e66be2SVincenzo Maffione 
63b6e66be2SVincenzo Maffione /* Write kring pointers (hwcur, hwtail) to the CSB.
64b6e66be2SVincenzo Maffione  * This routine is coupled with ptnetmap_guest_read_kring_csb(). */
65b6e66be2SVincenzo Maffione static inline void
66b6e66be2SVincenzo Maffione sync_kloop_kernel_write(struct nm_csb_ktoa __user *ptr, uint32_t hwcur,
67b6e66be2SVincenzo Maffione 			   uint32_t hwtail)
68b6e66be2SVincenzo Maffione {
69f79ba6d7SVincenzo Maffione 	/* Issue a first store-store barrier to make sure writes to the
70f79ba6d7SVincenzo Maffione 	 * netmap ring are not reordered after the updates of ktoa->hwcur and ktoa->hwtail. */
71f79ba6d7SVincenzo Maffione 	nm_stst_barrier();
72f79ba6d7SVincenzo Maffione 
73b6e66be2SVincenzo Maffione 	/*
74f79ba6d7SVincenzo Maffione 	 * The same scheme used in nm_sync_kloop_appl_write() applies here.
75b6e66be2SVincenzo Maffione 	 * We allow the application to read a value of hwcur more recent than the value
76b6e66be2SVincenzo Maffione 	 * of hwtail, since this still results in a consistent view of the
77b6e66be2SVincenzo Maffione 	 * ring state (and hwcur can never wrap around hwtail, since hwcur must stay
78b6e66be2SVincenzo Maffione 	 * behind head).
79b6e66be2SVincenzo Maffione 	 *
80b6e66be2SVincenzo Maffione 	 * The following memory barrier scheme is used to make this happen:
81b6e66be2SVincenzo Maffione 	 *
82b6e66be2SVincenzo Maffione 	 *          Application            Kernel
83b6e66be2SVincenzo Maffione 	 *
84b6e66be2SVincenzo Maffione 	 *          STORE(hwcur)           LOAD(hwtail)
85f79ba6d7SVincenzo Maffione 	 *          wmb() <------------->  rmb()
86b6e66be2SVincenzo Maffione 	 *          STORE(hwtail)          LOAD(hwcur)
87b6e66be2SVincenzo Maffione 	 */
88b6e66be2SVincenzo Maffione 	CSB_WRITE(ptr, hwcur, hwcur);
89b6e66be2SVincenzo Maffione 	nm_stst_barrier();
90b6e66be2SVincenzo Maffione 	CSB_WRITE(ptr, hwtail, hwtail);
91b6e66be2SVincenzo Maffione }
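
/*
 * For reference, the application/guest side reader of the CSB (e.g.
 * nm_sync_kloop_appl_read(), also used by the pt guest routines at the end
 * of this file) is expected to pair with the two stores above in the
 * opposite order:
 *
 *          Kernel (this file)          Application
 *
 *          STORE(hwcur)                LOAD(hwtail)
 *          store-store barrier <---->  load-load barrier
 *          STORE(hwtail)               LOAD(hwcur)
 *
 * so the reader may observe a new hwcur together with an old hwtail, but
 * never a new hwtail together with a stale hwcur.
 */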
92b6e66be2SVincenzo Maffione 
93b6e66be2SVincenzo Maffione /* Read kring pointers (head, cur, sync_flags) from the CSB.
94b6e66be2SVincenzo Maffione  * This routine is coupled with ptnetmap_guest_write_kring_csb(). */
95b6e66be2SVincenzo Maffione static inline void
96b6e66be2SVincenzo Maffione sync_kloop_kernel_read(struct nm_csb_atok __user *ptr,
97b6e66be2SVincenzo Maffione 			  struct netmap_ring *shadow_ring,
98b6e66be2SVincenzo Maffione 			  uint32_t num_slots)
99b6e66be2SVincenzo Maffione {
100b6e66be2SVincenzo Maffione 	/*
101b6e66be2SVincenzo Maffione 	 * We place a memory barrier to make sure that the update of head never
102b6e66be2SVincenzo Maffione 	 * overtakes the update of cur.
103f79ba6d7SVincenzo Maffione 	 * (see explanation in sync_kloop_kernel_write).
104b6e66be2SVincenzo Maffione 	 */
105b6e66be2SVincenzo Maffione 	CSB_READ(ptr, head, shadow_ring->head);
106f79ba6d7SVincenzo Maffione 	nm_ldld_barrier();
107b6e66be2SVincenzo Maffione 	CSB_READ(ptr, cur, shadow_ring->cur);
108b6e66be2SVincenzo Maffione 	CSB_READ(ptr, sync_flags, shadow_ring->flags);
109f79ba6d7SVincenzo Maffione 
110f79ba6d7SVincenzo Maffione 	/* Make sure that loads from atok->head and atok->cur are not delayed
111f79ba6d7SVincenzo Maffione 	 * after the loads from the netmap ring. */
112f79ba6d7SVincenzo Maffione 	nm_ldld_barrier();
113b6e66be2SVincenzo Maffione }
114b6e66be2SVincenzo Maffione 
115b6e66be2SVincenzo Maffione /* Enable or disable application --> kernel kicks. */
116b6e66be2SVincenzo Maffione static inline void
117b6e66be2SVincenzo Maffione csb_ktoa_kick_enable(struct nm_csb_ktoa __user *csb_ktoa, uint32_t val)
118b6e66be2SVincenzo Maffione {
119b6e66be2SVincenzo Maffione 	CSB_WRITE(csb_ktoa, kern_need_kick, val);
120b6e66be2SVincenzo Maffione }
121b6e66be2SVincenzo Maffione 
122dde885deSVincenzo Maffione #ifdef SYNC_KLOOP_POLL
123b6e66be2SVincenzo Maffione /* Are application interrupts enabled or disabled? */
124b6e66be2SVincenzo Maffione static inline uint32_t
125b6e66be2SVincenzo Maffione csb_atok_intr_enabled(struct nm_csb_atok __user *csb_atok)
126b6e66be2SVincenzo Maffione {
127b6e66be2SVincenzo Maffione 	uint32_t v;
128b6e66be2SVincenzo Maffione 
129b6e66be2SVincenzo Maffione 	CSB_READ(csb_atok, appl_need_kick, v);
130b6e66be2SVincenzo Maffione 
131b6e66be2SVincenzo Maffione 	return v;
132b6e66be2SVincenzo Maffione }
133dde885deSVincenzo Maffione #endif  /* SYNC_KLOOP_POLL */
134b6e66be2SVincenzo Maffione 
135b6e66be2SVincenzo Maffione static inline void
136b6e66be2SVincenzo Maffione sync_kloop_kring_dump(const char *title, const struct netmap_kring *kring)
137b6e66be2SVincenzo Maffione {
138f79ba6d7SVincenzo Maffione 	nm_prinf("%s, kring %s, hwcur %d, rhead %d, "
139f79ba6d7SVincenzo Maffione 		"rcur %d, rtail %d, hwtail %d",
140f79ba6d7SVincenzo Maffione 		title, kring->name, kring->nr_hwcur, kring->rhead,
141f79ba6d7SVincenzo Maffione 		kring->rcur, kring->rtail, kring->nr_hwtail);
142b6e66be2SVincenzo Maffione }
143b6e66be2SVincenzo Maffione 
144b6e66be2SVincenzo Maffione struct sync_kloop_ring_args {
145b6e66be2SVincenzo Maffione 	struct netmap_kring *kring;
146b6e66be2SVincenzo Maffione 	struct nm_csb_atok *csb_atok;
147b6e66be2SVincenzo Maffione 	struct nm_csb_ktoa *csb_ktoa;
148b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
149b6e66be2SVincenzo Maffione 	struct eventfd_ctx *irq_ctx;
150b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
151b6e66be2SVincenzo Maffione };
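
/*
 * One instance of this structure is prepared for each TX and RX ring bound
 * to the file descriptor (TX rings first, then RX rings, matching the CSB
 * layout) and passed to netmap_sync_kloop_tx_ring() or
 * netmap_sync_kloop_rx_ring() below.
 */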
152b6e66be2SVincenzo Maffione 
153b6e66be2SVincenzo Maffione static void
154b6e66be2SVincenzo Maffione netmap_sync_kloop_tx_ring(const struct sync_kloop_ring_args *a)
155b6e66be2SVincenzo Maffione {
156b6e66be2SVincenzo Maffione 	struct netmap_kring *kring = a->kring;
157b6e66be2SVincenzo Maffione 	struct nm_csb_atok *csb_atok = a->csb_atok;
158b6e66be2SVincenzo Maffione 	struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa;
159b6e66be2SVincenzo Maffione 	struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
160b6e66be2SVincenzo Maffione 	bool more_txspace = false;
161b6e66be2SVincenzo Maffione 	uint32_t num_slots;
162b6e66be2SVincenzo Maffione 	int batch;
163b6e66be2SVincenzo Maffione 
164b6e66be2SVincenzo Maffione 	num_slots = kring->nkr_num_slots;
165b6e66be2SVincenzo Maffione 
166b6e66be2SVincenzo Maffione 	/* Disable application --> kernel notifications. */
167b6e66be2SVincenzo Maffione 	csb_ktoa_kick_enable(csb_ktoa, 0);
168b6e66be2SVincenzo Maffione 	/* Copy the application kring pointers from the CSB */
169b6e66be2SVincenzo Maffione 	sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
170b6e66be2SVincenzo Maffione 
171b6e66be2SVincenzo Maffione 	for (;;) {
172b6e66be2SVincenzo Maffione 		batch = shadow_ring.head - kring->nr_hwcur;
173b6e66be2SVincenzo Maffione 		if (batch < 0)
174b6e66be2SVincenzo Maffione 			batch += num_slots;
175b6e66be2SVincenzo Maffione 
176b6e66be2SVincenzo Maffione #ifdef PTN_TX_BATCH_LIM
177b6e66be2SVincenzo Maffione 		if (batch > PTN_TX_BATCH_LIM(num_slots)) {
178b6e66be2SVincenzo Maffione 			/* If the application moves ahead too fast, cut the move so
179b6e66be2SVincenzo Maffione 			 * that we don't exceed our batch limit. */
180b6e66be2SVincenzo Maffione 			uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots);
181b6e66be2SVincenzo Maffione 
182b6e66be2SVincenzo Maffione 			if (head_lim >= num_slots)
183b6e66be2SVincenzo Maffione 				head_lim -= num_slots;
184b6e66be2SVincenzo Maffione 			nm_prdis(1, "batch: %d head: %d head_lim: %d", batch, shadow_ring.head,
185b6e66be2SVincenzo Maffione 					head_lim);
186b6e66be2SVincenzo Maffione 			shadow_ring.head = head_lim;
187b6e66be2SVincenzo Maffione 			batch = PTN_TX_BATCH_LIM(num_slots);
188b6e66be2SVincenzo Maffione 		}
189b6e66be2SVincenzo Maffione #endif /* PTN_TX_BATCH_LIM */
190b6e66be2SVincenzo Maffione 
191b6e66be2SVincenzo Maffione 		if (nm_kr_txspace(kring) <= (num_slots >> 1)) {
192b6e66be2SVincenzo Maffione 			shadow_ring.flags |= NAF_FORCE_RECLAIM;
193b6e66be2SVincenzo Maffione 		}
194b6e66be2SVincenzo Maffione 
195b6e66be2SVincenzo Maffione 		/* Netmap prologue */
196b6e66be2SVincenzo Maffione 		shadow_ring.tail = kring->rtail;
197b6e66be2SVincenzo Maffione 		if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) {
198b6e66be2SVincenzo Maffione 			/* Reinit ring and enable notifications. */
199b6e66be2SVincenzo Maffione 			netmap_ring_reinit(kring);
200b6e66be2SVincenzo Maffione 			csb_ktoa_kick_enable(csb_ktoa, 1);
201b6e66be2SVincenzo Maffione 			break;
202b6e66be2SVincenzo Maffione 		}
203b6e66be2SVincenzo Maffione 
204b6e66be2SVincenzo Maffione 		if (unlikely(netmap_debug & NM_DEBUG_TXSYNC)) {
205b6e66be2SVincenzo Maffione 			sync_kloop_kring_dump("pre txsync", kring);
206b6e66be2SVincenzo Maffione 		}
207b6e66be2SVincenzo Maffione 
208b6e66be2SVincenzo Maffione 		if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
209b6e66be2SVincenzo Maffione 			/* Reenable notifications. */
210b6e66be2SVincenzo Maffione 			csb_ktoa_kick_enable(csb_ktoa, 1);
211b6e66be2SVincenzo Maffione 			nm_prerr("txsync() failed");
212b6e66be2SVincenzo Maffione 			break;
213b6e66be2SVincenzo Maffione 		}
214b6e66be2SVincenzo Maffione 
215b6e66be2SVincenzo Maffione 		/*
216b6e66be2SVincenzo Maffione 		 * Finalize
217b6e66be2SVincenzo Maffione 		 * Copy kernel hwcur and hwtail into the CSB for the application sync(), and
218b6e66be2SVincenzo Maffione 		 * do the nm_sync_finalize.
219b6e66be2SVincenzo Maffione 		 */
220b6e66be2SVincenzo Maffione 		sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur,
221b6e66be2SVincenzo Maffione 				kring->nr_hwtail);
222b6e66be2SVincenzo Maffione 		if (kring->rtail != kring->nr_hwtail) {
223b6e66be2SVincenzo Maffione 			/* Some more room available in the parent adapter. */
224b6e66be2SVincenzo Maffione 			kring->rtail = kring->nr_hwtail;
225b6e66be2SVincenzo Maffione 			more_txspace = true;
226b6e66be2SVincenzo Maffione 		}
227b6e66be2SVincenzo Maffione 
228b6e66be2SVincenzo Maffione 		if (unlikely(netmap_debug & NM_DEBUG_TXSYNC)) {
229b6e66be2SVincenzo Maffione 			sync_kloop_kring_dump("post txsync", kring);
230b6e66be2SVincenzo Maffione 		}
231b6e66be2SVincenzo Maffione 
232b6e66be2SVincenzo Maffione 		/* Interrupt the application if needed. */
233b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
234b6e66be2SVincenzo Maffione 		if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) {
235b6e66be2SVincenzo Maffione 			/* Kick the application, then clear the flag to avoid further unnecessary kicks. */
236b6e66be2SVincenzo Maffione 			eventfd_signal(a->irq_ctx, 1);
237b6e66be2SVincenzo Maffione 			more_txspace = false;
238b6e66be2SVincenzo Maffione 		}
239b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
240b6e66be2SVincenzo Maffione 
241b6e66be2SVincenzo Maffione 		/* Read CSB to see if there is more work to do. */
242b6e66be2SVincenzo Maffione 		sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
243b6e66be2SVincenzo Maffione 		if (shadow_ring.head == kring->rhead) {
244b6e66be2SVincenzo Maffione 			/*
245b6e66be2SVincenzo Maffione 			 * No more packets to transmit. We enable notifications and
246b6e66be2SVincenzo Maffione 			 * go to sleep, waiting for a kick from the application when new
247b6e66be2SVincenzo Maffione 			 * slots are ready for transmission.
248b6e66be2SVincenzo Maffione 			 */
249b6e66be2SVincenzo Maffione 			/* Reenable notifications. */
250b6e66be2SVincenzo Maffione 			csb_ktoa_kick_enable(csb_ktoa, 1);
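			/* Without the double check below, a kick from the
			 * application could be lost to this interleaving:
			 *   1. kloop reads head and sees no new work;
			 *   2. application publishes a new head;
			 *   3. application reads kern_need_kick == 0 (kicks
			 *      still disabled) and therefore does not kick;
			 *   4. kloop enables kicks and goes to sleep, unaware
			 *      of the work published at step 2.
			 * Re-reading the CSB after enabling kicks (with a
			 * store-load barrier in between) closes the window. */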
251f79ba6d7SVincenzo Maffione 			/* Double check, with store-load memory barrier. */
252f79ba6d7SVincenzo Maffione 			nm_stld_barrier();
253b6e66be2SVincenzo Maffione 			sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
254b6e66be2SVincenzo Maffione 			if (shadow_ring.head != kring->rhead) {
255b6e66be2SVincenzo Maffione 				/* We won the race condition, there are more packets to
256b6e66be2SVincenzo Maffione 				 * transmit. Disable notifications and do another cycle */
257b6e66be2SVincenzo Maffione 				csb_ktoa_kick_enable(csb_ktoa, 0);
258b6e66be2SVincenzo Maffione 				continue;
259b6e66be2SVincenzo Maffione 			}
260b6e66be2SVincenzo Maffione 			break;
261b6e66be2SVincenzo Maffione 		}
262b6e66be2SVincenzo Maffione 
263b6e66be2SVincenzo Maffione 		if (nm_kr_txempty(kring)) {
264b6e66be2SVincenzo Maffione 			/* No more available TX slots. We stop and wait for a notification
265b6e66be2SVincenzo Maffione 			 * from the backend (netmap_tx_irq). */
266b6e66be2SVincenzo Maffione 			nm_prdis(1, "TX ring");
267b6e66be2SVincenzo Maffione 			break;
268b6e66be2SVincenzo Maffione 		}
269b6e66be2SVincenzo Maffione 	}
270b6e66be2SVincenzo Maffione 
271b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
272b6e66be2SVincenzo Maffione 	if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) {
273b6e66be2SVincenzo Maffione 		eventfd_signal(a->irq_ctx, 1);
274b6e66be2SVincenzo Maffione 	}
275b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
276b6e66be2SVincenzo Maffione }
277b6e66be2SVincenzo Maffione 
278b6e66be2SVincenzo Maffione /* Maximum number of consecutive RX cycles without receiving any packets. */
279b6e66be2SVincenzo Maffione #define SYNC_LOOP_RX_DRY_CYCLES_MAX	2
280b6e66be2SVincenzo Maffione 
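/* Return true when there is no slot available for new received packets, i.e.
 * when nr_hwtail has caught up with the slot right before the application's
 * head. */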
281b6e66be2SVincenzo Maffione static inline int
282b6e66be2SVincenzo Maffione sync_kloop_norxslots(struct netmap_kring *kring, uint32_t g_head)
283b6e66be2SVincenzo Maffione {
284b6e66be2SVincenzo Maffione 	return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head,
285b6e66be2SVincenzo Maffione 				kring->nkr_num_slots - 1));
286b6e66be2SVincenzo Maffione }
287b6e66be2SVincenzo Maffione 
288b6e66be2SVincenzo Maffione static void
289b6e66be2SVincenzo Maffione netmap_sync_kloop_rx_ring(const struct sync_kloop_ring_args *a)
290b6e66be2SVincenzo Maffione {
291b6e66be2SVincenzo Maffione 
292b6e66be2SVincenzo Maffione 	struct netmap_kring *kring = a->kring;
293b6e66be2SVincenzo Maffione 	struct nm_csb_atok *csb_atok = a->csb_atok;
294b6e66be2SVincenzo Maffione 	struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa;
295b6e66be2SVincenzo Maffione 	struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
296b6e66be2SVincenzo Maffione 	int dry_cycles = 0;
297b6e66be2SVincenzo Maffione 	bool some_recvd = false;
298b6e66be2SVincenzo Maffione 	uint32_t num_slots;
299b6e66be2SVincenzo Maffione 
300b6e66be2SVincenzo Maffione 	num_slots = kring->nkr_num_slots;
301b6e66be2SVincenzo Maffione 
305b6e66be2SVincenzo Maffione 	/* Disable notifications. */
306b6e66be2SVincenzo Maffione 	csb_ktoa_kick_enable(csb_ktoa, 0);
307b6e66be2SVincenzo Maffione 	/* Copy the application kring pointers from the CSB */
308b6e66be2SVincenzo Maffione 	sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
309b6e66be2SVincenzo Maffione 
310b6e66be2SVincenzo Maffione 	for (;;) {
311b6e66be2SVincenzo Maffione 		uint32_t hwtail;
312b6e66be2SVincenzo Maffione 
313b6e66be2SVincenzo Maffione 		/* Netmap prologue */
314b6e66be2SVincenzo Maffione 		shadow_ring.tail = kring->rtail;
315b6e66be2SVincenzo Maffione 		if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) {
316b6e66be2SVincenzo Maffione 			/* Reinit ring and enable notifications. */
317b6e66be2SVincenzo Maffione 			netmap_ring_reinit(kring);
318b6e66be2SVincenzo Maffione 			csb_ktoa_kick_enable(csb_ktoa, 1);
319b6e66be2SVincenzo Maffione 			break;
320b6e66be2SVincenzo Maffione 		}
321b6e66be2SVincenzo Maffione 
322b6e66be2SVincenzo Maffione 		if (unlikely(netmap_debug & NM_DEBUG_RXSYNC)) {
323b6e66be2SVincenzo Maffione 			sync_kloop_kring_dump("pre rxsync", kring);
324b6e66be2SVincenzo Maffione 		}
325b6e66be2SVincenzo Maffione 
326b6e66be2SVincenzo Maffione 		if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
327b6e66be2SVincenzo Maffione 			/* Reenable notifications. */
328b6e66be2SVincenzo Maffione 			csb_ktoa_kick_enable(csb_ktoa, 1);
329b6e66be2SVincenzo Maffione 			nm_prerr("rxsync() failed");
330b6e66be2SVincenzo Maffione 			break;
331b6e66be2SVincenzo Maffione 		}
332b6e66be2SVincenzo Maffione 
333b6e66be2SVincenzo Maffione 		/*
334b6e66be2SVincenzo Maffione 		 * Finalize
335b6e66be2SVincenzo Maffione 		 * Copy kernel hwcur and hwtail into the CSB for the application sync()
336b6e66be2SVincenzo Maffione 		 */
337b6e66be2SVincenzo Maffione 		hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
338b6e66be2SVincenzo Maffione 		sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur, hwtail);
339b6e66be2SVincenzo Maffione 		if (kring->rtail != hwtail) {
340b6e66be2SVincenzo Maffione 			kring->rtail = hwtail;
341b6e66be2SVincenzo Maffione 			some_recvd = true;
342b6e66be2SVincenzo Maffione 			dry_cycles = 0;
343b6e66be2SVincenzo Maffione 		} else {
344b6e66be2SVincenzo Maffione 			dry_cycles++;
345b6e66be2SVincenzo Maffione 		}
346b6e66be2SVincenzo Maffione 
347b6e66be2SVincenzo Maffione 		if (unlikely(netmap_debug & NM_DEBUG_RXSYNC)) {
348b6e66be2SVincenzo Maffione 			sync_kloop_kring_dump("post rxsync", kring);
349b6e66be2SVincenzo Maffione 		}
350b6e66be2SVincenzo Maffione 
351b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
352b6e66be2SVincenzo Maffione 		/* Interrupt the application if needed. */
353b6e66be2SVincenzo Maffione 		if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) {
354b6e66be2SVincenzo Maffione 			/* Kick the application, then clear the flag to avoid further unnecessary kicks. */
355b6e66be2SVincenzo Maffione 			eventfd_signal(a->irq_ctx, 1);
356b6e66be2SVincenzo Maffione 			some_recvd = false;
357b6e66be2SVincenzo Maffione 		}
358b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
359b6e66be2SVincenzo Maffione 
360b6e66be2SVincenzo Maffione 		/* Read CSB to see if there is more work to do. */
361b6e66be2SVincenzo Maffione 		sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
362b6e66be2SVincenzo Maffione 		if (sync_kloop_norxslots(kring, shadow_ring.head)) {
363b6e66be2SVincenzo Maffione 			/*
364b6e66be2SVincenzo Maffione 			 * No more slots available for reception. We enable notifications and
365b6e66be2SVincenzo Maffione 			 * go to sleep, waiting for a kick from the application when new receive
366b6e66be2SVincenzo Maffione 			 * slots are available.
367b6e66be2SVincenzo Maffione 			 */
368b6e66be2SVincenzo Maffione 			/* Reenable notifications. */
369b6e66be2SVincenzo Maffione 			csb_ktoa_kick_enable(csb_ktoa, 1);
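			/* Same potential wakeup race as in the TX case: see
			 * the comment in netmap_sync_kloop_tx_ring(). */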
370f79ba6d7SVincenzo Maffione 			/* Double check, with store-load memory barrier. */
371f79ba6d7SVincenzo Maffione 			nm_stld_barrier();
372b6e66be2SVincenzo Maffione 			sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
373b6e66be2SVincenzo Maffione 			if (!sync_kloop_norxslots(kring, shadow_ring.head)) {
374b6e66be2SVincenzo Maffione 				/* We won the race condition, more slots are available. Disable
375b6e66be2SVincenzo Maffione 				 * notifications and do another cycle. */
376b6e66be2SVincenzo Maffione 				csb_ktoa_kick_enable(csb_ktoa, 0);
377b6e66be2SVincenzo Maffione 				continue;
378b6e66be2SVincenzo Maffione 			}
379b6e66be2SVincenzo Maffione 			break;
380b6e66be2SVincenzo Maffione 		}
381b6e66be2SVincenzo Maffione 
382b6e66be2SVincenzo Maffione 		hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
383b6e66be2SVincenzo Maffione 		if (unlikely(hwtail == kring->rhead ||
384b6e66be2SVincenzo Maffione 					dry_cycles >= SYNC_LOOP_RX_DRY_CYCLES_MAX)) {
385b6e66be2SVincenzo Maffione 			/* No more packets to be read from the backend. We stop and
386b6e66be2SVincenzo Maffione 			 * wait for a notification from the backend (netmap_rx_irq). */
387b6e66be2SVincenzo Maffione 			nm_prdis(1, "nr_hwtail: %d rhead: %d dry_cycles: %d",
388b6e66be2SVincenzo Maffione 					hwtail, kring->rhead, dry_cycles);
389b6e66be2SVincenzo Maffione 			break;
390b6e66be2SVincenzo Maffione 		}
391b6e66be2SVincenzo Maffione 	}
392b6e66be2SVincenzo Maffione 
393b6e66be2SVincenzo Maffione 	nm_kr_put(kring);
394b6e66be2SVincenzo Maffione 
395b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
396b6e66be2SVincenzo Maffione 	/* Interrupt the application if needed. */
397b6e66be2SVincenzo Maffione 	if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) {
398b6e66be2SVincenzo Maffione 		eventfd_signal(a->irq_ctx, 1);
399b6e66be2SVincenzo Maffione 	}
400b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
401b6e66be2SVincenzo Maffione }
402b6e66be2SVincenzo Maffione 
403b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
404b6e66be2SVincenzo Maffione struct sync_kloop_poll_entry {
405b6e66be2SVincenzo Maffione 	/* Support for receiving notifications from
406b6e66be2SVincenzo Maffione 	 * a netmap ring or from the application. */
407b6e66be2SVincenzo Maffione 	struct file *filp;
408b6e66be2SVincenzo Maffione 	wait_queue_t wait;
409b6e66be2SVincenzo Maffione 	wait_queue_head_t *wqh;
410b6e66be2SVincenzo Maffione 
411b6e66be2SVincenzo Maffione 	/* Support for sending notifications to the application. */
412b6e66be2SVincenzo Maffione 	struct eventfd_ctx *irq_ctx;
413b6e66be2SVincenzo Maffione 	struct file *irq_filp;
414b6e66be2SVincenzo Maffione };
415b6e66be2SVincenzo Maffione 
416b6e66be2SVincenzo Maffione struct sync_kloop_poll_ctx {
417b6e66be2SVincenzo Maffione 	poll_table wait_table;
418b6e66be2SVincenzo Maffione 	unsigned int next_entry;
419b6e66be2SVincenzo Maffione 	unsigned int num_entries;
420b6e66be2SVincenzo Maffione 	struct sync_kloop_poll_entry entries[0];
421b6e66be2SVincenzo Maffione };
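
/*
 * Layout of the entries[] array, as filled in by netmap_sync_kloop() below:
 * entries[0..num_rings-1] hold the per-ring eventfds provided by the
 * application (TX rings first, then RX rings), while the last two entries
 * are used for the TX and RX wait queues of the netmap adapter itself.
 */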
422b6e66be2SVincenzo Maffione 
423b6e66be2SVincenzo Maffione static void
424b6e66be2SVincenzo Maffione sync_kloop_poll_table_queue_proc(struct file *file, wait_queue_head_t *wqh,
425b6e66be2SVincenzo Maffione 				poll_table *pt)
426b6e66be2SVincenzo Maffione {
427b6e66be2SVincenzo Maffione 	struct sync_kloop_poll_ctx *poll_ctx =
428b6e66be2SVincenzo Maffione 		container_of(pt, struct sync_kloop_poll_ctx, wait_table);
429b6e66be2SVincenzo Maffione 	struct sync_kloop_poll_entry *entry = poll_ctx->entries +
430b6e66be2SVincenzo Maffione 						poll_ctx->next_entry;
431b6e66be2SVincenzo Maffione 
432b6e66be2SVincenzo Maffione 	BUG_ON(poll_ctx->next_entry >= poll_ctx->num_entries);
433b6e66be2SVincenzo Maffione 	entry->wqh = wqh;
434b6e66be2SVincenzo Maffione 	entry->filp = file;
435b6e66be2SVincenzo Maffione 	/* Use the default wake up function. */
436b6e66be2SVincenzo Maffione 	init_waitqueue_entry(&entry->wait, current);
437b6e66be2SVincenzo Maffione 	add_wait_queue(wqh, &entry->wait);
438b6e66be2SVincenzo Maffione 	poll_ctx->next_entry++;
439b6e66be2SVincenzo Maffione }
440b6e66be2SVincenzo Maffione #endif  /* SYNC_KLOOP_POLL */
441b6e66be2SVincenzo Maffione 
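/*
 * Main entry point of the sync kloop: check that the application is working
 * in CSB mode, optionally set up eventfd-based notifications (see
 * NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS), and then keep invoking txsync/rxsync
 * on all the rings bound to this file descriptor until
 * netmap_sync_kloop_stop() sets the NM_SYNC_KLOOP_STOPPING flag. The call
 * blocks for the whole lifetime of the loop.
 */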
442b6e66be2SVincenzo Maffione int
443b6e66be2SVincenzo Maffione netmap_sync_kloop(struct netmap_priv_d *priv, struct nmreq_header *hdr)
444b6e66be2SVincenzo Maffione {
445b6e66be2SVincenzo Maffione 	struct nmreq_sync_kloop_start *req =
446b6e66be2SVincenzo Maffione 		(struct nmreq_sync_kloop_start *)(uintptr_t)hdr->nr_body;
447b6e66be2SVincenzo Maffione 	struct nmreq_opt_sync_kloop_eventfds *eventfds_opt = NULL;
448b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
449b6e66be2SVincenzo Maffione 	struct sync_kloop_poll_ctx *poll_ctx = NULL;
450b6e66be2SVincenzo Maffione #endif  /* SYNC_KLOOP_POLL */
451b6e66be2SVincenzo Maffione 	int num_rx_rings, num_tx_rings, num_rings;
452f79ba6d7SVincenzo Maffione 	struct sync_kloop_ring_args *args = NULL;
453b6e66be2SVincenzo Maffione 	uint32_t sleep_us = req->sleep_us;
454b6e66be2SVincenzo Maffione 	struct nm_csb_atok* csb_atok_base;
455b6e66be2SVincenzo Maffione 	struct nm_csb_ktoa* csb_ktoa_base;
456b6e66be2SVincenzo Maffione 	struct netmap_adapter *na;
457b6e66be2SVincenzo Maffione 	struct nmreq_option *opt;
458b6e66be2SVincenzo Maffione 	int err = 0;
459b6e66be2SVincenzo Maffione 	int i;
460b6e66be2SVincenzo Maffione 
461b6e66be2SVincenzo Maffione 	if (sleep_us > 1000000) {
462b6e66be2SVincenzo Maffione 		/* We do not accept sleeping for more than a second. */
463b6e66be2SVincenzo Maffione 		return EINVAL;
464b6e66be2SVincenzo Maffione 	}
465b6e66be2SVincenzo Maffione 
466b6e66be2SVincenzo Maffione 	if (priv->np_nifp == NULL) {
467b6e66be2SVincenzo Maffione 		return ENXIO;
468b6e66be2SVincenzo Maffione 	}
469b6e66be2SVincenzo Maffione 	mb(); /* make sure following reads are not from cache */
470b6e66be2SVincenzo Maffione 
471b6e66be2SVincenzo Maffione 	na = priv->np_na;
472b6e66be2SVincenzo Maffione 	if (!nm_netmap_on(na)) {
473b6e66be2SVincenzo Maffione 		return ENXIO;
474b6e66be2SVincenzo Maffione 	}
475b6e66be2SVincenzo Maffione 
476b6e66be2SVincenzo Maffione 	NMG_LOCK();
477b6e66be2SVincenzo Maffione 	/* Make sure the application is working in CSB mode. */
478b6e66be2SVincenzo Maffione 	if (!priv->np_csb_atok_base || !priv->np_csb_ktoa_base) {
479b6e66be2SVincenzo Maffione 		NMG_UNLOCK();
480b6e66be2SVincenzo Maffione 		nm_prerr("sync-kloop on %s requires "
481b6e66be2SVincenzo Maffione 				"NETMAP_REQ_OPT_CSB option", na->name);
482b6e66be2SVincenzo Maffione 		return EINVAL;
483b6e66be2SVincenzo Maffione 	}
484b6e66be2SVincenzo Maffione 
485b6e66be2SVincenzo Maffione 	csb_atok_base = priv->np_csb_atok_base;
486b6e66be2SVincenzo Maffione 	csb_ktoa_base = priv->np_csb_ktoa_base;
487b6e66be2SVincenzo Maffione 
488b6e66be2SVincenzo Maffione 	/* Make sure that no kloop is currently running. */
489b6e66be2SVincenzo Maffione 	if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
490b6e66be2SVincenzo Maffione 		err = EBUSY;
491b6e66be2SVincenzo Maffione 	}
492b6e66be2SVincenzo Maffione 	priv->np_kloop_state |= NM_SYNC_KLOOP_RUNNING;
493b6e66be2SVincenzo Maffione 	NMG_UNLOCK();
494b6e66be2SVincenzo Maffione 	if (err) {
495b6e66be2SVincenzo Maffione 		return err;
496b6e66be2SVincenzo Maffione 	}
497b6e66be2SVincenzo Maffione 
498b6e66be2SVincenzo Maffione 	num_rx_rings = priv->np_qlast[NR_RX] - priv->np_qfirst[NR_RX];
499b6e66be2SVincenzo Maffione 	num_tx_rings = priv->np_qlast[NR_TX] - priv->np_qfirst[NR_TX];
500b6e66be2SVincenzo Maffione 	num_rings = num_tx_rings + num_rx_rings;
501b6e66be2SVincenzo Maffione 
502f79ba6d7SVincenzo Maffione 	args = nm_os_malloc(num_rings * sizeof(args[0]));
503f79ba6d7SVincenzo Maffione 	if (!args) {
504f79ba6d7SVincenzo Maffione 		err = ENOMEM;
505f79ba6d7SVincenzo Maffione 		goto out;
506f79ba6d7SVincenzo Maffione 	}
507f79ba6d7SVincenzo Maffione 
508b6e66be2SVincenzo Maffione 	/* Validate notification options. */
509b6e66be2SVincenzo Maffione 	opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options,
510b6e66be2SVincenzo Maffione 				NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS);
511b6e66be2SVincenzo Maffione 	if (opt != NULL) {
512b6e66be2SVincenzo Maffione 		err = nmreq_checkduplicate(opt);
513b6e66be2SVincenzo Maffione 		if (err) {
514b6e66be2SVincenzo Maffione 			opt->nro_status = err;
515b6e66be2SVincenzo Maffione 			goto out;
516b6e66be2SVincenzo Maffione 		}
517b6e66be2SVincenzo Maffione 		if (opt->nro_size != sizeof(*eventfds_opt) +
518b6e66be2SVincenzo Maffione 			sizeof(eventfds_opt->eventfds[0]) * num_rings) {
519b6e66be2SVincenzo Maffione 			/* Option size not consistent with the number of
520b6e66be2SVincenzo Maffione 			 * entries. */
521b6e66be2SVincenzo Maffione 			opt->nro_status = err = EINVAL;
522b6e66be2SVincenzo Maffione 			goto out;
523b6e66be2SVincenzo Maffione 		}
524b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
525b6e66be2SVincenzo Maffione 		eventfds_opt = (struct nmreq_opt_sync_kloop_eventfds *)opt;
526b6e66be2SVincenzo Maffione 		opt->nro_status = 0;
527b6e66be2SVincenzo Maffione 		/* We need 2 poll entries for TX and RX notifications coming
528b6e66be2SVincenzo Maffione 		 * from the netmap adapter, plus one entries per ring for the
529b6e66be2SVincenzo Maffione 		 * from the netmap adapter, plus one entry per ring for the
530b6e66be2SVincenzo Maffione 		poll_ctx = nm_os_malloc(sizeof(*poll_ctx) +
531b6e66be2SVincenzo Maffione 				(2 + num_rings) * sizeof(poll_ctx->entries[0]));
		if (!poll_ctx) {
			opt->nro_status = err = ENOMEM;
			goto out;
		}
532b6e66be2SVincenzo Maffione 		init_poll_funcptr(&poll_ctx->wait_table,
533b6e66be2SVincenzo Maffione 					sync_kloop_poll_table_queue_proc);
534b6e66be2SVincenzo Maffione 		poll_ctx->num_entries = 2 + num_rings;
535b6e66be2SVincenzo Maffione 		poll_ctx->next_entry = 0;
536b6e66be2SVincenzo Maffione 		/* Poll for notifications coming from the application through
537b6e66be2SVincenzo Maffione 		 * eventfds. */
538b6e66be2SVincenzo Maffione 		for (i = 0; i < num_rings; i++) {
539b6e66be2SVincenzo Maffione 			struct eventfd_ctx *irq;
540b6e66be2SVincenzo Maffione 			struct file *filp;
541b6e66be2SVincenzo Maffione 			unsigned long mask;
542b6e66be2SVincenzo Maffione 
543b6e66be2SVincenzo Maffione 			filp = eventfd_fget(eventfds_opt->eventfds[i].ioeventfd);
544b6e66be2SVincenzo Maffione 			if (IS_ERR(filp)) {
545b6e66be2SVincenzo Maffione 				err = PTR_ERR(filp);
546b6e66be2SVincenzo Maffione 				goto out;
547b6e66be2SVincenzo Maffione 			}
548b6e66be2SVincenzo Maffione 			mask = filp->f_op->poll(filp, &poll_ctx->wait_table);
549b6e66be2SVincenzo Maffione 			if (mask & POLLERR) {
550b6e66be2SVincenzo Maffione 				err = EINVAL;
551b6e66be2SVincenzo Maffione 				goto out;
552b6e66be2SVincenzo Maffione 			}
553b6e66be2SVincenzo Maffione 
554b6e66be2SVincenzo Maffione 			filp = eventfd_fget(eventfds_opt->eventfds[i].irqfd);
555b6e66be2SVincenzo Maffione 			if (IS_ERR(filp)) {
556b6e66be2SVincenzo Maffione 				err = PTR_ERR(filp);
557b6e66be2SVincenzo Maffione 				goto out;
558b6e66be2SVincenzo Maffione 			}
559b6e66be2SVincenzo Maffione 			poll_ctx->entries[i].irq_filp = filp;
560b6e66be2SVincenzo Maffione 			irq = eventfd_ctx_fileget(filp);
561b6e66be2SVincenzo Maffione 			if (IS_ERR(irq)) {
562b6e66be2SVincenzo Maffione 				err = PTR_ERR(irq);
563b6e66be2SVincenzo Maffione 				goto out;
564b6e66be2SVincenzo Maffione 			}
565b6e66be2SVincenzo Maffione 			poll_ctx->entries[i].irq_ctx = irq;
566b6e66be2SVincenzo Maffione 		}
567b6e66be2SVincenzo Maffione 		/* Poll for notifications coming from the netmap rings bound to
568b6e66be2SVincenzo Maffione 		 * this file descriptor. */
569b6e66be2SVincenzo Maffione 		{
570b6e66be2SVincenzo Maffione 			NMG_LOCK();
571*a56136a1SVincenzo Maffione 			poll_wait(priv->np_filp, priv->np_si[NR_TX],
572*a56136a1SVincenzo Maffione 			    &poll_ctx->wait_table);
573*a56136a1SVincenzo Maffione 			poll_wait(priv->np_filp, priv->np_si[NR_RX],
574*a56136a1SVincenzo Maffione 			    &poll_ctx->wait_table);
575b6e66be2SVincenzo Maffione 			NMG_UNLOCK();
576b6e66be2SVincenzo Maffione 		}
577b6e66be2SVincenzo Maffione #else   /* SYNC_KLOOP_POLL */
578b6e66be2SVincenzo Maffione 		opt->nro_status = EOPNOTSUPP;
579b6e66be2SVincenzo Maffione 		goto out;
580b6e66be2SVincenzo Maffione #endif  /* SYNC_KLOOP_POLL */
581b6e66be2SVincenzo Maffione 	}
582b6e66be2SVincenzo Maffione 
583f79ba6d7SVincenzo Maffione 	/* Prepare the arguments for netmap_sync_kloop_tx_ring()
584f79ba6d7SVincenzo Maffione 	 * and netmap_sync_kloop_rx_ring(). */
585f79ba6d7SVincenzo Maffione 	for (i = 0; i < num_tx_rings; i++) {
586f79ba6d7SVincenzo Maffione 		struct sync_kloop_ring_args *a = args + i;
587f79ba6d7SVincenzo Maffione 
588f79ba6d7SVincenzo Maffione 		a->kring = NMR(na, NR_TX)[i + priv->np_qfirst[NR_TX]];
589f79ba6d7SVincenzo Maffione 		a->csb_atok = csb_atok_base + i;
590f79ba6d7SVincenzo Maffione 		a->csb_ktoa = csb_ktoa_base + i;
591f79ba6d7SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
592f79ba6d7SVincenzo Maffione 		if (poll_ctx)
593f79ba6d7SVincenzo Maffione 			a->irq_ctx = poll_ctx->entries[i].irq_ctx;
594f79ba6d7SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
595f79ba6d7SVincenzo Maffione 	}
596f79ba6d7SVincenzo Maffione 	for (i = 0; i < num_rx_rings; i++) {
597f79ba6d7SVincenzo Maffione 		struct sync_kloop_ring_args *a = args + num_tx_rings + i;
598f79ba6d7SVincenzo Maffione 
599f79ba6d7SVincenzo Maffione 		a->kring = NMR(na, NR_RX)[i + priv->np_qfirst[NR_RX]];
600f79ba6d7SVincenzo Maffione 		a->csb_atok = csb_atok_base + num_tx_rings + i;
601f79ba6d7SVincenzo Maffione 		a->csb_ktoa = csb_ktoa_base + num_tx_rings + i;
602f79ba6d7SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
603f79ba6d7SVincenzo Maffione 		if (poll_ctx)
604f79ba6d7SVincenzo Maffione 			a->irq_ctx = poll_ctx->entries[num_tx_rings + i].irq_ctx;
605f79ba6d7SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
606f79ba6d7SVincenzo Maffione 	}
607f79ba6d7SVincenzo Maffione 
608b6e66be2SVincenzo Maffione 	/* Main loop. */
609b6e66be2SVincenzo Maffione 	for (;;) {
610b6e66be2SVincenzo Maffione 		if (unlikely(NM_ACCESS_ONCE(priv->np_kloop_state) & NM_SYNC_KLOOP_STOPPING)) {
611b6e66be2SVincenzo Maffione 			break;
612b6e66be2SVincenzo Maffione 		}
613b6e66be2SVincenzo Maffione 
614b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
615f79ba6d7SVincenzo Maffione 		if (poll_ctx) {
616f79ba6d7SVincenzo Maffione 			/* It is important to set the task state as
617f79ba6d7SVincenzo Maffione 			 * interruptible before processing any TX/RX ring,
618f79ba6d7SVincenzo Maffione 			 * so that if a notification on ring Y comes after
619f79ba6d7SVincenzo Maffione 			 * we have processed ring Y, but before we call
620f79ba6d7SVincenzo Maffione 			 * schedule(), we don't miss it. This is true because
621f79ba6d7SVincenzo Maffione 			 * the wake up function will change the task state,
622f79ba6d7SVincenzo Maffione 			 * and therefore the schedule_timeout() call below
623f79ba6d7SVincenzo Maffione 			 * will observe the change.
624f79ba6d7SVincenzo Maffione 			 */
625f79ba6d7SVincenzo Maffione 			set_current_state(TASK_INTERRUPTIBLE);
626f79ba6d7SVincenzo Maffione 		}
627b6e66be2SVincenzo Maffione #endif  /* SYNC_KLOOP_POLL */
628b6e66be2SVincenzo Maffione 
629b6e66be2SVincenzo Maffione 		/* Process all the TX rings bound to this file descriptor. */
630b6e66be2SVincenzo Maffione 		for (i = 0; i < num_tx_rings; i++) {
631f79ba6d7SVincenzo Maffione 			struct sync_kloop_ring_args *a = args + i;
632b6e66be2SVincenzo Maffione 
633f79ba6d7SVincenzo Maffione 			if (unlikely(nm_kr_tryget(a->kring, 1, NULL))) {
634b6e66be2SVincenzo Maffione 				continue;
635b6e66be2SVincenzo Maffione 			}
636f79ba6d7SVincenzo Maffione 			netmap_sync_kloop_tx_ring(a);
637f79ba6d7SVincenzo Maffione 			nm_kr_put(a->kring);
638b6e66be2SVincenzo Maffione 		}
639b6e66be2SVincenzo Maffione 
640b6e66be2SVincenzo Maffione 		/* Process all the RX rings bound to this file descriptor. */
641b6e66be2SVincenzo Maffione 		for (i = 0; i < num_rx_rings; i++) {
642f79ba6d7SVincenzo Maffione 			struct sync_kloop_ring_args *a = args + num_tx_rings + i;
643b6e66be2SVincenzo Maffione 
644f79ba6d7SVincenzo Maffione 			if (unlikely(nm_kr_tryget(a->kring, 1, NULL))) {
645b6e66be2SVincenzo Maffione 				continue;
646b6e66be2SVincenzo Maffione 			}
647f79ba6d7SVincenzo Maffione 			netmap_sync_kloop_rx_ring(a);
648f79ba6d7SVincenzo Maffione 			nm_kr_put(a->kring);
649b6e66be2SVincenzo Maffione 		}
650b6e66be2SVincenzo Maffione 
651b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
652b6e66be2SVincenzo Maffione 		if (poll_ctx) {
653b6e66be2SVincenzo Maffione 			/* If a poll context is present, yield to the scheduler
654b6e66be2SVincenzo Maffione 			 * waiting for a notification to come either from
655b6e66be2SVincenzo Maffione 			 * netmap or the application. */
656*a56136a1SVincenzo Maffione 			schedule_timeout(msecs_to_jiffies(3000));
657b6e66be2SVincenzo Maffione 		} else
658b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
659b6e66be2SVincenzo Maffione 		{
660b6e66be2SVincenzo Maffione 			/* Default synchronization method: sleep for a while. */
661b6e66be2SVincenzo Maffione 			usleep_range(sleep_us, sleep_us);
662b6e66be2SVincenzo Maffione 		}
663b6e66be2SVincenzo Maffione 	}
664b6e66be2SVincenzo Maffione out:
665b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
666b6e66be2SVincenzo Maffione 	if (poll_ctx) {
667b6e66be2SVincenzo Maffione 		/* Stop polling from netmap and the eventfds, and deallocate
668b6e66be2SVincenzo Maffione 		 * the poll context. */
669b6e66be2SVincenzo Maffione 		__set_current_state(TASK_RUNNING);
670b6e66be2SVincenzo Maffione 		for (i = 0; i < poll_ctx->next_entry; i++) {
671b6e66be2SVincenzo Maffione 			struct sync_kloop_poll_entry *entry =
672b6e66be2SVincenzo Maffione 						poll_ctx->entries + i;
673b6e66be2SVincenzo Maffione 
674b6e66be2SVincenzo Maffione 			if (entry->wqh)
675b6e66be2SVincenzo Maffione 				remove_wait_queue(entry->wqh, &entry->wait);
676b6e66be2SVincenzo Maffione 			/* Release the reference taken on the eventfd files, but
677b6e66be2SVincenzo Maffione 			 * don't do that on the netmap file descriptor, since
678b6e66be2SVincenzo Maffione 			 * no reference was taken on it. */
679b6e66be2SVincenzo Maffione 			if (entry->filp && entry->filp != priv->np_filp)
680b6e66be2SVincenzo Maffione 				fput(entry->filp);
681b6e66be2SVincenzo Maffione 			if (entry->irq_ctx)
682b6e66be2SVincenzo Maffione 				eventfd_ctx_put(entry->irq_ctx);
683b6e66be2SVincenzo Maffione 			if (entry->irq_filp)
684b6e66be2SVincenzo Maffione 				fput(entry->irq_filp);
685b6e66be2SVincenzo Maffione 		}
686b6e66be2SVincenzo Maffione 		nm_os_free(poll_ctx);
687b6e66be2SVincenzo Maffione 		poll_ctx = NULL;
688b6e66be2SVincenzo Maffione 	}
689b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
690b6e66be2SVincenzo Maffione 
691f79ba6d7SVincenzo Maffione 	if (args) {
692f79ba6d7SVincenzo Maffione 		nm_os_free(args);
693f79ba6d7SVincenzo Maffione 		args = NULL;
694f79ba6d7SVincenzo Maffione 	}
695f79ba6d7SVincenzo Maffione 
696b6e66be2SVincenzo Maffione 	/* Reset the kloop state. */
697b6e66be2SVincenzo Maffione 	NMG_LOCK();
698b6e66be2SVincenzo Maffione 	priv->np_kloop_state = 0;
699b6e66be2SVincenzo Maffione 	NMG_UNLOCK();
700b6e66be2SVincenzo Maffione 
701b6e66be2SVincenzo Maffione 	return err;
702b6e66be2SVincenzo Maffione }
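
/*
 * Illustrative sketch (not part of this file): a user application typically
 * runs the kloop above from a dedicated thread, after having bound the port
 * with the NETMAP_REQ_OPT_CSB option, roughly as follows:
 *
 *	struct nmreq_sync_kloop_start req = { .sleep_us = 100 };
 *	struct nmreq_header hdr;
 *
 *	... fill hdr with nr_reqtype = NETMAP_REQ_SYNC_KLOOP_START and
 *	    nr_body pointing to req ...
 *
 *	ioctl(fd, NIOCCTRL, &hdr);	(blocks here, running the kloop)
 *
 * Another thread later issues NETMAP_REQ_SYNC_KLOOP_STOP, which ends up in
 * netmap_sync_kloop_stop() below and makes the blocked call return.
 */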
703b6e66be2SVincenzo Maffione 
704b6e66be2SVincenzo Maffione int
705b6e66be2SVincenzo Maffione netmap_sync_kloop_stop(struct netmap_priv_d *priv)
706b6e66be2SVincenzo Maffione {
707*a56136a1SVincenzo Maffione 	struct netmap_adapter *na;
708b6e66be2SVincenzo Maffione 	bool running = true;
709b6e66be2SVincenzo Maffione 	int err = 0;
710b6e66be2SVincenzo Maffione 
711*a56136a1SVincenzo Maffione 	if (priv->np_nifp == NULL) {
712*a56136a1SVincenzo Maffione 		return ENXIO;
713*a56136a1SVincenzo Maffione 	}
714*a56136a1SVincenzo Maffione 	mb(); /* make sure following reads are not from cache */
715*a56136a1SVincenzo Maffione 
716*a56136a1SVincenzo Maffione 	na = priv->np_na;
717*a56136a1SVincenzo Maffione 	if (!nm_netmap_on(na)) {
718*a56136a1SVincenzo Maffione 		return ENXIO;
719*a56136a1SVincenzo Maffione 	}
720*a56136a1SVincenzo Maffione 
721*a56136a1SVincenzo Maffione 	/* Set the kloop stopping flag. */
722b6e66be2SVincenzo Maffione 	NMG_LOCK();
723b6e66be2SVincenzo Maffione 	priv->np_kloop_state |= NM_SYNC_KLOOP_STOPPING;
724b6e66be2SVincenzo Maffione 	NMG_UNLOCK();
725*a56136a1SVincenzo Maffione 
726*a56136a1SVincenzo Maffione 	/* Send a notification to the kloop, in case it is blocked in
727*a56136a1SVincenzo Maffione 	 * schedule_timeout(). We can use either RX or TX, because the
728*a56136a1SVincenzo Maffione 	 * kloop is waiting on both. */
729*a56136a1SVincenzo Maffione 	nm_os_selwakeup(priv->np_si[NR_RX]);
730*a56136a1SVincenzo Maffione 
731*a56136a1SVincenzo Maffione 	/* Wait for the kloop to actually terminate. */
732b6e66be2SVincenzo Maffione 	while (running) {
733b6e66be2SVincenzo Maffione 		usleep_range(1000, 1500);
734b6e66be2SVincenzo Maffione 		NMG_LOCK();
735b6e66be2SVincenzo Maffione 		running = (NM_ACCESS_ONCE(priv->np_kloop_state)
736b6e66be2SVincenzo Maffione 				& NM_SYNC_KLOOP_RUNNING);
737b6e66be2SVincenzo Maffione 		NMG_UNLOCK();
738b6e66be2SVincenzo Maffione 	}
739b6e66be2SVincenzo Maffione 
740b6e66be2SVincenzo Maffione 	return err;
741b6e66be2SVincenzo Maffione }
742b6e66be2SVincenzo Maffione 
743b6e66be2SVincenzo Maffione #ifdef WITH_PTNETMAP
744b6e66be2SVincenzo Maffione /*
745b6e66be2SVincenzo Maffione  * Guest ptnetmap txsync()/rxsync() routines, used in ptnet device drivers.
746b6e66be2SVincenzo Maffione  * These routines are reused across the different operating systems supported
747b6e66be2SVincenzo Maffione  * by netmap.
748b6e66be2SVincenzo Maffione  */
749b6e66be2SVincenzo Maffione 
750b6e66be2SVincenzo Maffione /*
751b6e66be2SVincenzo Maffione  * Reconcile host and guest views of the transmit ring.
752b6e66be2SVincenzo Maffione  *
753b6e66be2SVincenzo Maffione  * Guest user wants to transmit packets up to the one before ring->head,
754b6e66be2SVincenzo Maffione  * and guest kernel knows tx_ring->hwcur is the first packet unsent
755b6e66be2SVincenzo Maffione  * by the host kernel.
756b6e66be2SVincenzo Maffione  *
757b6e66be2SVincenzo Maffione  * We push out as many packets as possible, and possibly
758b6e66be2SVincenzo Maffione  * reclaim buffers from previously completed transmission.
759b6e66be2SVincenzo Maffione  *
760b6e66be2SVincenzo Maffione  * Notifications from the host are enabled only if the guest user would
761b6e66be2SVincenzo Maffione  * block (no space in the ring).
762b6e66be2SVincenzo Maffione  */
763b6e66be2SVincenzo Maffione bool
764b6e66be2SVincenzo Maffione netmap_pt_guest_txsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
765b6e66be2SVincenzo Maffione 			struct netmap_kring *kring, int flags)
766b6e66be2SVincenzo Maffione {
767b6e66be2SVincenzo Maffione 	bool notify = false;
768b6e66be2SVincenzo Maffione 
769b6e66be2SVincenzo Maffione 	/* Disable notifications */
770b6e66be2SVincenzo Maffione 	atok->appl_need_kick = 0;
771b6e66be2SVincenzo Maffione 
772b6e66be2SVincenzo Maffione 	/*
773b6e66be2SVincenzo Maffione 	 * First part: tell the host (updating the CSB) to process the new
774b6e66be2SVincenzo Maffione 	 * packets.
775b6e66be2SVincenzo Maffione 	 */
776b6e66be2SVincenzo Maffione 	kring->nr_hwcur = ktoa->hwcur;
777f79ba6d7SVincenzo Maffione 	nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);
778b6e66be2SVincenzo Maffione 
779b6e66be2SVincenzo Maffione         /* Ask for a kick from the guest to the host if needed. */
780b6e66be2SVincenzo Maffione 	if (((kring->rhead != kring->nr_hwcur || nm_kr_txempty(kring))
781b6e66be2SVincenzo Maffione 		&& NM_ACCESS_ONCE(ktoa->kern_need_kick)) ||
782b6e66be2SVincenzo Maffione 			(flags & NAF_FORCE_RECLAIM)) {
783b6e66be2SVincenzo Maffione 		atok->sync_flags = flags;
784b6e66be2SVincenzo Maffione 		notify = true;
785b6e66be2SVincenzo Maffione 	}
786b6e66be2SVincenzo Maffione 
787b6e66be2SVincenzo Maffione 	/*
788b6e66be2SVincenzo Maffione 	 * Second part: reclaim buffers for completed transmissions.
789b6e66be2SVincenzo Maffione 	 */
790b6e66be2SVincenzo Maffione 	if (nm_kr_txempty(kring) || (flags & NAF_FORCE_RECLAIM)) {
791f79ba6d7SVincenzo Maffione 		nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
792f79ba6d7SVincenzo Maffione 					&kring->nr_hwcur);
793b6e66be2SVincenzo Maffione 	}
794b6e66be2SVincenzo Maffione 
795b6e66be2SVincenzo Maffione         /*
796b6e66be2SVincenzo Maffione          * No more room in the ring for new transmissions. The user thread will
797b6e66be2SVincenzo Maffione 	 * go to sleep and we need to be notified by the host when more free
798b6e66be2SVincenzo Maffione 	 * space is available.
799b6e66be2SVincenzo Maffione          */
800b6e66be2SVincenzo Maffione 	if (nm_kr_txempty(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
801b6e66be2SVincenzo Maffione 		/* Reenable notifications. */
802b6e66be2SVincenzo Maffione 		atok->appl_need_kick = 1;
803f79ba6d7SVincenzo Maffione                 /* Double check, with store-load memory barrier. */
804f79ba6d7SVincenzo Maffione 		nm_stld_barrier();
805f79ba6d7SVincenzo Maffione 		nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
806f79ba6d7SVincenzo Maffione 					&kring->nr_hwcur);
807b6e66be2SVincenzo Maffione                 /* If there is new free space, disable notifications */
808b6e66be2SVincenzo Maffione 		if (unlikely(!nm_kr_txempty(kring))) {
809b6e66be2SVincenzo Maffione 			atok->appl_need_kick = 0;
810b6e66be2SVincenzo Maffione 		}
811b6e66be2SVincenzo Maffione 	}
812b6e66be2SVincenzo Maffione 
813b6e66be2SVincenzo Maffione 	nm_prdis(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
814b6e66be2SVincenzo Maffione 		kring->name, atok->head, atok->cur, ktoa->hwtail,
815b6e66be2SVincenzo Maffione 		kring->rhead, kring->rcur, kring->nr_hwtail);
816b6e66be2SVincenzo Maffione 
817b6e66be2SVincenzo Maffione 	return notify;
818b6e66be2SVincenzo Maffione }
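
/*
 * Note: ptnet drivers call this routine (and netmap_pt_guest_rxsync() below)
 * from their txsync/rxsync callbacks, passing the per-ring CSB entries shared
 * with the host; a 'true' return value tells the driver that the host must be
 * notified (kicked).
 */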
819b6e66be2SVincenzo Maffione 
820b6e66be2SVincenzo Maffione /*
821b6e66be2SVincenzo Maffione  * Reconcile host and guest view of the receive ring.
822b6e66be2SVincenzo Maffione  *
823b6e66be2SVincenzo Maffione  * Update hwcur/hwtail from host (reading from CSB).
824b6e66be2SVincenzo Maffione  *
825b6e66be2SVincenzo Maffione  * If guest user has released buffers up to the one before ring->head, we
826b6e66be2SVincenzo Maffione  * also give them to the host.
827b6e66be2SVincenzo Maffione  *
828b6e66be2SVincenzo Maffione  * Notifications from the host are enabled only if the guest user would
829b6e66be2SVincenzo Maffione  * block (no more completed slots in the ring).
830b6e66be2SVincenzo Maffione  */
831b6e66be2SVincenzo Maffione bool
832b6e66be2SVincenzo Maffione netmap_pt_guest_rxsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
833b6e66be2SVincenzo Maffione 			struct netmap_kring *kring, int flags)
834b6e66be2SVincenzo Maffione {
835b6e66be2SVincenzo Maffione 	bool notify = false;
836b6e66be2SVincenzo Maffione 
837b6e66be2SVincenzo Maffione         /* Disable notifications */
838b6e66be2SVincenzo Maffione 	atok->appl_need_kick = 0;
839b6e66be2SVincenzo Maffione 
840b6e66be2SVincenzo Maffione 	/*
841b6e66be2SVincenzo Maffione 	 * First part: import newly received packets, by updating the kring
842b6e66be2SVincenzo Maffione 	 * hwtail to the hwtail known from the host (read from the CSB).
843b6e66be2SVincenzo Maffione 	 * This also updates the kring hwcur.
844b6e66be2SVincenzo Maffione 	 */
845f79ba6d7SVincenzo Maffione 	nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, &kring->nr_hwcur);
846b6e66be2SVincenzo Maffione 	kring->nr_kflags &= ~NKR_PENDINTR;
847b6e66be2SVincenzo Maffione 
848b6e66be2SVincenzo Maffione 	/*
849b6e66be2SVincenzo Maffione 	 * Second part: tell the host about the slots that guest user has
850b6e66be2SVincenzo Maffione 	 * released, by updating cur and head in the CSB.
851b6e66be2SVincenzo Maffione 	 */
852b6e66be2SVincenzo Maffione 	if (kring->rhead != kring->nr_hwcur) {
853f79ba6d7SVincenzo Maffione 		nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);
854b6e66be2SVincenzo Maffione                 /* Ask for a kick from the guest to the host if needed. */
855b6e66be2SVincenzo Maffione 		if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
856b6e66be2SVincenzo Maffione 			atok->sync_flags = flags;
857b6e66be2SVincenzo Maffione 			notify = true;
858b6e66be2SVincenzo Maffione 		}
859b6e66be2SVincenzo Maffione 	}
860b6e66be2SVincenzo Maffione 
861b6e66be2SVincenzo Maffione         /*
862b6e66be2SVincenzo Maffione          * No more completed RX slots. The user thread will go to sleep and
863b6e66be2SVincenzo Maffione 	 * we need to be notified by the host when more RX slots have been
864b6e66be2SVincenzo Maffione 	 * completed.
865b6e66be2SVincenzo Maffione          */
866b6e66be2SVincenzo Maffione 	if (nm_kr_rxempty(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
867b6e66be2SVincenzo Maffione 		/* Reenable notifications. */
868b6e66be2SVincenzo Maffione                 atok->appl_need_kick = 1;
869f79ba6d7SVincenzo Maffione                 /* Double check, with store-load memory barrier. */
870f79ba6d7SVincenzo Maffione 		nm_stld_barrier();
871f79ba6d7SVincenzo Maffione 		nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
872f79ba6d7SVincenzo Maffione 					&kring->nr_hwcur);
873b6e66be2SVincenzo Maffione                 /* If there are new slots, disable notifications. */
874b6e66be2SVincenzo Maffione 		if (!nm_kr_rxempty(kring)) {
875b6e66be2SVincenzo Maffione                         atok->appl_need_kick = 0;
876b6e66be2SVincenzo Maffione                 }
877b6e66be2SVincenzo Maffione         }
878b6e66be2SVincenzo Maffione 
879b6e66be2SVincenzo Maffione 	nm_prdis(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
880b6e66be2SVincenzo Maffione 		kring->name, atok->head, atok->cur, ktoa->hwtail,
881b6e66be2SVincenzo Maffione 		kring->rhead, kring->rcur, kring->nr_hwtail);
882b6e66be2SVincenzo Maffione 
883b6e66be2SVincenzo Maffione 	return notify;
884b6e66be2SVincenzo Maffione }
885b6e66be2SVincenzo Maffione 
886b6e66be2SVincenzo Maffione /*
887b6e66be2SVincenzo Maffione  * Callbacks for ptnet drivers: nm_krings_create, nm_krings_delete, nm_dtor.
888b6e66be2SVincenzo Maffione  */
889b6e66be2SVincenzo Maffione int
890b6e66be2SVincenzo Maffione ptnet_nm_krings_create(struct netmap_adapter *na)
891b6e66be2SVincenzo Maffione {
892b6e66be2SVincenzo Maffione 	struct netmap_pt_guest_adapter *ptna =
893b6e66be2SVincenzo Maffione 			(struct netmap_pt_guest_adapter *)na; /* Upcast. */
894b6e66be2SVincenzo Maffione 	struct netmap_adapter *na_nm = &ptna->hwup.up;
895b6e66be2SVincenzo Maffione 	struct netmap_adapter *na_dr = &ptna->dr.up;
896b6e66be2SVincenzo Maffione 	int ret;
897b6e66be2SVincenzo Maffione 
898b6e66be2SVincenzo Maffione 	if (ptna->backend_users) {
899b6e66be2SVincenzo Maffione 		return 0;
900b6e66be2SVincenzo Maffione 	}
901b6e66be2SVincenzo Maffione 
902b6e66be2SVincenzo Maffione 	/* Create krings on the public netmap adapter. */
903b6e66be2SVincenzo Maffione 	ret = netmap_hw_krings_create(na_nm);
904b6e66be2SVincenzo Maffione 	if (ret) {
905b6e66be2SVincenzo Maffione 		return ret;
906b6e66be2SVincenzo Maffione 	}
907b6e66be2SVincenzo Maffione 
908b6e66be2SVincenzo Maffione 	/* Copy krings into the netmap adapter private to the driver. */
909b6e66be2SVincenzo Maffione 	na_dr->tx_rings = na_nm->tx_rings;
910b6e66be2SVincenzo Maffione 	na_dr->rx_rings = na_nm->rx_rings;
911b6e66be2SVincenzo Maffione 
912b6e66be2SVincenzo Maffione 	return 0;
913b6e66be2SVincenzo Maffione }
914b6e66be2SVincenzo Maffione 
915b6e66be2SVincenzo Maffione void
916b6e66be2SVincenzo Maffione ptnet_nm_krings_delete(struct netmap_adapter *na)
917b6e66be2SVincenzo Maffione {
918b6e66be2SVincenzo Maffione 	struct netmap_pt_guest_adapter *ptna =
919b6e66be2SVincenzo Maffione 			(struct netmap_pt_guest_adapter *)na; /* Upcast. */
920b6e66be2SVincenzo Maffione 	struct netmap_adapter *na_nm = &ptna->hwup.up;
921b6e66be2SVincenzo Maffione 	struct netmap_adapter *na_dr = &ptna->dr.up;
922b6e66be2SVincenzo Maffione 
923b6e66be2SVincenzo Maffione 	if (ptna->backend_users) {
924b6e66be2SVincenzo Maffione 		return;
925b6e66be2SVincenzo Maffione 	}
926b6e66be2SVincenzo Maffione 
927b6e66be2SVincenzo Maffione 	na_dr->tx_rings = NULL;
928b6e66be2SVincenzo Maffione 	na_dr->rx_rings = NULL;
929b6e66be2SVincenzo Maffione 
930b6e66be2SVincenzo Maffione 	netmap_hw_krings_delete(na_nm);
931b6e66be2SVincenzo Maffione }
932b6e66be2SVincenzo Maffione 
933b6e66be2SVincenzo Maffione void
934b6e66be2SVincenzo Maffione ptnet_nm_dtor(struct netmap_adapter *na)
935b6e66be2SVincenzo Maffione {
936b6e66be2SVincenzo Maffione 	struct netmap_pt_guest_adapter *ptna =
937b6e66be2SVincenzo Maffione 			(struct netmap_pt_guest_adapter *)na;
938b6e66be2SVincenzo Maffione 
939b6e66be2SVincenzo Maffione 	netmap_mem_put(ptna->dr.up.nm_mem);
940b6e66be2SVincenzo Maffione 	memset(&ptna->dr, 0, sizeof(ptna->dr));
941b6e66be2SVincenzo Maffione 	netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp);
942b6e66be2SVincenzo Maffione }
943b6e66be2SVincenzo Maffione 
944b6e66be2SVincenzo Maffione int
945b6e66be2SVincenzo Maffione netmap_pt_guest_attach(struct netmap_adapter *arg,
946b6e66be2SVincenzo Maffione 		       unsigned int nifp_offset, unsigned int memid)
947b6e66be2SVincenzo Maffione {
948b6e66be2SVincenzo Maffione 	struct netmap_pt_guest_adapter *ptna;
949b6e66be2SVincenzo Maffione 	struct ifnet *ifp = arg ? arg->ifp : NULL;
950b6e66be2SVincenzo Maffione 	int error;
951b6e66be2SVincenzo Maffione 
952b6e66be2SVincenzo Maffione 	/* get allocator */
953b6e66be2SVincenzo Maffione 	arg->nm_mem = netmap_mem_pt_guest_new(ifp, nifp_offset, memid);
954b6e66be2SVincenzo Maffione 	if (arg->nm_mem == NULL)
955b6e66be2SVincenzo Maffione 		return ENOMEM;
956b6e66be2SVincenzo Maffione 	arg->na_flags |= NAF_MEM_OWNER;
957b6e66be2SVincenzo Maffione 	error = netmap_attach_ext(arg, sizeof(struct netmap_pt_guest_adapter), 1);
958b6e66be2SVincenzo Maffione 	if (error)
959b6e66be2SVincenzo Maffione 		return error;
960b6e66be2SVincenzo Maffione 
961b6e66be2SVincenzo Maffione 	/* get the netmap_pt_guest_adapter */
962b6e66be2SVincenzo Maffione 	ptna = (struct netmap_pt_guest_adapter *) NA(ifp);
963b6e66be2SVincenzo Maffione 
964b6e66be2SVincenzo Maffione 	/* Initialize a separate pass-through netmap adapter that is going to
965b6e66be2SVincenzo Maffione 	 * be used by the ptnet driver only, and so never exposed to netmap
966b6e66be2SVincenzo Maffione          * applications. We only need a subset of the available fields. */
967b6e66be2SVincenzo Maffione 	memset(&ptna->dr, 0, sizeof(ptna->dr));
968b6e66be2SVincenzo Maffione 	ptna->dr.up.ifp = ifp;
969b6e66be2SVincenzo Maffione 	ptna->dr.up.nm_mem = netmap_mem_get(ptna->hwup.up.nm_mem);
970b6e66be2SVincenzo Maffione         ptna->dr.up.nm_config = ptna->hwup.up.nm_config;
971b6e66be2SVincenzo Maffione 
972b6e66be2SVincenzo Maffione 	ptna->backend_users = 0;
973b6e66be2SVincenzo Maffione 
974b6e66be2SVincenzo Maffione 	return 0;
975b6e66be2SVincenzo Maffione }
976b6e66be2SVincenzo Maffione 
977b6e66be2SVincenzo Maffione #endif /* WITH_PTNETMAP */
978