/* xref: /freebsd-14.2/sys/dev/netmap/netmap.c (revision 13c46411) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2011-2014 Matteo Landi
 * Copyright (C) 2011-2016 Luigi Rizzo
 * Copyright (C) 2011-2016 Giuseppe Lettieri
 * Copyright (C) 2011-2016 Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


/*
 * $FreeBSD$
 *
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    a select()able file descriptor on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 *

		SYNCHRONIZATION (USER)

The netmap rings and data structures may be shared among multiple
user threads or even independent processes.
Any synchronization among those threads/processes is delegated
to the threads themselves. Only one thread at a time can be in
a system call on the same netmap ring. The OS does not enforce
this and only guarantees against system crashes in case of
invalid usage.

		LOCKING (INTERNAL)

Within the kernel, access to the netmap rings is protected as follows:

- a spinlock on each ring, to handle producer/consumer races on
  RX rings attached to the host stack (against multiple host
  threads writing from the host stack to the same ring),
  and on 'destination' rings attached to a VALE switch
  (i.e. RX rings in VALE ports, and TX rings in NIC/host ports),
  protecting multiple active senders for the same destination

- an atomic variable to guarantee that there is at most one
  instance of *_*xsync() on the ring at any time.
  For rings connected to user file
  descriptors, an atomic_test_and_set() protects this, and the
  lock on the ring is not actually used.
  For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
  is also used to prevent multiple executions (the driver might indeed
  already guarantee this).
  For NIC TX rings connected to a VALE switch, the lock arbitrates
  access to the queue (both when allocating buffers and when pushing
  them out).

- *xsync() should be protected against initializations of the card.
  On FreeBSD most devices have the reset routine protected by
  a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
  the RING protection on rx_reset(); this should be added.

  On linux there is an external lock on the tx path, which probably
  also arbitrates access to the reset routine. XXX to be revised

- a per-interface core_lock protecting access from the host stack
  while interfaces may be detached from netmap mode.
  XXX there should be no need for this lock if we detach the interfaces
  only while they are down.


--- VALE SWITCH ---

NMG_LOCK() serializes all modifications to switches and ports.
A switch cannot be deleted until all ports are gone.

For each switch, an SX lock (RWlock on linux) protects
deletion of ports. When configuring or deleting a port, the
lock is acquired in exclusive mode (after holding NMG_LOCK).
When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
The lock is held throughout the entire forwarding cycle,
during which the thread may incur a page fault.
Hence it is important that sleepable shared locks are used.

On the rx ring, the per-port lock is grabbed initially to reserve
a number of slots in the ring, then the lock is released,
packets are copied from source to destination, and then
the lock is acquired again and the receive ring is updated.
(A similar thing is done on the tx ring for NIC and host stack
ports attached to the switch)

 */
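
/*
 * A minimal userspace sketch of steps 1-6 above, using the legacy
 * NIOCREGIF path; error handling is omitted and the interface name
 * "em0" is just a placeholder. See netmap(4) and the nm_open()
 * wrappers in netmap_user.h for the recommended user API.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <string.h>
 *	#include <net/netmap.h>
 *	#include <net/netmap_user.h>
 *
 *	int fd = open("/dev/netmap", O_RDWR);			// step 1
 *	struct nmreq req;
 *	memset(&req, 0, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);				// step 2
 *	char *mem = mmap(NULL, req.nr_memsize,			// step 3
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txr = NETMAP_TXRING(nifp, 0);	// step 4
 *	// ... fill slots between txr->head and txr->tail ...
 *	ioctl(fd, NIOCTXSYNC, NULL);				// step 5
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *	poll(&pfd, 1, -1);					// step 6
 */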


/* --- internals ----
 *
 * Roadmap to the code that implements the above.
 *
 * > 1. a process/thread issues one or more open() on /dev/netmap, to create
 * >    a select()able file descriptor on which events are reported.
 *
 *  	Internally, we allocate a netmap_priv_d structure, that will be
 *  	initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
 *  	structure for each open().
 *
 *      os-specific:
 *  	    FreeBSD: see netmap_open() (netmap_freebsd.c)
 *  	    linux:   see linux_netmap_open() (netmap_linux.c)
 *
 * > 2. on each descriptor, the process issues an ioctl() to identify
 * >    the interface that should report events to the file descriptor.
 *
 * 	Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
 * 	Most important things happen in netmap_get_na() and
 * 	netmap_do_regif(), called from there. Additional details can be
 * 	found in the comments above those functions.
 *
 * 	In all cases, this action creates/takes-a-reference-to a
 * 	netmap_*_adapter describing the port, and allocates a netmap_if
 * 	and all necessary netmap rings, filling them with netmap buffers.
 *
 *      In this phase, the sync callbacks for each ring are set (these are used
 *      in steps 5 and 6 below).  The callbacks depend on the type of adapter.
 *      The adapter creation/initialization code puts them in the
 * 	netmap_adapter (fields na->nm_txsync and na->nm_rxsync).  Then, they
 * 	are copied from there to the netmap_kring's during netmap_do_regif(), by
 * 	the nm_krings_create() callback.  All the nm_krings_create callbacks
 * 	actually call netmap_krings_create() to perform this and the other
 * 	common stuff. netmap_krings_create() also takes care of the host rings,
 * 	if needed, by setting their sync callbacks appropriately.
 *
 * 	Additional actions depend on the kind of netmap_adapter that has been
 * 	registered:
 *
 * 	- netmap_hw_adapter:  	     [netmap.c]
 * 	     This is a system netdev/ifp with native netmap support.
 * 	     The ifp is detached from the host stack by redirecting:
 * 	       - transmissions (from the network stack) to netmap_transmit()
 * 	       - receive notifications to the nm_notify() callback for
 * 	         this adapter. The callback is normally netmap_notify(), unless
 * 	         the ifp is attached to a bridge using bwrap, in which case it
 * 	         is netmap_bwrap_intr_notify().
 *
 * 	- netmap_generic_adapter:      [netmap_generic.c]
 * 	      A system netdev/ifp without native netmap support.
 *
 * 	(the decision about native/non native support is taken in
 * 	 netmap_get_hw_na(), called by netmap_get_na())
 *
 * 	- netmap_vp_adapter 		[netmap_vale.c]
 * 	      Returned by netmap_get_bdg_na().
 * 	      This is a persistent or ephemeral VALE port. Ephemeral ports
 * 	      are created on the fly if they don't already exist, and are
 * 	      always attached to a bridge.
 * 	      Persistent VALE ports must be created separately, and are
 * 	      then attached like normal NICs. The NIOCREGIF we are examining
 * 	      will find them only if they had previously been created and
 * 	      attached (see VALE_CTL below).
 *
 * 	- netmap_pipe_adapter 	      [netmap_pipe.c]
 * 	      Returned by netmap_get_pipe_na().
 * 	      Both pipe ends are created, if they didn't already exist.
 *
 * 	- netmap_monitor_adapter      [netmap_monitor.c]
 * 	      Returned by netmap_get_monitor_na().
 * 	      If successful, the nm_sync callbacks of the monitored adapter
 * 	      will be intercepted by the returned monitor.
 *
 * 	- netmap_bwrap_adapter	      [netmap_vale.c]
 * 	      Cannot be obtained in this way, see VALE_CTL below
 *
 *
 * 	os-specific:
 * 	    linux: we first go through linux_netmap_ioctl() to
 * 	           adapt the FreeBSD interface to the linux one.
 *
 *
 * > 3. on each descriptor, the process issues an mmap() request to
 * >    map the shared memory region within the process' address space.
 * >    The list of interesting queues is indicated by a location in
 * >    the shared memory region.
 *
 *      os-specific:
 *  	    FreeBSD: netmap_mmap_single (netmap_freebsd.c).
 *  	    linux:   linux_netmap_mmap (netmap_linux.c).
 *
 * > 4. using the functions in the netmap(4) userspace API, a process
 * >    can look up the occupation state of a queue, access memory buffers,
 * >    and retrieve received packets or enqueue packets to transmit.
 *
 * 	these actions do not involve the kernel.
 *
 * > 5. using some ioctl()s the process can synchronize the userspace view
 * >    of the queue with the actual status in the kernel. This includes both
 * >    receiving the notification of new packets, and transmitting new
 * >    packets on the output interface.
 *
 * 	These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
 * 	cases. They invoke the nm_sync callbacks on the netmap_kring
 * 	structures, as initialized in step 2 and maybe later modified
 * 	by a monitor. Monitors, however, will always call the original
 * 	callback before doing anything else.
 *
 *
 * > 6. select() or poll() can be used to wait for events on individual
 * >    transmit or receive queues (or all queues for a given interface).
 *
 * 	Implemented in netmap_poll(). This will call the same nm_sync()
 * 	callbacks as in step 5 above.
 *
 * 	os-specific:
 * 		linux: we first go through linux_netmap_poll() to adapt
 * 		       the FreeBSD interface to the linux one.
 *
 *
 *  ----  VALE_CTL -----
 *
 *  VALE switches are controlled by issuing a NIOCREGIF with a non-null
 *  nr_cmd in the nmreq structure. These subcommands are handled by
 *  netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
 *  and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
 *  subcommands, respectively.
 *
 *  Any network interface known to the system (including a persistent VALE
 *  port) can be attached to a VALE switch by issuing the
 *  NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
 *  look exactly like ephemeral VALE ports (as created in step 2 above).  The
 *  attachment of other interfaces, instead, requires the creation of a
 *  netmap_bwrap_adapter.  Moreover, the attached interface must be put in
 *  netmap mode. This may require the creation of a netmap_generic_adapter if
 *  we have no native support for the interface, or if generic adapters have
 *  been forced by sysctl.
 *
 *  Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
 *  called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
 *  callback.  In the case of the bwrap, the callback creates the
 *  netmap_bwrap_adapter.  The initialization of the bwrap is then
 *  completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
 *  callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
 *  A generic adapter for the wrapped ifp will be created if needed, when
 *  netmap_get_bdg_na() calls netmap_get_hw_na().
 *
 *
 *  ---- DATAPATHS -----
 *
 *              -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
 *
 *    na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
 *
 *    - tx from netmap userspace:
 *	 concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_txsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_rxsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from host stack
 *       concurrently:
 *           1) host stack
 *                netmap_transmit()
 *                  na->nm_notify  == netmap_notify()
 *           2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_rxsync_from_host
 *                  netmap_rxsync_from_host(na, NULL, NULL)
 *    - tx to host stack
 *           ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *             kring->nm_sync() == netmap_txsync_to_host
 *               netmap_txsync_to_host(na)
 *                 nm_os_send_up()
 *                   FreeBSD: na->if_input() == ether_input()
 *                   linux: netif_rx() with NM_MAGIC_PRIORITY_RX
 *
 *
 *               -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
 *
 *    na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
 *
 *    - tx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_txsync()
 *                   nm_os_generic_xmit_frame()
 *                       linux:   dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
 *                           ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                               gna->save_start_xmit == orig. dev. start_xmit
 *                       FreeBSD: na->if_transmit() == orig. dev if_transmit
 *           2) generic_mbuf_destructor()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from netmap userspace:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_rxsync()
 *                   mbq_safe_dequeue()
 *           2) device driver
 *               generic_rx_handler()
 *                   mbq_safe_enqueue()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from host stack
 *        FreeBSD: same as native
 *        Linux: same as native except:
 *           1) host stack
 *               dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
 *                   ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                       netmap_transmit()
 *                           na->nm_notify() == netmap_notify()
 *    - tx to host stack (same as native):
 *
 *
 *                           -= VALE =-
 *
 *   INCOMING:
 *
 *      - VALE ports:
 *          ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *              kring->nm_sync() == netmap_vp_txsync()
 *
 *      - system device with native support:
 *         from cable:
 *             interrupt
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                     kring->nm_sync() == DEVICE_netmap_rxsync()
 *                     netmap_vp_txsync()
 *                     kring->nm_sync() == DEVICE_netmap_rxsync()
 *         from host stack:
 *             netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                     kring->nm_sync() == netmap_rxsync_from_host()
 *                     netmap_vp_txsync()
 *
 *      - system device with generic support:
 *         from device driver:
 *            generic_rx_handler()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                     kring->nm_sync() == generic_netmap_rxsync()
 *                     netmap_vp_txsync()
 *                     kring->nm_sync() == generic_netmap_rxsync()
 *         from host stack:
 *            netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                     kring->nm_sync() == netmap_rxsync_from_host()
 *                     netmap_vp_txsync()
 *
 *   (all cases) --> nm_bdg_flush()
 *                      dest_na->nm_notify() == (see below)
 *
 *   OUTGOING:
 *
 *      - VALE ports:
 *         concurrently:
 *             1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                    kring->nm_sync() == netmap_vp_rxsync()
 *             2) from nm_bdg_flush()
 *                    na->nm_notify() == netmap_notify()
 *
 *      - system device with native support:
 *          to cable:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == DEVICE_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 *
 *      - system device with generic adapter:
 *          to device driver:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == generic_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync()
 *
 */
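
/*
 * A minimal sketch (not taken from any real driver) of how a NIC driver
 * with native support creates the netmap_hw_adapter mentioned above:
 * it fills a netmap_adapter template and calls netmap_attach() from its
 * device attach routine. The DEVICE_* and sc->* names are placeholders.
 *
 *	static void
 *	DEVICE_netmap_attach(struct DEVICE_softc *sc)
 *	{
 *		struct netmap_adapter na;
 *
 *		bzero(&na, sizeof(na));
 *		na.ifp = sc->ifp;
 *		na.num_tx_desc = sc->num_tx_desc;
 *		na.num_rx_desc = sc->num_rx_desc;
 *		na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *		na.nm_txsync = DEVICE_netmap_txsync;
 *		na.nm_rxsync = DEVICE_netmap_rxsync;
 *		na.nm_register = DEVICE_netmap_reg;
 *		netmap_attach(&na);
 *	}
 */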

/*
 * OS-specific code that is used only within this file.
 * Other OS-specific code that must be accessed by drivers
 * is present in netmap_kern.h
 */

#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct, UID, GID */
#include <sys/filio.h>	/* FIONBIO */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <sys/epoch.h>
#include <net/vnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/endian.h>
#include <sys/refcount.h>
#include <net/ethernet.h>	/* ETHER_BPF_MTAP */


#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined (_WIN32)

#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */

/*
 * common headers
 */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


/* user-controlled variables */
int netmap_verbose;
#ifdef CONFIG_NETMAP_DEBUG
int netmap_debug;
#endif /* CONFIG_NETMAP_DEBUG */

static int netmap_no_timestamp; /* don't timestamp on rxsync */
int netmap_no_pendintr = 1;
int netmap_txsync_retry = 2;
static int netmap_fwd = 0;	/* force transparent forwarding */

/*
 * netmap_admode selects the netmap mode to use.
 * Invalid values are reset to NETMAP_ADMODE_BEST
 */
enum {	NETMAP_ADMODE_BEST = 0,	/* use native, fallback to generic */
	NETMAP_ADMODE_NATIVE,	/* either native or none */
	NETMAP_ADMODE_GENERIC,	/* force generic */
	NETMAP_ADMODE_LAST };
static int netmap_admode = NETMAP_ADMODE_BEST;

/* netmap_generic_mit controls mitigation of RX notifications for
 * the generic netmap adapter. The value is a time interval in
 * nanoseconds. */
int netmap_generic_mit = 100*1000;

/* We use by default netmap-aware qdiscs with generic netmap adapters,
 * even if there can be a little performance hit with hardware NICs.
 * However, using the qdisc is the safer approach, for two reasons:
 * 1) it prevents non-fifo qdiscs from breaking the TX notification
 *    scheme, which is based on mbuf destructors when txqdisc is
 *    not used.
 * 2) it makes it possible to transmit over software devices that
 *    change skb->dev, like bridge, veth, ...
 *
 * Anyway users looking for the best performance should
 * use native adapters.
 */
#ifdef linux
int netmap_generic_txqdisc = 1;
#endif

/* Default number of slots and queues for generic adapters. */
int netmap_generic_ringsize = 1024;
int netmap_generic_rings = 1;

/* Non-zero to enable checksum offloading in NIC drivers */
int netmap_generic_hwcsum = 0;

/* Non-zero if ptnet devices are allowed to use virtio-net headers. */
int ptnet_vnet_hdr = 1;

/*
 * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated
 * in some other operating systems
 */
SYSBEGIN(main_init);

SYSCTL_DECL(_dev_netmap);
SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
		CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
#ifdef CONFIG_NETMAP_DEBUG
SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
		CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
#endif /* CONFIG_NETMAP_DEBUG */
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
		CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
		0, "Always look for new received packets.");
SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
		&netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");

SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
		"Force NR_FORWARD mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
		"Adapter mode. 0 selects the best option available, "
		"1 forces native adapter, 2 forces emulated adapter");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
		0, "Hardware checksums. 0 to disable checksum generation by the NIC (default), "
		"1 to enable checksum generation by the NIC");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
		0, "RX notification interval in nanoseconds");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
		&netmap_generic_ringsize, 0,
		"Number of per-ring slots for emulated netmap mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
		&netmap_generic_rings, 0,
		"Number of TX/RX queues for emulated netmap adapters");
#ifdef linux
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
		&netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
#endif
SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
		0, "Allow ptnet devices to use virtio-net headers");

SYSEND;

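/*
 * Usage sketch (FreeBSD): the knobs above can be inspected and tuned
 * at runtime with sysctl(8), e.g. to force emulated (generic) adapters
 * and enlarge their rings:
 *
 *	# sysctl dev.netmap.admode=2
 *	# sysctl dev.netmap.generic_ringsize=4096
 */
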
NMG_LOCK_T	netmap_global_lock;

/*
 * mark the ring as stopped, and run through the locks
 * to make sure other users get to see it.
 * stopped must be either NM_KR_STOPPED (for unbounded stop)
 * or NM_KR_LOCKED (brief stop for mutual exclusion purposes)
 */
static void
netmap_disable_ring(struct netmap_kring *kr, int stopped)
{
	nm_kr_stop(kr, stopped);
	// XXX check if nm_kr_stop is sufficient
	mtx_lock(&kr->q_lock);
	mtx_unlock(&kr->q_lock);
	nm_kr_put(kr);
}

/* stop or enable a single ring */
void
netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
{
	if (stopped)
		netmap_disable_ring(NMR(na, t)[ring_id], stopped);
	else
		NMR(na, t)[ring_id]->nkr_stopped = 0;
}


/* stop or enable all the rings of na */
void
netmap_set_all_rings(struct netmap_adapter *na, int stopped)
{
	int i;
	enum txrx t;

	if (!nm_netmap_on(na))
		return;

	if (netmap_verbose) {
		nm_prinf("%s: %sable all rings", na->name,
		    (stopped ? "dis" : "en"));
	}
	for_rx_tx(t) {
		for (i = 0; i < netmap_real_rings(na, t); i++) {
			netmap_set_ring(na, i, t, stopped);
		}
	}
}

/*
 * Convenience function used in drivers.  Waits for current txsync()s/rxsync()s
 * to finish and prevents any new one from starting.  Call this before turning
 * netmap mode off, or before removing the hardware rings (e.g., on module
 * unload).
 */
void
netmap_disable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), NM_KR_LOCKED);
	}
}

/*
 * Convenience function used in drivers.  Re-enables rxsync and txsync on the
 * adapter's rings.  In linux drivers, this should be placed near each
 * napi_enable().
 */
void
netmap_enable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), 0 /* enabled */);
	}
}
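
/*
 * Sketch of how a driver would typically pair the two helpers above
 * around a hardware reinitialization (DEVICE_* names are placeholders):
 *
 *	static void
 *	DEVICE_reinit(struct DEVICE_softc *sc)
 *	{
 *		netmap_disable_all_rings(sc->ifp);	// quiesce *_sync()
 *		DEVICE_stop(sc);
 *		DEVICE_init_hw(sc);
 *		netmap_enable_all_rings(sc->ifp);	// resume
 *	}
 */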

void
netmap_make_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		netmap_set_all_rings(na, NM_KR_LOCKED);
		na->na_flags |= NAF_ZOMBIE;
		netmap_set_all_rings(na, 0);
	}
}

void
netmap_undo_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		if (na->na_flags & NAF_ZOMBIE) {
			netmap_set_all_rings(na, NM_KR_LOCKED);
			na->na_flags &= ~NAF_ZOMBIE;
			netmap_set_all_rings(na, 0);
		}
	}
}

/*
 * generic bounds-checking function
 */
u_int
nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
{
	u_int oldv = *v;
	const char *op = NULL;

	if (dflt < lo)
		dflt = lo;
	if (dflt > hi)
		dflt = hi;
	if (oldv < lo) {
		*v = dflt;
		op = "Bump";
	} else if (oldv > hi) {
		*v = hi;
		op = "Clamp";
	}
	if (op && msg)
		nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
	return *v;
}
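
/*
 * Usage sketch (values are illustrative only): clamp a user-supplied
 * ring size into a sane range, logging any adjustment:
 *
 *	u_int ringsize = user_req;	// hypothetical request
 *	nm_bound_var(&ringsize, 1024, 64, 16384, "ring size");
 */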


/*
 * packet-dump function, user-supplied or static buffer.
 * The destination buffer must be at least 30+4*len
 */
const char *
nm_dump_buf(char *p, int len, int lim, char *dst)
{
	static char _dst[8192];
	int i, j, i0;
	static char hex[] ="0123456789abcdef";
	char *o;	/* output position */

#define P_HI(x)	hex[((x) & 0xf0)>>4]
#define P_LO(x)	hex[((x) & 0xf)]
#define P_C(x)	((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
	if (!dst)
		dst = _dst;
	if (lim <= 0 || lim > len)
		lim = len;
	o = dst;
	sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
	o += strlen(o);
	/* hexdump routine */
	for (i = 0; i < lim; ) {
		sprintf(o, "%5d: ", i);
		o += strlen(o);
		memset(o, ' ', 48);
		i0 = i;
		for (j=0; j < 16 && i < lim; i++, j++) {
			o[j*3] = P_HI(p[i]);
			o[j*3+1] = P_LO(p[i]);
		}
		i = i0;
		for (j=0; j < 16 && i < lim; i++, j++)
			o[j + 48] = P_C(p[i]);
		o[j+48] = '\n';
		o += j+49;
	}
	*o = '\0';
#undef P_HI
#undef P_LO
#undef P_C
	return dst;
}
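
/*
 * Usage sketch: dump at most the first 128 bytes of a buffer to the
 * log, using the internal static buffer (not reentrant):
 *
 *	nm_prinf("%s", nm_dump_buf(buf, len, 128, NULL));
 */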


/*
 * Fetch configuration from the device, to cope with dynamic
 * reconfigurations after loading the module.
 */
/* call with NMG_LOCK held */
int
netmap_update_config(struct netmap_adapter *na)
{
	struct nm_config_info info;

	bzero(&info, sizeof(info));
	if (na->nm_config == NULL ||
	    na->nm_config(na, &info)) {
		/* take whatever we had at init time */
		info.num_tx_rings = na->num_tx_rings;
		info.num_tx_descs = na->num_tx_desc;
		info.num_rx_rings = na->num_rx_rings;
		info.num_rx_descs = na->num_rx_desc;
		info.rx_buf_maxsize = na->rx_buf_maxsize;
	}

	if (na->num_tx_rings == info.num_tx_rings &&
	    na->num_tx_desc == info.num_tx_descs &&
	    na->num_rx_rings == info.num_rx_rings &&
	    na->num_rx_desc == info.num_rx_descs &&
	    na->rx_buf_maxsize == info.rx_buf_maxsize)
		return 0; /* nothing changed */
	if (na->active_fds == 0) {
		na->num_tx_rings = info.num_tx_rings;
		na->num_tx_desc = info.num_tx_descs;
		na->num_rx_rings = info.num_rx_rings;
		na->num_rx_desc = info.num_rx_descs;
		na->rx_buf_maxsize = info.rx_buf_maxsize;
		if (netmap_verbose)
			nm_prinf("configuration changed for %s: txring %d x %d, "
				"rxring %d x %d, rxbufsz %d",
				na->name, na->num_tx_rings, na->num_tx_desc,
				na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
		return 0;
	}
	nm_prerr("WARNING: configuration changed for %s while active: "
		"txring %d x %d, rxring %d x %d, rxbufsz %d",
		na->name, info.num_tx_rings, info.num_tx_descs,
		info.num_rx_rings, info.num_rx_descs,
		info.rx_buf_maxsize);
	return 1;
}
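
/*
 * Sketch of a driver-side nm_config callback that feeds the function
 * above (DEVICE_* and sc->* names are placeholders): it just reports
 * the current hardware configuration through nm_config_info.
 *
 *	static int
 *	DEVICE_netmap_config(struct netmap_adapter *na,
 *	    struct nm_config_info *info)
 *	{
 *		struct DEVICE_softc *sc = if_getsoftc(na->ifp);
 *
 *		info->num_tx_rings = sc->num_queues;
 *		info->num_rx_rings = sc->num_queues;
 *		info->num_tx_descs = sc->num_tx_desc;
 *		info->num_rx_descs = sc->num_rx_desc;
 *		info->rx_buf_maxsize = sc->rx_buf_size;
 *		return 0;
 *	}
 */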

/* nm_sync callbacks for the host rings */
static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);

static int
netmap_default_bufcfg(struct netmap_kring *kring, uint64_t target)
{
	kring->hwbuf_len = target;
	kring->buf_align = 0; /* no alignment */
	return 0;
}

/* create the krings array and initialize the fields common to all adapters.
 * The array layout is this:
 *
 *                    +----------+
 * na->tx_rings ----->|          | \
 *                    |          |  } na->num_tx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host tx kring
 * na->rx_rings ----> +----------+
 *                    |          | \
 *                    |          |  } na->num_rx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host rx kring
 *                    +----------+
 * na->tailroom ----->|          | \
 *                    |          |  } tailroom bytes
 *                    |          | /
 *                    +----------+
 *
 * Note: for compatibility, host krings are created even when not needed.
 * The tailroom space is currently used by vale ports for allocating leases.
 */
/* call with NMG_LOCK held */
int
netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
{
	u_int i, len, ndesc;
	struct netmap_kring *kring;
	u_int n[NR_TXRX];
	enum txrx t;
	int err = 0;

	if (na->tx_rings != NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already created");
		return 0;
	}

	/* account for the (possibly fake) host rings */
	n[NR_TX] = netmap_all_rings(na, NR_TX);
	n[NR_RX] = netmap_all_rings(na, NR_RX);

	len = (n[NR_TX] + n[NR_RX]) *
		(sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
		+ tailroom;

	na->tx_rings = nm_os_malloc((size_t)len);
	if (na->tx_rings == NULL) {
		nm_prerr("Cannot allocate krings");
		return ENOMEM;
	}
	na->rx_rings = na->tx_rings + n[NR_TX];
	na->tailroom = na->rx_rings + n[NR_RX];

	/* link the krings in the krings array */
	kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
	for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
		na->tx_rings[i] = kring;
		kring++;
	}

	/*
	 * All fields in krings are 0 except the ones initialized below,
	 * but better be explicit on important kring fields.
	 */
	for_rx_tx(t) {
		ndesc = nma_get_ndesc(na, t);
		for (i = 0; i < n[t]; i++) {
			kring = NMR(na, t)[i];
			bzero(kring, sizeof(*kring));
			kring->notify_na = na;
			kring->ring_id = i;
			kring->tx = t;
			kring->nkr_num_slots = ndesc;
			kring->nr_mode = NKR_NETMAP_OFF;
			kring->nr_pending_mode = NKR_NETMAP_OFF;
			if (i < nma_get_nrings(na, t)) {
				kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
				kring->nm_bufcfg = na->nm_bufcfg;
				if (kring->nm_bufcfg == NULL)
					kring->nm_bufcfg = netmap_default_bufcfg;
			} else {
				if (!(na->na_flags & NAF_HOST_RINGS))
					kring->nr_kflags |= NKR_FAKERING;
				kring->nm_sync = (t == NR_TX ?
						netmap_txsync_to_host:
						netmap_rxsync_from_host);
				kring->nm_bufcfg = netmap_default_bufcfg;
			}
			kring->nm_notify = na->nm_notify;
			kring->rhead = kring->rcur = kring->nr_hwcur = 0;
			/*
			 * IMPORTANT: Always keep one slot empty.
			 */
			kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
			snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
					nm_txrx2str(t), i);
			nm_prdis("ktx %s h %d c %d t %d",
				kring->name, kring->rhead, kring->rcur, kring->rtail);
			err = nm_os_selinfo_init(&kring->si, kring->name);
			if (err) {
				netmap_krings_delete(na);
				return err;
			}
			mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
			kring->na = na;	/* setting this field marks the mutex as initialized */
		}
		err = nm_os_selinfo_init(&na->si[t], na->name);
		if (err) {
			netmap_krings_delete(na);
			return err;
		}
	}

	return 0;
}
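
/*
 * A typical nm_krings_create callback for a simple adapter just
 * forwards here (sketch only; hardware adapters also initialize the
 * mbuf queues of the host rings):
 *
 *	static int
 *	DEVICE_krings_create(struct netmap_adapter *na)
 *	{
 *		return netmap_krings_create(na, 0);	// no tailroom
 *	}
 */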


/* undo the actions performed by netmap_krings_create */
/* call with NMG_LOCK held */
void
netmap_krings_delete(struct netmap_adapter *na)
{
	struct netmap_kring **kring = na->tx_rings;
	enum txrx t;

	if (na->tx_rings == NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already deleted");
		return;
	}

	for_rx_tx(t)
		nm_os_selinfo_uninit(&na->si[t]);

	/* we rely on the krings layout described above */
	for ( ; kring != na->tailroom; kring++) {
		if ((*kring)->na != NULL)
			mtx_destroy(&(*kring)->q_lock);
		nm_os_selinfo_uninit(&(*kring)->si);
	}
	nm_os_free(na->tx_rings);
	na->tx_rings = na->rx_rings = na->tailroom = NULL;
}


/*
 * Destructor for NIC ports. They also have an mbuf queue
 * on the rings connected to the host so we need to purge
 * them first.
 */
/* call with NMG_LOCK held */
void
netmap_hw_krings_delete(struct netmap_adapter *na)
{
	u_int lim = netmap_real_rings(na, NR_RX), i;

	for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
		struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
		nm_prdis("destroy sw mbq with len %d", mbq_len(q));
		mbq_purge(q);
		mbq_safe_fini(q);
	}
	netmap_krings_delete(na);
}

void
netmap_mem_restore(struct netmap_adapter *na)
{
	if (na->nm_mem_prev) {
		netmap_mem_put(na->nm_mem);
		na->nm_mem = na->nm_mem_prev;
		na->nm_mem_prev = NULL;
	}
}

static void
netmap_mem_drop(struct netmap_adapter *na)
{
	/* if the native allocator had been overridden on regif,
	 * restore it now and drop the temporary one
	 */
	if (netmap_mem_deref(na->nm_mem, na)) {
		netmap_mem_restore(na);
	}
}

static void
netmap_update_hostrings_mode(struct netmap_adapter *na)
{
	enum txrx t;
	struct netmap_kring *kring;
	int i;

	for_rx_tx(t) {
		for (i = nma_get_nrings(na, t);
		     i < netmap_real_rings(na, t); i++) {
			kring = NMR(na, t)[i];
			kring->nr_mode = kring->nr_pending_mode;
		}
	}
}

/*
 * Undo everything that was done in netmap_do_regif(). In particular,
 * call nm_register(na, 0) to stop netmap mode on the interface and
 * revert to normal operation.
 */
/* call with NMG_LOCK held */
static void netmap_unset_ringid(struct netmap_priv_d *);
static void netmap_krings_put(struct netmap_priv_d *);
void
netmap_do_unregif(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	NMG_LOCK_ASSERT();
	na->active_fds--;
	/* unset nr_pending_mode and possibly release exclusive mode */
	netmap_krings_put(priv);

#ifdef	WITH_MONITOR
	/* XXX check whether we have to do something with monitor
	 * when rings change nr_mode. */
	if (na->active_fds <= 0) {
		/* walk through all the rings and tell any monitor
		 * that the port is going to exit netmap mode
		 */
		netmap_monitor_stop(na);
	}
#endif

	netmap_update_hostrings_mode(na);

	if (na->active_fds <= 0 || nm_kring_pending(priv)) {
		netmap_set_all_rings(na, NM_KR_LOCKED);
		na->nm_register(na, 0);
		netmap_set_all_rings(na, 0);
	}

	/* delete rings and buffers that are no longer needed */
	netmap_mem_rings_delete(na);

	if (na->active_fds <= 0) {	/* last instance */
		/*
		 * (TO CHECK) We enter here
		 * when the last reference to this file descriptor goes
		 * away. This means we cannot have any pending poll()
		 * or interrupt routine operating on the structure.
		 * XXX The file may be closed in a thread while
		 * another thread is using it.
		 * Linux keeps the file opened until the last reference
		 * by any outstanding ioctl/poll or mmap is gone.
		 * FreeBSD does not track mmap()s (but we do) and
		 * wakes up any sleeping poll(). Need to check what
		 * happens if the close() occurs while a concurrent
		 * syscall is running.
		 */
		if (netmap_debug & NM_DEBUG_ON)
			nm_prinf("deleting last instance for %s", na->name);

		if (nm_netmap_on(na)) {
			nm_prerr("BUG: netmap on while going to delete the krings");
		}

		na->nm_krings_delete(na);

		/* restore the default number of host tx and rx rings */
		if (na->na_flags & NAF_HOST_RINGS) {
			na->num_host_tx_rings = 1;
			na->num_host_rx_rings = 1;
		} else {
			na->num_host_tx_rings = 0;
			na->num_host_rx_rings = 0;
		}
	}

	/* possibly decrement counter of tx_si/rx_si users */
	netmap_unset_ringid(priv);
	/* delete the nifp */
	netmap_mem_if_delete(na, priv->np_nifp);
	/* drop the allocator */
	netmap_mem_drop(na);
	/* mark the priv as unregistered */
	priv->np_na = NULL;
	priv->np_nifp = NULL;
}

struct netmap_priv_d*
netmap_priv_new(void)
{
	struct netmap_priv_d *priv;

	priv = nm_os_malloc(sizeof(struct netmap_priv_d));
	if (priv == NULL)
		return NULL;
	priv->np_refs = 1;
	nm_os_get_module();
	return priv;
}

/*
 * Destructor of the netmap_priv_d, called when the fd is closed.
 * Action: undo all the things done by NIOCREGIF.
 * On FreeBSD we need to track whether there are active mmap()s,
 * and we use np_active_mmaps for that. On linux, the field is always 0.
 * The priv itself is freed (and the module reference dropped) only
 * when the last reference (np_refs) goes away.
 */
/* call with NMG_LOCK held */
void
netmap_priv_delete(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	/* number of active references to this fd */
	if (--priv->np_refs > 0) {
		return;
	}
	nm_os_put_module();
	if (na) {
		netmap_do_unregif(priv);
	}
	netmap_unget_na(na, priv->np_ifp);
	bzero(priv, sizeof(*priv));	/* for safety */
	nm_os_free(priv);
}


/* call with NMG_LOCK *not* held */
void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;

	NMG_LOCK();
	netmap_priv_delete(priv);
	NMG_UNLOCK();
}


/*
 * Handlers for synchronization of the rings from/to the host stack.
 * These are associated to a network interface and are just another
 * ring pair managed by userspace.
 *
 * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
 * flags):
 *
 * - Before releasing buffers on hw RX rings, the application can mark
 *   them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
 *   will be forwarded to the host stack, similarly to what would happen if
 *   the application moved them to the host TX ring.
 *
 * - Before releasing buffers on the host RX ring, the application can
 *   mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
 *   they will be forwarded to the hw TX rings, saving the application
 *   from doing the same task in user-space.
 *
 * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
 * flag, or globally with the netmap_fwd sysctl.
 *
 * The transfer NIC --> host is relatively easy, just encapsulate
 * into mbufs and we are done. The host --> NIC side is slightly
 * harder because there might not be room in the tx ring so it
 * might take a while before releasing the buffer.
 */
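
/*
 * Userspace sketch of transparent forwarding (NIC --> host): before
 * releasing a slot on a hw RX ring, mark it NS_FORWARD; the next
 * rxsync will pass a copy of the packet to the host stack. The ring
 * must have been opened with NR_FORWARD set (or dev.netmap.fwd
 * enabled); packet_is_for_host() is a hypothetical filter.
 *
 *	struct netmap_ring *rxr = NETMAP_RXRING(nifp, 0);
 *	while (!nm_ring_empty(rxr)) {
 *		struct netmap_slot *slot = &rxr->slot[rxr->cur];
 *		if (packet_is_for_host(slot))
 *			slot->flags |= NS_FORWARD;
 *		rxr->cur = nm_ring_next(rxr, rxr->cur);
 *	}
 *	rxr->head = rxr->cur;
 *	ioctl(fd, NIOCRXSYNC, NULL);
 */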


/*
 * Pass a whole queue of mbufs to the host stack as coming from 'dst'.
 * We do not need to lock because the queue is private.
 * After this call the queue is empty.
 */
static void
netmap_send_up(struct ifnet *dst, struct mbq *q)
{
	struct mbuf *m;
	struct mbuf *head = NULL, *prev = NULL;
#ifdef __FreeBSD__
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
#endif /* __FreeBSD__ */
	/* Send packets up, outside the lock; head/prev machinery
	 * is only useful for Windows. */
	while ((m = mbq_dequeue(q)) != NULL) {
		if (netmap_debug & NM_DEBUG_HOST)
			nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
		prev = nm_os_send_up(dst, m, prev);
		if (head == NULL)
			head = prev;
	}
	if (head)
		nm_os_send_up(dst, NULL, head);
#ifdef __FreeBSD__
	NET_EPOCH_EXIT(et);
#endif /* __FreeBSD__ */
	mbq_fini(q);
}


/*
 * Scan the buffers from hwcur to ring->head, and put a copy of those
 * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
 * Drop remaining packets in the unlikely event
 * of an mbuf shortage.
 */
static void
netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int n;
	struct netmap_adapter *na = kring->na;

	for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
		struct mbuf *m;
		struct netmap_slot *slot = &kring->ring->slot[n];

		if ((slot->flags & NS_FORWARD) == 0 && !force)
			continue;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
			nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
			continue;
		}
		slot->flags &= ~NS_FORWARD; // XXX needed ?
		/* XXX TODO: adapt to the case of a multisegment packet */
		m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);

		if (m == NULL)
			break;
		mbq_enqueue(q, m);
	}
}

static inline int
_nm_may_forward(struct netmap_kring *kring)
{
	return	((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
		 kring->na->na_flags & NAF_HOST_RINGS &&
		 kring->tx == NR_RX);
}

static inline int
nm_may_forward_up(struct netmap_kring *kring)
{
	return	_nm_may_forward(kring) &&
		 kring->ring_id != kring->na->num_rx_rings;
}

static inline int
nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
{
	return	_nm_may_forward(kring) &&
		 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
		 kring->ring_id == kring->na->num_rx_rings;
}

/*
 * Send to the NIC rings packets marked NS_FORWARD between
 * kring->nr_hwcur and kring->rhead.
 * Called under kring->rx_queue.lock on the sw rx ring.
 *
 * It can only be called if the user opened all the TX hw rings,
 * see NAF_CAN_FORWARD_DOWN flag.
 * We can touch the TX netmap rings (slots, head and cur) since
 * we are in poll/ioctl system call context, and the application
 * is not supposed to touch the ring (using a different thread)
 * during the execution of the system call.
 */
static u_int
netmap_sw_to_nic(struct netmap_adapter *na)
{
	struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
	struct netmap_slot *rxslot = kring->ring->slot;
	u_int i, rxcur = kring->nr_hwcur;
	u_int const head = kring->rhead;
	u_int const src_lim = kring->nkr_num_slots - 1;
	u_int sent = 0;

	/* scan rings to find space, then fill as much as possible */
	for (i = 0; i < na->num_tx_rings; i++) {
		struct netmap_kring *kdst = na->tx_rings[i];
		struct netmap_ring *rdst = kdst->ring;
		u_int const dst_lim = kdst->nkr_num_slots - 1;

		/* XXX do we trust ring or kring->rcur,rtail ? */
		for (; rxcur != head && !nm_ring_empty(rdst);
		     rxcur = nm_next(rxcur, src_lim) ) {
			struct netmap_slot *src, *dst, tmp;
			u_int dst_head = rdst->head;

			src = &rxslot[rxcur];
			if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
				continue;

			sent++;

			dst = &rdst->slot[dst_head];

			tmp = *src;

			src->buf_idx = dst->buf_idx;
			src->flags = NS_BUF_CHANGED;

			dst->buf_idx = tmp.buf_idx;
			dst->len = tmp.len;
			dst->flags = NS_BUF_CHANGED;

			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
		}
		/* if (sent) XXX txsync ? it would be just an optimization */
	}
	return sent;
}


/*
 * netmap_txsync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently.
 */
static int
netmap_txsync_to_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct mbq q;

	/* Take packets from hwcur to head and pass them up.
	 * Force hwcur = head since netmap_grab_packets() stops at head
	 */
	mbq_init(&q);
	netmap_grab_packets(kring, &q, 1 /* force */);
	nm_prdis("have %d pkts in queue", mbq_len(&q));
	kring->nr_hwcur = head;
	kring->nr_hwtail = head + lim;
	if (kring->nr_hwtail > lim)
		kring->nr_hwtail -= lim + 1;

	netmap_send_up(na->ifp, &q);
	return 0;
}
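
/*
 * Worked example for the hwtail update above: with nkr_num_slots == 4
 * (lim == 3) and head == 1, nr_hwtail becomes 1 + 3 - 4 == 0, i.e. the
 * slot just before head: all slots are reported available except one,
 * respecting the "keep one slot empty" ring invariant.
 */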


/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in kring->rx_queue by netmap_transmit().
 * We protect access to the kring using kring->rx_queue.lock.
 *
 * This routine also moves to the nic hw rings any packet the user
 * has marked for transparent-mode forwarding, then sets the
 * NR_FORWARD flag in the kring to let the caller push them out.
 */
static int
netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int ret = 0;
	struct mbq *q = &kring->rx_queue, fq;

	mbq_init(&fq); /* fq holds packets to be freed */

	mbq_lock(q);

	/* First part: import newly received packets */
	n = mbq_len(q);
	if (n) { /* grab packets from the queue */
		struct mbuf *m;
		uint32_t stop_i;

		nm_i = kring->nr_hwtail;
		stop_i = nm_prev(kring->nr_hwcur, lim);
		while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
			int len = MBUF_LEN(m);
			struct netmap_slot *slot = &ring->slot[nm_i];

			m_copydata(m, 0, len, NMB(na, slot));
			nm_prdis("nm %d len %d", nm_i, len);
			if (netmap_debug & NM_DEBUG_HOST)
				nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));

			slot->len = len;
			slot->flags = 0;
			nm_i = nm_next(nm_i, lim);
			mbq_enqueue(&fq, m);
		}
		kring->nr_hwtail = nm_i;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) { /* something was released */
		if (nm_may_forward_down(kring, flags)) {
			ret = netmap_sw_to_nic(na);
			if (ret > 0) {
				kring->nr_kflags |= NR_FORWARD;
				ret = 0;
			}
		}
		kring->nr_hwcur = head;
	}

	mbq_unlock(q);

	mbq_purge(&fq);
	mbq_fini(&fq);

	return ret;
}


/* Get a netmap adapter for the port.
 *
 * If it is possible to satisfy the request, return 0
 * with *na containing the netmap adapter found.
 * Otherwise return an error code, with *na containing NULL.
 *
 * When the port is attached to a bridge, we always return
 * EBUSY.
 * Otherwise, if the port is already bound to a file descriptor,
 * then we unconditionally return the existing adapter into *na.
 * In all the other cases, we return (into *na) either native,
 * generic or NULL, according to the following table:
 *
 *					native_support
 * active_fds   dev.netmap.admode         YES     NO
 * -------------------------------------------------------
 *    >0              *                 NA(ifp) NA(ifp)
 *
 *     0        NETMAP_ADMODE_BEST      NATIVE  GENERIC
 *     0        NETMAP_ADMODE_NATIVE    NATIVE   NULL
 *     0        NETMAP_ADMODE_GENERIC   GENERIC GENERIC
 *
 */
static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
int
netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
{
	/* generic support */
	int i = netmap_admode;	/* Take a snapshot. */
	struct netmap_adapter *prev_na;
	int error = 0;

	*na = NULL; /* default */

	/* reset in case of invalid value */
	if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
		i = netmap_admode = NETMAP_ADMODE_BEST;

	if (NM_NA_VALID(ifp)) {
		prev_na = NA(ifp);
		/* If an adapter already exists, return it if
		 * there are active file descriptors or if
		 * netmap is not forced to use generic
		 * adapters.
		 */
		if (NETMAP_OWNED_BY_ANY(prev_na)
			|| i != NETMAP_ADMODE_GENERIC
			|| prev_na->na_flags & NAF_FORCE_NATIVE
#ifdef WITH_PIPES
			/* ugly, but we cannot allow an adapter switch
			 * if some pipe is referring to this one
			 */
			|| prev_na->na_next_pipe > 0
#endif
		) {
			*na = prev_na;
			goto assign_mem;
		}
	}

	/* If there isn't native support and netmap is not allowed
	 * to use generic adapters, we cannot satisfy the request.
	 */
	if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
		return EOPNOTSUPP;

	/* Otherwise, create a generic adapter and return it,
	 * saving the previously used netmap adapter, if any.
	 *
	 * Note that here 'prev_na', if not NULL, MUST be a
	 * native adapter, and CANNOT be a generic one. This is
	 * true because generic adapters are created on demand, and
	 * destroyed when not used anymore. Therefore, if the adapter
	 * currently attached to an interface 'ifp' is generic, it
	 * must be that
	 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
	 * Consequently, if NA(ifp) is generic, we will enter one of
	 * the branches above. This ensures that we never override
	 * a generic adapter with another generic adapter.
	 */
	error = generic_netmap_attach(ifp);
	if (error)
		return error;

	*na = NA(ifp);

assign_mem:
	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
		(*na)->nm_mem_prev = (*na)->nm_mem;
		(*na)->nm_mem = netmap_mem_get(nmd);
	}

	return 0;
}
1534 
1535 /*
1536  * MUST BE CALLED UNDER NMG_LOCK()
1537  *
1538  * Get a refcounted reference to a netmap adapter attached
1539  * to the interface specified by req.
1540  * This is always called in the execution of an ioctl().
1541  *
1542  * Return ENXIO if the interface specified by the request does
1543  * not exist, ENOTSUP if netmap is not supported by the interface,
1544  * EBUSY if the interface is already attached to a bridge,
1545  * EINVAL if parameters are invalid, ENOMEM if needed resources
1546  * could not be allocated.
1547  * If successful, hold a reference to the netmap adapter.
1548  *
1549  * If the interface specified by req is a system one, also keep
1550  * a reference to it and return a valid *ifp.
1551  */
1552 int
1553 netmap_get_na(struct nmreq_header *hdr,
1554 	      struct netmap_adapter **na, struct ifnet **ifp,
1555 	      struct netmap_mem_d *nmd, int create)
1556 {
1557 	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1558 	int error = 0;
1559 	struct netmap_adapter *ret = NULL;
1560 	int nmd_ref = 0;
1561 
1562 	*na = NULL;     /* default return value */
1563 	*ifp = NULL;
1564 
1565 	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1566 		return EINVAL;
1567 	}
1568 
1569 	if (req->nr_mode == NR_REG_PIPE_MASTER ||
1570 			req->nr_mode == NR_REG_PIPE_SLAVE) {
1571 		/* Do not accept deprecated pipe modes. */
1572 		nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
1573 		return EINVAL;
1574 	}
1575 
1576 	NMG_LOCK_ASSERT();
1577 
1578 	/* if the request contains a memid, try to find the
1579 	 * corresponding memory region
1580 	 */
1581 	if (nmd == NULL && req->nr_mem_id) {
1582 		nmd = netmap_mem_find(req->nr_mem_id);
1583 		if (nmd == NULL)
1584 			return EINVAL;
1585 		/* keep the reference */
1586 		nmd_ref = 1;
1587 	}
1588 
1589 	/* We cascade through all possible types of netmap adapter.
1590 	 * All netmap_get_*_na() functions return an error and an na,
1591 	 * with the following combinations:
1592 	 *
1593 	 * error    na
1594 	 *   0	   NULL		type doesn't match
1595 	 *  !0	   NULL		type matches, but na creation/lookup failed
1596 	 *   0	  !NULL		type matches and na created/found
1597 	 *  !0    !NULL		impossible
1598 	 */
1599 	error = netmap_get_null_na(hdr, na, nmd, create);
1600 	if (error || *na != NULL)
1601 		goto out;
1602 
1603 	/* try to see if this is a monitor port */
1604 	error = netmap_get_monitor_na(hdr, na, nmd, create);
1605 	if (error || *na != NULL)
1606 		goto out;
1607 
1608 	/* try to see if this is a pipe port */
1609 	error = netmap_get_pipe_na(hdr, na, nmd, create);
1610 	if (error || *na != NULL)
1611 		goto out;
1612 
1613 	/* try to see if this is a vale port */
1614 	error = netmap_get_vale_na(hdr, na, nmd, create);
1615 	if (error)
1616 		goto out;
1617 
1618 	if (*na != NULL) /* valid match in netmap_get_vale_na() */
1619 		goto out;
1620 
1621 	/*
1622 	 * This must be a hardware na, lookup the name in the system.
1623 	 * Note that by hardware we actually mean "it shows up in ifconfig".
1624 	 * This may still be a tap, a veth/epair, or even a
1625 	 * persistent VALE port.
1626 	 */
1627 	*ifp = ifunit_ref(hdr->nr_name);
1628 	if (*ifp == NULL) {
1629 		error = ENXIO;
1630 		goto out;
1631 	}
1632 
1633 	error = netmap_get_hw_na(*ifp, nmd, &ret);
1634 	if (error)
1635 		goto out;
1636 
1637 	*na = ret;
1638 	netmap_adapter_get(ret);
1639 
1640 	/*
1641 	 * if the adapter supports the host rings and it is not already open,
1642 	 * try to set the number of host rings as requested by the user
1643 	 */
1644 	if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
1645 		if (req->nr_host_tx_rings)
1646 			(*na)->num_host_tx_rings = req->nr_host_tx_rings;
1647 		if (req->nr_host_rx_rings)
1648 			(*na)->num_host_rx_rings = req->nr_host_rx_rings;
1649 	}
1650 	nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
1651 			(*na)->num_host_rx_rings);
1652 
1653 out:
1654 	if (error) {
1655 		if (ret)
1656 			netmap_adapter_put(ret);
1657 		if (*ifp) {
1658 			if_rele(*ifp);
1659 			*ifp = NULL;
1660 		}
1661 	}
1662 	if (nmd_ref)
1663 		netmap_mem_put(nmd);
1664 
1665 	return error;
1666 }
1667 
1668 /* undo netmap_get_na() */
1669 void
1670 netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
1671 {
1672 	if (ifp)
1673 		if_rele(ifp);
1674 	if (na)
1675 		netmap_adapter_put(na);
1676 }
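
/*
 * For illustration, a minimal sketch of how these two functions pair up
 * in a caller (this mirrors the NETMAP_REQ_REGISTER handler in
 * netmap_ioctl() below):
 *
 *	struct netmap_adapter *na = NULL;
 *	struct ifnet *ifp = NULL;
 *	int error;
 *
 *	NMG_LOCK();
 *	error = netmap_get_na(hdr, &na, &ifp, NULL, 1);
 *	if (error == 0) {
 *		... use the refcounted na ...
 *		netmap_unget_na(na, ifp);
 *	}
 *	NMG_UNLOCK();
 */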
1677 
1678 
1679 #define NM_FAIL_ON(t) do {						\
1680 	if (unlikely(t)) {						\
1681 		nm_prlim(5, "%s: fail '" #t "' "				\
1682 			"h %d c %d t %d "				\
1683 			"rh %d rc %d rt %d "				\
1684 			"hc %d ht %d",					\
1685 			kring->name,					\
1686 			head, cur, ring->tail,				\
1687 			kring->rhead, kring->rcur, kring->rtail,	\
1688 			kring->nr_hwcur, kring->nr_hwtail);		\
1689 		return kring->nkr_num_slots;				\
1690 	}								\
1691 } while (0)
1692 
1693 /*
1694  * validate parameters on entry for *_txsync()
1695  * Returns ring->head if ok, or a value >= kring->nkr_num_slots
1696  * in case of error.
1697  *
1698  * rhead, rcur and rtail=hwtail are stored from previous round.
1699  * hwcur is the next packet to send to the ring.
1700  *
1701  * We want
1702  *    hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
1703  *
1704  * hwcur, rhead, rtail and hwtail are reliable
1705  */
1706 u_int
1707 nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1708 {
1709 	u_int head = ring->head; /* read only once */
1710 	u_int cur = ring->cur; /* read only once */
1711 	u_int n = kring->nkr_num_slots;
1712 
1713 	nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
1714 		kring->name,
1715 		kring->nr_hwcur, kring->nr_hwtail,
1716 		ring->head, ring->cur, ring->tail);
1717 #if 1 /* kernel sanity checks; but we can trust the kring. */
1718 	NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
1719 	    kring->rtail >= n ||  kring->nr_hwtail >= n);
1720 #endif /* kernel sanity checks */
1721 	/*
1722 	 * user sanity checks. We only use head.
1723 	 * A, B, ... are possible positions for head:
1724 	 *
1725 	 *  0    A  rhead   B  rtail   C  n-1
1726 	 *  0    D  rtail   E  rhead   F  n-1
1727 	 *
1728 	 * B, D, F are valid; A, C, E are wrong.
1729 	 */
1730 	if (kring->rtail >= kring->rhead) {
1731 		/* want rhead <= head <= rtail */
1732 		NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
1733 		/* and also head <= cur <= rtail */
1734 		NM_FAIL_ON(cur < head || cur > kring->rtail);
1735 	} else { /* here rtail < rhead */
1736 		/* we need head outside rtail .. rhead */
1737 		NM_FAIL_ON(head > kring->rtail && head < kring->rhead);
1738 
1739 		/* two cases now: head <= rtail or head >= rhead  */
1740 		if (head <= kring->rtail) {
1741 			/* want head <= cur <= rtail */
1742 			NM_FAIL_ON(cur < head || cur > kring->rtail);
1743 		} else { /* head >= rhead */
1744 			/* cur must be outside rtail..head */
1745 			NM_FAIL_ON(cur > kring->rtail && cur < head);
1746 		}
1747 	}
1748 	if (ring->tail != kring->rtail) {
1749 		nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
1750 			ring->tail, kring->rtail);
1751 		ring->tail = kring->rtail;
1752 	}
1753 	kring->rhead = head;
1754 	kring->rcur = cur;
1755 	return head;
1756 }
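
/*
 * The interval checks above are all instances of one circular-range
 * test: on a ring, index x lies in the (possibly wrapping) interval
 * [a, b] iff (a <= b) ? (a <= x && x <= b) : (x >= a || x <= b).
 * A self-contained sketch (the helper name is illustrative, not part
 * of the netmap API):
 *
 *	static int
 *	in_circular_range(u_int x, u_int a, u_int b)
 *	{
 *		return (a <= b) ? (x >= a && x <= b) : (x >= a || x <= b);
 *	}
 *
 * With it, the "rhead <= head <= rtail" check covers both the wrapped
 * and the non-wrapped case as in_circular_range(head, rhead, rtail).
 */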
1757 
1758 
1759 /*
1760  * validate parameters on entry for *_rxsync()
1761  * Returns ring->head if ok, kring->nkr_num_slots on error.
1762  *
1763  * For a valid configuration,
1764  * hwcur <= head <= cur <= tail <= hwtail
1765  *
1766  * We only consider head and cur.
1767  * hwcur and hwtail are reliable.
1768  *
1769  */
1770 u_int
1771 nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1772 {
1773 	uint32_t const n = kring->nkr_num_slots;
1774 	uint32_t head, cur;
1775 
1776 	nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
1777 		kring->name,
1778 		kring->nr_hwcur, kring->nr_hwtail,
1779 		ring->head, ring->cur, ring->tail);
1780 	/*
1781 	 * Before storing the new values, we should check they do not
1782 	 * move backwards. However:
1783 	 * - head is not an issue because the previous value is hwcur;
1784 	 * - cur could in principle go back, however it does not matter
1785 	 *   because we are processing a brand new rxsync()
1786 	 */
1787 	cur = kring->rcur = ring->cur;	/* read only once */
1788 	head = kring->rhead = ring->head;	/* read only once */
1789 #if 1 /* kernel sanity checks */
1790 	NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
1791 #endif /* kernel sanity checks */
1792 	/* user sanity checks */
1793 	if (kring->nr_hwtail >= kring->nr_hwcur) {
1794 		/* want hwcur <= rhead <= hwtail */
1795 		NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
1796 		/* and also rhead <= rcur <= hwtail */
1797 		NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1798 	} else {
1799 		/* we need rhead outside hwtail..hwcur */
1800 		NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
1801 		/* two cases now: head <= hwtail or head >= hwcur  */
1802 		if (head <= kring->nr_hwtail) {
1803 			/* want head <= cur <= hwtail */
1804 			NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1805 		} else {
1806 			/* cur must be outside hwtail..head */
1807 			NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
1808 		}
1809 	}
1810 	if (ring->tail != kring->rtail) {
1811 		nm_prlim(5, "%s tail overwritten was %d need %d",
1812 			kring->name,
1813 			ring->tail, kring->rtail);
1814 		ring->tail = kring->rtail;
1815 	}
1816 	return head;
1817 }
1818 
1819 
1820 /*
1821  * Error routine called when txsync/rxsync detects an error.
1822  * Can't do much more than resetting head = cur = hwcur, tail = hwtail
1823  * Return 1 on reinit.
1824  *
1825  * This routine is only called by the upper half of the kernel.
1826  * It only reads hwcur (which is changed only by the upper half, too)
1827  * and hwtail (which may be changed by the lower half, but only on
1828  * a tx ring and only to increase it, so any error will be recovered
1829  * on the next call). For the above, we don't strictly need to call
1830  * it under lock.
1831  */
1832 int
1833 netmap_ring_reinit(struct netmap_kring *kring)
1834 {
1835 	struct netmap_ring *ring = kring->ring;
1836 	u_int i, lim = kring->nkr_num_slots - 1;
1837 	int errors = 0;
1838 
1839 	// XXX KASSERT nm_kr_tryget
1840 	nm_prlim(10, "called for %s", kring->name);
1841 	// XXX probably wrong to trust userspace
1842 	kring->rhead = ring->head;
1843 	kring->rcur  = ring->cur;
1844 	kring->rtail = ring->tail;
1845 
1846 	if (ring->cur > lim)
1847 		errors++;
1848 	if (ring->head > lim)
1849 		errors++;
1850 	if (ring->tail > lim)
1851 		errors++;
1852 	for (i = 0; i <= lim; i++) {
1853 		u_int idx = ring->slot[i].buf_idx;
1854 		u_int len = ring->slot[i].len;
1855 		if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
1856 			nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
1857 			ring->slot[i].buf_idx = 0;
1858 			ring->slot[i].len = 0;
1859 		} else if (len > NETMAP_BUF_SIZE(kring->na)) {
1860 			ring->slot[i].len = 0;
1861 			nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
1862 		}
1863 	}
1864 	if (errors) {
1865 		nm_prlim(10, "total %d errors", errors);
1866 		nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
1867 			kring->name,
1868 			ring->cur, kring->nr_hwcur,
1869 			ring->tail, kring->nr_hwtail);
1870 		ring->head = kring->rhead = kring->nr_hwcur;
1871 		ring->cur  = kring->rcur  = kring->nr_hwcur;
1872 		ring->tail = kring->rtail = kring->nr_hwtail;
1873 	}
1874 	return (errors ? 1 : 0);
1875 }
1876 
1877 /* interpret the ringid and flags fields of an nmreq, by translating them
1878  * into a pair of intervals of ring indices:
1879  *
1880  * [priv->np_txqfirst, priv->np_txqlast) and
1881  * [priv->np_rxqfirst, priv->np_rxqlast)
1882  *
1883  */
1884 int
1885 netmap_interp_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1886 {
1887 	struct netmap_adapter *na = priv->np_na;
1888 	struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1889 	int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
1890 	enum txrx t;
1891 	u_int j;
1892 	u_int nr_flags = reg->nr_flags, nr_mode = reg->nr_mode,
1893 	      nr_ringid = reg->nr_ringid;
1894 
1895 	for_rx_tx(t) {
1896 		if (nr_flags & excluded_direction[t]) {
1897 			priv->np_qfirst[t] = priv->np_qlast[t] = 0;
1898 			continue;
1899 		}
1900 		switch (nr_mode) {
1901 		case NR_REG_ALL_NIC:
1902 		case NR_REG_NULL:
1903 			priv->np_qfirst[t] = 0;
1904 			priv->np_qlast[t] = nma_get_nrings(na, t);
1905 			nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
1906 				priv->np_qfirst[t], priv->np_qlast[t]);
1907 			break;
1908 		case NR_REG_SW:
1909 		case NR_REG_NIC_SW:
1910 			if (!(na->na_flags & NAF_HOST_RINGS)) {
1911 				nm_prerr("host rings not supported");
1912 				return EINVAL;
1913 			}
1914 			priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
1915 				nma_get_nrings(na, t) : 0);
1916 			priv->np_qlast[t] = netmap_all_rings(na, t);
1917 			nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
1918 				nm_txrx2str(t),
1919 				priv->np_qfirst[t], priv->np_qlast[t]);
1920 			break;
1921 		case NR_REG_ONE_NIC:
1922 			if (nr_ringid >= na->num_tx_rings &&
1923 					nr_ringid >= na->num_rx_rings) {
1924 				nm_prerr("invalid ring id %d", nr_ringid);
1925 				return EINVAL;
1926 			}
1927 			/* if not enough rings, use the first one */
1928 			j = nr_ringid;
1929 			if (j >= nma_get_nrings(na, t))
1930 				j = 0;
1931 			priv->np_qfirst[t] = j;
1932 			priv->np_qlast[t] = j + 1;
1933 			nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
1934 				priv->np_qfirst[t], priv->np_qlast[t]);
1935 			break;
1936 		case NR_REG_ONE_SW:
1937 			if (!(na->na_flags & NAF_HOST_RINGS)) {
1938 				nm_prerr("host rings not supported");
1939 				return EINVAL;
1940 			}
1941 			if (nr_ringid >= na->num_host_tx_rings &&
1942 					nr_ringid >= na->num_host_rx_rings) {
1943 				nm_prerr("invalid ring id %d", nr_ringid);
1944 				return EINVAL;
1945 			}
1946 			/* if not enough rings, use the first one */
1947 			j = nr_ringid;
1948 			if (j >= nma_get_host_nrings(na, t))
1949 				j = 0;
1950 			priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
1951 			priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
1952 			nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
1953 				priv->np_qfirst[t], priv->np_qlast[t]);
1954 			break;
1955 		default:
1956 			nm_prerr("invalid regif type %d", nr_mode);
1957 			return EINVAL;
1958 		}
1959 	}
1960 	priv->np_flags = nr_flags;
1961 
1962 	/* Allow transparent forwarding mode in the host --> nic
1963 	 * direction only if all the TX hw rings have been opened. */
1964 	if (priv->np_qfirst[NR_TX] == 0 &&
1965 			priv->np_qlast[NR_TX] >= na->num_tx_rings) {
1966 		priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
1967 	}
1968 
1969 	if (netmap_verbose) {
1970 		nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
1971 			na->name,
1972 			priv->np_qfirst[NR_TX],
1973 			priv->np_qlast[NR_TX],
1974 			priv->np_qfirst[NR_RX],
1975 			priv->np_qlast[NR_RX],
1976 			nr_ringid);
1977 	}
1978 	return 0;
1979 }
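
/*
 * Worked example (numbers are illustrative): on an adapter with 4
 * hardware rings and 1 host ring per direction, the modes above select:
 *
 *	NR_REG_ALL_NIC       -> [0, 4)  hardware rings only
 *	NR_REG_SW            -> [4, 5)  host ring only
 *	NR_REG_NIC_SW        -> [0, 5)  hardware plus host rings
 *	NR_REG_ONE_NIC, id 2 -> [2, 3)  one hardware ring
 *	NR_REG_ONE_SW,  id 0 -> [4, 5)  one host ring
 */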
1980 
1981 
1982 /*
1983  * Set the ring ID. For devices with a single queue, a request
1984  * for all rings is the same as a single ring.
1985  */
1986 static int
1987 netmap_set_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1988 {
1989 	struct netmap_adapter *na = priv->np_na;
1990 	struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1991 	int error;
1992 	enum txrx t;
1993 
1994 	error = netmap_interp_ringid(priv, hdr);
1995 	if (error) {
1996 		return error;
1997 	}
1998 
1999 	priv->np_txpoll = (reg->nr_flags & NR_NO_TX_POLL) ? 0 : 1;
2000 
2001 	/* optimization: count the users registered for more than
2002 	 * one ring, which are the ones sleeping on the global queue.
2003 	 * The default netmap_notify() callback will then
2004 	 * avoid signaling the global queue if nobody is using it
2005 	 */
2006 	for_rx_tx(t) {
2007 		if (nm_si_user(priv, t))
2008 			na->si_users[t]++;
2009 	}
2010 	return 0;
2011 }
2012 
2013 static void
2014 netmap_unset_ringid(struct netmap_priv_d *priv)
2015 {
2016 	struct netmap_adapter *na = priv->np_na;
2017 	enum txrx t;
2018 
2019 	for_rx_tx(t) {
2020 		if (nm_si_user(priv, t))
2021 			na->si_users[t]--;
2022 		priv->np_qfirst[t] = priv->np_qlast[t] = 0;
2023 	}
2024 	priv->np_flags = 0;
2025 	priv->np_txpoll = 0;
2026 	priv->np_kloop_state = 0;
2027 }
2028 
2029 #define within_sel(p_, t_, i_)					  	  \
2030 	((i_) < (p_)->np_qlast[(t_)])
2031 #define nonempty_sel(p_, t_)						  \
2032 	(within_sel((p_), (t_), (p_)->np_qfirst[(t_)]))
2033 #define foreach_selected_ring(p_, t_, i_, kring_)			  \
2034 	for ((t_) = nonempty_sel((p_), NR_RX) ? NR_RX : NR_TX,		  \
2035 	     (i_) = (p_)->np_qfirst[(t_)];				  \
2036 	     (t_ == NR_RX ||						  \
2037 	      ((t_) == NR_TX && within_sel((p_), (t_), (i_)))) &&	  \
2038 	      ((kring_) = NMR((p_)->np_na, (t_))[(i_)]); 		  \
2039 	     (i_) = within_sel((p_), (t_), (i_) + 1) ? (i_) + 1 :         \
2040 		(++(t_) < NR_TXRX ? (p_)->np_qfirst[(t_)] : (i_)))
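
/*
 * For illustration, the macro is used like a for statement; t, i and
 * kring must be declared by the caller, as in netmap_krings_get()
 * below:
 *
 *	enum txrx t;
 *	u_int i;
 *	struct netmap_kring *kring;
 *
 *	foreach_selected_ring(priv, t, i, kring) {
 *		... visit each selected kring, in both directions ...
 *	}
 */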
2041 
2042 
2043 /* Set the nr_pending_mode for the requested rings.
2044  * If requested, also try to get exclusive access to the rings, provided
2045  * the rings we want to bind are not exclusively owned by a previous bind.
2046  */
2047 static int
2048 netmap_krings_get(struct netmap_priv_d *priv)
2049 {
2050 	struct netmap_adapter *na = priv->np_na;
2051 	u_int i;
2052 	struct netmap_kring *kring;
2053 	int excl = (priv->np_flags & NR_EXCLUSIVE);
2054 	enum txrx t;
2055 
2056 	if (netmap_debug & NM_DEBUG_ON)
2057 		nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
2058 			na->name,
2059 			priv->np_qfirst[NR_TX],
2060 			priv->np_qlast[NR_TX],
2061 			priv->np_qfirst[NR_RX],
2062 			priv->np_qlast[NR_RX]);
2063 
2064 	/* first round: check that none of the requested rings
2065 	 * is already exclusively owned, and that we are not
2066 	 * asking for exclusive ownership of rings already in use
2067 	 */
2068 	foreach_selected_ring(priv, t, i, kring) {
2069 		if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2070 		    (kring->users && excl))
2071 		{
2072 			nm_prdis("ring %s busy", kring->name);
2073 			return EBUSY;
2074 		}
2075 	}
2076 
2077 	/* second round: increment usage count (possibly marking them
2078 	 * as exclusive) and set the nr_pending_mode
2079 	 */
2080 	foreach_selected_ring(priv, t, i, kring) {
2081 		kring->users++;
2082 		if (excl)
2083 			kring->nr_kflags |= NKR_EXCLUSIVE;
2084 		kring->nr_pending_mode = NKR_NETMAP_ON;
2085 	}
2086 
2087 	return 0;
2088 
2089 }
2090 
2091 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
2092  * if it was requested at regif time, and by unsetting nr_pending_mode
2093  * if we are the last users of the involved rings. */
2094 static void
2095 netmap_krings_put(struct netmap_priv_d *priv)
2096 {
2097 	u_int i;
2098 	struct netmap_kring *kring;
2099 	int excl = (priv->np_flags & NR_EXCLUSIVE);
2100 	enum txrx t;
2101 
2102 	nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
2103 			priv->np_na->name,
2104 			priv->np_qfirst[NR_TX],
2105 			priv->np_qlast[NR_TX],
2106 			priv->np_qfirst[NR_RX],
2107 			priv->np_qlast[NR_RX]);
2108 
2109 	foreach_selected_ring(priv, t, i, kring) {
2110 		if (excl)
2111 			kring->nr_kflags &= ~NKR_EXCLUSIVE;
2112 		kring->users--;
2113 		if (kring->users == 0)
2114 			kring->nr_pending_mode = NKR_NETMAP_OFF;
2115 	}
2116 }
2117 
2118 static int
2119 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2120 {
2121 	return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2122 }
2123 
2124 /* Validate the CSB entries for both directions (atok and ktoa).
2125  * To be called under NMG_LOCK(). */
2126 static int
2127 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2128 {
2129 	struct nm_csb_atok *csb_atok_base =
2130 		(struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2131 	struct nm_csb_ktoa *csb_ktoa_base =
2132 		(struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2133 	enum txrx t;
2134 	int num_rings[NR_TXRX], tot_rings;
2135 	size_t entry_size[2];
2136 	void *csb_start[2];
2137 	int i;
2138 
2139 	if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2140 		nm_prerr("Cannot update CSB while kloop is running");
2141 		return EBUSY;
2142 	}
2143 
2144 	tot_rings = 0;
2145 	for_rx_tx(t) {
2146 		num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2147 		tot_rings += num_rings[t];
2148 	}
2149 	if (tot_rings <= 0)
2150 		return 0;
2151 
2152 	if (!(priv->np_flags & NR_EXCLUSIVE)) {
2153 		nm_prerr("CSB mode requires NR_EXCLUSIVE");
2154 		return EINVAL;
2155 	}
2156 
2157 	entry_size[0] = sizeof(*csb_atok_base);
2158 	entry_size[1] = sizeof(*csb_ktoa_base);
2159 	csb_start[0] = (void *)csb_atok_base;
2160 	csb_start[1] = (void *)csb_ktoa_base;
2161 
2162 	for (i = 0; i < 2; i++) {
2163 		/* On Linux we could use access_ok() to simplify
2164 		 * the validation. However, this approach has the
2165 		 * advantage that it also works on
2166 		 * FreeBSD. */
2167 		size_t csb_size = tot_rings * entry_size[i];
2168 		void *tmp;
2169 		int err;
2170 
2171 		if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2172 			nm_prerr("Unaligned CSB address");
2173 			return EINVAL;
2174 		}
2175 
2176 		tmp = nm_os_malloc(csb_size);
2177 		if (!tmp)
2178 			return ENOMEM;
2179 		if (i == 0) {
2180 			/* Application --> kernel direction. */
2181 			err = copyin(csb_start[i], tmp, csb_size);
2182 		} else {
2183 			/* Kernel --> application direction. */
2184 			memset(tmp, 0, csb_size);
2185 			err = copyout(tmp, csb_start[i], csb_size);
2186 		}
2187 		nm_os_free(tmp);
2188 		if (err) {
2189 			nm_prerr("Invalid CSB address");
2190 			return err;
2191 		}
2192 	}
2193 
2194 	priv->np_csb_atok_base = csb_atok_base;
2195 	priv->np_csb_ktoa_base = csb_ktoa_base;
2196 
2197 	/* Initialize the CSB. */
2198 	for_rx_tx(t) {
2199 		for (i = 0; i < num_rings[t]; i++) {
2200 			struct netmap_kring *kring =
2201 				NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2202 			struct nm_csb_atok *csb_atok = csb_atok_base + i;
2203 			struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2204 
2205 			if (t == NR_RX) {
2206 				csb_atok += num_rings[NR_TX];
2207 				csb_ktoa += num_rings[NR_TX];
2208 			}
2209 
2210 			CSB_WRITE(csb_atok, head, kring->rhead);
2211 			CSB_WRITE(csb_atok, cur, kring->rcur);
2212 			CSB_WRITE(csb_atok, appl_need_kick, 1);
2213 			CSB_WRITE(csb_atok, sync_flags, 1);
2214 			CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2215 			CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2216 			CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2217 
2218 			nm_prinf("csb_init for kring %s: head %u, cur %u, "
2219 				"hwcur %u, hwtail %u", kring->name,
2220 				kring->rhead, kring->rcur, kring->nr_hwcur,
2221 				kring->nr_hwtail);
2222 		}
2223 	}
2224 
2225 	return 0;
2226 }
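
/*
 * CSB layout sketch (illustrative): the arrays validated above hold one
 * entry per selected ring, TX entries first and then RX entries, which
 * is why the initialization loop adds num_rings[NR_TX] for t == NR_RX.
 * E.g. with 2 TX and 2 RX rings:
 *
 *	csb_atok[0..1], csb_ktoa[0..1] -> TX rings
 *	csb_atok[2..3], csb_ktoa[2..3] -> RX rings
 */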
2227 
2228 /* Ensure that the netmap adapter can support the given MTU.
2229  * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
2230  */
2231 int
2232 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2233 	unsigned nbs = NETMAP_BUF_SIZE(na);
2234 
2235 	if (mtu <= na->rx_buf_maxsize) {
2236 		/* The MTU fits a single NIC slot. We only
2237 		 * need to check that netmap buffers are
2238 		 * large enough to hold an MTU. NS_MOREFRAG
2239 		 * cannot be used in this case. */
2240 		if (nbs < mtu) {
2241 			nm_prerr("error: netmap buf size (%u) "
2242 				 "< device MTU (%u)", nbs, mtu);
2243 			return EINVAL;
2244 		}
2245 	} else {
2246 		/* More NIC slots may be needed to receive
2247 		 * or transmit a single packet. Check that
2248 		 * the adapter supports NS_MOREFRAG and that
2249 		 * netmap buffers are large enough to hold
2250 		 * the maximum per-slot size. */
2251 		if (!(na->na_flags & NAF_MOREFRAG)) {
2252 			nm_prerr("error: large MTU (%d) needed "
2253 				 "but %s does not support "
2254 				 "NS_MOREFRAG", mtu,
2255 				 na->ifp->if_xname);
2256 			return EINVAL;
2257 		} else if (nbs < na->rx_buf_maxsize) {
2258 			nm_prerr("error: using NS_MOREFRAG on "
2259 				 "%s requires netmap buf size "
2260 				 ">= %u", na->ifp->if_xname,
2261 				 na->rx_buf_maxsize);
2262 			return EINVAL;
2263 		} else {
2264 			nm_prinf("info: netmap application on "
2265 				 "%s needs to support "
2266 				 "NS_MOREFRAG "
2267 				 "(MTU=%u,netmap_buf_size=%u)",
2268 				 na->ifp->if_xname, mtu, nbs);
2269 		}
2270 	}
2271 	return 0;
2272 }
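
/*
 * Numeric example (values are illustrative): with 2048-byte netmap
 * buffers and rx_buf_maxsize = 2048, an MTU of 1500 fits a single slot
 * and takes the first branch. An MTU of 9000 takes the second branch:
 * it requires the adapter to support NS_MOREFRAG (NAF_MOREFRAG set)
 * and netmap buffers of at least rx_buf_maxsize bytes, so that a
 * packet can be split across multiple slots.
 */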
2273 
2274 /* Handle the offset option, if present in the hdr.
2275  * Returns 0 on success, or an error.
2276  */
2277 static int
2278 netmap_offsets_init(struct netmap_priv_d *priv, struct nmreq_header *hdr)
2279 {
2280 	struct nmreq_opt_offsets *opt;
2281 	struct netmap_adapter *na = priv->np_na;
2282 	struct netmap_kring *kring;
2283 	uint64_t mask = 0, bits = 0, maxbits = sizeof(uint64_t) * 8,
2284 		 max_offset = 0, initial_offset = 0, min_gap = 0;
2285 	u_int i;
2286 	enum txrx t;
2287 	int error = 0;
2288 
2289 	opt = (struct nmreq_opt_offsets *)
2290 		nmreq_getoption(hdr, NETMAP_REQ_OPT_OFFSETS);
2291 	if (opt == NULL)
2292 		return 0;
2293 
2294 	if (!(na->na_flags & NAF_OFFSETS)) {
2295 		if (netmap_verbose)
2296 			nm_prerr("%s does not support offsets",
2297 				na->name);
2298 		error = EOPNOTSUPP;
2299 		goto out;
2300 	}
2301 
2302 	/* check sanity of the opt values */
2303 	max_offset = opt->nro_max_offset;
2304 	min_gap = opt->nro_min_gap;
2305 	initial_offset = opt->nro_initial_offset;
2306 	bits = opt->nro_offset_bits;
2307 
2308 	if (bits > maxbits) {
2309 		if (netmap_verbose)
2310 			nm_prerr("bits: %llu too large (max %llu)",
2311 				(unsigned long long)bits,
2312 				(unsigned long long)maxbits);
2313 		error = EINVAL;
2314 		goto out;
2315 	}
2316 	/* we take bits == 0 as a request to use the entire field */
2317 	if (bits == 0 || bits == maxbits) {
2318 		/* shifting a type by sizeof(type) is undefined */
2319 		bits = maxbits;
2320 		mask = 0xffffffffffffffff;
2321 	} else {
2322 		mask = (1ULL << bits) - 1;
2323 	}
2324 	if (max_offset > NETMAP_BUF_SIZE(na)) {
2325 		if (netmap_verbose)
2326 			nm_prerr("max offset %llu > buf size %u",
2327 				(unsigned long long)max_offset, NETMAP_BUF_SIZE(na));
2328 		error = EINVAL;
2329 		goto out;
2330 	}
2331 	if ((max_offset & mask) != max_offset) {
2332 		if (netmap_verbose)
2333 			nm_prerr("max offset %llu to large for %llu bits",
2334 				(unsigned long long)max_offset,
2335 				(unsigned long long)bits);
2336 		error = EINVAL;
2337 		goto out;
2338 	}
2339 	if (initial_offset > max_offset) {
2340 		if (netmap_verbose)
2341 			nm_prerr("initial offset %llu > max offset %llu",
2342 				(unsigned long long)initial_offset,
2343 				(unsigned long long)max_offset);
2344 		error = EINVAL;
2345 		goto out;
2346 	}
2347 
2348 	/* initialize the kring and ring fields. */
2349 	foreach_selected_ring(priv, t, i, kring) {
2350 		/* kring has already been set by foreach_selected_ring() */
2351 		struct netmap_ring *ring = kring->ring;
2352 		u_int j;
2353 
2354 		/* if the ring is already in use, we check that the
2355 		 * new request is compatible with the existing one
2356 		 */
2357 		if (kring->offset_mask) {
2358 			if ((kring->offset_mask & mask) != mask ||
2359 			     kring->offset_max < max_offset) {
2360 				if (netmap_verbose)
2361 					nm_prinf("%s: cannot increase "
2362 						 "offset mask and/or max "
2363 						 "(current: mask=%llx, max=%llu)",
2364 							kring->name,
2365 							(unsigned long long)kring->offset_mask,
2366 							(unsigned long long)kring->offset_max);
2367 				error = EBUSY;
2368 				goto out;
2369 			}
2370 			mask = kring->offset_mask;
2371 			max_offset = kring->offset_max;
2372 		} else {
2373 			kring->offset_mask = mask;
2374 			*(uint64_t *)(uintptr_t)&ring->offset_mask = mask;
2375 			kring->offset_max = max_offset;
2376 			kring->offset_gap = min_gap;
2377 		}
2378 
2379 		/* if there is an initial offset, put it into
2380 		 * all the slots
2381 		 *
2382 		 * Note: we cannot change the offsets if the
2383 		 * ring is already in use.
2384 		 */
2385 		if (!initial_offset || kring->users > 1)
2386 			continue;
2387 
2388 		for (j = 0; j < kring->nkr_num_slots; j++) {
2389 			struct netmap_slot *slot = ring->slot + j;
2390 
2391 			nm_write_offset(kring, slot, initial_offset);
2392 		}
2393 	}
2394 
2395 out:
2396 	opt->nro_opt.nro_status = error;
2397 	if (!error) {
2398 		opt->nro_max_offset = max_offset;
2399 	}
2400 	return error;
2401 
2402 }
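
/*
 * Mask arithmetic example (illustrative): a request with
 * nro_offset_bits = 12 yields mask = (1ULL << 12) - 1 = 0xfff, so all
 * slot offsets are confined to [0, 4096); nro_max_offset must then fit
 * in those 12 bits and stay within NETMAP_BUF_SIZE(na).
 */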
2403 
2404 static int
2405 netmap_compute_buf_len(struct netmap_priv_d *priv)
2406 {
2407 	enum txrx t;
2408 	u_int i;
2409 	struct netmap_kring *kring;
2410 	int error = 0;
2411 	unsigned mtu = 0;
2412 	struct netmap_adapter *na = priv->np_na;
2413 	uint64_t target, maxframe;
2414 
2415 	if (na->ifp != NULL)
2416 		mtu = nm_os_ifnet_mtu(na->ifp);
2417 
2418 	foreach_selected_ring(priv, t, i, kring) {
2419 
2420 		if (kring->users > 1)
2421 			continue;
2422 
2423 		target = NETMAP_BUF_SIZE(kring->na) -
2424 			kring->offset_max;
2425 		if (!kring->offset_gap)
2426 			kring->offset_gap =
2427 				NETMAP_BUF_SIZE(kring->na);
2428 		if (kring->offset_gap < target)
2429 			target = kring->offset_gap;
2430 
2431 		if (mtu) {
2432 			maxframe = mtu + ETH_HLEN +
2433 				ETH_FCS_LEN + VLAN_HLEN;
2434 			if (maxframe < target) {
2435 				target = maxframe;
2436 			}
2437 		}
2438 
2439 		error = kring->nm_bufcfg(kring, target);
2440 		if (error)
2441 			goto out;
2442 
2443 		*(uint64_t *)(uintptr_t)&kring->ring->buf_align = kring->buf_align;
2444 
2445 		if (mtu && t == NR_RX && kring->hwbuf_len < mtu) {
2446 			if (!(na->na_flags & NAF_MOREFRAG)) {
2447 				nm_prerr("error: large MTU (%d) needed "
2448 					 "but %s does not support "
2449 					 "NS_MOREFRAG", mtu,
2450 					 na->name);
2451 				error = EINVAL;
2452 				goto out;
2453 			} else {
2454 				nm_prinf("info: netmap application on "
2455 					 "%s needs to support "
2456 					 "NS_MOREFRAG "
2457 					 "(MTU=%u,buf_size=%llu)",
2458 					 kring->name, mtu,
2459 					 (unsigned long long)kring->hwbuf_len);
2460 			}
2461 		}
2462 	}
2463 out:
2464 	return error;
2465 }
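
/*
 * Worked example (illustrative numbers): with NETMAP_BUF_SIZE = 2048,
 * offset_max = 512 and no offset_gap, the initial target is
 * 2048 - 512 = 1536 bytes. With an MTU of 1500 the frame bound is
 * 1500 + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN = 1522 bytes, which is
 * smaller, so nm_bufcfg() is asked for 1522 usable bytes per buffer.
 */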
2466 
2467 /*
2468  * Possibly move the interface to netmap mode.
2469  * On success return 0 and store the new netmap_if in priv->np_nifp;
2470  * on failure return an error code. Must be called with NMG_LOCK held.
2471  *
2472  * The following na callbacks are called in the process:
2473  *
2474  * na->nm_config()			[by netmap_update_config]
2475  * (get current number and size of rings)
2476  *
2477  *  	We have a generic one for linux (netmap_linux_config).
2478  *  	The bwrap has to override this, since it has to forward
2479  *  	the request to the wrapped adapter (netmap_bwrap_config).
2480  *
2481  *
2482  * na->nm_krings_create()
2483  * (create and init the krings array)
2484  *
2485  * 	One of the following:
2486  *
2487  *	* netmap_hw_krings_create, 			(hw ports)
2488  *		creates the standard layout for the krings
2489  * 		and adds the mbq (used for the host rings).
2490  *
2491  * 	* netmap_vp_krings_create			(VALE ports)
2492  * 		add leases and scratchpads
2493  *
2494  * 	* netmap_pipe_krings_create			(pipes)
2495  * 		create the krings and rings of both ends and
2496  * 		cross-link them
2497  *
2498  *      * netmap_monitor_krings_create 			(monitors)
2499  *      	avoid allocating the mbq
2500  *
2501  *      * netmap_bwrap_krings_create			(bwraps)
2502  *      	create both the bwrap krings array,
2503  *      	the krings array of the wrapped adapter, and
2504  *      	(if needed) the fake array for the host adapter
2505  *
2506  * na->nm_register(, 1)
2507  * (put the adapter in netmap mode)
2508  *
2509  * 	This may be one of the following:
2510  *
2511  * 	* netmap_hw_reg				        (hw ports)
2512  * 		checks that the ifp is still there, then calls
2513  * 		the hardware specific callback;
2514  *
2515  * 	* netmap_vp_reg					(VALE ports)
2516  *		If the port is connected to a bridge,
2517  *		set the NAF_NETMAP_ON flag under the
2518  *		bridge write lock.
2519  *
2520  *	* netmap_pipe_reg				(pipes)
2521  *		inform the other pipe end that it is no
2522  *		longer responsible for the lifetime of this
2523  *		pipe end
2524  *
2525  *	* netmap_monitor_reg				(monitors)
2526  *		intercept the sync callbacks of the monitored
2527  *		rings
2528  *
2529  *	* netmap_bwrap_reg				(bwraps)
2530  *		cross-link the bwrap and hwna rings,
2531  *		forward the request to the hwna, override
2532  *		the hwna notify callback (to get the frames
2533  *		coming from outside go through the bridge).
2534  *
2535  *
2536  */
2537 int
2538 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2539 	struct nmreq_header *hdr)
2540 {
2541 	struct netmap_if *nifp = NULL;
2542 	int error;
2543 
2544 	NMG_LOCK_ASSERT();
2545 	priv->np_na = na;     /* store the reference */
2546 	error = netmap_mem_finalize(na->nm_mem, na);
2547 	if (error)
2548 		goto err;
2549 
2550 	if (na->active_fds == 0) {
2551 
2552 		/* cache the allocator info in the na */
2553 		error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2554 		if (error)
2555 			goto err_drop_mem;
2556 		nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2557 					    na->na_lut.objsize);
2558 
2559 		/* ring configuration may have changed, fetch from the card */
2560 		netmap_update_config(na);
2561 	}
2562 
2563 	/* compute the range of tx and rx rings to monitor */
2564 	error = netmap_set_ringid(priv, hdr);
2565 	if (error)
2566 		goto err_put_lut;
2567 
2568 	if (na->active_fds == 0) {
2569 		/*
2570 		 * If this is the first registration of the adapter,
2571 		 * perform sanity checks and create the in-kernel view
2572 		 * of the netmap rings (the netmap krings).
2573 		 */
2574 		if (na->ifp && nm_priv_rx_enabled(priv)) {
2575 			/* This netmap adapter is attached to an ifnet. */
2576 			unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2577 
2578 			nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2579 				na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2580 
2581 			if (na->rx_buf_maxsize == 0) {
2582 				nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2583 				error = EIO;
2584 				goto err_drop_mem;
2585 			}
2586 
2587 			error = netmap_buf_size_validate(na, mtu);
2588 			if (error)
2589 				goto err_drop_mem;
2590 		}
2591 
2592 		/*
2593 		 * Depending on the adapter, this may also create
2594 		 * the netmap rings themselves
2595 		 */
2596 		error = na->nm_krings_create(na);
2597 		if (error)
2598 			goto err_put_lut;
2599 
2600 	}
2601 
2602 	/* now the krings must exist and we can check whether some
2603 	 * previous bind has exclusive ownership on them, and set
2604 	 * nr_pending_mode
2605 	 */
2606 	error = netmap_krings_get(priv);
2607 	if (error)
2608 		goto err_del_krings;
2609 
2610 	/* create all needed missing netmap rings */
2611 	error = netmap_mem_rings_create(na);
2612 	if (error)
2613 		goto err_rel_excl;
2614 
2615 	/* initialize offsets if requested */
2616 	error = netmap_offsets_init(priv, hdr);
2617 	if (error)
2618 		goto err_rel_excl;
2619 
2620 	/* compute and validate the buf lengths */
2621 	error = netmap_compute_buf_len(priv);
2622 	if (error)
2623 		goto err_rel_excl;
2624 
2625 	/* in all cases, create a new netmap if */
2626 	nifp = netmap_mem_if_new(na, priv);
2627 	if (nifp == NULL) {
2628 		error = ENOMEM;
2629 		goto err_rel_excl;
2630 	}
2631 
2632 	/* make sure we don't call na->nm_register() when only
2633 	 * host rings are changing mode
2634 	 */
2635 	netmap_update_hostrings_mode(na);
2636 
2637 	if (nm_kring_pending(priv)) {
2638 		/* Some kring is switching mode, tell the adapter to
2639 		 * react on this. */
2640 		netmap_set_all_rings(na, NM_KR_LOCKED);
2641 		error = na->nm_register(na, 1);
2642 		netmap_set_all_rings(na, 0);
2643 		if (error)
2644 			goto err_del_if;
2645 	}
2646 
2647 	/* Commit the reference. */
2648 	na->active_fds++;
2649 
2650 	/*
2651 	 * advertise that the interface is ready by setting np_nifp.
2652 	 * The barrier is needed because readers (poll, *SYNC and mmap)
2653 	 * check for priv->np_nifp != NULL without locking
2654 	 */
2655 	mb(); /* make sure previous writes are visible to all CPUs */
2656 	priv->np_nifp = nifp;
2657 
2658 	return 0;
2659 
2660 err_del_if:
2661 	netmap_mem_if_delete(na, nifp);
2662 err_rel_excl:
2663 	netmap_krings_put(priv);
2664 	netmap_update_hostrings_mode(na);
2665 	netmap_mem_rings_delete(na);
2666 err_del_krings:
2667 	if (na->active_fds == 0)
2668 		na->nm_krings_delete(na);
2669 err_put_lut:
2670 	if (na->active_fds == 0)
2671 		memset(&na->na_lut, 0, sizeof(na->na_lut));
2672 err_drop_mem:
2673 	netmap_mem_drop(na);
2674 err:
2675 	priv->np_na = NULL;
2676 	return error;
2677 }
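
/*
 * Note on the unwind path above: the error labels undo the setup steps
 * in reverse order (delete the netmap_if, release the krings, destroy
 * them, drop the memory allocator), and the krings/LUT teardown only
 * runs when active_fds == 0, i.e. when this was the first registration
 * of the adapter.
 */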
2678 
2679 
2680 /*
2681  * update kring and ring at the end of rxsync/txsync.
2682  */
2683 static inline void
2684 nm_sync_finalize(struct netmap_kring *kring)
2685 {
2686 	/*
2687 	 * Update ring tail to what the kernel knows
2688 	 * After txsync: head/rhead/hwcur might be behind cur/rcur
2689 	 * if no carrier.
2690 	 */
2691 	kring->ring->tail = kring->rtail = kring->nr_hwtail;
2692 
2693 	nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2694 		kring->name, kring->nr_hwcur, kring->nr_hwtail,
2695 		kring->rhead, kring->rcur, kring->rtail);
2696 }
2697 
2698 /* set ring timestamp */
2699 static inline void
2700 ring_timestamp_set(struct netmap_ring *ring)
2701 {
2702 	if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2703 		microtime(&ring->ts);
2704 	}
2705 }
2706 
2707 static int nmreq_copyin(struct nmreq_header *, int);
2708 static int nmreq_copyout(struct nmreq_header *, int);
2709 static int nmreq_checkoptions(struct nmreq_header *);
2710 
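/*
 * For illustration, the userspace side of a NIOCCTRL register request,
 * followed by a TX sync (a minimal sketch; error handling, mmap() and
 * ring manipulation omitted; "em0" and fd are placeholders for a real
 * interface name and an open /dev/netmap descriptor):
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_register req;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&req, 0, sizeof(req));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	hdr.nr_body = (uintptr_t)&req;
 *	req.nr_mode = NR_REG_ALL_NIC;
 *	ioctl(fd, NIOCCTRL, &hdr);
 *	ioctl(fd, NIOCTXSYNC, NULL);
 */
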
2711 /*
2712  * ioctl(2) support for the "netmap" device.
2713  *
2714  * Following a list of accepted commands:
2715  * - NIOCCTRL		device control API
2716  * - NIOCTXSYNC		sync TX rings
2717  * - NIOCRXSYNC		sync RX rings
2718  * - SIOCGIFADDR	just for convenience
2719  * - NIOCGINFO		deprecated (legacy API)
2720  * - NIOCREGIF		deprecated (legacy API)
2721  *
2722  * Return 0 on success, errno otherwise.
2723  */
2724 int
2725 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2726 		struct thread *td, int nr_body_is_user)
2727 {
2728 	struct mbq q;	/* packets from RX hw queues to host stack */
2729 	struct netmap_adapter *na = NULL;
2730 	struct netmap_mem_d *nmd = NULL;
2731 	struct ifnet *ifp = NULL;
2732 	int error = 0;
2733 	u_int i, qfirst, qlast;
2734 	struct netmap_kring **krings;
2735 	int sync_flags;
2736 	enum txrx t;
2737 
2738 	switch (cmd) {
2739 	case NIOCCTRL: {
2740 		struct nmreq_header *hdr = (struct nmreq_header *)data;
2741 
2742 		if (hdr->nr_version < NETMAP_MIN_API ||
2743 		    hdr->nr_version > NETMAP_MAX_API) {
2744 			nm_prerr("API mismatch: got %d need %d",
2745 				hdr->nr_version, NETMAP_API);
2746 			return EINVAL;
2747 		}
2748 
2749 		/* Make a kernel-space copy of the user-space nr_body.
2750 		 * For convenience, the nr_body pointer and the pointers
2751 		 * in the options list will be replaced with their
2752 		 * kernel-space counterparts. The original pointers are
2753 		 * saved internally and later restored by nmreq_copyout
2754 		 */
2755 		error = nmreq_copyin(hdr, nr_body_is_user);
2756 		if (error) {
2757 			return error;
2758 		}
2759 
2760 		/* Sanitize hdr->nr_name. */
2761 		hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2762 
2763 		switch (hdr->nr_reqtype) {
2764 		case NETMAP_REQ_REGISTER: {
2765 			struct nmreq_register *req =
2766 				(struct nmreq_register *)(uintptr_t)hdr->nr_body;
2767 			struct netmap_if *nifp;
2768 
2769 			/* Protect access to priv from concurrent requests. */
2770 			NMG_LOCK();
2771 			do {
2772 				struct nmreq_option *opt;
2773 				u_int memflags;
2774 
2775 				if (priv->np_nifp != NULL) {	/* thread already registered */
2776 					error = EBUSY;
2777 					break;
2778 				}
2779 
2780 #ifdef WITH_EXTMEM
2781 				opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2782 				if (opt != NULL) {
2783 					struct nmreq_opt_extmem *e =
2784 						(struct nmreq_opt_extmem *)opt;
2785 
2786 					nmd = netmap_mem_ext_create(e->nro_usrptr,
2787 							&e->nro_info, &error);
2788 					opt->nro_status = error;
2789 					if (nmd == NULL)
2790 						break;
2791 				}
2792 #endif /* WITH_EXTMEM */
2793 
2794 				if (nmd == NULL && req->nr_mem_id) {
2795 					/* find the allocator and get a reference */
2796 					nmd = netmap_mem_find(req->nr_mem_id);
2797 					if (nmd == NULL) {
2798 						if (netmap_verbose) {
2799 							nm_prerr("%s: failed to find mem_id %u",
2800 									hdr->nr_name, req->nr_mem_id);
2801 						}
2802 						error = EINVAL;
2803 						break;
2804 					}
2805 				}
2806 				/* find the interface and get a reference */
2807 				error = netmap_get_na(hdr, &na, &ifp, nmd,
2808 						      1 /* create */); /* keep reference */
2809 				if (error)
2810 					break;
2811 				if (NETMAP_OWNED_BY_KERN(na)) {
2812 					error = EBUSY;
2813 					break;
2814 				}
2815 
2816 				if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2817 					nm_prerr("virt_hdr_len=%d, but application does "
2818 						"not accept it", na->virt_hdr_len);
2819 					error = EIO;
2820 					break;
2821 				}
2822 
2823 				error = netmap_do_regif(priv, na, hdr);
2824 				if (error) {    /* reg. failed, release priv and ref */
2825 					break;
2826 				}
2827 
2828 				opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2829 				if (opt != NULL) {
2830 					struct nmreq_opt_csb *csbo =
2831 						(struct nmreq_opt_csb *)opt;
2832 					error = netmap_csb_validate(priv, csbo);
2833 					opt->nro_status = error;
2834 					if (error) {
2835 						netmap_do_unregif(priv);
2836 						break;
2837 					}
2838 				}
2839 
2840 				nifp = priv->np_nifp;
2841 
2842 				/* return the offset of the netmap_if object */
2843 				req->nr_rx_rings = na->num_rx_rings;
2844 				req->nr_tx_rings = na->num_tx_rings;
2845 				req->nr_rx_slots = na->num_rx_desc;
2846 				req->nr_tx_slots = na->num_tx_desc;
2847 				req->nr_host_tx_rings = na->num_host_tx_rings;
2848 				req->nr_host_rx_rings = na->num_host_rx_rings;
2849 				error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2850 					&req->nr_mem_id);
2851 				if (error) {
2852 					netmap_do_unregif(priv);
2853 					break;
2854 				}
2855 				if (memflags & NETMAP_MEM_PRIVATE) {
2856 					*(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2857 				}
2858 				for_rx_tx(t) {
2859 					priv->np_si[t] = nm_si_user(priv, t) ?
2860 						&na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2861 				}
2862 
2863 				if (req->nr_extra_bufs) {
2864 					if (netmap_verbose)
2865 						nm_prinf("requested %d extra buffers",
2866 							req->nr_extra_bufs);
2867 					req->nr_extra_bufs = netmap_extra_alloc(na,
2868 						&nifp->ni_bufs_head, req->nr_extra_bufs);
2869 					if (netmap_verbose)
2870 						nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2871 				} else {
2872 					nifp->ni_bufs_head = 0;
2873 				}
2874 				req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2875 
2876 				error = nmreq_checkoptions(hdr);
2877 				if (error) {
2878 					netmap_do_unregif(priv);
2879 					break;
2880 				}
2881 
2882 				/* store ifp reference so that priv destructor may release it */
2883 				priv->np_ifp = ifp;
2884 			} while (0);
2885 			if (error) {
2886 				netmap_unget_na(na, ifp);
2887 			}
2888 			/* release the reference from netmap_mem_find() or
2889 			 * netmap_mem_ext_create()
2890 			 */
2891 			if (nmd)
2892 				netmap_mem_put(nmd);
2893 			NMG_UNLOCK();
2894 			break;
2895 		}
2896 
2897 		case NETMAP_REQ_PORT_INFO_GET: {
2898 			struct nmreq_port_info_get *req =
2899 				(struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2900 			int nmd_ref = 0;
2901 
2902 			NMG_LOCK();
2903 			do {
2904 				u_int memflags;
2905 
2906 				if (hdr->nr_name[0] != '\0') {
2907 					/* Build a nmreq_register out of the nmreq_port_info_get,
2908 					 * so that we can call netmap_get_na(). */
2909 					struct nmreq_register regreq;
2910 					bzero(&regreq, sizeof(regreq));
2911 					regreq.nr_mode = NR_REG_ALL_NIC;
2912 					regreq.nr_tx_slots = req->nr_tx_slots;
2913 					regreq.nr_rx_slots = req->nr_rx_slots;
2914 					regreq.nr_tx_rings = req->nr_tx_rings;
2915 					regreq.nr_rx_rings = req->nr_rx_rings;
2916 					regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2917 					regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2918 					regreq.nr_mem_id = req->nr_mem_id;
2919 
2920 					/* get a refcount */
2921 					hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2922 					hdr->nr_body = (uintptr_t)&regreq;
2923 					error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2924 					hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2925 					hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2926 					if (error) {
2927 						na = NULL;
2928 						ifp = NULL;
2929 						break;
2930 					}
2931 					nmd = na->nm_mem; /* get memory allocator */
2932 				} else {
2933 					nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2934 					if (nmd == NULL) {
2935 						if (netmap_verbose)
2936 							nm_prerr("%s: failed to find mem_id %u",
2937 									hdr->nr_name,
2938 									req->nr_mem_id ? req->nr_mem_id : 1);
2939 						error = EINVAL;
2940 						break;
2941 					}
2942 					nmd_ref = 1;
2943 				}
2944 
2945 				error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2946 					&req->nr_mem_id);
2947 				if (error)
2948 					break;
2949 				if (na == NULL) /* only memory info */
2950 					break;
2951 				netmap_update_config(na);
2952 				req->nr_rx_rings = na->num_rx_rings;
2953 				req->nr_tx_rings = na->num_tx_rings;
2954 				req->nr_rx_slots = na->num_rx_desc;
2955 				req->nr_tx_slots = na->num_tx_desc;
2956 				req->nr_host_tx_rings = na->num_host_tx_rings;
2957 				req->nr_host_rx_rings = na->num_host_rx_rings;
2958 			} while (0);
2959 			netmap_unget_na(na, ifp);
2960 			if (nmd_ref)
2961 				netmap_mem_put(nmd);
2962 			NMG_UNLOCK();
2963 			break;
2964 		}
2965 #ifdef WITH_VALE
2966 		case NETMAP_REQ_VALE_ATTACH: {
2967 			error = netmap_bdg_attach(hdr, NULL /* userspace request */);
2968 			break;
2969 		}
2970 
2971 		case NETMAP_REQ_VALE_DETACH: {
2972 			error = netmap_bdg_detach(hdr, NULL /* userspace request */);
2973 			break;
2974 		}
2975 
2976 		case NETMAP_REQ_PORT_HDR_SET: {
2977 			struct nmreq_port_hdr *req =
2978 				(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2979 			/* Build a nmreq_register out of the nmreq_port_hdr,
2980 			 * so that we can call netmap_get_vale_na(). */
2981 			struct nmreq_register regreq;
2982 			bzero(&regreq, sizeof(regreq));
2983 			regreq.nr_mode = NR_REG_ALL_NIC;
2984 
2985 			/* For now we only support virtio-net headers, and only for
2986 			 * VALE ports, but this may change in future. Valid lengths
2987 			 * for the virtio-net header are 0 (no header), 10 and 12. */
2988 			if (req->nr_hdr_len != 0 &&
2989 				req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
2990 					req->nr_hdr_len != 12) {
2991 				if (netmap_verbose)
2992 					nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
2993 				error = EINVAL;
2994 				break;
2995 			}
2996 			NMG_LOCK();
2997 			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2998 			hdr->nr_body = (uintptr_t)&regreq;
2999 			error = netmap_get_vale_na(hdr, &na, NULL, 0);
3000 			hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
3001 			hdr->nr_body = (uintptr_t)req;
3002 			if (na && !error) {
3003 				struct netmap_vp_adapter *vpna =
3004 					(struct netmap_vp_adapter *)na;
3005 				na->virt_hdr_len = req->nr_hdr_len;
3006 				if (na->virt_hdr_len) {
3007 					vpna->mfs = NETMAP_BUF_SIZE(na);
3008 				}
3009 				if (netmap_verbose)
3010 					nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
3011 				netmap_adapter_put(na);
3012 			} else if (!na) {
3013 				error = ENXIO;
3014 			}
3015 			NMG_UNLOCK();
3016 			break;
3017 		}
3018 
3019 		case NETMAP_REQ_PORT_HDR_GET: {
3020 			/* Get vnet-header length for this netmap port */
3021 			struct nmreq_port_hdr *req =
3022 				(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
3023 			/* Build a nmreq_register out of the nmreq_port_hdr,
3024 			 * so that we can call netmap_get_na(). */
3025 			struct nmreq_register regreq;
3026 			struct ifnet *ifp;
3027 
3028 			bzero(&regreq, sizeof(regreq));
3029 			regreq.nr_mode = NR_REG_ALL_NIC;
3030 			NMG_LOCK();
3031 			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
3032 			hdr->nr_body = (uintptr_t)&regreq;
3033 			error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
3034 			hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
3035 			hdr->nr_body = (uintptr_t)req;
3036 			if (na && !error) {
3037 				req->nr_hdr_len = na->virt_hdr_len;
3038 			}
3039 			netmap_unget_na(na, ifp);
3040 			NMG_UNLOCK();
3041 			break;
3042 		}
3043 
3044 		case NETMAP_REQ_VALE_LIST: {
3045 			error = netmap_vale_list(hdr);
3046 			break;
3047 		}
3048 
3049 		case NETMAP_REQ_VALE_NEWIF: {
3050 			error = nm_vi_create(hdr);
3051 			break;
3052 		}
3053 
3054 		case NETMAP_REQ_VALE_DELIF: {
3055 			error = nm_vi_destroy(hdr->nr_name);
3056 			break;
3057 		}
3058 #endif  /* WITH_VALE */
3059 
3060 		case NETMAP_REQ_VALE_POLLING_ENABLE:
3061 		case NETMAP_REQ_VALE_POLLING_DISABLE: {
3062 			error = nm_bdg_polling(hdr);
3063 			break;
3064 		}
3065 		case NETMAP_REQ_POOLS_INFO_GET: {
3066 			/* Get information from the memory allocator used for
3067 			 * hdr->nr_name. */
3068 			struct nmreq_pools_info *req =
3069 				(struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
3070 			NMG_LOCK();
3071 			do {
3072 				/* Build a nmreq_register out of the nmreq_pools_info,
3073 				 * so that we can call netmap_get_na(). */
3074 				struct nmreq_register regreq;
3075 				bzero(&regreq, sizeof(regreq));
3076 				regreq.nr_mem_id = req->nr_mem_id;
3077 				regreq.nr_mode = NR_REG_ALL_NIC;
3078 
3079 				hdr->nr_reqtype = NETMAP_REQ_REGISTER;
3080 				hdr->nr_body = (uintptr_t)&regreq;
3081 				error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
3082 				hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
3083 				hdr->nr_body = (uintptr_t)req; /* reset nr_body */
3084 				if (error) {
3085 					na = NULL;
3086 					ifp = NULL;
3087 					break;
3088 				}
3089 				nmd = na->nm_mem; /* grab the memory allocator */
3090 				if (nmd == NULL) {
3091 					error = EINVAL;
3092 					break;
3093 				}
3094 
3095 				/* Finalize the memory allocator, get the pools
3096 				 * information and release the allocator. */
3097 				error = netmap_mem_finalize(nmd, na);
3098 				if (error) {
3099 					break;
3100 				}
3101 				error = netmap_mem_pools_info_get(req, nmd);
3102 				netmap_mem_drop(na);
3103 			} while (0);
3104 			netmap_unget_na(na, ifp);
3105 			NMG_UNLOCK();
3106 			break;
3107 		}
3108 
3109 		case NETMAP_REQ_CSB_ENABLE: {
3110 			struct nmreq_option *opt;
3111 
3112 			opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
3113 			if (opt == NULL) {
3114 				error = EINVAL;
3115 			} else {
3116 				struct nmreq_opt_csb *csbo =
3117 					(struct nmreq_opt_csb *)opt;
3118 				NMG_LOCK();
3119 				error = netmap_csb_validate(priv, csbo);
3120 				NMG_UNLOCK();
3121 				opt->nro_status = error;
3122 			}
3123 			break;
3124 		}
3125 
3126 		case NETMAP_REQ_SYNC_KLOOP_START: {
3127 			error = netmap_sync_kloop(priv, hdr);
3128 			break;
3129 		}
3130 
3131 		case NETMAP_REQ_SYNC_KLOOP_STOP: {
3132 			error = netmap_sync_kloop_stop(priv);
3133 			break;
3134 		}
3135 
3136 		default: {
3137 			error = EINVAL;
3138 			break;
3139 		}
3140 		}
3141 		/* Write back request body to userspace and reset the
3142 		 * user-space pointer. */
3143 		error = nmreq_copyout(hdr, error);
3144 		break;
3145 	}
3146 
3147 	case NIOCTXSYNC:
3148 	case NIOCRXSYNC: {
3149 		if (unlikely(priv->np_nifp == NULL)) {
3150 			error = ENXIO;
3151 			break;
3152 		}
3153 		mb(); /* make sure following reads are not from cache */
3154 
3155 		if (unlikely(priv->np_csb_atok_base)) {
3156 			nm_prerr("Invalid sync in CSB mode");
3157 			error = EBUSY;
3158 			break;
3159 		}
3160 
3161 		na = priv->np_na;      /* we have a reference */
3162 
3163 		mbq_init(&q);
3164 		t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
3165 		krings = NMR(na, t);
3166 		qfirst = priv->np_qfirst[t];
3167 		qlast = priv->np_qlast[t];
3168 		sync_flags = priv->np_sync_flags;
3169 
3170 		for (i = qfirst; i < qlast; i++) {
3171 			struct netmap_kring *kring = krings[i];
3172 			struct netmap_ring *ring = kring->ring;
3173 
3174 			if (unlikely(nm_kr_tryget(kring, 1, &error))) {
3175 				error = (error ? EIO : 0);
3176 				continue;
3177 			}
3178 
3179 			if (cmd == NIOCTXSYNC) {
3180 				if (netmap_debug & NM_DEBUG_TXSYNC)
3181 					nm_prinf("pre txsync ring %d cur %d hwcur %d",
3182 					    i, ring->cur,
3183 					    kring->nr_hwcur);
3184 				if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3185 					netmap_ring_reinit(kring);
3186 				} else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
3187 					nm_sync_finalize(kring);
3188 				}
3189 				if (netmap_debug & NM_DEBUG_TXSYNC)
3190 					nm_prinf("post txsync ring %d cur %d hwcur %d",
3191 					    i, ring->cur,
3192 					    kring->nr_hwcur);
3193 			} else {
3194 				if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3195 					netmap_ring_reinit(kring);
3196 				}
3197 				if (nm_may_forward_up(kring)) {
3198 					/* transparent forwarding, see netmap_poll() */
3199 					netmap_grab_packets(kring, &q, netmap_fwd);
3200 				}
3201 				if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
3202 					nm_sync_finalize(kring);
3203 				}
3204 				ring_timestamp_set(ring);
3205 			}
3206 			nm_kr_put(kring);
3207 		}
3208 
3209 		if (mbq_peek(&q)) {
3210 			netmap_send_up(na->ifp, &q);
3211 		}
3212 
3213 		break;
3214 	}
3215 
3216 	default: {
3217 		return netmap_ioctl_legacy(priv, cmd, data, td);
3218 		break;
3219 	}
3220 	}
3221 
3222 	return (error);
3223 }
3224 
3225 size_t
3226 nmreq_size_by_type(uint16_t nr_reqtype)
3227 {
3228 	switch (nr_reqtype) {
3229 	case NETMAP_REQ_REGISTER:
3230 		return sizeof(struct nmreq_register);
3231 	case NETMAP_REQ_PORT_INFO_GET:
3232 		return sizeof(struct nmreq_port_info_get);
3233 	case NETMAP_REQ_VALE_ATTACH:
3234 		return sizeof(struct nmreq_vale_attach);
3235 	case NETMAP_REQ_VALE_DETACH:
3236 		return sizeof(struct nmreq_vale_detach);
3237 	case NETMAP_REQ_VALE_LIST:
3238 		return sizeof(struct nmreq_vale_list);
3239 	case NETMAP_REQ_PORT_HDR_SET:
3240 	case NETMAP_REQ_PORT_HDR_GET:
3241 		return sizeof(struct nmreq_port_hdr);
3242 	case NETMAP_REQ_VALE_NEWIF:
3243 		return sizeof(struct nmreq_vale_newif);
3244 	case NETMAP_REQ_VALE_DELIF:
3245 	case NETMAP_REQ_SYNC_KLOOP_STOP:
3246 	case NETMAP_REQ_CSB_ENABLE:
3247 		return 0;
3248 	case NETMAP_REQ_VALE_POLLING_ENABLE:
3249 	case NETMAP_REQ_VALE_POLLING_DISABLE:
3250 		return sizeof(struct nmreq_vale_polling);
3251 	case NETMAP_REQ_POOLS_INFO_GET:
3252 		return sizeof(struct nmreq_pools_info);
3253 	case NETMAP_REQ_SYNC_KLOOP_START:
3254 		return sizeof(struct nmreq_sync_kloop_start);
3255 	}
3256 	return 0;
3257 }
3258 
3259 static size_t
3260 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
3261 {
3262 	size_t rv = sizeof(struct nmreq_option);
3263 #ifdef NETMAP_REQ_OPT_DEBUG
3264 	if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
3265 		return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
3266 #endif /* NETMAP_REQ_OPT_DEBUG */
3267 	switch (nro_reqtype) {
3268 #ifdef WITH_EXTMEM
3269 	case NETMAP_REQ_OPT_EXTMEM:
3270 		rv = sizeof(struct nmreq_opt_extmem);
3271 		break;
3272 #endif /* WITH_EXTMEM */
3273 	case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3274 		if (nro_size >= rv)
3275 			rv = nro_size;
3276 		break;
3277 	case NETMAP_REQ_OPT_CSB:
3278 		rv = sizeof(struct nmreq_opt_csb);
3279 		break;
3280 	case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3281 		rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3282 		break;
3283 	case NETMAP_REQ_OPT_OFFSETS:
3284 		rv = sizeof(struct nmreq_opt_offsets);
3285 		break;
3286 	}
3287 	/* subtract the common header */
3288 	return rv - sizeof(struct nmreq_option);
3289 }
3290 
3291 /*
3292  * nmreq_copyin: create an in-kernel version of the request.
3293  *
3294  * We build the following data structure:
3295  *
3296  * hdr -> +-------+                buf
3297  *        |       |          +---------------+
3298  *        +-------+          |usr body ptr   |
3299  *        |options|-.        +---------------+
3300  *        +-------+ |        |usr options ptr|
3301  *        |body   |--------->+---------------+
3302  *        +-------+ |        |               |
3303  *                  |        |  copy of body |
3304  *                  |        |               |
3305  *                  |        +---------------+
3306  *                  |        |    NULL       |
3307  *                  |        +---------------+
3308  *                  |    .---|               |\
3309  *                  |    |   +---------------+ |
3310  *                  | .------|               | |
3311  *                  | |  |   +---------------+  \ option table
3312  *                  | |  |   |      ...      |  / indexed by option
3313  *                  | |  |   +---------------+ |  type
3314  *                  | |  |   |               | |
3315  *                  | |  |   +---------------+/
3316  *                  | |  |   |usr next ptr 1 |
3317  *                  `-|----->+---------------+
3318  *                    |  |   | copy of opt 1 |
3319  *                    |  |   |               |
3320  *                    |  | .-| nro_next      |
3321  *                    |  | | +---------------+
3322  *                    |  | | |usr next ptr 2 |
3323  *                    |  `-`>+---------------+
3324  *                    |      | copy of opt 2 |
3325  *                    |      |               |
3326  *                    |    .-| nro_next      |
3327  *                    |    | +---------------+
3328  *                    |    | |               |
3329  *                    ~    ~ ~      ...      ~
3330  *                    |    .-|               |
3331  *                    `----->+---------------+
3332  *                         | |usr next ptr n |
3333  *                         `>+---------------+
3334  *                           | copy of opt n |
3335  *                           |               |
3336  *                           | nro_next(NULL)|
3337  *                           +---------------+
3338  *
3339  * The options and body fields of the hdr structure are overwritten
3340  * with in-kernel valid pointers inside the buf. The original user
3341  * pointers are saved in the buf and restored on copyout.
3342  * The list of options is copied and the pointers adjusted. The
3343  * original pointers are saved immediately before the option they belong to.
3344  *
3345  * The option table has an entry for every available option.  Entries
3346  * for options that have not been passed contain NULL.
3347  *
3348  */
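/*
 * A minimal, hypothetical userspace sketch of the layout that the copyin
 * below expects to parse; the choice of NETMAP_REQ_OPT_CSB is just an
 * example, and error checking is omitted:
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_register body;
 *	struct nmreq_opt_csb csb;
 *
 *	memset(&hdr, 0, sizeof(hdr));	// nr_reserved must be zero
 *	memset(&body, 0, sizeof(body));
 *	memset(&csb, 0, sizeof(csb));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	hdr.nr_body    = (uintptr_t)&body;	// request body
 *	hdr.nr_options = (uintptr_t)&csb;	// head of the option list
 *	csb.nro_opt.nro_reqtype = NETMAP_REQ_OPT_CSB;
 *	csb.nro_opt.nro_next    = 0;		// terminates the list
 *	// ... fill csb.csb_atok / csb.csb_ktoa ...
 *	ioctl(fd, NIOCCTRL, &hdr);	// kernel side runs nmreq_copyin()
 */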
3349 
3350 int
3351 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3352 {
3353 	size_t rqsz, optsz, bufsz;
3354 	int error = 0;
3355 	char *ker = NULL, *p;
3356 	struct nmreq_option **next, *src, **opt_tab;
3357 	struct nmreq_option buf;
3358 	uint64_t *ptrs;
3359 
3360 	if (hdr->nr_reserved) {
3361 		if (netmap_verbose)
3362 			nm_prerr("nr_reserved must be zero");
3363 		return EINVAL;
3364 	}
3365 
3366 	if (!nr_body_is_user)
3367 		return 0;
3368 
3369 	hdr->nr_reserved = nr_body_is_user;
3370 
3371 	/* compute the total size of the buffer */
3372 	rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3373 	if (rqsz > NETMAP_REQ_MAXSIZE) {
3374 		error = EMSGSIZE;
3375 		goto out_err;
3376 	}
3377 	if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3378 		(!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3379 		/* Request body expected, but not found; or
3380 		 * request body found but unexpected. */
3381 		if (netmap_verbose)
3382 			nm_prerr("nr_body expected but not found, or vice versa");
3383 		error = EINVAL;
3384 		goto out_err;
3385 	}
3386 
3387 	bufsz = 2 * sizeof(void *) + rqsz +
3388 		NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3389 	/* compute the size of the buf below the option table.
3390 	 * It must contain a copy of every received option structure.
3391 	 * For every option we also need to store a copy of the user
3392 	 * list pointer.
3393 	 */
3394 	optsz = 0;
3395 	for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
3396 	     src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
3397 	{
3398 		error = copyin(src, &buf, sizeof(*src));
3399 		if (error)
3400 			goto out_err;
3401 		optsz += sizeof(*src);
3402 		optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size);
3403 		if (rqsz + optsz > NETMAP_REQ_MAXSIZE) {
3404 			error = EMSGSIZE;
3405 			goto out_err;
3406 		}
3407 		bufsz += sizeof(void *);
3408 	}
3409 	bufsz += optsz;
3410 
3411 	ker = nm_os_malloc(bufsz);
3412 	if (ker == NULL) {
3413 		error = ENOMEM;
3414 		goto out_err;
3415 	}
3416 	p = ker;	/* write pointer into the buffer */
3417 
3418 	/* make a copy of the user pointers */
3419 	ptrs = (uint64_t*)p;
3420 	*ptrs++ = hdr->nr_body;
3421 	*ptrs++ = hdr->nr_options;
3422 	p = (char *)ptrs;
3423 
3424 	/* copy the body */
3425 	error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3426 	if (error)
3427 		goto out_restore;
3428 	/* overwrite the user pointer with the in-kernel one */
3429 	hdr->nr_body = (uintptr_t)p;
3430 	p += rqsz;
3431 	/* start of the options table */
3432 	opt_tab = (struct nmreq_option **)p;
3433 	p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3434 
3435 	/* copy the options */
3436 	next = (struct nmreq_option **)&hdr->nr_options;
3437 	src = *next;
3438 	while (src) {
3439 		struct nmreq_option *opt;
3440 
3441 		/* copy the option header */
3442 		ptrs = (uint64_t *)p;
3443 		opt = (struct nmreq_option *)(ptrs + 1);
3444 		error = copyin(src, opt, sizeof(*src));
3445 		if (error)
3446 			goto out_restore;
3447 		/* make a copy of the user next pointer */
3448 		*ptrs = opt->nro_next;
3449 		/* overwrite the user pointer with the in-kernel one */
3450 		*next = opt;
3451 
3452 		/* initialize the option as not supported.
3453 		 * Recognized options will update this field.
3454 		 */
3455 		opt->nro_status = EOPNOTSUPP;
3456 
3457 		/* check for invalid types */
3458 		if (opt->nro_reqtype < 1) {
3459 			if (netmap_verbose)
3460 				nm_prinf("invalid option type: %u", opt->nro_reqtype);
3461 			opt->nro_status = EINVAL;
3462 			error = EINVAL;
3463 			goto next;
3464 		}
3465 
3466 		if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3467 			/* opt->nro_status is already EOPNOTSUPP */
3468 			error = EOPNOTSUPP;
3469 			goto next;
3470 		}
3471 
3472 		/* if the type is valid, index the option in the table
3473 		 * unless it is a duplicate.
3474 		 */
3475 		if (opt_tab[opt->nro_reqtype] != NULL) {
3476 			if (netmap_verbose)
3477 				nm_prinf("duplicate option: %u", opt->nro_reqtype);
3478 			opt->nro_status = EINVAL;
3479 			opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3480 			error = EINVAL;
3481 			goto next;
3482 		}
3483 		opt_tab[opt->nro_reqtype] = opt;
3484 
3485 		p = (char *)(opt + 1);
3486 
3487 		/* copy the option body */
3488 		optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3489 						opt->nro_size);
3490 		if (optsz) {
3491 			/* the option body follows the option header */
3492 			error = copyin(src + 1, p, optsz);
3493 			if (error)
3494 				goto out_restore;
3495 			p += optsz;
3496 		}
3497 
3498 	next:
3499 		/* move to next option */
3500 		next = (struct nmreq_option **)&opt->nro_next;
3501 		src = *next;
3502 	}
3503 	if (error)
3504 		nmreq_copyout(hdr, error);
3505 	return error;
3506 
3507 out_restore:
3508 	ptrs = (uint64_t *)ker;
3509 	hdr->nr_body = *ptrs++;
3510 	hdr->nr_options = *ptrs++;
3511 	hdr->nr_reserved = 0;
3512 	nm_os_free(ker);
3513 out_err:
3514 	return error;
3515 }
3516 
3517 static int
3518 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3519 {
3520 	struct nmreq_option *src, *dst;
3521 	void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3522 	uint64_t *ptrs;
3523 	size_t bodysz;
3524 	int error;
3525 
3526 	if (!hdr->nr_reserved)
3527 		return rerror;
3528 
3529 	/* restore the user pointers in the header */
3530 	ptrs = (uint64_t *)ker - 2;
3531 	bufstart = ptrs;
3532 	hdr->nr_body = *ptrs++;
3533 	src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3534 	hdr->nr_options = *ptrs;
3535 
3536 	if (!rerror) {
3537 		/* copy the body */
3538 		bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3539 		error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3540 		if (error) {
3541 			rerror = error;
3542 			goto out;
3543 		}
3544 	}
3545 
3546 	/* copy the options */
3547 	dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3548 	while (src) {
3549 		size_t optsz;
3550 		uint64_t next;
3551 
3552 		/* restore the user pointer */
3553 		next = src->nro_next;
3554 		ptrs = (uint64_t *)src - 1;
3555 		src->nro_next = *ptrs;
3556 
3557 		/* always copy the option header */
3558 		error = copyout(src, dst, sizeof(*src));
3559 		if (error) {
3560 			rerror = error;
3561 			goto out;
3562 		}
3563 
3564 		/* copy the option body only if there was no error */
3565 		if (!rerror && !src->nro_status) {
3566 			optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3567 							src->nro_size);
3568 			if (optsz) {
3569 				error = copyout(src + 1, dst + 1, optsz);
3570 				if (error) {
3571 					rerror = error;
3572 					goto out;
3573 				}
3574 			}
3575 		}
3576 		src = (struct nmreq_option *)(uintptr_t)next;
3577 		dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3578 	}
3579 
3580 
3581 out:
3582 	hdr->nr_reserved = 0;
3583 	nm_os_free(bufstart);
3584 	return rerror;
3585 }
3586 
3587 struct nmreq_option *
3588 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3589 {
3590 	struct nmreq_option **opt_tab;
3591 
3592 	if (!hdr->nr_options)
3593 		return NULL;
3594 
3595 	opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) -
3596 	    (NETMAP_REQ_OPT_MAX + 1);
3597 	return opt_tab[reqtype];
3598 }
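/*
 * A hypothetical in-kernel usage sketch: handlers look options up through
 * the table above rather than walking the list, and clear nro_status to
 * mark an option as recognized, e.g.:
 *
 *	struct nmreq_option *opt;
 *
 *	opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
 *	if (opt != NULL) {
 *		opt->nro_status = 0;	// option handled
 *		// ... use the option body that follows the header ...
 *	}
 */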
3599 
3600 static int
3601 nmreq_checkoptions(struct nmreq_header *hdr)
3602 {
3603 	struct nmreq_option *opt;
3604 	/* return an error if there is still any option
3605 	 * marked as not supported
3606 	 */
3607 
3608 	for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3609 	     opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3610 		if (opt->nro_status == EOPNOTSUPP)
3611 			return EOPNOTSUPP;
3612 
3613 	return 0;
3614 }
3615 
3616 /*
3617  * select(2) and poll(2) handlers for the "netmap" device.
3618  *
3619  * Can be called for one or more queues.
3620  * Return the event mask corresponding to ready events.
3621  * If there are no ready events (and 'sr' is not NULL), do a
3622  * selrecord on either the individual selinfo or on the global one.
3623  * Device-dependent parts (locking and sync of tx/rx rings)
3624  * are done through callbacks.
3625  *
3626  * On linux, arguments are really pwait, the poll table, and 'td' is struct file *
3627  * The first one is remapped to pwait, as selrecord() uses the name as a
3628  * hidden argument.
3629  */
3630 int
3631 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3632 {
3633 	struct netmap_adapter *na;
3634 	struct netmap_kring *kring;
3635 	struct netmap_ring *ring;
3636 	u_int i, want[NR_TXRX], revents = 0;
3637 	NM_SELINFO_T *si[NR_TXRX];
3638 #define want_tx want[NR_TX]
3639 #define want_rx want[NR_RX]
3640 	struct mbq q;	/* packets from RX hw queues to host stack */
3641 
3642 	/*
3643 	 * In order to avoid nested locks, we need to "double check"
3644 	 * txsync and rxsync if we decide to do a selrecord().
3645 	 * retry_tx (and retry_rx, later) prevent looping forever.
3646 	 */
3647 	int retry_tx = 1, retry_rx = 1;
3648 
3649 	/* Transparent mode: send_down is 1 if we have found some
3650 	 * packets to forward (host RX ring --> NIC) during the rx
3651 	 * scan and we have not sent them down to the NIC yet.
3652 	 * Transparent mode requires binding all rings to a single
3653 	 * file descriptor.
3654 	 */
3655 	int send_down = 0;
3656 	int sync_flags = priv->np_sync_flags;
3657 
3658 	mbq_init(&q);
3659 
3660 	if (unlikely(priv->np_nifp == NULL)) {
3661 		return POLLERR;
3662 	}
3663 	mb(); /* make sure following reads are not from cache */
3664 
3665 	na = priv->np_na;
3666 
3667 	if (unlikely(!nm_netmap_on(na)))
3668 		return POLLERR;
3669 
3670 	if (unlikely(priv->np_csb_atok_base)) {
3671 		nm_prerr("Invalid poll in CSB mode");
3672 		return POLLERR;
3673 	}
3674 
3675 	if (netmap_debug & NM_DEBUG_ON)
3676 		nm_prinf("device %s events 0x%x", na->name, events);
3677 	want_tx = events & (POLLOUT | POLLWRNORM);
3678 	want_rx = events & (POLLIN | POLLRDNORM);
3679 
3680 	/*
3681 	 * If the card has more than one queue AND the file descriptor is
3682 	 * bound to all of them, we sleep on the "global" selinfo, otherwise
3683 	 * we sleep on individual selinfo (FreeBSD only allows two selinfo's
3684 	 * per file descriptor).
3685  * The interrupt routine in the driver wakes one or the other
3686 	 * (or both) depending on which clients are active.
3687 	 *
3688 	 * rxsync() is only called if we run out of buffers on a POLLIN.
3689 	 * txsync() is called if we run out of buffers on POLLOUT, or
3690 	 * there are pending packets to send. The latter can be disabled
3691  * by passing NETMAP_NO_TX_POLL in the NIOCREGIF call.
3692 	 */
3693 	si[NR_RX] = priv->np_si[NR_RX];
3694 	si[NR_TX] = priv->np_si[NR_TX];
3695 
3696 #ifdef __FreeBSD__
3697 	/*
3698 	 * We start with a lock-free round, which is cheap if we have
3699 	 * slots available. If this fails, then lock and call the sync
3700 	 * routines. We can't do this on Linux, as the contract says
3701 	 * that we must call nm_os_selrecord() unconditionally.
3702 	 */
3703 	if (want_tx) {
3704 		const enum txrx t = NR_TX;
3705 		for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3706 			kring = NMR(na, t)[i];
3707 			if (kring->ring->cur != kring->ring->tail) {
3708 				/* Some unseen TX space is available, so
3709 				 * we don't need to run txsync. */
3710 				revents |= want[t];
3711 				want[t] = 0;
3712 				break;
3713 			}
3714 		}
3715 	}
3716 	if (want_rx) {
3717 		const enum txrx t = NR_RX;
3718 		int rxsync_needed = 0;
3719 
3720 		for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3721 			kring = NMR(na, t)[i];
3722 			if (kring->ring->cur == kring->ring->tail
3723 				|| kring->rhead != kring->ring->head) {
3724 				/* There are no unseen packets on this ring,
3725 				 * or there are some buffers to be returned
3726 				 * to the netmap port. We therefore go ahead
3727 				 * and run rxsync. */
3728 				rxsync_needed = 1;
3729 				break;
3730 			}
3731 		}
3732 		if (!rxsync_needed) {
3733 			revents |= want_rx;
3734 			want_rx = 0;
3735 		}
3736 	}
3737 #endif
3738 
3739 #ifdef linux
3740 	/* The selrecord must be unconditional on linux. */
3741 	nm_os_selrecord(sr, si[NR_RX]);
3742 	nm_os_selrecord(sr, si[NR_TX]);
3743 #endif /* linux */
3744 
3745 	/*
3746 	 * If we want to push packets out (priv->np_txpoll) or
3747 	 * want_tx is still set, we must issue txsync calls
3748 	 * (on all rings, to avoid that the tx rings stall).
3749 	 * Fortunately, normal tx mode has np_txpoll set.
3750 	 */
3751 	if (priv->np_txpoll || want_tx) {
3752 		/*
3753 		 * The first round checks if anyone is ready, if not
3754 		 * do a selrecord and another round to handle races.
3755 		 * want_tx goes to 0 if any space is found, and is
3756 		 * used to skip rings with no pending transmissions.
3757 		 */
3758 flush_tx:
3759 		for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3760 			int found = 0;
3761 
3762 			kring = na->tx_rings[i];
3763 			ring = kring->ring;
3764 
3765 			/*
3766 			 * Don't try to txsync this TX ring if we already found some
3767 			 * space in some of the TX rings (want_tx == 0) and there are no
3768 			 * TX slots in this ring that need to be flushed to the NIC
3769 			 * (head == hwcur).
3770 			 */
3771 			if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3772 				continue;
3773 
3774 			if (nm_kr_tryget(kring, 1, &revents))
3775 				continue;
3776 
3777 			if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3778 				netmap_ring_reinit(kring);
3779 				revents |= POLLERR;
3780 			} else {
3781 				if (kring->nm_sync(kring, sync_flags))
3782 					revents |= POLLERR;
3783 				else
3784 					nm_sync_finalize(kring);
3785 			}
3786 
3787 			/*
3788 			 * If we found new slots, notify potential
3789 			 * listeners on the same ring.
3790 			 * Since we just did a txsync, look at the copies
3791 			 * of cur,tail in the kring.
3792 			 */
3793 			found = kring->rcur != kring->rtail;
3794 			nm_kr_put(kring);
3795 			if (found) { /* notify other listeners */
3796 				revents |= want_tx;
3797 				want_tx = 0;
3798 #ifndef linux
3799 				kring->nm_notify(kring, 0);
3800 #endif /* !linux */
3801 			}
3802 		}
3803 		/* if there were any packets to forward, we must have handled them by now */
3804 		send_down = 0;
3805 		if (want_tx && retry_tx && sr) {
3806 #ifndef linux
3807 			nm_os_selrecord(sr, si[NR_TX]);
3808 #endif /* !linux */
3809 			retry_tx = 0;
3810 			goto flush_tx;
3811 		}
3812 	}
3813 
3814 	/*
3815 	 * If want_rx is still set, scan the receive rings.
3816 	 * Do it on all rings because otherwise we starve.
3817 	 */
3818 	if (want_rx) {
3819 		/* two rounds here for race avoidance */
3820 do_retry_rx:
3821 		for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3822 			int found = 0;
3823 
3824 			kring = na->rx_rings[i];
3825 			ring = kring->ring;
3826 
3827 			if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3828 				continue;
3829 
3830 			if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3831 				netmap_ring_reinit(kring);
3832 				revents |= POLLERR;
3833 			}
3834 			/* now we can use kring->rcur, rtail */
3835 
3836 			/*
3837 			 * transparent mode support: collect packets from
3838 			 * hw rxring(s) that have been released by the user
3839 			 */
3840 			if (nm_may_forward_up(kring)) {
3841 				netmap_grab_packets(kring, &q, netmap_fwd);
3842 			}
3843 
3844 			/* Clear the NR_FORWARD flag anyway, it may be set by
3845 			 * the nm_sync() below only for the host RX ring (see
3846 			 * netmap_rxsync_from_host()). */
3847 			kring->nr_kflags &= ~NR_FORWARD;
3848 			if (kring->nm_sync(kring, sync_flags))
3849 				revents |= POLLERR;
3850 			else
3851 				nm_sync_finalize(kring);
3852 			send_down |= (kring->nr_kflags & NR_FORWARD);
3853 			ring_timestamp_set(ring);
3854 			found = kring->rcur != kring->rtail;
3855 			nm_kr_put(kring);
3856 			if (found) {
3857 				revents |= want_rx;
3858 				retry_rx = 0;
3859 #ifndef linux
3860 				kring->nm_notify(kring, 0);
3861 #endif /* !linux */
3862 			}
3863 		}
3864 
3865 #ifndef linux
3866 		if (retry_rx && sr) {
3867 			nm_os_selrecord(sr, si[NR_RX]);
3868 		}
3869 #endif /* !linux */
3870 		if (send_down || retry_rx) {
3871 			retry_rx = 0;
3872 			if (send_down)
3873 				goto flush_tx; /* and retry_rx */
3874 			else
3875 				goto do_retry_rx;
3876 		}
3877 	}
3878 
3879 	/*
3880 	 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3881 	 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3882 	 * to the host stack.
3883 	 */
3884 
3885 	if (mbq_peek(&q)) {
3886 		netmap_send_up(na->ifp, &q);
3887 	}
3888 
3889 	return (revents);
3890 #undef want_tx
3891 #undef want_rx
3892 }
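/*
 * A hypothetical userspace counterpart of the handler above, assuming
 * 'fd' is an open /dev/netmap descriptor already bound with NIOCREGIF:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, 1000) > 0) {
 *		if (pfd.revents & POLLIN)
 *			;	// some bound RX ring has new packets
 *		if (pfd.revents & POLLOUT)
 *			;	// some bound TX ring has free slots
 *	}
 */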
3893 
3894 int
3895 nma_intr_enable(struct netmap_adapter *na, int onoff)
3896 {
3897 	bool changed = false;
3898 	enum txrx t;
3899 	int i;
3900 
3901 	for_rx_tx(t) {
3902 		for (i = 0; i < nma_get_nrings(na, t); i++) {
3903 			struct netmap_kring *kring = NMR(na, t)[i];
3904 			int on = !(kring->nr_kflags & NKR_NOINTR);
3905 
3906 			if (!!onoff != !!on) {
3907 				changed = true;
3908 			}
3909 			if (onoff) {
3910 				kring->nr_kflags &= ~NKR_NOINTR;
3911 			} else {
3912 				kring->nr_kflags |= NKR_NOINTR;
3913 			}
3914 		}
3915 	}
3916 
3917 	if (!changed) {
3918 		return 0; /* nothing to do */
3919 	}
3920 
3921 	if (!na->nm_intr) {
3922 		nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3923 		  na->name);
3924 		return -1;
3925 	}
3926 
3927 	na->nm_intr(na, onoff);
3928 
3929 	return 0;
3930 }
3931 
3932 
3933 /*-------------------- driver support routines -------------------*/
3934 
3935 /* default notify callback */
3936 static int
3937 netmap_notify(struct netmap_kring *kring, int flags)
3938 {
3939 	struct netmap_adapter *na = kring->notify_na;
3940 	enum txrx t = kring->tx;
3941 
3942 	nm_os_selwakeup(&kring->si);
3943 	/* optimization: avoid a wake up on the global
3944 	 * queue if nobody has registered for more
3945 	 * than one ring
3946 	 */
3947 	if (na->si_users[t] > 0)
3948 		nm_os_selwakeup(&na->si[t]);
3949 
3950 	return NM_IRQ_COMPLETED;
3951 }
3952 
3953 /* Called by all routines that create netmap_adapters.
3954  * Provide some defaults and get a reference to the
3955  * memory allocator.
3956  */
3957 int
3958 netmap_attach_common(struct netmap_adapter *na)
3959 {
3960 	if (!na->rx_buf_maxsize) {
3961 		/* Set a conservative default (larger is safer). */
3962 		na->rx_buf_maxsize = PAGE_SIZE;
3963 	}
3964 
3965 #ifdef __FreeBSD__
3966 	if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3967 		na->if_input = na->ifp->if_input; /* for netmap_send_up */
3968 	}
3969 	na->pdev = na; /* make sure netmap_mem_map() is called */
3970 #endif /* __FreeBSD__ */
3971 	if (na->na_flags & NAF_HOST_RINGS) {
3972 		if (na->num_host_rx_rings == 0)
3973 			na->num_host_rx_rings = 1;
3974 		if (na->num_host_tx_rings == 0)
3975 			na->num_host_tx_rings = 1;
3976 	}
3977 	if (na->nm_krings_create == NULL) {
3978 		/* we assume that we have been called by a driver,
3979 		 * since other port types all provide their own
3980 		 * nm_krings_create
3981 		 */
3982 		na->nm_krings_create = netmap_hw_krings_create;
3983 		na->nm_krings_delete = netmap_hw_krings_delete;
3984 	}
3985 	if (na->nm_notify == NULL)
3986 		na->nm_notify = netmap_notify;
3987 	na->active_fds = 0;
3988 
3989 	if (na->nm_mem == NULL) {
3990 		/* use iommu or global allocator */
3991 		na->nm_mem = netmap_mem_get_iommu(na);
3992 	}
3993 	if (na->nm_bdg_attach == NULL)
3994 		/* no special nm_bdg_attach callback. On VALE
3995 		 * attach, we need to interpose a bwrap
3996 		 */
3997 		na->nm_bdg_attach = netmap_default_bdg_attach;
3998 
3999 	return 0;
4000 }
4001 
4002 /* Wrapper for the register callback provided by netmap-enabled
4003  * hardware drivers.
4004  * nm_iszombie(na) means that the driver module has been
4005  * unloaded, so we cannot call into it.
4006  * nm_os_ifnet_lock() must guarantee mutual exclusion with
4007  * module unloading.
4008  */
4009 static int
4010 netmap_hw_reg(struct netmap_adapter *na, int onoff)
4011 {
4012 	struct netmap_hw_adapter *hwna =
4013 		(struct netmap_hw_adapter*)na;
4014 	int error = 0;
4015 
4016 	nm_os_ifnet_lock();
4017 
4018 	if (nm_iszombie(na)) {
4019 		if (onoff) {
4020 			error = ENXIO;
4021 		} else if (na != NULL) {
4022 			na->na_flags &= ~NAF_NETMAP_ON;
4023 		}
4024 		goto out;
4025 	}
4026 
4027 	error = hwna->nm_hw_register(na, onoff);
4028 
4029 out:
4030 	nm_os_ifnet_unlock();
4031 
4032 	return error;
4033 }
4034 
4035 static void
4036 netmap_hw_dtor(struct netmap_adapter *na)
4037 {
4038 	if (na->ifp == NULL)
4039 		return;
4040 
4041 	NM_DETACH_NA(na->ifp);
4042 }
4043 
4044 
4045 /*
4046  * Allocate a netmap_adapter object, and initialize it from the
4047  * 'arg' passed by the driver on attach.
4048  * We allocate a block of memory of 'size' bytes, which has room
4049  * for struct netmap_adapter plus additional room private to
4050  * the caller.
4051  * Return 0 on success, ENOMEM otherwise.
4052  */
4053 int
4054 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
4055 {
4056 	struct netmap_hw_adapter *hwna = NULL;
4057 	struct ifnet *ifp = NULL;
4058 
4059 	if (size < sizeof(struct netmap_hw_adapter)) {
4060 		if (netmap_debug & NM_DEBUG_ON)
4061 			nm_prerr("Invalid netmap adapter size %d", (int)size);
4062 		return EINVAL;
4063 	}
4064 
4065 	if (arg == NULL || arg->ifp == NULL) {
4066 		if (netmap_debug & NM_DEBUG_ON)
4067 			nm_prerr("either arg or arg->ifp is NULL");
4068 		return EINVAL;
4069 	}
4070 
4071 	if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
4072 		if (netmap_debug & NM_DEBUG_ON)
4073 			nm_prerr("%s: invalid rings tx %d rx %d",
4074 				arg->name, arg->num_tx_rings, arg->num_rx_rings);
4075 		return EINVAL;
4076 	}
4077 
4078 	ifp = arg->ifp;
4079 	if (NM_NA_CLASH(ifp)) {
4080 		/* If NA(ifp) is not null but there is no valid netmap
4081 		 * adapter it means that someone else is using the same
4082 		 * pointer (e.g. ax25_ptr on linux). This happens for
4083 		 * instance when also PF_RING is in use. */
4084 		nm_prerr("Error: netmap adapter hook is busy");
4085 		return EBUSY;
4086 	}
4087 
4088 	hwna = nm_os_malloc(size);
4089 	if (hwna == NULL)
4090 		goto fail;
4091 	hwna->up = *arg;
4092 	hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
4093 	strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
4094 	if (override_reg) {
4095 		hwna->nm_hw_register = hwna->up.nm_register;
4096 		hwna->up.nm_register = netmap_hw_reg;
4097 	}
4098 	if (netmap_attach_common(&hwna->up)) {
4099 		nm_os_free(hwna);
4100 		goto fail;
4101 	}
4102 	netmap_adapter_get(&hwna->up);
4103 
4104 	NM_ATTACH_NA(ifp, &hwna->up);
4105 
4106 	nm_os_onattach(ifp);
4107 
4108 	if (arg->nm_dtor == NULL) {
4109 		hwna->up.nm_dtor = netmap_hw_dtor;
4110 	}
4111 
4112 	if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
4113 	    hwna->up.num_tx_rings, hwna->up.num_tx_desc,
4114 	    hwna->up.num_rx_rings, hwna->up.num_rx_desc);
4115 	return 0;
4116 
4117 fail:
4118 	nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
4119 	return (hwna ? EINVAL : ENOMEM);
4120 }
4121 
4122 
4123 int
4124 netmap_attach(struct netmap_adapter *arg)
4125 {
4126 	return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
4127 			1 /* override nm_reg */);
4128 }
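/*
 * A hedged sketch of the typical call site in a NIC driver attach
 * routine; the foo_* callbacks and the 'sc' fields are placeholders for
 * the driver's own names:
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync   = foo_netmap_txsync;
 *	na.nm_rxsync   = foo_netmap_rxsync;
 *	netmap_attach(&na);
 */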
4129 
4130 
4131 void
4132 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
4133 {
4134 	if (!na) {
4135 		return;
4136 	}
4137 
4138 	refcount_acquire(&na->na_refcount);
4139 }
4140 
4141 
4142 /* returns 1 iff the netmap_adapter is destroyed */
4143 int
4144 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
4145 {
4146 	if (!na)
4147 		return 1;
4148 
4149 	if (!refcount_release(&na->na_refcount))
4150 		return 0;
4151 
4152 	if (na->nm_dtor)
4153 		na->nm_dtor(na);
4154 
4155 	if (na->tx_rings) { /* XXX should not happen */
4156 		if (netmap_debug & NM_DEBUG_ON)
4157 			nm_prerr("freeing leftover tx_rings");
4158 		na->nm_krings_delete(na);
4159 	}
4160 	netmap_pipe_dealloc(na);
4161 	if (na->nm_mem)
4162 		netmap_mem_put(na->nm_mem);
4163 	bzero(na, sizeof(*na));
4164 	nm_os_free(na);
4165 
4166 	return 1;
4167 }
4168 
4169 /* nm_krings_create callback for all hardware native adapters */
4170 int
4171 netmap_hw_krings_create(struct netmap_adapter *na)
4172 {
4173 	int ret = netmap_krings_create(na, 0);
4174 	if (ret == 0) {
4175 		/* initialize the mbq for the sw rx ring */
4176 		u_int lim = netmap_real_rings(na, NR_RX), i;
4177 		for (i = na->num_rx_rings; i < lim; i++) {
4178 			mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
4179 		}
4180 		nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
4181 	}
4182 	return ret;
4183 }
4184 
4185 
4186 
4187 /*
4188  * Called on module unload by the netmap-enabled drivers
4189  */
4190 void
4191 netmap_detach(struct ifnet *ifp)
4192 {
4193 	struct netmap_adapter *na = NA(ifp);
4194 
4195 	if (!na)
4196 		return;
4197 
4198 	NMG_LOCK();
4199 	netmap_set_all_rings(na, NM_KR_LOCKED);
4200 	/*
4201 	 * if the netmap adapter is not native, somebody
4202 	 * changed it, so we can not release it here.
4203 	 * The NAF_ZOMBIE flag will notify the new owner that
4204 	 * the driver is gone.
4205 	 */
4206 	if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
4207 		na->na_flags |= NAF_ZOMBIE;
4208 	}
4209 	/* give active users a chance to notice that NAF_ZOMBIE has been
4210 	 * turned on, so that they can stop and return an error to userspace.
4211 	 * Note that this becomes a NOP if there are no active users and,
4212 	 * therefore, the put() above has deleted the na, since now NA(ifp) is
4213 	 * NULL.
4214 	 */
4215 	netmap_enable_all_rings(ifp);
4216 	NMG_UNLOCK();
4217 }
4218 
4219 
4220 /*
4221  * Intercept packets from the network stack and pass them
4222  * to netmap as incoming packets on the 'software' ring.
4223  *
4224  * We only store packets in a bounded mbq and then copy them
4225  * in the relevant rxsync routine.
4226  *
4227  * We rely on the OS to make sure that the ifp and na do not go
4228  * away (typically the caller checks for IFF_DRV_RUNNING or the like).
4229  * In nm_register() or whenever there is a reinitialization,
4230  * we make sure to make the mode change visible here.
4231  */
4232 int
4233 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
4234 {
4235 	struct netmap_adapter *na = NA(ifp);
4236 	struct netmap_kring *kring, *tx_kring;
4237 	u_int len = MBUF_LEN(m);
4238 	u_int error = ENOBUFS;
4239 	unsigned int txr;
4240 	struct mbq *q;
4241 	int busy;
4242 	u_int i;
4243 
4244 	i = MBUF_TXQ(m);
4245 	if (i >= na->num_host_rx_rings) {
4246 		i = i % na->num_host_rx_rings;
4247 	}
4248 	kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
4249 
4250 	// XXX [Linux] we do not need this lock
4251 	// if we follow the down/configure/up protocol -gl
4252 	// mtx_lock(&na->core_lock);
4253 
4254 	if (!nm_netmap_on(na)) {
4255 		nm_prerr("%s not in netmap mode anymore", na->name);
4256 		error = ENXIO;
4257 		goto done;
4258 	}
4259 
4260 	txr = MBUF_TXQ(m);
4261 	if (txr >= na->num_tx_rings) {
4262 		txr %= na->num_tx_rings;
4263 	}
4264 	tx_kring = NMR(na, NR_TX)[txr];
4265 
4266 	if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
4267 		return MBUF_TRANSMIT(na, ifp, m);
4268 	}
4269 
4270 	q = &kring->rx_queue;
4271 
4272 	// XXX reconsider long packets if we handle fragments
4273 	if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4274 		nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4275 			len, NETMAP_BUF_SIZE(na));
4276 		goto done;
4277 	}
4278 
4279 	if (!netmap_generic_hwcsum) {
4280 		if (nm_os_mbuf_has_csum_offld(m)) {
4281 			nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4282 			goto done;
4283 		}
4284 	}
4285 
4286 	if (nm_os_mbuf_has_seg_offld(m)) {
4287 		nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4288 		goto done;
4289 	}
4290 
4291 #ifdef __FreeBSD__
4292 	ETHER_BPF_MTAP(ifp, m);
4293 #endif /* __FreeBSD__ */
4294 
4295 	/* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4296 	 * and maybe other instances of netmap_transmit (the latter
4297 	 * not possible on Linux).
4298 	 * We enqueue the mbuf only if we are sure there is going to be
4299 	 * enough room in the host RX ring, otherwise we drop it.
4300 	 */
4301 	mbq_lock(q);
4302 
4303 	busy = kring->nr_hwtail - kring->nr_hwcur;
4304 	if (busy < 0)
4305 		busy += kring->nkr_num_slots;
4306 	if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4307 		nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4308 			kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4309 	} else {
4310 		mbq_enqueue(q, m);
4311 		nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4312 		/* notify outside the lock */
4313 		m = NULL;
4314 		error = 0;
4315 	}
4316 	mbq_unlock(q);
4317 
4318 done:
4319 	if (m)
4320 		m_freem(m);
4321 	/* unconditionally wake up listeners */
4322 	kring->nm_notify(kring, 0);
4323 	/* this is normally netmap_notify(), but for nics
4324 	 * connected to a bridge it is netmap_bwrap_intr_notify(),
4325 	 * which possibly forwards the frames through the switch
4326 	 */
4327 
4328 	return (error);
4329 }
4330 
4331 
4332 /*
4333  * Reset function to be called by the driver routines when reinitializing
4334  * a hardware ring. The driver is in charge of locking to protect the kring
4335  * while this operation is being performed. This is normally achieved by
4336  * calling netmap_disable_all_rings() before triggering a reset.
4337  * If the kring is not in netmap mode, return NULL to inform the caller
4338  * that this is the case.
4339  * If the kring is in netmap mode, set hwofs so that the netmap indices
4340  * seen by userspace (head/cur/tail) do not change, although the internal
4341  * NIC indices have been reset to 0.
4342  * In any case, adjust kring->nr_mode.
4343  */
4344 struct netmap_slot *
4345 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4346 	u_int new_cur)
4347 {
4348 	struct netmap_kring *kring;
4349 	u_int new_hwtail, new_hwofs;
4350 
4351 	if (!nm_native_on(na)) {
4352 		nm_prdis("interface not in native netmap mode");
4353 		return NULL;	/* nothing to reinitialize */
4354 	}
4355 
4356 	if (tx == NR_TX) {
4357 		if (n >= na->num_tx_rings)
4358 			return NULL;
4359 		kring = na->tx_rings[n];
4360 		/*
4361 		 * Set hwofs to rhead, so that slots[rhead] is mapped to
4362 		 * the NIC internal slot 0, and thus the netmap buffer
4363 		 * at rhead is the next to be transmitted. Transmissions
4364 		 * that were pending before the reset are considered as
4365 		 * sent, so that we can have hwcur = rhead. All the slots
4366 		 * are now owned by the user, so we can also reinit hwtail.
4367 		 */
4368 		new_hwofs = kring->rhead;
4369 		new_hwtail = nm_prev(kring->rhead, kring->nkr_num_slots - 1);
4370 	} else {
4371 		if (n >= na->num_rx_rings)
4372 			return NULL;
4373 		kring = na->rx_rings[n];
4374 		/*
4375 		 * Set hwofs to hwtail, so that slots[hwtail] is mapped to
4376 		 * the NIC internal slot 0, and thus the netmap buffer
4377 		 * at hwtail is the next to be given to the NIC.
4378 		 * Unread slots (the ones in [rhead,hwtail[) are owned by
4379 		 * the user, and thus the caller cannot give them
4380 		 * to the NIC right now.
4381 		 */
4382 		new_hwofs = kring->nr_hwtail;
4383 		new_hwtail = kring->nr_hwtail;
4384 	}
4385 	if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4386 		kring->nr_mode = NKR_NETMAP_OFF;
4387 		return NULL;
4388 	}
4389 	if (netmap_verbose) {
4390 	    nm_prinf("%s, hc %u->%u, ht %u->%u, ho %u->%u", kring->name,
4391 	        kring->nr_hwcur, kring->rhead,
4392 	        kring->nr_hwtail, new_hwtail,
4393 	        kring->nkr_hwofs, new_hwofs);
4394 	}
4395 	kring->nr_hwcur = kring->rhead;
4396 	kring->nr_hwtail = new_hwtail;
4397 	kring->nkr_hwofs = new_hwofs;
4398 
4399 	/*
4400 	 * Wakeup on the individual and global selwait
4401 	 * We do the wakeup here, but the ring is not yet reconfigured.
4402 	 * However, we are under lock so there are no races.
4403 	 */
4404 	kring->nr_mode = NKR_NETMAP_ON;
4405 	kring->nm_notify(kring, 0);
4406 	return kring->ring->slot;
4407 }
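/*
 * A hypothetical sketch of how a driver ring-init routine consumes the
 * return value; 'nslots' and the descriptor programming are placeholders
 * for driver-specific code:
 *
 *	struct netmap_slot *slot = netmap_reset(na, NR_RX, ring_nr, 0);
 *
 *	if (slot != NULL) {	// the kring is in netmap mode
 *		for (i = 0; i < nslots; i++) {
 *			// point the i-th NIC RX descriptor at the
 *			// netmap buffer of slot[i] instead of an mbuf
 *		}
 *	}
 */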
4408 
4409 
4410 /*
4411  * Dispatch rx/tx interrupts to the netmap rings.
4412  *
4413  * "work_done" is non-null on the RX path, NULL for the TX path.
4414  * We rely on the OS to make sure that there is only one active
4415  * instance per queue, and that there is appropriate locking.
4416  *
4417  * The 'notify' routine depends on what the ring is attached to.
4418  * - for a netmap file descriptor, do a selwakeup on the individual
4419  *   waitqueue, plus one on the global one if needed
4420  *   (see netmap_notify)
4421  * - for a nic connected to a switch, call the proper forwarding routine
4422  *   (see netmap_bwrap_intr_notify)
4423  */
4424 int
4425 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4426 {
4427 	struct netmap_kring *kring;
4428 	enum txrx t = (work_done ? NR_RX : NR_TX);
4429 
4430 	q &= NETMAP_RING_MASK;
4431 
4432 	if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
4433 	        nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX" , q);
4434 	}
4435 
4436 	if (q >= nma_get_nrings(na, t))
4437 		return NM_IRQ_PASS; // not a physical queue
4438 
4439 	kring = NMR(na, t)[q];
4440 
4441 	if (kring->nr_mode == NKR_NETMAP_OFF) {
4442 		return NM_IRQ_PASS;
4443 	}
4444 
4445 	if (t == NR_RX) {
4446 		kring->nr_kflags |= NKR_PENDINTR;	// XXX atomic ?
4447 		*work_done = 1; /* do not fire napi again */
4448 	}
4449 
4450 	return kring->nm_notify(kring, 0);
4451 }
4452 
4453 
4454 /*
4455  * Default functions to handle rx/tx interrupts from a physical device.
4456  * "work_done" is non-null on the RX path, NULL for the TX path.
4457  *
4458  * If the card is not in netmap mode, simply return NM_IRQ_PASS,
4459  * so that the caller proceeds with regular processing.
4460  * Otherwise call netmap_common_irq().
4461  *
4462  * If the card is connected to a netmap file descriptor,
4463  * do a selwakeup on the individual queue, plus one on the global one
4464  * if needed (multiqueue card _and_ there are multiqueue listeners),
4465  * and return NM_IRQ_COMPLETED.
4466  *
4467  * Finally, if called on rx from an interface connected to a switch,
4468  * calls the proper forwarding routine.
4469  */
4470 int
4471 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
4472 {
4473 	struct netmap_adapter *na = NA(ifp);
4474 
4475 	/*
4476 	 * XXX emulated netmap mode sets NAF_SKIP_INTR so
4477 	 * we still use the regular driver even though the previous
4478 	 * check fails. It is unclear whether we should use
4479 	 * nm_native_on() here.
4480 	 */
4481 	if (!nm_netmap_on(na))
4482 		return NM_IRQ_PASS;
4483 
4484 	if (na->na_flags & NAF_SKIP_INTR) {
4485 		nm_prdis("use regular interrupt");
4486 		return NM_IRQ_PASS;
4487 	}
4488 
4489 	return netmap_common_irq(na, q, work_done);
4490 }
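/*
 * A hedged sketch of the expected call site in a driver RX interrupt
 * handler ('que->me' is a placeholder for the queue index); the TX path
 * is analogous, with work_done == NULL:
 *
 *	if (netmap_rx_irq(ifp, que->me, &work_done) != NM_IRQ_PASS)
 *		return;	// handled in netmap mode, skip the regular path
 *	// ... normal mbuf-based RX processing follows ...
 */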
4491 
4492 /* set/clear native flags and if_transmit/netdev_ops */
4493 void
4494 nm_set_native_flags(struct netmap_adapter *na)
4495 {
4496 	struct ifnet *ifp = na->ifp;
4497 
4498 	/* We do the setup for intercepting packets only if we are the
4499 	 * first user of this adapter. */
4500 	if (na->active_fds > 0) {
4501 		return;
4502 	}
4503 
4504 	na->na_flags |= NAF_NETMAP_ON;
4505 	nm_os_onenter(ifp);
4506 }
4507 
4508 void
4509 nm_clear_native_flags(struct netmap_adapter *na)
4510 {
4511 	struct ifnet *ifp = na->ifp;
4512 
4513 	/* We undo the setup for intercepting packets only if we are the
4514 	 * last user of this adapter. */
4515 	if (na->active_fds > 0) {
4516 		return;
4517 	}
4518 
4519 	nm_os_onexit(ifp);
4520 
4521 	na->na_flags &= ~NAF_NETMAP_ON;
4522 }
4523 
4524 void
4525 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4526 {
4527 	enum txrx t;
4528 
4529 	for_rx_tx(t) {
4530 		int i;
4531 
4532 		for (i = 0; i < netmap_real_rings(na, t); i++) {
4533 			struct netmap_kring *kring = NMR(na, t)[i];
4534 
4535 			if (onoff && nm_kring_pending_on(kring))
4536 				kring->nr_mode = NKR_NETMAP_ON;
4537 			else if (!onoff && nm_kring_pending_off(kring))
4538 				kring->nr_mode = NKR_NETMAP_OFF;
4539 		}
4540 	}
4541 }
4542 
4543 /*
4544  * Module loader and unloader
4545  *
4546  * netmap_init() creates the /dev/netmap device and initializes
4547  * all global variables. Returns 0 on success, errno on failure
4548  * (though failure is not expected in practice).
4549  *
4550  * netmap_fini() destroys everything.
4551  */
4552 
4553 static struct cdev *netmap_dev; /* /dev/netmap character device. */
4554 extern struct cdevsw netmap_cdevsw;
4555 
4556 
4557 void
4558 netmap_fini(void)
4559 {
4560 	if (netmap_dev)
4561 		destroy_dev(netmap_dev);
4562 	/* we assume that there are no netmap users left */
4563 	nm_os_ifnet_fini();
4564 	netmap_uninit_bridges();
4565 	netmap_mem_fini();
4566 	NMG_LOCK_DESTROY();
4567 	nm_prinf("netmap: unloaded module.");
4568 }
4569 
4570 
4571 int
4572 netmap_init(void)
4573 {
4574 	int error;
4575 
4576 	NMG_LOCK_INIT();
4577 
4578 	error = netmap_mem_init();
4579 	if (error != 0)
4580 		goto fail;
4581 	/*
4582 	 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
4583 	 * when the module is compiled in.
4584 	 * XXX could use make_dev_credv() to get error number
4585 	 */
4586 	netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
4587 		&netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
4588 			      "netmap");
4589 	if (!netmap_dev)
4590 		goto fail;
4591 
4592 	error = netmap_init_bridges();
4593 	if (error)
4594 		goto fail;
4595 
4596 #ifdef __FreeBSD__
4597 	nm_os_vi_init_index();
4598 #endif
4599 
4600 	error = nm_os_ifnet_init();
4601 	if (error)
4602 		goto fail;
4603 
4604 #if !defined(__FreeBSD__) || defined(KLD_MODULE)
4605 	nm_prinf("netmap: loaded module");
4606 #endif
4607 	return (0);
4608 fail:
4609 	netmap_fini();
4610 	return (EINVAL); /* may be incorrect */
4611 }
4612