1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (C) 2011-2014 Matteo Landi
5 * Copyright (C) 2011-2016 Luigi Rizzo
6 * Copyright (C) 2011-2016 Giuseppe Lettieri
7 * Copyright (C) 2011-2016 Vincenzo Maffione
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32
33 /*
34 * $FreeBSD$
35 *
36 * This module supports memory mapped access to network devices,
37 * see netmap(4).
38 *
39 * The module uses a large, memory pool allocated by the kernel
40 * and accessible as mmapped memory by multiple userspace threads/processes.
41 * The memory pool contains packet buffers and "netmap rings",
42 * i.e. user-accessible copies of the interface's queues.
43 *
44 * Access to the network card works like this:
45 * 1. a process/thread issues one or more open() on /dev/netmap, to create
46 * select()able file descriptor on which events are reported.
47 * 2. on each descriptor, the process issues an ioctl() to identify
48 * the interface that should report events to the file descriptor.
49 * 3. on each descriptor, the process issues an mmap() request to
50 * map the shared memory region within the process' address space.
51 * The list of interesting queues is indicated by a location in
52 * the shared memory region.
53 * 4. using the functions in the netmap(4) userspace API, a process
54 * can look up the occupation state of a queue, access memory buffers,
55 * and retrieve received packets or enqueue packets to transmit.
56 * 5. using some ioctl()s the process can synchronize the userspace view
57 * of the queue with the actual status in the kernel. This includes both
58 * receiving the notification of new packets, and transmitting new
59 * packets on the output interface.
60 * 6. select() or poll() can be used to wait for events on individual
61 * transmit or receive queues (or all queues for a given interface).
62 *
63
64 SYNCHRONIZATION (USER)
65
66 The netmap rings and data structures may be shared among multiple
67 user threads or even independent processes.
68 Any synchronization among those threads/processes is delegated
69 to the threads themselves. Only one thread at a time can be in
70 a system call on the same netmap ring. The OS does not enforce
71 this and only guarantees against system crashes in case of
72 invalid usage.
73
74 LOCKING (INTERNAL)
75
76 Within the kernel, access to the netmap rings is protected as follows:
77
78 - a spinlock on each ring, to handle producer/consumer races on
79 RX rings attached to the host stack (against multiple host
80 threads writing from the host stack to the same ring),
81 and on 'destination' rings attached to a VALE switch
82 (i.e. RX rings in VALE ports, and TX rings in NIC/host ports)
83 protecting multiple active senders for the same destination)
84
85 - an atomic variable to guarantee that there is at most one
86 instance of *_*xsync() on the ring at any time.
87 For rings connected to user file
88 descriptors, an atomic_test_and_set() protects this, and the
89 lock on the ring is not actually used.
90 For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
91 is also used to prevent multiple executions (the driver might indeed
92 already guarantee this).
93 For NIC TX rings connected to a VALE switch, the lock arbitrates
94 access to the queue (both when allocating buffers and when pushing
95 them out).
96
97 - *xsync() should be protected against initializations of the card.
98 On FreeBSD most devices have the reset routine protected by
99 a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
100 the RING protection on rx_reset(), this should be added.
101
102 On linux there is an external lock on the tx path, which probably
103 also arbitrates access to the reset routine. XXX to be revised
104
105 - a per-interface core_lock protecting access from the host stack
106 while interfaces may be detached from netmap mode.
107 XXX there should be no need for this lock if we detach the interfaces
108 only while they are down.
109
110
111 --- VALE SWITCH ---
112
113 NMG_LOCK() serializes all modifications to switches and ports.
114 A switch cannot be deleted until all ports are gone.
115
116 For each switch, an SX lock (RWlock on linux) protects
117 deletion of ports. When configuring or deleting a new port, the
118 lock is acquired in exclusive mode (after holding NMG_LOCK).
119 When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
120 The lock is held throughout the entire forwarding cycle,
121 during which the thread may incur in a page fault.
122 Hence it is important that sleepable shared locks are used.
123
124 On the rx ring, the per-port lock is grabbed initially to reserve
125 a number of slot in the ring, then the lock is released,
126 packets are copied from source to destination, and then
127 the lock is acquired again and the receive ring is updated.
128 (A similar thing is done on the tx ring for NIC and host stack
129 ports attached to the switch)
130
131 */
132
133
134 /* --- internals ----
135 *
136 * Roadmap to the code that implements the above.
137 *
138 * > 1. a process/thread issues one or more open() on /dev/netmap, to create
139 * > select()able file descriptor on which events are reported.
140 *
141 * Internally, we allocate a netmap_priv_d structure, that will be
142 * initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
143 * structure for each open().
144 *
145 * os-specific:
146 * FreeBSD: see netmap_open() (netmap_freebsd.c)
147 * linux: see linux_netmap_open() (netmap_linux.c)
148 *
149 * > 2. on each descriptor, the process issues an ioctl() to identify
150 * > the interface that should report events to the file descriptor.
151 *
152 * Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
153 * Most important things happen in netmap_get_na() and
154 * netmap_do_regif(), called from there. Additional details can be
155 * found in the comments above those functions.
156 *
157 * In all cases, this action creates/takes-a-reference-to a
158 * netmap_*_adapter describing the port, and allocates a netmap_if
159 * and all necessary netmap rings, filling them with netmap buffers.
160 *
161 * In this phase, the sync callbacks for each ring are set (these are used
162 * in steps 5 and 6 below). The callbacks depend on the type of adapter.
163 * The adapter creation/initialization code puts them in the
164 * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they
165 * are copied from there to the netmap_kring's during netmap_do_regif(), by
166 * the nm_krings_create() callback. All the nm_krings_create callbacks
167 * actually call netmap_krings_create() to perform this and the other
168 * common stuff. netmap_krings_create() also takes care of the host rings,
169 * if needed, by setting their sync callbacks appropriately.
170 *
171 * Additional actions depend on the kind of netmap_adapter that has been
172 * registered:
173 *
174 * - netmap_hw_adapter: [netmap.c]
175 * This is a system netdev/ifp with native netmap support.
176 * The ifp is detached from the host stack by redirecting:
177 * - transmissions (from the network stack) to netmap_transmit()
178 * - receive notifications to the nm_notify() callback for
179 * this adapter. The callback is normally netmap_notify(), unless
180 * the ifp is attached to a bridge using bwrap, in which case it
181 * is netmap_bwrap_intr_notify().
182 *
183 * - netmap_generic_adapter: [netmap_generic.c]
184 * A system netdev/ifp without native netmap support.
185 *
186 * (the decision about native/non native support is taken in
187 * netmap_get_hw_na(), called by netmap_get_na())
188 *
189 * - netmap_vp_adapter [netmap_vale.c]
190 * Returned by netmap_get_bdg_na().
191 * This is a persistent or ephemeral VALE port. Ephemeral ports
192 * are created on the fly if they don't already exist, and are
193 * always attached to a bridge.
194 * Persistent VALE ports must must be created separately, and i
195 * then attached like normal NICs. The NIOCREGIF we are examining
196 * will find them only if they had previosly been created and
197 * attached (see VALE_CTL below).
198 *
199 * - netmap_pipe_adapter [netmap_pipe.c]
200 * Returned by netmap_get_pipe_na().
201 * Both pipe ends are created, if they didn't already exist.
202 *
203 * - netmap_monitor_adapter [netmap_monitor.c]
204 * Returned by netmap_get_monitor_na().
205 * If successful, the nm_sync callbacks of the monitored adapter
206 * will be intercepted by the returned monitor.
207 *
208 * - netmap_bwrap_adapter [netmap_vale.c]
209 * Cannot be obtained in this way, see VALE_CTL below
210 *
211 *
212 * os-specific:
213 * linux: we first go through linux_netmap_ioctl() to
214 * adapt the FreeBSD interface to the linux one.
215 *
216 *
217 * > 3. on each descriptor, the process issues an mmap() request to
218 * > map the shared memory region within the process' address space.
219 * > The list of interesting queues is indicated by a location in
220 * > the shared memory region.
221 *
222 * os-specific:
223 * FreeBSD: netmap_mmap_single (netmap_freebsd.c).
224 * linux: linux_netmap_mmap (netmap_linux.c).
225 *
226 * > 4. using the functions in the netmap(4) userspace API, a process
227 * > can look up the occupation state of a queue, access memory buffers,
228 * > and retrieve received packets or enqueue packets to transmit.
229 *
230 * these actions do not involve the kernel.
231 *
232 * > 5. using some ioctl()s the process can synchronize the userspace view
233 * > of the queue with the actual status in the kernel. This includes both
234 * > receiving the notification of new packets, and transmitting new
235 * > packets on the output interface.
236 *
237 * These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
238 * cases. They invoke the nm_sync callbacks on the netmap_kring
239 * structures, as initialized in step 2 and maybe later modified
240 * by a monitor. Monitors, however, will always call the original
241 * callback before doing anything else.
242 *
243 *
244 * > 6. select() or poll() can be used to wait for events on individual
245 * > transmit or receive queues (or all queues for a given interface).
246 *
247 * Implemented in netmap_poll(). This will call the same nm_sync()
248 * callbacks as in step 5 above.
249 *
250 * os-specific:
251 * linux: we first go through linux_netmap_poll() to adapt
252 * the FreeBSD interface to the linux one.
253 *
254 *
255 * ---- VALE_CTL -----
256 *
257 * VALE switches are controlled by issuing a NIOCREGIF with a non-null
258 * nr_cmd in the nmreq structure. These subcommands are handled by
259 * netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
260 * and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
261 * subcommands, respectively.
262 *
263 * Any network interface known to the system (including a persistent VALE
264 * port) can be attached to a VALE switch by issuing the
265 * NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
266 * look exactly like ephemeral VALE ports (as created in step 2 above). The
267 * attachment of other interfaces, instead, requires the creation of a
268 * netmap_bwrap_adapter. Moreover, the attached interface must be put in
269 * netmap mode. This may require the creation of a netmap_generic_adapter if
270 * we have no native support for the interface, or if generic adapters have
271 * been forced by sysctl.
272 *
273 * Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
274 * called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
275 * callback. In the case of the bwrap, the callback creates the
276 * netmap_bwrap_adapter. The initialization of the bwrap is then
277 * completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
278 * callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
279 * A generic adapter for the wrapped ifp will be created if needed, when
280 * netmap_get_bdg_na() calls netmap_get_hw_na().
281 *
282 *
283 * ---- DATAPATHS -----
284 *
285 * -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
286 *
287 * na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
288 *
289 * - tx from netmap userspace:
290 * concurrently:
291 * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
292 * kring->nm_sync() == DEVICE_netmap_txsync()
293 * 2) device interrupt handler
294 * na->nm_notify() == netmap_notify()
295 * - rx from netmap userspace:
296 * concurrently:
297 * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
298 * kring->nm_sync() == DEVICE_netmap_rxsync()
299 * 2) device interrupt handler
300 * na->nm_notify() == netmap_notify()
301 * - rx from host stack
302 * concurrently:
303 * 1) host stack
304 * netmap_transmit()
305 * na->nm_notify == netmap_notify()
306 * 2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
307 * kring->nm_sync() == netmap_rxsync_from_host
308 * netmap_rxsync_from_host(na, NULL, NULL)
309 * - tx to host stack
310 * ioctl(NIOCTXSYNC)/netmap_poll() in process context
311 * kring->nm_sync() == netmap_txsync_to_host
312 * netmap_txsync_to_host(na)
313 * nm_os_send_up()
314 * FreeBSD: na->if_input() == ether_input()
315 * linux: netif_rx() with NM_MAGIC_PRIORITY_RX
316 *
317 *
318 * -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
319 *
320 * na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
321 *
322 * - tx from netmap userspace:
323 * concurrently:
324 * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
325 * kring->nm_sync() == generic_netmap_txsync()
326 * nm_os_generic_xmit_frame()
327 * linux: dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
328 * ifp->ndo_start_xmit == generic_ndo_start_xmit()
329 * gna->save_start_xmit == orig. dev. start_xmit
330 * FreeBSD: na->if_transmit() == orig. dev if_transmit
331 * 2) generic_mbuf_destructor()
332 * na->nm_notify() == netmap_notify()
333 * - rx from netmap userspace:
334 * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
335 * kring->nm_sync() == generic_netmap_rxsync()
336 * mbq_safe_dequeue()
337 * 2) device driver
338 * generic_rx_handler()
339 * mbq_safe_enqueue()
340 * na->nm_notify() == netmap_notify()
341 * - rx from host stack
342 * FreeBSD: same as native
343 * Linux: same as native except:
344 * 1) host stack
345 * dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
346 * ifp->ndo_start_xmit == generic_ndo_start_xmit()
347 * netmap_transmit()
348 * na->nm_notify() == netmap_notify()
349 * - tx to host stack (same as native):
350 *
351 *
352 * -= VALE =-
353 *
354 * INCOMING:
355 *
356 * - VALE ports:
357 * ioctl(NIOCTXSYNC)/netmap_poll() in process context
358 * kring->nm_sync() == netmap_vp_txsync()
359 *
360 * - system device with native support:
361 * from cable:
362 * interrupt
363 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
364 * kring->nm_sync() == DEVICE_netmap_rxsync()
365 * netmap_vp_txsync()
366 * kring->nm_sync() == DEVICE_netmap_rxsync()
367 * from host stack:
368 * netmap_transmit()
369 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
370 * kring->nm_sync() == netmap_rxsync_from_host()
371 * netmap_vp_txsync()
372 *
373 * - system device with generic support:
374 * from device driver:
375 * generic_rx_handler()
376 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
377 * kring->nm_sync() == generic_netmap_rxsync()
378 * netmap_vp_txsync()
379 * kring->nm_sync() == generic_netmap_rxsync()
380 * from host stack:
381 * netmap_transmit()
382 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
383 * kring->nm_sync() == netmap_rxsync_from_host()
384 * netmap_vp_txsync()
385 *
386 * (all cases) --> nm_bdg_flush()
387 * dest_na->nm_notify() == (see below)
388 *
389 * OUTGOING:
390 *
391 * - VALE ports:
392 * concurrently:
393 * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
394 * kring->nm_sync() == netmap_vp_rxsync()
395 * 2) from nm_bdg_flush()
396 * na->nm_notify() == netmap_notify()
397 *
398 * - system device with native support:
399 * to cable:
400 * na->nm_notify() == netmap_bwrap_notify()
401 * netmap_vp_rxsync()
402 * kring->nm_sync() == DEVICE_netmap_txsync()
403 * netmap_vp_rxsync()
404 * to host stack:
405 * netmap_vp_rxsync()
406 * kring->nm_sync() == netmap_txsync_to_host
407 * netmap_vp_rxsync_locked()
408 *
409 * - system device with generic adapter:
410 * to device driver:
411 * na->nm_notify() == netmap_bwrap_notify()
412 * netmap_vp_rxsync()
413 * kring->nm_sync() == generic_netmap_txsync()
414 * netmap_vp_rxsync()
415 * to host stack:
416 * netmap_vp_rxsync()
417 * kring->nm_sync() == netmap_txsync_to_host
418 * netmap_vp_rxsync()
419 *
420 */
421
422 /*
423 * OS-specific code that is used only within this file.
424 * Other OS-specific code that must be accessed by drivers
425 * is present in netmap_kern.h
426 */
427
428 #if defined(__FreeBSD__)
429 #include <sys/cdefs.h> /* prerequisite */
430 #include <sys/types.h>
431 #include <sys/errno.h>
432 #include <sys/param.h> /* defines used in kernel.h */
433 #include <sys/kernel.h> /* types used in module initialization */
434 #include <sys/conf.h> /* cdevsw struct, UID, GID */
435 #include <sys/filio.h> /* FIONBIO */
436 #include <sys/sockio.h>
437 #include <sys/socketvar.h> /* struct socket */
438 #include <sys/malloc.h>
439 #include <sys/poll.h>
440 #include <sys/proc.h>
441 #include <sys/rwlock.h>
442 #include <sys/socket.h> /* sockaddrs */
443 #include <sys/selinfo.h>
444 #include <sys/sysctl.h>
445 #include <sys/jail.h>
446 #include <sys/epoch.h>
447 #include <net/vnet.h>
448 #include <net/if.h>
449 #include <net/if_var.h>
450 #include <net/bpf.h> /* BIOCIMMEDIATE */
451 #include <machine/bus.h> /* bus_dmamap_* */
452 #include <sys/endian.h>
453 #include <sys/refcount.h>
454 #include <net/ethernet.h> /* ETHER_BPF_MTAP */
455
456
457 #elif defined(linux)
458
459 #include "bsd_glue.h"
460
461 #elif defined(__APPLE__)
462
463 #warning OSX support is only partial
464 #include "osx_glue.h"
465
466 #elif defined (_WIN32)
467
468 #include "win_glue.h"
469
470 #else
471
472 #error Unsupported platform
473
474 #endif /* unsupported */
475
476 /*
477 * common headers
478 */
479 #include <net/netmap.h>
480 #include <dev/netmap/netmap_kern.h>
481 #include <dev/netmap/netmap_mem2.h>
482
483
484 /* user-controlled variables */
485 int netmap_verbose;
486 #ifdef CONFIG_NETMAP_DEBUG
487 int netmap_debug;
488 #endif /* CONFIG_NETMAP_DEBUG */
489
490 static int netmap_no_timestamp; /* don't timestamp on rxsync */
491 int netmap_no_pendintr = 1;
492 int netmap_txsync_retry = 2;
493 static int netmap_fwd = 0; /* force transparent forwarding */
494
495 /*
496 * netmap_admode selects the netmap mode to use.
497 * Invalid values are reset to NETMAP_ADMODE_BEST
498 */
499 enum { NETMAP_ADMODE_BEST = 0, /* use native, fallback to generic */
500 NETMAP_ADMODE_NATIVE, /* either native or none */
501 NETMAP_ADMODE_GENERIC, /* force generic */
502 NETMAP_ADMODE_LAST };
503 static int netmap_admode = NETMAP_ADMODE_BEST;
504
505 /* netmap_generic_mit controls mitigation of RX notifications for
506 * the generic netmap adapter. The value is a time interval in
507 * nanoseconds. */
508 int netmap_generic_mit = 100*1000;
509
510 /* We use by default netmap-aware qdiscs with generic netmap adapters,
511 * even if there can be a little performance hit with hardware NICs.
512 * However, using the qdisc is the safer approach, for two reasons:
513 * 1) it prevents non-fifo qdiscs to break the TX notification
514 * scheme, which is based on mbuf destructors when txqdisc is
515 * not used.
516 * 2) it makes it possible to transmit over software devices that
517 * change skb->dev, like bridge, veth, ...
518 *
519 * Anyway users looking for the best performance should
520 * use native adapters.
521 */
522 #ifdef linux
523 int netmap_generic_txqdisc = 1;
524 #endif
525
526 /* Default number of slots and queues for generic adapters. */
527 int netmap_generic_ringsize = 1024;
528 int netmap_generic_rings = 1;
529
530 /* Non-zero to enable checksum offloading in NIC drivers */
531 int netmap_generic_hwcsum = 0;
532
533 /* Non-zero if ptnet devices are allowed to use virtio-net headers. */
534 int ptnet_vnet_hdr = 1;
535
536 /*
537 * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated
538 * in some other operating systems
539 */
540 SYSBEGIN(main_init);
541
542 SYSCTL_DECL(_dev_netmap);
543 SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
544 "Netmap args");
545 SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
546 CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
547 #ifdef CONFIG_NETMAP_DEBUG
548 SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
549 CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
550 #endif /* CONFIG_NETMAP_DEBUG */
551 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
552 CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
553 SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
554 0, "Always look for new received packets.");
555 SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
556 &netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");
557
558 SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
559 "Force NR_FORWARD mode");
560 SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
561 "Adapter mode. 0 selects the best option available,"
562 "1 forces native adapter, 2 forces emulated adapter");
563 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
564 0, "Hardware checksums. 0 to disable checksum generation by the NIC (default),"
565 "1 to enable checksum generation by the NIC");
566 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
567 0, "RX notification interval in nanoseconds");
568 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
569 &netmap_generic_ringsize, 0,
570 "Number of per-ring slots for emulated netmap mode");
571 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
572 &netmap_generic_rings, 0,
573 "Number of TX/RX queues for emulated netmap adapters");
574 #ifdef linux
575 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
576 &netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
577 #endif
578 SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
579 0, "Allow ptnet devices to use virtio-net headers");
580
581 SYSEND;
582
583 NMG_LOCK_T netmap_global_lock;
584
585 /*
586 * mark the ring as stopped, and run through the locks
587 * to make sure other users get to see it.
588 * stopped must be either NR_KR_STOPPED (for unbounded stop)
589 * of NR_KR_LOCKED (brief stop for mutual exclusion purposes)
590 */
591 static void
netmap_disable_ring(struct netmap_kring * kr,int stopped)592 netmap_disable_ring(struct netmap_kring *kr, int stopped)
593 {
594 nm_kr_stop(kr, stopped);
595 // XXX check if nm_kr_stop is sufficient
596 mtx_lock(&kr->q_lock);
597 mtx_unlock(&kr->q_lock);
598 nm_kr_put(kr);
599 }
600
601 /* stop or enable a single ring */
602 void
netmap_set_ring(struct netmap_adapter * na,u_int ring_id,enum txrx t,int stopped)603 netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
604 {
605 if (stopped)
606 netmap_disable_ring(NMR(na, t)[ring_id], stopped);
607 else
608 NMR(na, t)[ring_id]->nkr_stopped = 0;
609 }
610
611
612 /* stop or enable all the rings of na */
613 void
netmap_set_all_rings(struct netmap_adapter * na,int stopped)614 netmap_set_all_rings(struct netmap_adapter *na, int stopped)
615 {
616 int i;
617 enum txrx t;
618
619 if (!nm_netmap_on(na))
620 return;
621
622 if (netmap_verbose) {
623 nm_prinf("%s: %sable all rings", na->name,
624 (stopped ? "dis" : "en"));
625 }
626 for_rx_tx(t) {
627 for (i = 0; i < netmap_real_rings(na, t); i++) {
628 netmap_set_ring(na, i, t, stopped);
629 }
630 }
631 }
632
633 /*
634 * Convenience function used in drivers. Waits for current txsync()s/rxsync()s
635 * to finish and prevents any new one from starting. Call this before turning
636 * netmap mode off, or before removing the hardware rings (e.g., on module
637 * onload).
638 */
639 void
netmap_disable_all_rings(struct ifnet * ifp)640 netmap_disable_all_rings(struct ifnet *ifp)
641 {
642 if (NM_NA_VALID(ifp)) {
643 netmap_set_all_rings(NA(ifp), NM_KR_LOCKED);
644 }
645 }
646
647 /*
648 * Convenience function used in drivers. Re-enables rxsync and txsync on the
649 * adapter's rings In linux drivers, this should be placed near each
650 * napi_enable().
651 */
652 void
netmap_enable_all_rings(struct ifnet * ifp)653 netmap_enable_all_rings(struct ifnet *ifp)
654 {
655 if (NM_NA_VALID(ifp)) {
656 netmap_set_all_rings(NA(ifp), 0 /* enabled */);
657 }
658 }
659
660 void
netmap_make_zombie(struct ifnet * ifp)661 netmap_make_zombie(struct ifnet *ifp)
662 {
663 if (NM_NA_VALID(ifp)) {
664 struct netmap_adapter *na = NA(ifp);
665 netmap_set_all_rings(na, NM_KR_LOCKED);
666 na->na_flags |= NAF_ZOMBIE;
667 netmap_set_all_rings(na, 0);
668 }
669 }
670
671 void
netmap_undo_zombie(struct ifnet * ifp)672 netmap_undo_zombie(struct ifnet *ifp)
673 {
674 if (NM_NA_VALID(ifp)) {
675 struct netmap_adapter *na = NA(ifp);
676 if (na->na_flags & NAF_ZOMBIE) {
677 netmap_set_all_rings(na, NM_KR_LOCKED);
678 na->na_flags &= ~NAF_ZOMBIE;
679 netmap_set_all_rings(na, 0);
680 }
681 }
682 }
683
684 /*
685 * generic bound_checking function
686 */
687 u_int
nm_bound_var(u_int * v,u_int dflt,u_int lo,u_int hi,const char * msg)688 nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
689 {
690 u_int oldv = *v;
691 const char *op = NULL;
692
693 if (dflt < lo)
694 dflt = lo;
695 if (dflt > hi)
696 dflt = hi;
697 if (oldv < lo) {
698 *v = dflt;
699 op = "Bump";
700 } else if (oldv > hi) {
701 *v = hi;
702 op = "Clamp";
703 }
704 if (op && msg)
705 nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
706 return *v;
707 }
708
709
710 /*
711 * packet-dump function, user-supplied or static buffer.
712 * The destination buffer must be at least 30+4*len
713 */
714 const char *
nm_dump_buf(char * p,int len,int lim,char * dst)715 nm_dump_buf(char *p, int len, int lim, char *dst)
716 {
717 static char _dst[8192];
718 int i, j, i0;
719 static char hex[] ="0123456789abcdef";
720 char *o; /* output position */
721
722 #define P_HI(x) hex[((x) & 0xf0)>>4]
723 #define P_LO(x) hex[((x) & 0xf)]
724 #define P_C(x) ((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
725 if (!dst)
726 dst = _dst;
727 if (lim <= 0 || lim > len)
728 lim = len;
729 o = dst;
730 sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
731 o += strlen(o);
732 /* hexdump routine */
733 for (i = 0; i < lim; ) {
734 sprintf(o, "%5d: ", i);
735 o += strlen(o);
736 memset(o, ' ', 48);
737 i0 = i;
738 for (j=0; j < 16 && i < lim; i++, j++) {
739 o[j*3] = P_HI(p[i]);
740 o[j*3+1] = P_LO(p[i]);
741 }
742 i = i0;
743 for (j=0; j < 16 && i < lim; i++, j++)
744 o[j + 48] = P_C(p[i]);
745 o[j+48] = '\n';
746 o += j+49;
747 }
748 *o = '\0';
749 #undef P_HI
750 #undef P_LO
751 #undef P_C
752 return dst;
753 }
754
755
756 /*
757 * Fetch configuration from the device, to cope with dynamic
758 * reconfigurations after loading the module.
759 */
760 /* call with NMG_LOCK held */
761 int
netmap_update_config(struct netmap_adapter * na)762 netmap_update_config(struct netmap_adapter *na)
763 {
764 struct nm_config_info info;
765
766 bzero(&info, sizeof(info));
767 if (na->nm_config == NULL ||
768 na->nm_config(na, &info)) {
769 /* take whatever we had at init time */
770 info.num_tx_rings = na->num_tx_rings;
771 info.num_tx_descs = na->num_tx_desc;
772 info.num_rx_rings = na->num_rx_rings;
773 info.num_rx_descs = na->num_rx_desc;
774 info.rx_buf_maxsize = na->rx_buf_maxsize;
775 }
776
777 if (na->num_tx_rings == info.num_tx_rings &&
778 na->num_tx_desc == info.num_tx_descs &&
779 na->num_rx_rings == info.num_rx_rings &&
780 na->num_rx_desc == info.num_rx_descs &&
781 na->rx_buf_maxsize == info.rx_buf_maxsize)
782 return 0; /* nothing changed */
783 if (na->active_fds == 0) {
784 na->num_tx_rings = info.num_tx_rings;
785 na->num_tx_desc = info.num_tx_descs;
786 na->num_rx_rings = info.num_rx_rings;
787 na->num_rx_desc = info.num_rx_descs;
788 na->rx_buf_maxsize = info.rx_buf_maxsize;
789 if (netmap_verbose)
790 nm_prinf("configuration changed for %s: txring %d x %d, "
791 "rxring %d x %d, rxbufsz %d",
792 na->name, na->num_tx_rings, na->num_tx_desc,
793 na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
794 return 0;
795 }
796 nm_prerr("WARNING: configuration changed for %s while active: "
797 "txring %d x %d, rxring %d x %d, rxbufsz %d",
798 na->name, info.num_tx_rings, info.num_tx_descs,
799 info.num_rx_rings, info.num_rx_descs,
800 info.rx_buf_maxsize);
801 return 1;
802 }
803
804 /* nm_sync callbacks for the host rings */
805 static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
806 static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);
807
808 /* create the krings array and initialize the fields common to all adapters.
809 * The array layout is this:
810 *
811 * +----------+
812 * na->tx_rings ----->| | \
813 * | | } na->num_tx_ring
814 * | | /
815 * +----------+
816 * | | host tx kring
817 * na->rx_rings ----> +----------+
818 * | | \
819 * | | } na->num_rx_rings
820 * | | /
821 * +----------+
822 * | | host rx kring
823 * +----------+
824 * na->tailroom ----->| | \
825 * | | } tailroom bytes
826 * | | /
827 * +----------+
828 *
829 * Note: for compatibility, host krings are created even when not needed.
830 * The tailroom space is currently used by vale ports for allocating leases.
831 */
832 /* call with NMG_LOCK held */
833 int
netmap_krings_create(struct netmap_adapter * na,u_int tailroom)834 netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
835 {
836 u_int i, len, ndesc;
837 struct netmap_kring *kring;
838 u_int n[NR_TXRX];
839 enum txrx t;
840 int err = 0;
841
842 if (na->tx_rings != NULL) {
843 if (netmap_debug & NM_DEBUG_ON)
844 nm_prerr("warning: krings were already created");
845 return 0;
846 }
847
848 /* account for the (possibly fake) host rings */
849 n[NR_TX] = netmap_all_rings(na, NR_TX);
850 n[NR_RX] = netmap_all_rings(na, NR_RX);
851
852 len = (n[NR_TX] + n[NR_RX]) *
853 (sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
854 + tailroom;
855
856 na->tx_rings = nm_os_malloc((size_t)len);
857 if (na->tx_rings == NULL) {
858 nm_prerr("Cannot allocate krings");
859 return ENOMEM;
860 }
861 na->rx_rings = na->tx_rings + n[NR_TX];
862 na->tailroom = na->rx_rings + n[NR_RX];
863
864 /* link the krings in the krings array */
865 kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
866 for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
867 na->tx_rings[i] = kring;
868 kring++;
869 }
870
871 /*
872 * All fields in krings are 0 except the one initialized below.
873 * but better be explicit on important kring fields.
874 */
875 for_rx_tx(t) {
876 ndesc = nma_get_ndesc(na, t);
877 for (i = 0; i < n[t]; i++) {
878 kring = NMR(na, t)[i];
879 bzero(kring, sizeof(*kring));
880 kring->notify_na = na;
881 kring->ring_id = i;
882 kring->tx = t;
883 kring->nkr_num_slots = ndesc;
884 kring->nr_mode = NKR_NETMAP_OFF;
885 kring->nr_pending_mode = NKR_NETMAP_OFF;
886 if (i < nma_get_nrings(na, t)) {
887 kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
888 } else {
889 if (!(na->na_flags & NAF_HOST_RINGS))
890 kring->nr_kflags |= NKR_FAKERING;
891 kring->nm_sync = (t == NR_TX ?
892 netmap_txsync_to_host:
893 netmap_rxsync_from_host);
894 }
895 kring->nm_notify = na->nm_notify;
896 kring->rhead = kring->rcur = kring->nr_hwcur = 0;
897 /*
898 * IMPORTANT: Always keep one slot empty.
899 */
900 kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
901 snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
902 nm_txrx2str(t), i);
903 nm_prdis("ktx %s h %d c %d t %d",
904 kring->name, kring->rhead, kring->rcur, kring->rtail);
905 err = nm_os_selinfo_init(&kring->si, kring->name);
906 if (err) {
907 netmap_krings_delete(na);
908 return err;
909 }
910 mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
911 kring->na = na; /* setting this field marks the mutex as initialized */
912 }
913 err = nm_os_selinfo_init(&na->si[t], na->name);
914 if (err) {
915 netmap_krings_delete(na);
916 return err;
917 }
918 }
919
920 return 0;
921 }
922
923
924 /* undo the actions performed by netmap_krings_create */
925 /* call with NMG_LOCK held */
926 void
netmap_krings_delete(struct netmap_adapter * na)927 netmap_krings_delete(struct netmap_adapter *na)
928 {
929 struct netmap_kring **kring = na->tx_rings;
930 enum txrx t;
931
932 if (na->tx_rings == NULL) {
933 if (netmap_debug & NM_DEBUG_ON)
934 nm_prerr("warning: krings were already deleted");
935 return;
936 }
937
938 for_rx_tx(t)
939 nm_os_selinfo_uninit(&na->si[t]);
940
941 /* we rely on the krings layout described above */
942 for ( ; kring != na->tailroom; kring++) {
943 if ((*kring)->na != NULL)
944 mtx_destroy(&(*kring)->q_lock);
945 nm_os_selinfo_uninit(&(*kring)->si);
946 }
947 nm_os_free(na->tx_rings);
948 na->tx_rings = na->rx_rings = na->tailroom = NULL;
949 }
950
951
952 /*
953 * Destructor for NIC ports. They also have an mbuf queue
954 * on the rings connected to the host so we need to purge
955 * them first.
956 */
957 /* call with NMG_LOCK held */
958 void
netmap_hw_krings_delete(struct netmap_adapter * na)959 netmap_hw_krings_delete(struct netmap_adapter *na)
960 {
961 u_int lim = netmap_real_rings(na, NR_RX), i;
962
963 for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
964 struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
965 nm_prdis("destroy sw mbq with len %d", mbq_len(q));
966 mbq_purge(q);
967 mbq_safe_fini(q);
968 }
969 netmap_krings_delete(na);
970 }
971
972 static void
netmap_mem_drop(struct netmap_adapter * na)973 netmap_mem_drop(struct netmap_adapter *na)
974 {
975 int last = netmap_mem_deref(na->nm_mem, na);
976 /* if the native allocator had been overrided on regif,
977 * restore it now and drop the temporary one
978 */
979 if (last && na->nm_mem_prev) {
980 netmap_mem_put(na->nm_mem);
981 na->nm_mem = na->nm_mem_prev;
982 na->nm_mem_prev = NULL;
983 }
984 }
985
986 /*
987 * Undo everything that was done in netmap_do_regif(). In particular,
988 * call nm_register(ifp,0) to stop netmap mode on the interface and
989 * revert to normal operation.
990 */
991 /* call with NMG_LOCK held */
992 static void netmap_unset_ringid(struct netmap_priv_d *);
993 static void netmap_krings_put(struct netmap_priv_d *);
994 void
netmap_do_unregif(struct netmap_priv_d * priv)995 netmap_do_unregif(struct netmap_priv_d *priv)
996 {
997 struct netmap_adapter *na = priv->np_na;
998
999 NMG_LOCK_ASSERT();
1000 na->active_fds--;
1001 /* unset nr_pending_mode and possibly release exclusive mode */
1002 netmap_krings_put(priv);
1003
1004 #ifdef WITH_MONITOR
1005 /* XXX check whether we have to do something with monitor
1006 * when rings change nr_mode. */
1007 if (na->active_fds <= 0) {
1008 /* walk through all the rings and tell any monitor
1009 * that the port is going to exit netmap mode
1010 */
1011 netmap_monitor_stop(na);
1012 }
1013 #endif
1014
1015 if (na->active_fds <= 0 || nm_kring_pending(priv)) {
1016 na->nm_register(na, 0);
1017 }
1018
1019 /* delete rings and buffers that are no longer needed */
1020 netmap_mem_rings_delete(na);
1021
1022 if (na->active_fds <= 0) { /* last instance */
1023 /*
1024 * (TO CHECK) We enter here
1025 * when the last reference to this file descriptor goes
1026 * away. This means we cannot have any pending poll()
1027 * or interrupt routine operating on the structure.
1028 * XXX The file may be closed in a thread while
1029 * another thread is using it.
1030 * Linux keeps the file opened until the last reference
1031 * by any outstanding ioctl/poll or mmap is gone.
1032 * FreeBSD does not track mmap()s (but we do) and
1033 * wakes up any sleeping poll(). Need to check what
1034 * happens if the close() occurs while a concurrent
1035 * syscall is running.
1036 */
1037 if (netmap_debug & NM_DEBUG_ON)
1038 nm_prinf("deleting last instance for %s", na->name);
1039
1040 if (nm_netmap_on(na)) {
1041 nm_prerr("BUG: netmap on while going to delete the krings");
1042 }
1043
1044 na->nm_krings_delete(na);
1045
1046 /* restore the default number of host tx and rx rings */
1047 if (na->na_flags & NAF_HOST_RINGS) {
1048 na->num_host_tx_rings = 1;
1049 na->num_host_rx_rings = 1;
1050 } else {
1051 na->num_host_tx_rings = 0;
1052 na->num_host_rx_rings = 0;
1053 }
1054 }
1055
1056 /* possibily decrement counter of tx_si/rx_si users */
1057 netmap_unset_ringid(priv);
1058 /* delete the nifp */
1059 netmap_mem_if_delete(na, priv->np_nifp);
1060 /* drop the allocator */
1061 netmap_mem_drop(na);
1062 /* mark the priv as unregistered */
1063 priv->np_na = NULL;
1064 priv->np_nifp = NULL;
1065 }
1066
1067 struct netmap_priv_d*
netmap_priv_new(void)1068 netmap_priv_new(void)
1069 {
1070 struct netmap_priv_d *priv;
1071
1072 priv = nm_os_malloc(sizeof(struct netmap_priv_d));
1073 if (priv == NULL)
1074 return NULL;
1075 priv->np_refs = 1;
1076 nm_os_get_module();
1077 return priv;
1078 }
1079
1080 /*
1081 * Destructor of the netmap_priv_d, called when the fd is closed
1082 * Action: undo all the things done by NIOCREGIF,
1083 * On FreeBSD we need to track whether there are active mmap()s,
1084 * and we use np_active_mmaps for that. On linux, the field is always 0.
1085 * Return: 1 if we can free priv, 0 otherwise.
1086 *
1087 */
1088 /* call with NMG_LOCK held */
1089 void
netmap_priv_delete(struct netmap_priv_d * priv)1090 netmap_priv_delete(struct netmap_priv_d *priv)
1091 {
1092 struct netmap_adapter *na = priv->np_na;
1093
1094 /* number of active references to this fd */
1095 if (--priv->np_refs > 0) {
1096 return;
1097 }
1098 nm_os_put_module();
1099 if (na) {
1100 netmap_do_unregif(priv);
1101 }
1102 netmap_unget_na(na, priv->np_ifp);
1103 bzero(priv, sizeof(*priv)); /* for safety */
1104 nm_os_free(priv);
1105 }
1106
1107
1108 /* call with NMG_LOCK *not* held */
1109 void
netmap_dtor(void * data)1110 netmap_dtor(void *data)
1111 {
1112 struct netmap_priv_d *priv = data;
1113
1114 NMG_LOCK();
1115 netmap_priv_delete(priv);
1116 NMG_UNLOCK();
1117 }
1118
1119
1120 /*
1121 * Handlers for synchronization of the rings from/to the host stack.
1122 * These are associated to a network interface and are just another
1123 * ring pair managed by userspace.
1124 *
1125 * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
1126 * flags):
1127 *
1128 * - Before releasing buffers on hw RX rings, the application can mark
1129 * them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
1130 * will be forwarded to the host stack, similarly to what happened if
1131 * the application moved them to the host TX ring.
1132 *
1133 * - Before releasing buffers on the host RX ring, the application can
1134 * mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
1135 * they will be forwarded to the hw TX rings, saving the application
1136 * from doing the same task in user-space.
1137 *
1138 * Transparent fowarding can be enabled per-ring, by setting the NR_FORWARD
1139 * flag, or globally with the netmap_fwd sysctl.
1140 *
1141 * The transfer NIC --> host is relatively easy, just encapsulate
1142 * into mbufs and we are done. The host --> NIC side is slightly
1143 * harder because there might not be room in the tx ring so it
1144 * might take a while before releasing the buffer.
1145 */
1146
1147
1148 /*
1149 * Pass a whole queue of mbufs to the host stack as coming from 'dst'
1150 * We do not need to lock because the queue is private.
1151 * After this call the queue is empty.
1152 */
1153 static void
netmap_send_up(struct ifnet * dst,struct mbq * q)1154 netmap_send_up(struct ifnet *dst, struct mbq *q)
1155 {
1156 struct mbuf *m;
1157 struct mbuf *head = NULL, *prev = NULL;
1158 #ifdef __FreeBSD__
1159 struct epoch_tracker et;
1160
1161 NET_EPOCH_ENTER(et);
1162 #endif /* __FreeBSD__ */
1163 /* Send packets up, outside the lock; head/prev machinery
1164 * is only useful for Windows. */
1165 while ((m = mbq_dequeue(q)) != NULL) {
1166 if (netmap_debug & NM_DEBUG_HOST)
1167 nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
1168 prev = nm_os_send_up(dst, m, prev);
1169 if (head == NULL)
1170 head = prev;
1171 }
1172 if (head)
1173 nm_os_send_up(dst, NULL, head);
1174 #ifdef __FreeBSD__
1175 NET_EPOCH_EXIT(et);
1176 #endif /* __FreeBSD__ */
1177 mbq_fini(q);
1178 }
1179
1180
1181 /*
1182 * Scan the buffers from hwcur to ring->head, and put a copy of those
1183 * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
1184 * Drop remaining packets in the unlikely event
1185 * of an mbuf shortage.
1186 */
1187 static void
netmap_grab_packets(struct netmap_kring * kring,struct mbq * q,int force)1188 netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
1189 {
1190 u_int const lim = kring->nkr_num_slots - 1;
1191 u_int const head = kring->rhead;
1192 u_int n;
1193 struct netmap_adapter *na = kring->na;
1194
1195 for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
1196 struct mbuf *m;
1197 struct netmap_slot *slot = &kring->ring->slot[n];
1198
1199 if ((slot->flags & NS_FORWARD) == 0 && !force)
1200 continue;
1201 if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
1202 nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
1203 continue;
1204 }
1205 slot->flags &= ~NS_FORWARD; // XXX needed ?
1206 /* XXX TODO: adapt to the case of a multisegment packet */
1207 m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);
1208
1209 if (m == NULL)
1210 break;
1211 mbq_enqueue(q, m);
1212 }
1213 }
1214
1215 static inline int
_nm_may_forward(struct netmap_kring * kring)1216 _nm_may_forward(struct netmap_kring *kring)
1217 {
1218 return ((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
1219 kring->na->na_flags & NAF_HOST_RINGS &&
1220 kring->tx == NR_RX);
1221 }
1222
1223 static inline int
nm_may_forward_up(struct netmap_kring * kring)1224 nm_may_forward_up(struct netmap_kring *kring)
1225 {
1226 return _nm_may_forward(kring) &&
1227 kring->ring_id != kring->na->num_rx_rings;
1228 }
1229
1230 static inline int
nm_may_forward_down(struct netmap_kring * kring,int sync_flags)1231 nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
1232 {
1233 return _nm_may_forward(kring) &&
1234 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
1235 kring->ring_id == kring->na->num_rx_rings;
1236 }
1237
1238 /*
1239 * Send to the NIC rings packets marked NS_FORWARD between
1240 * kring->nr_hwcur and kring->rhead.
1241 * Called under kring->rx_queue.lock on the sw rx ring.
1242 *
1243 * It can only be called if the user opened all the TX hw rings,
1244 * see NAF_CAN_FORWARD_DOWN flag.
1245 * We can touch the TX netmap rings (slots, head and cur) since
1246 * we are in poll/ioctl system call context, and the application
1247 * is not supposed to touch the ring (using a different thread)
1248 * during the execution of the system call.
1249 */
1250 static u_int
netmap_sw_to_nic(struct netmap_adapter * na)1251 netmap_sw_to_nic(struct netmap_adapter *na)
1252 {
1253 struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
1254 struct netmap_slot *rxslot = kring->ring->slot;
1255 u_int i, rxcur = kring->nr_hwcur;
1256 u_int const head = kring->rhead;
1257 u_int const src_lim = kring->nkr_num_slots - 1;
1258 u_int sent = 0;
1259
1260 /* scan rings to find space, then fill as much as possible */
1261 for (i = 0; i < na->num_tx_rings; i++) {
1262 struct netmap_kring *kdst = na->tx_rings[i];
1263 struct netmap_ring *rdst = kdst->ring;
1264 u_int const dst_lim = kdst->nkr_num_slots - 1;
1265
1266 /* XXX do we trust ring or kring->rcur,rtail ? */
1267 for (; rxcur != head && !nm_ring_empty(rdst);
1268 rxcur = nm_next(rxcur, src_lim) ) {
1269 struct netmap_slot *src, *dst, tmp;
1270 u_int dst_head = rdst->head;
1271
1272 src = &rxslot[rxcur];
1273 if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
1274 continue;
1275
1276 sent++;
1277
1278 dst = &rdst->slot[dst_head];
1279
1280 tmp = *src;
1281
1282 src->buf_idx = dst->buf_idx;
1283 src->flags = NS_BUF_CHANGED;
1284
1285 dst->buf_idx = tmp.buf_idx;
1286 dst->len = tmp.len;
1287 dst->flags = NS_BUF_CHANGED;
1288
1289 rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
1290 }
1291 /* if (sent) XXX txsync ? it would be just an optimization */
1292 }
1293 return sent;
1294 }
1295
1296
1297 /*
1298 * netmap_txsync_to_host() passes packets up. We are called from a
1299 * system call in user process context, and the only contention
1300 * can be among multiple user threads erroneously calling
1301 * this routine concurrently.
1302 */
1303 static int
netmap_txsync_to_host(struct netmap_kring * kring,int flags)1304 netmap_txsync_to_host(struct netmap_kring *kring, int flags)
1305 {
1306 struct netmap_adapter *na = kring->na;
1307 u_int const lim = kring->nkr_num_slots - 1;
1308 u_int const head = kring->rhead;
1309 struct mbq q;
1310
1311 /* Take packets from hwcur to head and pass them up.
1312 * Force hwcur = head since netmap_grab_packets() stops at head
1313 */
1314 mbq_init(&q);
1315 netmap_grab_packets(kring, &q, 1 /* force */);
1316 nm_prdis("have %d pkts in queue", mbq_len(&q));
1317 kring->nr_hwcur = head;
1318 kring->nr_hwtail = head + lim;
1319 if (kring->nr_hwtail > lim)
1320 kring->nr_hwtail -= lim + 1;
1321
1322 netmap_send_up(na->ifp, &q);
1323 return 0;
1324 }
1325
1326
1327 /*
1328 * rxsync backend for packets coming from the host stack.
1329 * They have been put in kring->rx_queue by netmap_transmit().
1330 * We protect access to the kring using kring->rx_queue.lock
1331 *
1332 * also moves to the nic hw rings any packet the user has marked
1333 * for transparent-mode forwarding, then sets the NR_FORWARD
1334 * flag in the kring to let the caller push them out
1335 */
1336 static int
netmap_rxsync_from_host(struct netmap_kring * kring,int flags)1337 netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
1338 {
1339 struct netmap_adapter *na = kring->na;
1340 struct netmap_ring *ring = kring->ring;
1341 u_int nm_i, n;
1342 u_int const lim = kring->nkr_num_slots - 1;
1343 u_int const head = kring->rhead;
1344 int ret = 0;
1345 struct mbq *q = &kring->rx_queue, fq;
1346
1347 mbq_init(&fq); /* fq holds packets to be freed */
1348
1349 mbq_lock(q);
1350
1351 /* First part: import newly received packets */
1352 n = mbq_len(q);
1353 if (n) { /* grab packets from the queue */
1354 struct mbuf *m;
1355 uint32_t stop_i;
1356
1357 nm_i = kring->nr_hwtail;
1358 stop_i = nm_prev(kring->nr_hwcur, lim);
1359 while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
1360 int len = MBUF_LEN(m);
1361 struct netmap_slot *slot = &ring->slot[nm_i];
1362
1363 m_copydata(m, 0, len, NMB(na, slot));
1364 nm_prdis("nm %d len %d", nm_i, len);
1365 if (netmap_debug & NM_DEBUG_HOST)
1366 nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));
1367
1368 slot->len = len;
1369 slot->flags = 0;
1370 nm_i = nm_next(nm_i, lim);
1371 mbq_enqueue(&fq, m);
1372 }
1373 kring->nr_hwtail = nm_i;
1374 }
1375
1376 /*
1377 * Second part: skip past packets that userspace has released.
1378 */
1379 nm_i = kring->nr_hwcur;
1380 if (nm_i != head) { /* something was released */
1381 if (nm_may_forward_down(kring, flags)) {
1382 ret = netmap_sw_to_nic(na);
1383 if (ret > 0) {
1384 kring->nr_kflags |= NR_FORWARD;
1385 ret = 0;
1386 }
1387 }
1388 kring->nr_hwcur = head;
1389 }
1390
1391 mbq_unlock(q);
1392
1393 mbq_purge(&fq);
1394 mbq_fini(&fq);
1395
1396 return ret;
1397 }
1398
1399
1400 /* Get a netmap adapter for the port.
1401 *
1402 * If it is possible to satisfy the request, return 0
1403 * with *na containing the netmap adapter found.
1404 * Otherwise return an error code, with *na containing NULL.
1405 *
1406 * When the port is attached to a bridge, we always return
1407 * EBUSY.
1408 * Otherwise, if the port is already bound to a file descriptor,
1409 * then we unconditionally return the existing adapter into *na.
1410 * In all the other cases, we return (into *na) either native,
1411 * generic or NULL, according to the following table:
1412 *
1413 * native_support
1414 * active_fds dev.netmap.admode YES NO
1415 * -------------------------------------------------------
1416 * >0 * NA(ifp) NA(ifp)
1417 *
1418 * 0 NETMAP_ADMODE_BEST NATIVE GENERIC
1419 * 0 NETMAP_ADMODE_NATIVE NATIVE NULL
1420 * 0 NETMAP_ADMODE_GENERIC GENERIC GENERIC
1421 *
1422 */
1423 static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
1424 int
netmap_get_hw_na(struct ifnet * ifp,struct netmap_mem_d * nmd,struct netmap_adapter ** na)1425 netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
1426 {
1427 /* generic support */
1428 int i = netmap_admode; /* Take a snapshot. */
1429 struct netmap_adapter *prev_na;
1430 int error = 0;
1431
1432 *na = NULL; /* default */
1433
1434 /* reset in case of invalid value */
1435 if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
1436 i = netmap_admode = NETMAP_ADMODE_BEST;
1437
1438 if (NM_NA_VALID(ifp)) {
1439 prev_na = NA(ifp);
1440 /* If an adapter already exists, return it if
1441 * there are active file descriptors or if
1442 * netmap is not forced to use generic
1443 * adapters.
1444 */
1445 if (NETMAP_OWNED_BY_ANY(prev_na)
1446 || i != NETMAP_ADMODE_GENERIC
1447 || prev_na->na_flags & NAF_FORCE_NATIVE
1448 #ifdef WITH_PIPES
1449 /* ugly, but we cannot allow an adapter switch
1450 * if some pipe is referring to this one
1451 */
1452 || prev_na->na_next_pipe > 0
1453 #endif
1454 ) {
1455 *na = prev_na;
1456 goto assign_mem;
1457 }
1458 }
1459
1460 /* If there isn't native support and netmap is not allowed
1461 * to use generic adapters, we cannot satisfy the request.
1462 */
1463 if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
1464 return EOPNOTSUPP;
1465
1466 /* Otherwise, create a generic adapter and return it,
1467 * saving the previously used netmap adapter, if any.
1468 *
1469 * Note that here 'prev_na', if not NULL, MUST be a
1470 * native adapter, and CANNOT be a generic one. This is
1471 * true because generic adapters are created on demand, and
1472 * destroyed when not used anymore. Therefore, if the adapter
1473 * currently attached to an interface 'ifp' is generic, it
1474 * must be that
1475 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
1476 * Consequently, if NA(ifp) is generic, we will enter one of
1477 * the branches above. This ensures that we never override
1478 * a generic adapter with another generic adapter.
1479 */
1480 error = generic_netmap_attach(ifp);
1481 if (error)
1482 return error;
1483
1484 *na = NA(ifp);
1485
1486 assign_mem:
1487 if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
1488 (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
1489 (*na)->nm_mem_prev = (*na)->nm_mem;
1490 (*na)->nm_mem = netmap_mem_get(nmd);
1491 }
1492
1493 return 0;
1494 }
1495
1496 /*
1497 * MUST BE CALLED UNDER NMG_LOCK()
1498 *
1499 * Get a refcounted reference to a netmap adapter attached
1500 * to the interface specified by req.
1501 * This is always called in the execution of an ioctl().
1502 *
1503 * Return ENXIO if the interface specified by the request does
1504 * not exist, ENOTSUP if netmap is not supported by the interface,
1505 * EBUSY if the interface is already attached to a bridge,
1506 * EINVAL if parameters are invalid, ENOMEM if needed resources
1507 * could not be allocated.
1508 * If successful, hold a reference to the netmap adapter.
1509 *
1510 * If the interface specified by req is a system one, also keep
1511 * a reference to it and return a valid *ifp.
1512 */
1513 int
netmap_get_na(struct nmreq_header * hdr,struct netmap_adapter ** na,struct ifnet ** ifp,struct netmap_mem_d * nmd,int create)1514 netmap_get_na(struct nmreq_header *hdr,
1515 struct netmap_adapter **na, struct ifnet **ifp,
1516 struct netmap_mem_d *nmd, int create)
1517 {
1518 struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1519 int error = 0;
1520 struct netmap_adapter *ret = NULL;
1521 int nmd_ref = 0;
1522
1523 *na = NULL; /* default return value */
1524 *ifp = NULL;
1525
1526 if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1527 return EINVAL;
1528 }
1529
1530 if (req->nr_mode == NR_REG_PIPE_MASTER ||
1531 req->nr_mode == NR_REG_PIPE_SLAVE) {
1532 /* Do not accept deprecated pipe modes. */
1533 nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
1534 return EINVAL;
1535 }
1536
1537 NMG_LOCK_ASSERT();
1538
1539 /* if the request contain a memid, try to find the
1540 * corresponding memory region
1541 */
1542 if (nmd == NULL && req->nr_mem_id) {
1543 nmd = netmap_mem_find(req->nr_mem_id);
1544 if (nmd == NULL)
1545 return EINVAL;
1546 /* keep the rereference */
1547 nmd_ref = 1;
1548 }
1549
1550 /* We cascade through all possible types of netmap adapter.
1551 * All netmap_get_*_na() functions return an error and an na,
1552 * with the following combinations:
1553 *
1554 * error na
1555 * 0 NULL type doesn't match
1556 * !0 NULL type matches, but na creation/lookup failed
1557 * 0 !NULL type matches and na created/found
1558 * !0 !NULL impossible
1559 */
1560 error = netmap_get_null_na(hdr, na, nmd, create);
1561 if (error || *na != NULL)
1562 goto out;
1563
1564 /* try to see if this is a monitor port */
1565 error = netmap_get_monitor_na(hdr, na, nmd, create);
1566 if (error || *na != NULL)
1567 goto out;
1568
1569 /* try to see if this is a pipe port */
1570 error = netmap_get_pipe_na(hdr, na, nmd, create);
1571 if (error || *na != NULL)
1572 goto out;
1573
1574 /* try to see if this is a bridge port */
1575 error = netmap_get_vale_na(hdr, na, nmd, create);
1576 if (error)
1577 goto out;
1578
1579 if (*na != NULL) /* valid match in netmap_get_bdg_na() */
1580 goto out;
1581
1582 /*
1583 * This must be a hardware na, lookup the name in the system.
1584 * Note that by hardware we actually mean "it shows up in ifconfig".
1585 * This may still be a tap, a veth/epair, or even a
1586 * persistent VALE port.
1587 */
1588 *ifp = ifunit_ref(hdr->nr_name);
1589 if (*ifp == NULL) {
1590 error = ENXIO;
1591 goto out;
1592 }
1593
1594 error = netmap_get_hw_na(*ifp, nmd, &ret);
1595 if (error)
1596 goto out;
1597
1598 *na = ret;
1599 netmap_adapter_get(ret);
1600
1601 /*
1602 * if the adapter supports the host rings and it is not alread open,
1603 * try to set the number of host rings as requested by the user
1604 */
1605 if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
1606 if (req->nr_host_tx_rings)
1607 (*na)->num_host_tx_rings = req->nr_host_tx_rings;
1608 if (req->nr_host_rx_rings)
1609 (*na)->num_host_rx_rings = req->nr_host_rx_rings;
1610 }
1611 nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
1612 (*na)->num_host_rx_rings);
1613
1614 out:
1615 if (error) {
1616 if (ret)
1617 netmap_adapter_put(ret);
1618 if (*ifp) {
1619 if_rele(*ifp);
1620 *ifp = NULL;
1621 }
1622 }
1623 if (nmd_ref)
1624 netmap_mem_put(nmd);
1625
1626 return error;
1627 }
1628
1629 /* undo netmap_get_na() */
1630 void
netmap_unget_na(struct netmap_adapter * na,struct ifnet * ifp)1631 netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
1632 {
1633 if (ifp)
1634 if_rele(ifp);
1635 if (na)
1636 netmap_adapter_put(na);
1637 }
1638
1639
1640 #define NM_FAIL_ON(t) do { \
1641 if (unlikely(t)) { \
1642 nm_prlim(5, "%s: fail '" #t "' " \
1643 "h %d c %d t %d " \
1644 "rh %d rc %d rt %d " \
1645 "hc %d ht %d", \
1646 kring->name, \
1647 head, cur, ring->tail, \
1648 kring->rhead, kring->rcur, kring->rtail, \
1649 kring->nr_hwcur, kring->nr_hwtail); \
1650 return kring->nkr_num_slots; \
1651 } \
1652 } while (0)
1653
1654 /*
1655 * validate parameters on entry for *_txsync()
1656 * Returns ring->cur if ok, or something >= kring->nkr_num_slots
1657 * in case of error.
1658 *
1659 * rhead, rcur and rtail=hwtail are stored from previous round.
1660 * hwcur is the next packet to send to the ring.
1661 *
1662 * We want
1663 * hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
1664 *
1665 * hwcur, rhead, rtail and hwtail are reliable
1666 */
1667 u_int
nm_txsync_prologue(struct netmap_kring * kring,struct netmap_ring * ring)1668 nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1669 {
1670 u_int head = ring->head; /* read only once */
1671 u_int cur = ring->cur; /* read only once */
1672 u_int n = kring->nkr_num_slots;
1673
1674 nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
1675 kring->name,
1676 kring->nr_hwcur, kring->nr_hwtail,
1677 ring->head, ring->cur, ring->tail);
1678 #if 1 /* kernel sanity checks; but we can trust the kring. */
1679 NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
1680 kring->rtail >= n || kring->nr_hwtail >= n);
1681 #endif /* kernel sanity checks */
1682 /*
1683 * user sanity checks. We only use head,
1684 * A, B, ... are possible positions for head:
1685 *
1686 * 0 A rhead B rtail C n-1
1687 * 0 D rtail E rhead F n-1
1688 *
1689 * B, F, D are valid. A, C, E are wrong
1690 */
1691 if (kring->rtail >= kring->rhead) {
1692 /* want rhead <= head <= rtail */
1693 NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
1694 /* and also head <= cur <= rtail */
1695 NM_FAIL_ON(cur < head || cur > kring->rtail);
1696 } else { /* here rtail < rhead */
1697 /* we need head outside rtail .. rhead */
1698 NM_FAIL_ON(head > kring->rtail && head < kring->rhead);
1699
1700 /* two cases now: head <= rtail or head >= rhead */
1701 if (head <= kring->rtail) {
1702 /* want head <= cur <= rtail */
1703 NM_FAIL_ON(cur < head || cur > kring->rtail);
1704 } else { /* head >= rhead */
1705 /* cur must be outside rtail..head */
1706 NM_FAIL_ON(cur > kring->rtail && cur < head);
1707 }
1708 }
1709 if (ring->tail != kring->rtail) {
1710 nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
1711 ring->tail, kring->rtail);
1712 ring->tail = kring->rtail;
1713 }
1714 kring->rhead = head;
1715 kring->rcur = cur;
1716 return head;
1717 }
1718
1719
1720 /*
1721 * validate parameters on entry for *_rxsync()
1722 * Returns ring->head if ok, kring->nkr_num_slots on error.
1723 *
1724 * For a valid configuration,
1725 * hwcur <= head <= cur <= tail <= hwtail
1726 *
1727 * We only consider head and cur.
1728 * hwcur and hwtail are reliable.
1729 *
1730 */
1731 u_int
nm_rxsync_prologue(struct netmap_kring * kring,struct netmap_ring * ring)1732 nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1733 {
1734 uint32_t const n = kring->nkr_num_slots;
1735 uint32_t head, cur;
1736
1737 nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
1738 kring->name,
1739 kring->nr_hwcur, kring->nr_hwtail,
1740 ring->head, ring->cur, ring->tail);
1741 /*
1742 * Before storing the new values, we should check they do not
1743 * move backwards. However:
1744 * - head is not an issue because the previous value is hwcur;
1745 * - cur could in principle go back, however it does not matter
1746 * because we are processing a brand new rxsync()
1747 */
1748 cur = kring->rcur = ring->cur; /* read only once */
1749 head = kring->rhead = ring->head; /* read only once */
1750 #if 1 /* kernel sanity checks */
1751 NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
1752 #endif /* kernel sanity checks */
1753 /* user sanity checks */
1754 if (kring->nr_hwtail >= kring->nr_hwcur) {
1755 /* want hwcur <= rhead <= hwtail */
1756 NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
1757 /* and also rhead <= rcur <= hwtail */
1758 NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1759 } else {
1760 /* we need rhead outside hwtail..hwcur */
1761 NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
1762 /* two cases now: head <= hwtail or head >= hwcur */
1763 if (head <= kring->nr_hwtail) {
1764 /* want head <= cur <= hwtail */
1765 NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1766 } else {
1767 /* cur must be outside hwtail..head */
1768 NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
1769 }
1770 }
1771 if (ring->tail != kring->rtail) {
1772 nm_prlim(5, "%s tail overwritten was %d need %d",
1773 kring->name,
1774 ring->tail, kring->rtail);
1775 ring->tail = kring->rtail;
1776 }
1777 return head;
1778 }
1779
1780
1781 /*
1782 * Error routine called when txsync/rxsync detects an error.
1783 * Can't do much more than resetting head = cur = hwcur, tail = hwtail
1784 * Return 1 on reinit.
1785 *
1786 * This routine is only called by the upper half of the kernel.
1787 * It only reads hwcur (which is changed only by the upper half, too)
1788 * and hwtail (which may be changed by the lower half, but only on
1789 * a tx ring and only to increase it, so any error will be recovered
1790 * on the next call). For the above, we don't strictly need to call
1791 * it under lock.
1792 */
1793 int
netmap_ring_reinit(struct netmap_kring * kring)1794 netmap_ring_reinit(struct netmap_kring *kring)
1795 {
1796 struct netmap_ring *ring = kring->ring;
1797 u_int i, lim = kring->nkr_num_slots - 1;
1798 int errors = 0;
1799
1800 // XXX KASSERT nm_kr_tryget
1801 nm_prlim(10, "called for %s", kring->name);
1802 // XXX probably wrong to trust userspace
1803 kring->rhead = ring->head;
1804 kring->rcur = ring->cur;
1805 kring->rtail = ring->tail;
1806
1807 if (ring->cur > lim)
1808 errors++;
1809 if (ring->head > lim)
1810 errors++;
1811 if (ring->tail > lim)
1812 errors++;
1813 for (i = 0; i <= lim; i++) {
1814 u_int idx = ring->slot[i].buf_idx;
1815 u_int len = ring->slot[i].len;
1816 if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
1817 nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
1818 ring->slot[i].buf_idx = 0;
1819 ring->slot[i].len = 0;
1820 } else if (len > NETMAP_BUF_SIZE(kring->na)) {
1821 ring->slot[i].len = 0;
1822 nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
1823 }
1824 }
1825 if (errors) {
1826 nm_prlim(10, "total %d errors", errors);
1827 nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
1828 kring->name,
1829 ring->cur, kring->nr_hwcur,
1830 ring->tail, kring->nr_hwtail);
1831 ring->head = kring->rhead = kring->nr_hwcur;
1832 ring->cur = kring->rcur = kring->nr_hwcur;
1833 ring->tail = kring->rtail = kring->nr_hwtail;
1834 }
1835 return (errors ? 1 : 0);
1836 }
1837
1838 /* interpret the ringid and flags fields of an nmreq, by translating them
1839 * into a pair of intervals of ring indices:
1840 *
1841 * [priv->np_txqfirst, priv->np_txqlast) and
1842 * [priv->np_rxqfirst, priv->np_rxqlast)
1843 *
1844 */
1845 int
netmap_interp_ringid(struct netmap_priv_d * priv,struct nmreq_header * hdr)1846 netmap_interp_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1847 {
1848 struct netmap_adapter *na = priv->np_na;
1849 struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1850 int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
1851 enum txrx t;
1852 u_int j;
1853 u_int nr_flags = reg->nr_flags, nr_mode = reg->nr_mode,
1854 nr_ringid = reg->nr_ringid;
1855
1856 for_rx_tx(t) {
1857 if (nr_flags & excluded_direction[t]) {
1858 priv->np_qfirst[t] = priv->np_qlast[t] = 0;
1859 continue;
1860 }
1861 switch (nr_mode) {
1862 case NR_REG_ALL_NIC:
1863 case NR_REG_NULL:
1864 priv->np_qfirst[t] = 0;
1865 priv->np_qlast[t] = nma_get_nrings(na, t);
1866 nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
1867 priv->np_qfirst[t], priv->np_qlast[t]);
1868 break;
1869 case NR_REG_SW:
1870 case NR_REG_NIC_SW:
1871 if (!(na->na_flags & NAF_HOST_RINGS)) {
1872 nm_prerr("host rings not supported");
1873 return EINVAL;
1874 }
1875 priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
1876 nma_get_nrings(na, t) : 0);
1877 priv->np_qlast[t] = netmap_all_rings(na, t);
1878 nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
1879 nm_txrx2str(t),
1880 priv->np_qfirst[t], priv->np_qlast[t]);
1881 break;
1882 case NR_REG_ONE_NIC:
1883 if (nr_ringid >= na->num_tx_rings &&
1884 nr_ringid >= na->num_rx_rings) {
1885 nm_prerr("invalid ring id %d", nr_ringid);
1886 return EINVAL;
1887 }
1888 /* if not enough rings, use the first one */
1889 j = nr_ringid;
1890 if (j >= nma_get_nrings(na, t))
1891 j = 0;
1892 priv->np_qfirst[t] = j;
1893 priv->np_qlast[t] = j + 1;
1894 nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
1895 priv->np_qfirst[t], priv->np_qlast[t]);
1896 break;
1897 case NR_REG_ONE_SW:
1898 if (!(na->na_flags & NAF_HOST_RINGS)) {
1899 nm_prerr("host rings not supported");
1900 return EINVAL;
1901 }
1902 if (nr_ringid >= na->num_host_tx_rings &&
1903 nr_ringid >= na->num_host_rx_rings) {
1904 nm_prerr("invalid ring id %d", nr_ringid);
1905 return EINVAL;
1906 }
1907 /* if not enough rings, use the first one */
1908 j = nr_ringid;
1909 if (j >= nma_get_host_nrings(na, t))
1910 j = 0;
1911 priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
1912 priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
1913 nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
1914 priv->np_qfirst[t], priv->np_qlast[t]);
1915 break;
1916 default:
1917 nm_prerr("invalid regif type %d", nr_mode);
1918 return EINVAL;
1919 }
1920 }
1921 priv->np_flags = nr_flags;
1922
1923 /* Allow transparent forwarding mode in the host --> nic
1924 * direction only if all the TX hw rings have been opened. */
1925 if (priv->np_qfirst[NR_TX] == 0 &&
1926 priv->np_qlast[NR_TX] >= na->num_tx_rings) {
1927 priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
1928 }
1929
1930 if (netmap_verbose) {
1931 nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
1932 na->name,
1933 priv->np_qfirst[NR_TX],
1934 priv->np_qlast[NR_TX],
1935 priv->np_qfirst[NR_RX],
1936 priv->np_qlast[NR_RX],
1937 nr_ringid);
1938 }
1939 return 0;
1940 }
1941
1942
1943 /*
1944 * Set the ring ID. For devices with a single queue, a request
1945 * for all rings is the same as a single ring.
1946 */
1947 static int
netmap_set_ringid(struct netmap_priv_d * priv,struct nmreq_header * hdr)1948 netmap_set_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1949 {
1950 struct netmap_adapter *na = priv->np_na;
1951 struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1952 int error;
1953 enum txrx t;
1954
1955 error = netmap_interp_ringid(priv, hdr);
1956 if (error) {
1957 return error;
1958 }
1959
1960 priv->np_txpoll = (reg->nr_flags & NR_NO_TX_POLL) ? 0 : 1;
1961
1962 /* optimization: count the users registered for more than
1963 * one ring, which are the ones sleeping on the global queue.
1964 * The default netmap_notify() callback will then
1965 * avoid signaling the global queue if nobody is using it
1966 */
1967 for_rx_tx(t) {
1968 if (nm_si_user(priv, t))
1969 na->si_users[t]++;
1970 }
1971 return 0;
1972 }
1973
1974 static void
netmap_unset_ringid(struct netmap_priv_d * priv)1975 netmap_unset_ringid(struct netmap_priv_d *priv)
1976 {
1977 struct netmap_adapter *na = priv->np_na;
1978 enum txrx t;
1979
1980 for_rx_tx(t) {
1981 if (nm_si_user(priv, t))
1982 na->si_users[t]--;
1983 priv->np_qfirst[t] = priv->np_qlast[t] = 0;
1984 }
1985 priv->np_flags = 0;
1986 priv->np_txpoll = 0;
1987 priv->np_kloop_state = 0;
1988 }
1989
1990 #define within_sel(p_, t_, i_) \
1991 ((i_) < (p_)->np_qlast[(t_)])
1992 #define nonempty_sel(p_, t_) \
1993 (within_sel((p_), (t_), (p_)->np_qfirst[(t_)]))
1994 #define foreach_selected_ring(p_, t_, i_, kring_) \
1995 for ((t_) = nonempty_sel((p_), NR_RX) ? NR_RX : NR_TX, \
1996 (i_) = (p_)->np_qfirst[(t_)]; \
1997 (t_ == NR_RX || \
1998 (t == NR_TX && within_sel((p_), (t_), (i_)))) && \
1999 ((kring_) = NMR((p_)->np_na, (t_))[(i_)]); \
2000 (i_) = within_sel((p_), (t_), (i_) + 1) ? (i_) + 1 : \
2001 (++(t_) < NR_TXRX ? (p_)->np_qfirst[(t_)] : (i_)))
2002
2003
2004 /* Set the nr_pending_mode for the requested rings.
2005 * If requested, also try to get exclusive access to the rings, provided
2006 * the rings we want to bind are not exclusively owned by a previous bind.
2007 */
2008 static int
netmap_krings_get(struct netmap_priv_d * priv)2009 netmap_krings_get(struct netmap_priv_d *priv)
2010 {
2011 struct netmap_adapter *na = priv->np_na;
2012 u_int i;
2013 struct netmap_kring *kring;
2014 int excl = (priv->np_flags & NR_EXCLUSIVE);
2015 enum txrx t;
2016
2017 if (netmap_debug & NM_DEBUG_ON)
2018 nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
2019 na->name,
2020 priv->np_qfirst[NR_TX],
2021 priv->np_qlast[NR_TX],
2022 priv->np_qfirst[NR_RX],
2023 priv->np_qlast[NR_RX]);
2024
2025 /* first round: check that all the requested rings
2026 * are neither alread exclusively owned, nor we
2027 * want exclusive ownership when they are already in use
2028 */
2029 foreach_selected_ring(priv, t, i, kring) {
2030 if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2031 (kring->users && excl))
2032 {
2033 nm_prdis("ring %s busy", kring->name);
2034 return EBUSY;
2035 }
2036 }
2037
2038 /* second round: increment usage count (possibly marking them
2039 * as exclusive) and set the nr_pending_mode
2040 */
2041 foreach_selected_ring(priv, t, i, kring) {
2042 kring->users++;
2043 if (excl)
2044 kring->nr_kflags |= NKR_EXCLUSIVE;
2045 kring->nr_pending_mode = NKR_NETMAP_ON;
2046 }
2047
2048 return 0;
2049
2050 }
2051
2052 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
2053 * if was asked on regif, and unset the nr_pending_mode if we are the
2054 * last users of the involved rings. */
2055 static void
netmap_krings_put(struct netmap_priv_d * priv)2056 netmap_krings_put(struct netmap_priv_d *priv)
2057 {
2058 u_int i;
2059 struct netmap_kring *kring;
2060 int excl = (priv->np_flags & NR_EXCLUSIVE);
2061 enum txrx t;
2062
2063 nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
2064 na->name,
2065 priv->np_qfirst[NR_TX],
2066 priv->np_qlast[NR_TX],
2067 priv->np_qfirst[NR_RX],
2068 priv->np_qlast[MR_RX]);
2069
2070 foreach_selected_ring(priv, t, i, kring) {
2071 if (excl)
2072 kring->nr_kflags &= ~NKR_EXCLUSIVE;
2073 kring->users--;
2074 if (kring->users == 0)
2075 kring->nr_pending_mode = NKR_NETMAP_OFF;
2076 }
2077 }
2078
2079 static int
nm_priv_rx_enabled(struct netmap_priv_d * priv)2080 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2081 {
2082 return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2083 }
2084
2085 /* Validate the CSB entries for both directions (atok and ktoa).
2086 * To be called under NMG_LOCK(). */
2087 static int
netmap_csb_validate(struct netmap_priv_d * priv,struct nmreq_opt_csb * csbo)2088 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2089 {
2090 struct nm_csb_atok *csb_atok_base =
2091 (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2092 struct nm_csb_ktoa *csb_ktoa_base =
2093 (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2094 enum txrx t;
2095 int num_rings[NR_TXRX], tot_rings;
2096 size_t entry_size[2];
2097 void *csb_start[2];
2098 int i;
2099
2100 if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2101 nm_prerr("Cannot update CSB while kloop is running");
2102 return EBUSY;
2103 }
2104
2105 tot_rings = 0;
2106 for_rx_tx(t) {
2107 num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2108 tot_rings += num_rings[t];
2109 }
2110 if (tot_rings <= 0)
2111 return 0;
2112
2113 if (!(priv->np_flags & NR_EXCLUSIVE)) {
2114 nm_prerr("CSB mode requires NR_EXCLUSIVE");
2115 return EINVAL;
2116 }
2117
2118 entry_size[0] = sizeof(*csb_atok_base);
2119 entry_size[1] = sizeof(*csb_ktoa_base);
2120 csb_start[0] = (void *)csb_atok_base;
2121 csb_start[1] = (void *)csb_ktoa_base;
2122
2123 for (i = 0; i < 2; i++) {
2124 /* On Linux we could use access_ok() to simplify
2125 * the validation. However, the advantage of
2126 * this approach is that it works also on
2127 * FreeBSD. */
2128 size_t csb_size = tot_rings * entry_size[i];
2129 void *tmp;
2130 int err;
2131
2132 if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2133 nm_prerr("Unaligned CSB address");
2134 return EINVAL;
2135 }
2136
2137 tmp = nm_os_malloc(csb_size);
2138 if (!tmp)
2139 return ENOMEM;
2140 if (i == 0) {
2141 /* Application --> kernel direction. */
2142 err = copyin(csb_start[i], tmp, csb_size);
2143 } else {
2144 /* Kernel --> application direction. */
2145 memset(tmp, 0, csb_size);
2146 err = copyout(tmp, csb_start[i], csb_size);
2147 }
2148 nm_os_free(tmp);
2149 if (err) {
2150 nm_prerr("Invalid CSB address");
2151 return err;
2152 }
2153 }
2154
2155 priv->np_csb_atok_base = csb_atok_base;
2156 priv->np_csb_ktoa_base = csb_ktoa_base;
2157
2158 /* Initialize the CSB. */
2159 for_rx_tx(t) {
2160 for (i = 0; i < num_rings[t]; i++) {
2161 struct netmap_kring *kring =
2162 NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2163 struct nm_csb_atok *csb_atok = csb_atok_base + i;
2164 struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2165
2166 if (t == NR_RX) {
2167 csb_atok += num_rings[NR_TX];
2168 csb_ktoa += num_rings[NR_TX];
2169 }
2170
2171 CSB_WRITE(csb_atok, head, kring->rhead);
2172 CSB_WRITE(csb_atok, cur, kring->rcur);
2173 CSB_WRITE(csb_atok, appl_need_kick, 1);
2174 CSB_WRITE(csb_atok, sync_flags, 1);
2175 CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2176 CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2177 CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2178
2179 nm_prinf("csb_init for kring %s: head %u, cur %u, "
2180 "hwcur %u, hwtail %u", kring->name,
2181 kring->rhead, kring->rcur, kring->nr_hwcur,
2182 kring->nr_hwtail);
2183 }
2184 }
2185
2186 return 0;
2187 }
2188
2189 /* Ensure that the netmap adapter can support the given MTU.
2190 * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
2191 */
2192 int
netmap_buf_size_validate(const struct netmap_adapter * na,unsigned mtu)2193 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2194 unsigned nbs = NETMAP_BUF_SIZE(na);
2195
2196 if (mtu <= na->rx_buf_maxsize) {
2197 /* The MTU fits a single NIC slot. We only
2198 * Need to check that netmap buffers are
2199 * large enough to hold an MTU. NS_MOREFRAG
2200 * cannot be used in this case. */
2201 if (nbs < mtu) {
2202 nm_prerr("error: netmap buf size (%u) "
2203 "< device MTU (%u)", nbs, mtu);
2204 return EINVAL;
2205 }
2206 } else {
2207 /* More NIC slots may be needed to receive
2208 * or transmit a single packet. Check that
2209 * the adapter supports NS_MOREFRAG and that
2210 * netmap buffers are large enough to hold
2211 * the maximum per-slot size. */
2212 if (!(na->na_flags & NAF_MOREFRAG)) {
2213 nm_prerr("error: large MTU (%d) needed "
2214 "but %s does not support "
2215 "NS_MOREFRAG", mtu,
2216 na->ifp->if_xname);
2217 return EINVAL;
2218 } else if (nbs < na->rx_buf_maxsize) {
2219 nm_prerr("error: using NS_MOREFRAG on "
2220 "%s requires netmap buf size "
2221 ">= %u", na->ifp->if_xname,
2222 na->rx_buf_maxsize);
2223 return EINVAL;
2224 } else {
2225 nm_prinf("info: netmap application on "
2226 "%s needs to support "
2227 "NS_MOREFRAG "
2228 "(MTU=%u,netmap_buf_size=%u)",
2229 na->ifp->if_xname, mtu, nbs);
2230 }
2231 }
2232 return 0;
2233 }
2234
2235
2236 /*
2237 * possibly move the interface to netmap-mode.
2238 * If success it returns a pointer to netmap_if, otherwise NULL.
2239 * This must be called with NMG_LOCK held.
2240 *
2241 * The following na callbacks are called in the process:
2242 *
2243 * na->nm_config() [by netmap_update_config]
2244 * (get current number and size of rings)
2245 *
2246 * We have a generic one for linux (netmap_linux_config).
2247 * The bwrap has to override this, since it has to forward
2248 * the request to the wrapped adapter (netmap_bwrap_config).
2249 *
2250 *
2251 * na->nm_krings_create()
2252 * (create and init the krings array)
2253 *
2254 * One of the following:
2255 *
2256 * * netmap_hw_krings_create, (hw ports)
2257 * creates the standard layout for the krings
2258 * and adds the mbq (used for the host rings).
2259 *
2260 * * netmap_vp_krings_create (VALE ports)
2261 * add leases and scratchpads
2262 *
2263 * * netmap_pipe_krings_create (pipes)
2264 * create the krings and rings of both ends and
2265 * cross-link them
2266 *
2267 * * netmap_monitor_krings_create (monitors)
2268 * avoid allocating the mbq
2269 *
2270 * * netmap_bwrap_krings_create (bwraps)
2271 * create both the brap krings array,
2272 * the krings array of the wrapped adapter, and
2273 * (if needed) the fake array for the host adapter
2274 *
2275 * na->nm_register(, 1)
2276 * (put the adapter in netmap mode)
2277 *
2278 * This may be one of the following:
2279 *
2280 * * netmap_hw_reg (hw ports)
2281 * checks that the ifp is still there, then calls
2282 * the hardware specific callback;
2283 *
2284 * * netmap_vp_reg (VALE ports)
2285 * If the port is connected to a bridge,
2286 * set the NAF_NETMAP_ON flag under the
2287 * bridge write lock.
2288 *
2289 * * netmap_pipe_reg (pipes)
2290 * inform the other pipe end that it is no
2291 * longer responsible for the lifetime of this
2292 * pipe end
2293 *
2294 * * netmap_monitor_reg (monitors)
2295 * intercept the sync callbacks of the monitored
2296 * rings
2297 *
2298 * * netmap_bwrap_reg (bwraps)
2299 * cross-link the bwrap and hwna rings,
2300 * forward the request to the hwna, override
2301 * the hwna notify callback (to get the frames
2302 * coming from outside go through the bridge).
2303 *
2304 *
2305 */
2306 int
netmap_do_regif(struct netmap_priv_d * priv,struct netmap_adapter * na,struct nmreq_header * hdr)2307 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2308 struct nmreq_header *hdr)
2309 {
2310 struct netmap_if *nifp = NULL;
2311 int error;
2312
2313 NMG_LOCK_ASSERT();
2314 priv->np_na = na; /* store the reference */
2315 error = netmap_mem_finalize(na->nm_mem, na);
2316 if (error)
2317 goto err;
2318
2319 if (na->active_fds == 0) {
2320
2321 /* cache the allocator info in the na */
2322 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2323 if (error)
2324 goto err_drop_mem;
2325 nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2326 na->na_lut.objsize);
2327
2328 /* ring configuration may have changed, fetch from the card */
2329 netmap_update_config(na);
2330 }
2331
2332 /* compute the range of tx and rx rings to monitor */
2333 error = netmap_set_ringid(priv, hdr);
2334 if (error)
2335 goto err_put_lut;
2336
2337 if (na->active_fds == 0) {
2338 /*
2339 * If this is the first registration of the adapter,
2340 * perform sanity checks and create the in-kernel view
2341 * of the netmap rings (the netmap krings).
2342 */
2343 if (na->ifp && nm_priv_rx_enabled(priv)) {
2344 /* This netmap adapter is attached to an ifnet. */
2345 unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2346
2347 nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2348 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2349
2350 if (na->rx_buf_maxsize == 0) {
2351 nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2352 error = EIO;
2353 goto err_drop_mem;
2354 }
2355
2356 error = netmap_buf_size_validate(na, mtu);
2357 if (error)
2358 goto err_drop_mem;
2359 }
2360
2361 /*
2362 * Depending on the adapter, this may also create
2363 * the netmap rings themselves
2364 */
2365 error = na->nm_krings_create(na);
2366 if (error)
2367 goto err_put_lut;
2368
2369 }
2370
2371 /* now the krings must exist and we can check whether some
2372 * previous bind has exclusive ownership on them, and set
2373 * nr_pending_mode
2374 */
2375 error = netmap_krings_get(priv);
2376 if (error)
2377 goto err_del_krings;
2378
2379 /* create all needed missing netmap rings */
2380 error = netmap_mem_rings_create(na);
2381 if (error)
2382 goto err_rel_excl;
2383
2384 /* in all cases, create a new netmap if */
2385 nifp = netmap_mem_if_new(na, priv);
2386 if (nifp == NULL) {
2387 error = ENOMEM;
2388 goto err_rel_excl;
2389 }
2390
2391 if (nm_kring_pending(priv)) {
2392 /* Some kring is switching mode, tell the adapter to
2393 * react on this. */
2394 error = na->nm_register(na, 1);
2395 if (error)
2396 goto err_del_if;
2397 }
2398
2399 /* Commit the reference. */
2400 na->active_fds++;
2401
2402 /*
2403 * advertise that the interface is ready by setting np_nifp.
2404 * The barrier is needed because readers (poll, *SYNC and mmap)
2405 * check for priv->np_nifp != NULL without locking
2406 */
2407 mb(); /* make sure previous writes are visible to all CPUs */
2408 priv->np_nifp = nifp;
2409
2410 return 0;
2411
2412 err_del_if:
2413 netmap_mem_if_delete(na, nifp);
2414 err_rel_excl:
2415 netmap_krings_put(priv);
2416 netmap_mem_rings_delete(na);
2417 err_del_krings:
2418 if (na->active_fds == 0)
2419 na->nm_krings_delete(na);
2420 err_put_lut:
2421 if (na->active_fds == 0)
2422 memset(&na->na_lut, 0, sizeof(na->na_lut));
2423 err_drop_mem:
2424 netmap_mem_drop(na);
2425 err:
2426 priv->np_na = NULL;
2427 return error;
2428 }
2429
2430
2431 /*
2432 * update kring and ring at the end of rxsync/txsync.
2433 */
2434 static inline void
nm_sync_finalize(struct netmap_kring * kring)2435 nm_sync_finalize(struct netmap_kring *kring)
2436 {
2437 /*
2438 * Update ring tail to what the kernel knows
2439 * After txsync: head/rhead/hwcur might be behind cur/rcur
2440 * if no carrier.
2441 */
2442 kring->ring->tail = kring->rtail = kring->nr_hwtail;
2443
2444 nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2445 kring->name, kring->nr_hwcur, kring->nr_hwtail,
2446 kring->rhead, kring->rcur, kring->rtail);
2447 }
2448
2449 /* set ring timestamp */
2450 static inline void
ring_timestamp_set(struct netmap_ring * ring)2451 ring_timestamp_set(struct netmap_ring *ring)
2452 {
2453 if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2454 microtime(&ring->ts);
2455 }
2456 }
2457
2458 static int nmreq_copyin(struct nmreq_header *, int);
2459 static int nmreq_copyout(struct nmreq_header *, int);
2460 static int nmreq_checkoptions(struct nmreq_header *);
2461
2462 /*
2463 * ioctl(2) support for the "netmap" device.
2464 *
2465 * Following a list of accepted commands:
2466 * - NIOCCTRL device control API
2467 * - NIOCTXSYNC sync TX rings
2468 * - NIOCRXSYNC sync RX rings
2469 * - SIOCGIFADDR just for convenience
2470 * - NIOCGINFO deprecated (legacy API)
2471 * - NIOCREGIF deprecated (legacy API)
2472 *
2473 * Return 0 on success, errno otherwise.
2474 */
2475 int
netmap_ioctl(struct netmap_priv_d * priv,u_long cmd,caddr_t data,struct thread * td,int nr_body_is_user)2476 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2477 struct thread *td, int nr_body_is_user)
2478 {
2479 struct mbq q; /* packets from RX hw queues to host stack */
2480 struct netmap_adapter *na = NULL;
2481 struct netmap_mem_d *nmd = NULL;
2482 struct ifnet *ifp = NULL;
2483 int error = 0;
2484 u_int i, qfirst, qlast;
2485 struct netmap_kring **krings;
2486 int sync_flags;
2487 enum txrx t;
2488
2489 switch (cmd) {
2490 case NIOCCTRL: {
2491 struct nmreq_header *hdr = (struct nmreq_header *)data;
2492
2493 if (hdr->nr_version < NETMAP_MIN_API ||
2494 hdr->nr_version > NETMAP_MAX_API) {
2495 nm_prerr("API mismatch: got %d need %d",
2496 hdr->nr_version, NETMAP_API);
2497 return EINVAL;
2498 }
2499
2500 /* Make a kernel-space copy of the user-space nr_body.
2501 * For convenince, the nr_body pointer and the pointers
2502 * in the options list will be replaced with their
2503 * kernel-space counterparts. The original pointers are
2504 * saved internally and later restored by nmreq_copyout
2505 */
2506 error = nmreq_copyin(hdr, nr_body_is_user);
2507 if (error) {
2508 return error;
2509 }
2510
2511 /* Sanitize hdr->nr_name. */
2512 hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2513
2514 switch (hdr->nr_reqtype) {
2515 case NETMAP_REQ_REGISTER: {
2516 struct nmreq_register *req =
2517 (struct nmreq_register *)(uintptr_t)hdr->nr_body;
2518 struct netmap_if *nifp;
2519
2520 /* Protect access to priv from concurrent requests. */
2521 NMG_LOCK();
2522 do {
2523 struct nmreq_option *opt;
2524 u_int memflags;
2525
2526 if (priv->np_nifp != NULL) { /* thread already registered */
2527 error = EBUSY;
2528 break;
2529 }
2530
2531 #ifdef WITH_EXTMEM
2532 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2533 if (opt != NULL) {
2534 struct nmreq_opt_extmem *e =
2535 (struct nmreq_opt_extmem *)opt;
2536
2537 nmd = netmap_mem_ext_create(e->nro_usrptr,
2538 &e->nro_info, &error);
2539 opt->nro_status = error;
2540 if (nmd == NULL)
2541 break;
2542 }
2543 #endif /* WITH_EXTMEM */
2544
2545 if (nmd == NULL && req->nr_mem_id) {
2546 /* find the allocator and get a reference */
2547 nmd = netmap_mem_find(req->nr_mem_id);
2548 if (nmd == NULL) {
2549 if (netmap_verbose) {
2550 nm_prerr("%s: failed to find mem_id %u",
2551 hdr->nr_name, req->nr_mem_id);
2552 }
2553 error = EINVAL;
2554 break;
2555 }
2556 }
2557 /* find the interface and a reference */
2558 error = netmap_get_na(hdr, &na, &ifp, nmd,
2559 1 /* create */); /* keep reference */
2560 if (error)
2561 break;
2562 if (NETMAP_OWNED_BY_KERN(na)) {
2563 error = EBUSY;
2564 break;
2565 }
2566
2567 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2568 nm_prerr("virt_hdr_len=%d, but application does "
2569 "not accept it", na->virt_hdr_len);
2570 error = EIO;
2571 break;
2572 }
2573
2574 error = netmap_do_regif(priv, na, hdr);
2575 if (error) { /* reg. failed, release priv and ref */
2576 break;
2577 }
2578
2579 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2580 if (opt != NULL) {
2581 struct nmreq_opt_csb *csbo =
2582 (struct nmreq_opt_csb *)opt;
2583 error = netmap_csb_validate(priv, csbo);
2584 opt->nro_status = error;
2585 if (error) {
2586 netmap_do_unregif(priv);
2587 break;
2588 }
2589 }
2590
2591 nifp = priv->np_nifp;
2592
2593 /* return the offset of the netmap_if object */
2594 req->nr_rx_rings = na->num_rx_rings;
2595 req->nr_tx_rings = na->num_tx_rings;
2596 req->nr_rx_slots = na->num_rx_desc;
2597 req->nr_tx_slots = na->num_tx_desc;
2598 req->nr_host_tx_rings = na->num_host_tx_rings;
2599 req->nr_host_rx_rings = na->num_host_rx_rings;
2600 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2601 &req->nr_mem_id);
2602 if (error) {
2603 netmap_do_unregif(priv);
2604 break;
2605 }
2606 if (memflags & NETMAP_MEM_PRIVATE) {
2607 *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2608 }
2609 for_rx_tx(t) {
2610 priv->np_si[t] = nm_si_user(priv, t) ?
2611 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2612 }
2613
2614 if (req->nr_extra_bufs) {
2615 if (netmap_verbose)
2616 nm_prinf("requested %d extra buffers",
2617 req->nr_extra_bufs);
2618 req->nr_extra_bufs = netmap_extra_alloc(na,
2619 &nifp->ni_bufs_head, req->nr_extra_bufs);
2620 if (netmap_verbose)
2621 nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2622 }
2623 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2624
2625 error = nmreq_checkoptions(hdr);
2626 if (error) {
2627 netmap_do_unregif(priv);
2628 break;
2629 }
2630
2631 /* store ifp reference so that priv destructor may release it */
2632 priv->np_ifp = ifp;
2633 } while (0);
2634 if (error) {
2635 netmap_unget_na(na, ifp);
2636 }
2637 /* release the reference from netmap_mem_find() or
2638 * netmap_mem_ext_create()
2639 */
2640 if (nmd)
2641 netmap_mem_put(nmd);
2642 NMG_UNLOCK();
2643 break;
2644 }
2645
2646 case NETMAP_REQ_PORT_INFO_GET: {
2647 struct nmreq_port_info_get *req =
2648 (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2649 int nmd_ref = 0;
2650
2651 NMG_LOCK();
2652 do {
2653 u_int memflags;
2654
2655 if (hdr->nr_name[0] != '\0') {
2656 /* Build a nmreq_register out of the nmreq_port_info_get,
2657 * so that we can call netmap_get_na(). */
2658 struct nmreq_register regreq;
2659 bzero(®req, sizeof(regreq));
2660 regreq.nr_mode = NR_REG_ALL_NIC;
2661 regreq.nr_tx_slots = req->nr_tx_slots;
2662 regreq.nr_rx_slots = req->nr_rx_slots;
2663 regreq.nr_tx_rings = req->nr_tx_rings;
2664 regreq.nr_rx_rings = req->nr_rx_rings;
2665 regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2666 regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2667 regreq.nr_mem_id = req->nr_mem_id;
2668
2669 /* get a refcount */
2670 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2671 hdr->nr_body = (uintptr_t)®req;
2672 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2673 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2674 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2675 if (error) {
2676 na = NULL;
2677 ifp = NULL;
2678 break;
2679 }
2680 nmd = na->nm_mem; /* get memory allocator */
2681 } else {
2682 nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2683 if (nmd == NULL) {
2684 if (netmap_verbose)
2685 nm_prerr("%s: failed to find mem_id %u",
2686 hdr->nr_name,
2687 req->nr_mem_id ? req->nr_mem_id : 1);
2688 error = EINVAL;
2689 break;
2690 }
2691 nmd_ref = 1;
2692 }
2693
2694 error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2695 &req->nr_mem_id);
2696 if (error)
2697 break;
2698 if (na == NULL) /* only memory info */
2699 break;
2700 netmap_update_config(na);
2701 req->nr_rx_rings = na->num_rx_rings;
2702 req->nr_tx_rings = na->num_tx_rings;
2703 req->nr_rx_slots = na->num_rx_desc;
2704 req->nr_tx_slots = na->num_tx_desc;
2705 req->nr_host_tx_rings = na->num_host_tx_rings;
2706 req->nr_host_rx_rings = na->num_host_rx_rings;
2707 } while (0);
2708 netmap_unget_na(na, ifp);
2709 if (nmd_ref)
2710 netmap_mem_put(nmd);
2711 NMG_UNLOCK();
2712 break;
2713 }
2714 #ifdef WITH_VALE
2715 case NETMAP_REQ_VALE_ATTACH: {
2716 error = netmap_vale_attach(hdr, NULL /* userspace request */);
2717 break;
2718 }
2719
2720 case NETMAP_REQ_VALE_DETACH: {
2721 error = netmap_vale_detach(hdr, NULL /* userspace request */);
2722 break;
2723 }
2724
2725 case NETMAP_REQ_VALE_LIST: {
2726 error = netmap_vale_list(hdr);
2727 break;
2728 }
2729
2730 case NETMAP_REQ_PORT_HDR_SET: {
2731 struct nmreq_port_hdr *req =
2732 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2733 /* Build a nmreq_register out of the nmreq_port_hdr,
2734 * so that we can call netmap_get_bdg_na(). */
2735 struct nmreq_register regreq;
2736 bzero(®req, sizeof(regreq));
2737 regreq.nr_mode = NR_REG_ALL_NIC;
2738
2739 /* For now we only support virtio-net headers, and only for
2740 * VALE ports, but this may change in future. Valid lengths
2741 * for the virtio-net header are 0 (no header), 10 and 12. */
2742 if (req->nr_hdr_len != 0 &&
2743 req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
2744 req->nr_hdr_len != 12) {
2745 if (netmap_verbose)
2746 nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
2747 error = EINVAL;
2748 break;
2749 }
2750 NMG_LOCK();
2751 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2752 hdr->nr_body = (uintptr_t)®req;
2753 error = netmap_get_vale_na(hdr, &na, NULL, 0);
2754 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
2755 hdr->nr_body = (uintptr_t)req;
2756 if (na && !error) {
2757 struct netmap_vp_adapter *vpna =
2758 (struct netmap_vp_adapter *)na;
2759 na->virt_hdr_len = req->nr_hdr_len;
2760 if (na->virt_hdr_len) {
2761 vpna->mfs = NETMAP_BUF_SIZE(na);
2762 }
2763 if (netmap_verbose)
2764 nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
2765 netmap_adapter_put(na);
2766 } else if (!na) {
2767 error = ENXIO;
2768 }
2769 NMG_UNLOCK();
2770 break;
2771 }
2772
2773 case NETMAP_REQ_PORT_HDR_GET: {
2774 /* Get vnet-header length for this netmap port */
2775 struct nmreq_port_hdr *req =
2776 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2777 /* Build a nmreq_register out of the nmreq_port_hdr,
2778 * so that we can call netmap_get_bdg_na(). */
2779 struct nmreq_register regreq;
2780 struct ifnet *ifp;
2781
2782 bzero(®req, sizeof(regreq));
2783 regreq.nr_mode = NR_REG_ALL_NIC;
2784 NMG_LOCK();
2785 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2786 hdr->nr_body = (uintptr_t)®req;
2787 error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
2788 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
2789 hdr->nr_body = (uintptr_t)req;
2790 if (na && !error) {
2791 req->nr_hdr_len = na->virt_hdr_len;
2792 }
2793 netmap_unget_na(na, ifp);
2794 NMG_UNLOCK();
2795 break;
2796 }
2797
2798 case NETMAP_REQ_VALE_NEWIF: {
2799 error = nm_vi_create(hdr);
2800 break;
2801 }
2802
2803 case NETMAP_REQ_VALE_DELIF: {
2804 error = nm_vi_destroy(hdr->nr_name);
2805 break;
2806 }
2807
2808 case NETMAP_REQ_VALE_POLLING_ENABLE:
2809 case NETMAP_REQ_VALE_POLLING_DISABLE: {
2810 error = nm_bdg_polling(hdr);
2811 break;
2812 }
2813 #endif /* WITH_VALE */
2814 case NETMAP_REQ_POOLS_INFO_GET: {
2815 /* Get information from the memory allocator used for
2816 * hdr->nr_name. */
2817 struct nmreq_pools_info *req =
2818 (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
2819 NMG_LOCK();
2820 do {
2821 /* Build a nmreq_register out of the nmreq_pools_info,
2822 * so that we can call netmap_get_na(). */
2823 struct nmreq_register regreq;
2824 bzero(®req, sizeof(regreq));
2825 regreq.nr_mem_id = req->nr_mem_id;
2826 regreq.nr_mode = NR_REG_ALL_NIC;
2827
2828 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2829 hdr->nr_body = (uintptr_t)®req;
2830 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2831 hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
2832 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2833 if (error) {
2834 na = NULL;
2835 ifp = NULL;
2836 break;
2837 }
2838 nmd = na->nm_mem; /* grab the memory allocator */
2839 if (nmd == NULL) {
2840 error = EINVAL;
2841 break;
2842 }
2843
2844 /* Finalize the memory allocator, get the pools
2845 * information and release the allocator. */
2846 error = netmap_mem_finalize(nmd, na);
2847 if (error) {
2848 break;
2849 }
2850 error = netmap_mem_pools_info_get(req, nmd);
2851 netmap_mem_drop(na);
2852 } while (0);
2853 netmap_unget_na(na, ifp);
2854 NMG_UNLOCK();
2855 break;
2856 }
2857
2858 case NETMAP_REQ_CSB_ENABLE: {
2859 struct nmreq_option *opt;
2860
2861 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2862 if (opt == NULL) {
2863 error = EINVAL;
2864 } else {
2865 struct nmreq_opt_csb *csbo =
2866 (struct nmreq_opt_csb *)opt;
2867 NMG_LOCK();
2868 error = netmap_csb_validate(priv, csbo);
2869 NMG_UNLOCK();
2870 opt->nro_status = error;
2871 }
2872 break;
2873 }
2874
2875 case NETMAP_REQ_SYNC_KLOOP_START: {
2876 error = netmap_sync_kloop(priv, hdr);
2877 break;
2878 }
2879
2880 case NETMAP_REQ_SYNC_KLOOP_STOP: {
2881 error = netmap_sync_kloop_stop(priv);
2882 break;
2883 }
2884
2885 default: {
2886 error = EINVAL;
2887 break;
2888 }
2889 }
2890 /* Write back request body to userspace and reset the
2891 * user-space pointer. */
2892 error = nmreq_copyout(hdr, error);
2893 break;
2894 }
2895
2896 case NIOCTXSYNC:
2897 case NIOCRXSYNC: {
2898 if (unlikely(priv->np_nifp == NULL)) {
2899 error = ENXIO;
2900 break;
2901 }
2902 mb(); /* make sure following reads are not from cache */
2903
2904 if (unlikely(priv->np_csb_atok_base)) {
2905 nm_prerr("Invalid sync in CSB mode");
2906 error = EBUSY;
2907 break;
2908 }
2909
2910 na = priv->np_na; /* we have a reference */
2911
2912 mbq_init(&q);
2913 t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
2914 krings = NMR(na, t);
2915 qfirst = priv->np_qfirst[t];
2916 qlast = priv->np_qlast[t];
2917 sync_flags = priv->np_sync_flags;
2918
2919 for (i = qfirst; i < qlast; i++) {
2920 struct netmap_kring *kring = krings[i];
2921 struct netmap_ring *ring = kring->ring;
2922
2923 if (unlikely(nm_kr_tryget(kring, 1, &error))) {
2924 error = (error ? EIO : 0);
2925 continue;
2926 }
2927
2928 if (cmd == NIOCTXSYNC) {
2929 if (netmap_debug & NM_DEBUG_TXSYNC)
2930 nm_prinf("pre txsync ring %d cur %d hwcur %d",
2931 i, ring->cur,
2932 kring->nr_hwcur);
2933 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2934 netmap_ring_reinit(kring);
2935 } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
2936 nm_sync_finalize(kring);
2937 }
2938 if (netmap_debug & NM_DEBUG_TXSYNC)
2939 nm_prinf("post txsync ring %d cur %d hwcur %d",
2940 i, ring->cur,
2941 kring->nr_hwcur);
2942 } else {
2943 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2944 netmap_ring_reinit(kring);
2945 }
2946 if (nm_may_forward_up(kring)) {
2947 /* transparent forwarding, see netmap_poll() */
2948 netmap_grab_packets(kring, &q, netmap_fwd);
2949 }
2950 if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
2951 nm_sync_finalize(kring);
2952 }
2953 ring_timestamp_set(ring);
2954 }
2955 nm_kr_put(kring);
2956 }
2957
2958 if (mbq_peek(&q)) {
2959 netmap_send_up(na->ifp, &q);
2960 }
2961
2962 break;
2963 }
2964
2965 default: {
2966 return netmap_ioctl_legacy(priv, cmd, data, td);
2967 break;
2968 }
2969 }
2970
2971 return (error);
2972 }
2973
2974 size_t
nmreq_size_by_type(uint16_t nr_reqtype)2975 nmreq_size_by_type(uint16_t nr_reqtype)
2976 {
2977 switch (nr_reqtype) {
2978 case NETMAP_REQ_REGISTER:
2979 return sizeof(struct nmreq_register);
2980 case NETMAP_REQ_PORT_INFO_GET:
2981 return sizeof(struct nmreq_port_info_get);
2982 case NETMAP_REQ_VALE_ATTACH:
2983 return sizeof(struct nmreq_vale_attach);
2984 case NETMAP_REQ_VALE_DETACH:
2985 return sizeof(struct nmreq_vale_detach);
2986 case NETMAP_REQ_VALE_LIST:
2987 return sizeof(struct nmreq_vale_list);
2988 case NETMAP_REQ_PORT_HDR_SET:
2989 case NETMAP_REQ_PORT_HDR_GET:
2990 return sizeof(struct nmreq_port_hdr);
2991 case NETMAP_REQ_VALE_NEWIF:
2992 return sizeof(struct nmreq_vale_newif);
2993 case NETMAP_REQ_VALE_DELIF:
2994 case NETMAP_REQ_SYNC_KLOOP_STOP:
2995 case NETMAP_REQ_CSB_ENABLE:
2996 return 0;
2997 case NETMAP_REQ_VALE_POLLING_ENABLE:
2998 case NETMAP_REQ_VALE_POLLING_DISABLE:
2999 return sizeof(struct nmreq_vale_polling);
3000 case NETMAP_REQ_POOLS_INFO_GET:
3001 return sizeof(struct nmreq_pools_info);
3002 case NETMAP_REQ_SYNC_KLOOP_START:
3003 return sizeof(struct nmreq_sync_kloop_start);
3004 }
3005 return 0;
3006 }
3007
3008 static size_t
nmreq_opt_size_by_type(uint32_t nro_reqtype,uint64_t nro_size)3009 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
3010 {
3011 size_t rv = sizeof(struct nmreq_option);
3012 #ifdef NETMAP_REQ_OPT_DEBUG
3013 if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
3014 return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
3015 #endif /* NETMAP_REQ_OPT_DEBUG */
3016 switch (nro_reqtype) {
3017 #ifdef WITH_EXTMEM
3018 case NETMAP_REQ_OPT_EXTMEM:
3019 rv = sizeof(struct nmreq_opt_extmem);
3020 break;
3021 #endif /* WITH_EXTMEM */
3022 case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3023 if (nro_size >= rv)
3024 rv = nro_size;
3025 break;
3026 case NETMAP_REQ_OPT_CSB:
3027 rv = sizeof(struct nmreq_opt_csb);
3028 break;
3029 case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3030 rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3031 break;
3032 }
3033 /* subtract the common header */
3034 return rv - sizeof(struct nmreq_option);
3035 }
3036
3037 /*
3038 * nmreq_copyin: create an in-kernel version of the request.
3039 *
3040 * We build the following data structure:
3041 *
3042 * hdr -> +-------+ buf
3043 * | | +---------------+
3044 * +-------+ |usr body ptr |
3045 * |options|-. +---------------+
3046 * +-------+ | |usr options ptr|
3047 * |body |--------->+---------------+
3048 * +-------+ | | |
3049 * | | copy of body |
3050 * | | |
3051 * | +---------------+
3052 * | | NULL |
3053 * | +---------------+
3054 * | .---| |\
3055 * | | +---------------+ |
3056 * | .------| | |
3057 * | | | +---------------+ \ option table
3058 * | | | | ... | / indexed by option
3059 * | | | +---------------+ | type
3060 * | | | | | |
3061 * | | | +---------------+/
3062 * | | | |usr next ptr 1 |
3063 * `-|----->+---------------+
3064 * | | | copy of opt 1 |
3065 * | | | |
3066 * | | .-| nro_next |
3067 * | | | +---------------+
3068 * | | | |usr next ptr 2 |
3069 * | `-`>+---------------+
3070 * | | copy of opt 2 |
3071 * | | |
3072 * | .-| nro_next |
3073 * | | +---------------+
3074 * | | | |
3075 * ~ ~ ~ ... ~
3076 * | .-| |
3077 * `----->+---------------+
3078 * | |usr next ptr n |
3079 * `>+---------------+
3080 * | copy of opt n |
3081 * | |
3082 * | nro_next(NULL)|
3083 * +---------------+
3084 *
3085 * The options and body fields of the hdr structure are overwritten
3086 * with in-kernel valid pointers inside the buf. The original user
3087 * pointers are saved in the buf and restored on copyout.
3088 * The list of options is copied and the pointers adjusted. The
3089 * original pointers are saved before the option they belonged.
3090 *
3091 * The option table has an entry for every availabe option. Entries
3092 * for options that have not been passed contain NULL.
3093 *
3094 */
3095
3096 int
nmreq_copyin(struct nmreq_header * hdr,int nr_body_is_user)3097 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3098 {
3099 size_t rqsz, optsz, bufsz;
3100 int error = 0;
3101 char *ker = NULL, *p;
3102 struct nmreq_option **next, *src, **opt_tab;
3103 uint64_t *ptrs;
3104
3105 if (hdr->nr_reserved) {
3106 if (netmap_verbose)
3107 nm_prerr("nr_reserved must be zero");
3108 return EINVAL;
3109 }
3110
3111 if (!nr_body_is_user)
3112 return 0;
3113
3114 hdr->nr_reserved = nr_body_is_user;
3115
3116 /* compute the total size of the buffer */
3117 rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3118 if (rqsz > NETMAP_REQ_MAXSIZE) {
3119 error = EMSGSIZE;
3120 goto out_err;
3121 }
3122 if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3123 (!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3124 /* Request body expected, but not found; or
3125 * request body found but unexpected. */
3126 if (netmap_verbose)
3127 nm_prerr("nr_body expected but not found, or vice versa");
3128 error = EINVAL;
3129 goto out_err;
3130 }
3131
3132 /*
3133 * The buffer size must be large enough to store the request body,
3134 * all the possible options and the additional user pointers
3135 * (2+NETMAP_REQ_OPT_MAX). Note that the maximum size of body plus
3136 * options can not exceed NETMAP_REQ_MAXSIZE;
3137 */
3138 bufsz = (2 + NETMAP_REQ_OPT_MAX) * sizeof(void *) + NETMAP_REQ_MAXSIZE +
3139 NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3140
3141 ker = nm_os_malloc(bufsz);
3142 if (ker == NULL) {
3143 error = ENOMEM;
3144 goto out_err;
3145 }
3146 p = ker; /* write pointer into the buffer */
3147
3148 /* make a copy of the user pointers */
3149 ptrs = (uint64_t*)p;
3150 *ptrs++ = hdr->nr_body;
3151 *ptrs++ = hdr->nr_options;
3152 p = (char *)ptrs;
3153
3154 /* copy the body */
3155 error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3156 if (error)
3157 goto out_restore;
3158 /* overwrite the user pointer with the in-kernel one */
3159 hdr->nr_body = (uintptr_t)p;
3160 p += rqsz;
3161 /* start of the options table */
3162 opt_tab = (struct nmreq_option **)p;
3163 p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3164
3165 /* copy the options */
3166 next = (struct nmreq_option **)&hdr->nr_options;
3167 src = *next;
3168 while (src) {
3169 struct nmreq_option *opt;
3170
3171 /* copy the option header */
3172 ptrs = (uint64_t *)p;
3173 opt = (struct nmreq_option *)(ptrs + 1);
3174 error = copyin(src, opt, sizeof(*src));
3175 if (error)
3176 goto out_restore;
3177 rqsz += sizeof(*src);
3178 /* make a copy of the user next pointer */
3179 *ptrs = opt->nro_next;
3180 /* overwrite the user pointer with the in-kernel one */
3181 *next = opt;
3182
3183 /* initialize the option as not supported.
3184 * Recognized options will update this field.
3185 */
3186 opt->nro_status = EOPNOTSUPP;
3187
3188 /* check for invalid types */
3189 if (opt->nro_reqtype < 1) {
3190 if (netmap_verbose)
3191 nm_prinf("invalid option type: %u", opt->nro_reqtype);
3192 opt->nro_status = EINVAL;
3193 error = EINVAL;
3194 goto next;
3195 }
3196
3197 if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3198 /* opt->nro_status is already EOPNOTSUPP */
3199 error = EOPNOTSUPP;
3200 goto next;
3201 }
3202
3203 /* if the type is valid, index the option in the table
3204 * unless it is a duplicate.
3205 */
3206 if (opt_tab[opt->nro_reqtype] != NULL) {
3207 if (netmap_verbose)
3208 nm_prinf("duplicate option: %u", opt->nro_reqtype);
3209 opt->nro_status = EINVAL;
3210 opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3211 error = EINVAL;
3212 goto next;
3213 }
3214 opt_tab[opt->nro_reqtype] = opt;
3215
3216 p = (char *)(opt + 1);
3217
3218 /* copy the option body */
3219 optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3220 opt->nro_size);
3221 /* check optsz and nro_size to avoid for possible integer overflows of rqsz */
3222 if ((optsz > NETMAP_REQ_MAXSIZE) || (opt->nro_size > NETMAP_REQ_MAXSIZE)
3223 || (rqsz + optsz > NETMAP_REQ_MAXSIZE)
3224 || (optsz > 0 && rqsz + optsz <= rqsz)) {
3225 error = EMSGSIZE;
3226 goto out_restore;
3227 }
3228 rqsz += optsz;
3229 if (optsz) {
3230 /* the option body follows the option header */
3231 error = copyin(src + 1, p, optsz);
3232 if (error)
3233 goto out_restore;
3234 p += optsz;
3235 }
3236
3237 next:
3238 /* move to next option */
3239 next = (struct nmreq_option **)&opt->nro_next;
3240 src = *next;
3241 }
3242 if (error)
3243 nmreq_copyout(hdr, error);
3244 return error;
3245
3246 out_restore:
3247 ptrs = (uint64_t *)ker;
3248 hdr->nr_body = *ptrs++;
3249 hdr->nr_options = *ptrs++;
3250 hdr->nr_reserved = 0;
3251 nm_os_free(ker);
3252 out_err:
3253 return error;
3254 }
3255
3256 static int
nmreq_copyout(struct nmreq_header * hdr,int rerror)3257 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3258 {
3259 struct nmreq_option *src, *dst;
3260 void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3261 uint64_t *ptrs;
3262 size_t bodysz;
3263 int error;
3264
3265 if (!hdr->nr_reserved)
3266 return rerror;
3267
3268 /* restore the user pointers in the header */
3269 ptrs = (uint64_t *)ker - 2;
3270 bufstart = ptrs;
3271 hdr->nr_body = *ptrs++;
3272 src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3273 hdr->nr_options = *ptrs;
3274
3275 if (!rerror) {
3276 /* copy the body */
3277 bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3278 error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3279 if (error) {
3280 rerror = error;
3281 goto out;
3282 }
3283 }
3284
3285 /* copy the options */
3286 dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3287 while (src) {
3288 size_t optsz;
3289 uint64_t next;
3290
3291 /* restore the user pointer */
3292 next = src->nro_next;
3293 ptrs = (uint64_t *)src - 1;
3294 src->nro_next = *ptrs;
3295
3296 /* always copy the option header */
3297 error = copyout(src, dst, sizeof(*src));
3298 if (error) {
3299 rerror = error;
3300 goto out;
3301 }
3302
3303 /* copy the option body only if there was no error */
3304 if (!rerror && !src->nro_status) {
3305 optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3306 src->nro_size);
3307 if (optsz) {
3308 error = copyout(src + 1, dst + 1, optsz);
3309 if (error) {
3310 rerror = error;
3311 goto out;
3312 }
3313 }
3314 }
3315 src = (struct nmreq_option *)(uintptr_t)next;
3316 dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3317 }
3318
3319
3320 out:
3321 hdr->nr_reserved = 0;
3322 nm_os_free(bufstart);
3323 return rerror;
3324 }
3325
3326 struct nmreq_option *
nmreq_getoption(struct nmreq_header * hdr,uint16_t reqtype)3327 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3328 {
3329 struct nmreq_option **opt_tab;
3330
3331 if (!hdr->nr_options)
3332 return NULL;
3333
3334 opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) -
3335 (NETMAP_REQ_OPT_MAX + 1);
3336 return opt_tab[reqtype];
3337 }
3338
3339 static int
nmreq_checkoptions(struct nmreq_header * hdr)3340 nmreq_checkoptions(struct nmreq_header *hdr)
3341 {
3342 struct nmreq_option *opt;
3343 /* return error if there is still any option
3344 * marked as not supported
3345 */
3346
3347 for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3348 opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3349 if (opt->nro_status == EOPNOTSUPP)
3350 return EOPNOTSUPP;
3351
3352 return 0;
3353 }
3354
3355 /*
3356 * select(2) and poll(2) handlers for the "netmap" device.
3357 *
3358 * Can be called for one or more queues.
3359 * Return true the event mask corresponding to ready events.
3360 * If there are no ready events (and 'sr' is not NULL), do a
3361 * selrecord on either individual selinfo or on the global one.
3362 * Device-dependent parts (locking and sync of tx/rx rings)
3363 * are done through callbacks.
3364 *
3365 * On linux, arguments are really pwait, the poll table, and 'td' is struct file *
3366 * The first one is remapped to pwait as selrecord() uses the name as an
3367 * hidden argument.
3368 */
3369 int
netmap_poll(struct netmap_priv_d * priv,int events,NM_SELRECORD_T * sr)3370 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3371 {
3372 struct netmap_adapter *na;
3373 struct netmap_kring *kring;
3374 struct netmap_ring *ring;
3375 u_int i, want[NR_TXRX], revents = 0;
3376 NM_SELINFO_T *si[NR_TXRX];
3377 #define want_tx want[NR_TX]
3378 #define want_rx want[NR_RX]
3379 struct mbq q; /* packets from RX hw queues to host stack */
3380
3381 /*
3382 * In order to avoid nested locks, we need to "double check"
3383 * txsync and rxsync if we decide to do a selrecord().
3384 * retry_tx (and retry_rx, later) prevent looping forever.
3385 */
3386 int retry_tx = 1, retry_rx = 1;
3387
3388 /* Transparent mode: send_down is 1 if we have found some
3389 * packets to forward (host RX ring --> NIC) during the rx
3390 * scan and we have not sent them down to the NIC yet.
3391 * Transparent mode requires to bind all rings to a single
3392 * file descriptor.
3393 */
3394 int send_down = 0;
3395 int sync_flags = priv->np_sync_flags;
3396
3397 mbq_init(&q);
3398
3399 if (unlikely(priv->np_nifp == NULL)) {
3400 return POLLERR;
3401 }
3402 mb(); /* make sure following reads are not from cache */
3403
3404 na = priv->np_na;
3405
3406 if (unlikely(!nm_netmap_on(na)))
3407 return POLLERR;
3408
3409 if (unlikely(priv->np_csb_atok_base)) {
3410 nm_prerr("Invalid poll in CSB mode");
3411 return POLLERR;
3412 }
3413
3414 if (netmap_debug & NM_DEBUG_ON)
3415 nm_prinf("device %s events 0x%x", na->name, events);
3416 want_tx = events & (POLLOUT | POLLWRNORM);
3417 want_rx = events & (POLLIN | POLLRDNORM);
3418
3419 /*
3420 * If the card has more than one queue AND the file descriptor is
3421 * bound to all of them, we sleep on the "global" selinfo, otherwise
3422 * we sleep on individual selinfo (FreeBSD only allows two selinfo's
3423 * per file descriptor).
3424 * The interrupt routine in the driver wake one or the other
3425 * (or both) depending on which clients are active.
3426 *
3427 * rxsync() is only called if we run out of buffers on a POLLIN.
3428 * txsync() is called if we run out of buffers on POLLOUT, or
3429 * there are pending packets to send. The latter can be disabled
3430 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
3431 */
3432 si[NR_RX] = priv->np_si[NR_RX];
3433 si[NR_TX] = priv->np_si[NR_TX];
3434
3435 #ifdef __FreeBSD__
3436 /*
3437 * We start with a lock free round which is cheap if we have
3438 * slots available. If this fails, then lock and call the sync
3439 * routines. We can't do this on Linux, as the contract says
3440 * that we must call nm_os_selrecord() unconditionally.
3441 */
3442 if (want_tx) {
3443 const enum txrx t = NR_TX;
3444 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3445 kring = NMR(na, t)[i];
3446 if (kring->ring->cur != kring->ring->tail) {
3447 /* Some unseen TX space is available, so what
3448 * we don't need to run txsync. */
3449 revents |= want[t];
3450 want[t] = 0;
3451 break;
3452 }
3453 }
3454 }
3455 if (want_rx) {
3456 const enum txrx t = NR_RX;
3457 int rxsync_needed = 0;
3458
3459 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3460 kring = NMR(na, t)[i];
3461 if (kring->ring->cur == kring->ring->tail
3462 || kring->rhead != kring->ring->head) {
3463 /* There are no unseen packets on this ring,
3464 * or there are some buffers to be returned
3465 * to the netmap port. We therefore go ahead
3466 * and run rxsync. */
3467 rxsync_needed = 1;
3468 break;
3469 }
3470 }
3471 if (!rxsync_needed) {
3472 revents |= want_rx;
3473 want_rx = 0;
3474 }
3475 }
3476 #endif
3477
3478 #ifdef linux
3479 /* The selrecord must be unconditional on linux. */
3480 nm_os_selrecord(sr, si[NR_RX]);
3481 nm_os_selrecord(sr, si[NR_TX]);
3482 #endif /* linux */
3483
3484 /*
3485 * If we want to push packets out (priv->np_txpoll) or
3486 * want_tx is still set, we must issue txsync calls
3487 * (on all rings, to avoid that the tx rings stall).
3488 * Fortunately, normal tx mode has np_txpoll set.
3489 */
3490 if (priv->np_txpoll || want_tx) {
3491 /*
3492 * The first round checks if anyone is ready, if not
3493 * do a selrecord and another round to handle races.
3494 * want_tx goes to 0 if any space is found, and is
3495 * used to skip rings with no pending transmissions.
3496 */
3497 flush_tx:
3498 for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3499 int found = 0;
3500
3501 kring = na->tx_rings[i];
3502 ring = kring->ring;
3503
3504 /*
3505 * Don't try to txsync this TX ring if we already found some
3506 * space in some of the TX rings (want_tx == 0) and there are no
3507 * TX slots in this ring that need to be flushed to the NIC
3508 * (head == hwcur).
3509 */
3510 if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3511 continue;
3512
3513 if (nm_kr_tryget(kring, 1, &revents))
3514 continue;
3515
3516 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3517 netmap_ring_reinit(kring);
3518 revents |= POLLERR;
3519 } else {
3520 if (kring->nm_sync(kring, sync_flags))
3521 revents |= POLLERR;
3522 else
3523 nm_sync_finalize(kring);
3524 }
3525
3526 /*
3527 * If we found new slots, notify potential
3528 * listeners on the same ring.
3529 * Since we just did a txsync, look at the copies
3530 * of cur,tail in the kring.
3531 */
3532 found = kring->rcur != kring->rtail;
3533 nm_kr_put(kring);
3534 if (found) { /* notify other listeners */
3535 revents |= want_tx;
3536 want_tx = 0;
3537 #ifndef linux
3538 kring->nm_notify(kring, 0);
3539 #endif /* linux */
3540 }
3541 }
3542 /* if there were any packet to forward we must have handled them by now */
3543 send_down = 0;
3544 if (want_tx && retry_tx && sr) {
3545 #ifndef linux
3546 nm_os_selrecord(sr, si[NR_TX]);
3547 #endif /* !linux */
3548 retry_tx = 0;
3549 goto flush_tx;
3550 }
3551 }
3552
3553 /*
3554 * If want_rx is still set scan receive rings.
3555 * Do it on all rings because otherwise we starve.
3556 */
3557 if (want_rx) {
3558 /* two rounds here for race avoidance */
3559 do_retry_rx:
3560 for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3561 int found = 0;
3562
3563 kring = na->rx_rings[i];
3564 ring = kring->ring;
3565
3566 if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3567 continue;
3568
3569 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3570 netmap_ring_reinit(kring);
3571 revents |= POLLERR;
3572 }
3573 /* now we can use kring->rcur, rtail */
3574
3575 /*
3576 * transparent mode support: collect packets from
3577 * hw rxring(s) that have been released by the user
3578 */
3579 if (nm_may_forward_up(kring)) {
3580 netmap_grab_packets(kring, &q, netmap_fwd);
3581 }
3582
3583 /* Clear the NR_FORWARD flag anyway, it may be set by
3584 * the nm_sync() below only on for the host RX ring (see
3585 * netmap_rxsync_from_host()). */
3586 kring->nr_kflags &= ~NR_FORWARD;
3587 if (kring->nm_sync(kring, sync_flags))
3588 revents |= POLLERR;
3589 else
3590 nm_sync_finalize(kring);
3591 send_down |= (kring->nr_kflags & NR_FORWARD);
3592 ring_timestamp_set(ring);
3593 found = kring->rcur != kring->rtail;
3594 nm_kr_put(kring);
3595 if (found) {
3596 revents |= want_rx;
3597 retry_rx = 0;
3598 #ifndef linux
3599 kring->nm_notify(kring, 0);
3600 #endif /* linux */
3601 }
3602 }
3603
3604 #ifndef linux
3605 if (retry_rx && sr) {
3606 nm_os_selrecord(sr, si[NR_RX]);
3607 }
3608 #endif /* !linux */
3609 if (send_down || retry_rx) {
3610 retry_rx = 0;
3611 if (send_down)
3612 goto flush_tx; /* and retry_rx */
3613 else
3614 goto do_retry_rx;
3615 }
3616 }
3617
3618 /*
3619 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3620 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3621 * to the host stack.
3622 */
3623
3624 if (mbq_peek(&q)) {
3625 netmap_send_up(na->ifp, &q);
3626 }
3627
3628 return (revents);
3629 #undef want_tx
3630 #undef want_rx
3631 }
3632
3633 int
nma_intr_enable(struct netmap_adapter * na,int onoff)3634 nma_intr_enable(struct netmap_adapter *na, int onoff)
3635 {
3636 bool changed = false;
3637 enum txrx t;
3638 int i;
3639
3640 for_rx_tx(t) {
3641 for (i = 0; i < nma_get_nrings(na, t); i++) {
3642 struct netmap_kring *kring = NMR(na, t)[i];
3643 int on = !(kring->nr_kflags & NKR_NOINTR);
3644
3645 if (!!onoff != !!on) {
3646 changed = true;
3647 }
3648 if (onoff) {
3649 kring->nr_kflags &= ~NKR_NOINTR;
3650 } else {
3651 kring->nr_kflags |= NKR_NOINTR;
3652 }
3653 }
3654 }
3655
3656 if (!changed) {
3657 return 0; /* nothing to do */
3658 }
3659
3660 if (!na->nm_intr) {
3661 nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3662 na->name);
3663 return -1;
3664 }
3665
3666 na->nm_intr(na, onoff);
3667
3668 return 0;
3669 }
3670
3671
3672 /*-------------------- driver support routines -------------------*/
3673
3674 /* default notify callback */
3675 static int
netmap_notify(struct netmap_kring * kring,int flags)3676 netmap_notify(struct netmap_kring *kring, int flags)
3677 {
3678 struct netmap_adapter *na = kring->notify_na;
3679 enum txrx t = kring->tx;
3680
3681 nm_os_selwakeup(&kring->si);
3682 /* optimization: avoid a wake up on the global
3683 * queue if nobody has registered for more
3684 * than one ring
3685 */
3686 if (na->si_users[t] > 0)
3687 nm_os_selwakeup(&na->si[t]);
3688
3689 return NM_IRQ_COMPLETED;
3690 }
3691
3692 /* called by all routines that create netmap_adapters.
3693 * provide some defaults and get a reference to the
3694 * memory allocator
3695 */
3696 int
netmap_attach_common(struct netmap_adapter * na)3697 netmap_attach_common(struct netmap_adapter *na)
3698 {
3699 if (!na->rx_buf_maxsize) {
3700 /* Set a conservative default (larger is safer). */
3701 na->rx_buf_maxsize = PAGE_SIZE;
3702 }
3703
3704 #ifdef __FreeBSD__
3705 if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3706 na->if_input = na->ifp->if_input; /* for netmap_send_up */
3707 }
3708 na->pdev = na; /* make sure netmap_mem_map() is called */
3709 #endif /* __FreeBSD__ */
3710 if (na->na_flags & NAF_HOST_RINGS) {
3711 if (na->num_host_rx_rings == 0)
3712 na->num_host_rx_rings = 1;
3713 if (na->num_host_tx_rings == 0)
3714 na->num_host_tx_rings = 1;
3715 }
3716 if (na->nm_krings_create == NULL) {
3717 /* we assume that we have been called by a driver,
3718 * since other port types all provide their own
3719 * nm_krings_create
3720 */
3721 na->nm_krings_create = netmap_hw_krings_create;
3722 na->nm_krings_delete = netmap_hw_krings_delete;
3723 }
3724 if (na->nm_notify == NULL)
3725 na->nm_notify = netmap_notify;
3726 na->active_fds = 0;
3727
3728 if (na->nm_mem == NULL) {
3729 /* use the global allocator */
3730 na->nm_mem = netmap_mem_get(&nm_mem);
3731 }
3732 #ifdef WITH_VALE
3733 if (na->nm_bdg_attach == NULL)
3734 /* no special nm_bdg_attach callback. On VALE
3735 * attach, we need to interpose a bwrap
3736 */
3737 na->nm_bdg_attach = netmap_default_bdg_attach;
3738 #endif
3739
3740 return 0;
3741 }
3742
3743 /* Wrapper for the register callback provided netmap-enabled
3744 * hardware drivers.
3745 * nm_iszombie(na) means that the driver module has been
3746 * unloaded, so we cannot call into it.
3747 * nm_os_ifnet_lock() must guarantee mutual exclusion with
3748 * module unloading.
3749 */
3750 static int
netmap_hw_reg(struct netmap_adapter * na,int onoff)3751 netmap_hw_reg(struct netmap_adapter *na, int onoff)
3752 {
3753 struct netmap_hw_adapter *hwna =
3754 (struct netmap_hw_adapter*)na;
3755 int error = 0;
3756
3757 nm_os_ifnet_lock();
3758
3759 if (nm_iszombie(na)) {
3760 if (onoff) {
3761 error = ENXIO;
3762 } else if (na != NULL) {
3763 na->na_flags &= ~NAF_NETMAP_ON;
3764 }
3765 goto out;
3766 }
3767
3768 error = hwna->nm_hw_register(na, onoff);
3769
3770 out:
3771 nm_os_ifnet_unlock();
3772
3773 return error;
3774 }
3775
3776 static void
netmap_hw_dtor(struct netmap_adapter * na)3777 netmap_hw_dtor(struct netmap_adapter *na)
3778 {
3779 if (na->ifp == NULL)
3780 return;
3781
3782 NM_DETACH_NA(na->ifp);
3783 }
3784
3785
3786 /*
3787 * Allocate a netmap_adapter object, and initialize it from the
3788 * 'arg' passed by the driver on attach.
3789 * We allocate a block of memory of 'size' bytes, which has room
3790 * for struct netmap_adapter plus additional room private to
3791 * the caller.
3792 * Return 0 on success, ENOMEM otherwise.
3793 */
3794 int
netmap_attach_ext(struct netmap_adapter * arg,size_t size,int override_reg)3795 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
3796 {
3797 struct netmap_hw_adapter *hwna = NULL;
3798 struct ifnet *ifp = NULL;
3799
3800 if (size < sizeof(struct netmap_hw_adapter)) {
3801 if (netmap_debug & NM_DEBUG_ON)
3802 nm_prerr("Invalid netmap adapter size %d", (int)size);
3803 return EINVAL;
3804 }
3805
3806 if (arg == NULL || arg->ifp == NULL) {
3807 if (netmap_debug & NM_DEBUG_ON)
3808 nm_prerr("either arg or arg->ifp is NULL");
3809 return EINVAL;
3810 }
3811
3812 if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
3813 if (netmap_debug & NM_DEBUG_ON)
3814 nm_prerr("%s: invalid rings tx %d rx %d",
3815 arg->name, arg->num_tx_rings, arg->num_rx_rings);
3816 return EINVAL;
3817 }
3818
3819 ifp = arg->ifp;
3820 if (NM_NA_CLASH(ifp)) {
3821 /* If NA(ifp) is not null but there is no valid netmap
3822 * adapter it means that someone else is using the same
3823 * pointer (e.g. ax25_ptr on linux). This happens for
3824 * instance when also PF_RING is in use. */
3825 nm_prerr("Error: netmap adapter hook is busy");
3826 return EBUSY;
3827 }
3828
3829 hwna = nm_os_malloc(size);
3830 if (hwna == NULL)
3831 goto fail;
3832 hwna->up = *arg;
3833 hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
3834 strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
3835 if (override_reg) {
3836 hwna->nm_hw_register = hwna->up.nm_register;
3837 hwna->up.nm_register = netmap_hw_reg;
3838 }
3839 if (netmap_attach_common(&hwna->up)) {
3840 nm_os_free(hwna);
3841 goto fail;
3842 }
3843 netmap_adapter_get(&hwna->up);
3844
3845 NM_ATTACH_NA(ifp, &hwna->up);
3846
3847 nm_os_onattach(ifp);
3848
3849 if (arg->nm_dtor == NULL) {
3850 hwna->up.nm_dtor = netmap_hw_dtor;
3851 }
3852
3853 if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
3854 hwna->up.num_tx_rings, hwna->up.num_tx_desc,
3855 hwna->up.num_rx_rings, hwna->up.num_rx_desc);
3856 return 0;
3857
3858 fail:
3859 nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
3860 return (hwna ? EINVAL : ENOMEM);
3861 }
3862
3863
3864 int
netmap_attach(struct netmap_adapter * arg)3865 netmap_attach(struct netmap_adapter *arg)
3866 {
3867 return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
3868 1 /* override nm_reg */);
3869 }
3870
3871
3872 void
NM_DBG(netmap_adapter_get)3873 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
3874 {
3875 if (!na) {
3876 return;
3877 }
3878
3879 refcount_acquire(&na->na_refcount);
3880 }
3881
3882
3883 /* returns 1 iff the netmap_adapter is destroyed */
3884 int
NM_DBG(netmap_adapter_put)3885 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
3886 {
3887 if (!na)
3888 return 1;
3889
3890 if (!refcount_release(&na->na_refcount))
3891 return 0;
3892
3893 if (na->nm_dtor)
3894 na->nm_dtor(na);
3895
3896 if (na->tx_rings) { /* XXX should not happen */
3897 if (netmap_debug & NM_DEBUG_ON)
3898 nm_prerr("freeing leftover tx_rings");
3899 na->nm_krings_delete(na);
3900 }
3901 netmap_pipe_dealloc(na);
3902 if (na->nm_mem)
3903 netmap_mem_put(na->nm_mem);
3904 bzero(na, sizeof(*na));
3905 nm_os_free(na);
3906
3907 return 1;
3908 }
3909
3910 /* nm_krings_create callback for all hardware native adapters */
3911 int
netmap_hw_krings_create(struct netmap_adapter * na)3912 netmap_hw_krings_create(struct netmap_adapter *na)
3913 {
3914 int ret = netmap_krings_create(na, 0);
3915 if (ret == 0) {
3916 /* initialize the mbq for the sw rx ring */
3917 u_int lim = netmap_real_rings(na, NR_RX), i;
3918 for (i = na->num_rx_rings; i < lim; i++) {
3919 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
3920 }
3921 nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
3922 }
3923 return ret;
3924 }
3925
3926
3927
3928 /*
3929 * Called on module unload by the netmap-enabled drivers
3930 */
3931 void
netmap_detach(struct ifnet * ifp)3932 netmap_detach(struct ifnet *ifp)
3933 {
3934 struct netmap_adapter *na = NA(ifp);
3935
3936 if (!na)
3937 return;
3938
3939 NMG_LOCK();
3940 netmap_set_all_rings(na, NM_KR_LOCKED);
3941 /*
3942 * if the netmap adapter is not native, somebody
3943 * changed it, so we can not release it here.
3944 * The NAF_ZOMBIE flag will notify the new owner that
3945 * the driver is gone.
3946 */
3947 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
3948 na->na_flags |= NAF_ZOMBIE;
3949 }
3950 /* give active users a chance to notice that NAF_ZOMBIE has been
3951 * turned on, so that they can stop and return an error to userspace.
3952 * Note that this becomes a NOP if there are no active users and,
3953 * therefore, the put() above has deleted the na, since now NA(ifp) is
3954 * NULL.
3955 */
3956 netmap_enable_all_rings(ifp);
3957 NMG_UNLOCK();
3958 }
3959
3960
3961 /*
3962 * Intercept packets from the network stack and pass them
3963 * to netmap as incoming packets on the 'software' ring.
3964 *
3965 * We only store packets in a bounded mbq and then copy them
3966 * in the relevant rxsync routine.
3967 *
3968 * We rely on the OS to make sure that the ifp and na do not go
3969 * away (typically the caller checks for IFF_DRV_RUNNING or the like).
3970 * In nm_register() or whenever there is a reinitialization,
3971 * we make sure to make the mode change visible here.
3972 */
3973 int
netmap_transmit(struct ifnet * ifp,struct mbuf * m)3974 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
3975 {
3976 struct netmap_adapter *na = NA(ifp);
3977 struct netmap_kring *kring, *tx_kring;
3978 u_int len = MBUF_LEN(m);
3979 u_int error = ENOBUFS;
3980 unsigned int txr;
3981 struct mbq *q;
3982 int busy;
3983 u_int i;
3984
3985 i = MBUF_TXQ(m);
3986 if (i >= na->num_host_rx_rings) {
3987 i = i % na->num_host_rx_rings;
3988 }
3989 kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
3990
3991 // XXX [Linux] we do not need this lock
3992 // if we follow the down/configure/up protocol -gl
3993 // mtx_lock(&na->core_lock);
3994
3995 if (!nm_netmap_on(na)) {
3996 nm_prerr("%s not in netmap mode anymore", na->name);
3997 error = ENXIO;
3998 goto done;
3999 }
4000
4001 txr = MBUF_TXQ(m);
4002 if (txr >= na->num_tx_rings) {
4003 txr %= na->num_tx_rings;
4004 }
4005 tx_kring = NMR(na, NR_TX)[txr];
4006
4007 if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
4008 return MBUF_TRANSMIT(na, ifp, m);
4009 }
4010
4011 q = &kring->rx_queue;
4012
4013 // XXX reconsider long packets if we handle fragments
4014 if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4015 nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4016 len, NETMAP_BUF_SIZE(na));
4017 goto done;
4018 }
4019
4020 if (!netmap_generic_hwcsum) {
4021 if (nm_os_mbuf_has_csum_offld(m)) {
4022 nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4023 goto done;
4024 }
4025 }
4026
4027 if (nm_os_mbuf_has_seg_offld(m)) {
4028 nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4029 goto done;
4030 }
4031
4032 #ifdef __FreeBSD__
4033 ETHER_BPF_MTAP(ifp, m);
4034 #endif /* __FreeBSD__ */
4035
4036 /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4037 * and maybe other instances of netmap_transmit (the latter
4038 * not possible on Linux).
4039 * We enqueue the mbuf only if we are sure there is going to be
4040 * enough room in the host RX ring, otherwise we drop it.
4041 */
4042 mbq_lock(q);
4043
4044 busy = kring->nr_hwtail - kring->nr_hwcur;
4045 if (busy < 0)
4046 busy += kring->nkr_num_slots;
4047 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4048 nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4049 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4050 } else {
4051 mbq_enqueue(q, m);
4052 nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4053 /* notify outside the lock */
4054 m = NULL;
4055 error = 0;
4056 }
4057 mbq_unlock(q);
4058
4059 done:
4060 if (m)
4061 m_freem(m);
4062 /* unconditionally wake up listeners */
4063 kring->nm_notify(kring, 0);
4064 /* this is normally netmap_notify(), but for nics
4065 * connected to a bridge it is netmap_bwrap_intr_notify(),
4066 * that possibly forwards the frames through the switch
4067 */
4068
4069 return (error);
4070 }
4071
4072
4073 /*
4074 * Reset function to be called by the driver routines when reinitializing
4075 * a hardware ring. The driver is in charge of locking to protect the kring
4076 * while this operation is being performed. This is normally achieved by
4077 * calling netmap_disable_all_rings() before triggering a reset.
4078 * If the kring is not in netmap mode, return NULL to inform the caller
4079 * that this is the case.
4080 * If the kring is in netmap mode, set hwofs so that the netmap indices
4081 * seen by userspace (head/cut/tail) do not change, although the internal
4082 * NIC indices have been reset to 0.
4083 * In any case, adjust kring->nr_mode.
4084 */
4085 struct netmap_slot *
netmap_reset(struct netmap_adapter * na,enum txrx tx,u_int n,u_int new_cur)4086 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4087 u_int new_cur)
4088 {
4089 struct netmap_kring *kring;
4090 u_int new_hwtail, new_hwofs;
4091
4092 if (!nm_native_on(na)) {
4093 nm_prdis("interface not in native netmap mode");
4094 return NULL; /* nothing to reinitialize */
4095 }
4096
4097 if (tx == NR_TX) {
4098 if (n >= na->num_tx_rings)
4099 return NULL;
4100 kring = na->tx_rings[n];
4101 /*
4102 * Set hwofs to rhead, so that slots[rhead] is mapped to
4103 * the NIC internal slot 0, and thus the netmap buffer
4104 * at rhead is the next to be transmitted. Transmissions
4105 * that were pending before the reset are considered as
4106 * sent, so that we can have hwcur = rhead. All the slots
4107 * are now owned by the user, so we can also reinit hwtail.
4108 */
4109 new_hwofs = kring->rhead;
4110 new_hwtail = nm_prev(kring->rhead, kring->nkr_num_slots - 1);
4111 } else {
4112 if (n >= na->num_rx_rings)
4113 return NULL;
4114 kring = na->rx_rings[n];
4115 /*
4116 * Set hwofs to hwtail, so that slots[hwtail] is mapped to
4117 * the NIC internal slot 0, and thus the netmap buffer
4118 * at hwtail is the next to be given to the NIC.
4119 * Unread slots (the ones in [rhead,hwtail[) are owned by
4120 * the user, and thus the caller cannot give them
4121 * to the NIC right now.
4122 */
4123 new_hwofs = kring->nr_hwtail;
4124 new_hwtail = kring->nr_hwtail;
4125 }
4126 if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4127 kring->nr_mode = NKR_NETMAP_OFF;
4128 return NULL;
4129 }
4130 if (netmap_verbose) {
4131 nm_prinf("%s, hc %u->%u, ht %u->%u, ho %u->%u", kring->name,
4132 kring->nr_hwcur, kring->rhead,
4133 kring->nr_hwtail, new_hwtail,
4134 kring->nkr_hwofs, new_hwofs);
4135 }
4136 kring->nr_hwcur = kring->rhead;
4137 kring->nr_hwtail = new_hwtail;
4138 kring->nkr_hwofs = new_hwofs;
4139
4140 /*
4141 * Wakeup on the individual and global selwait
4142 * We do the wakeup here, but the ring is not yet reconfigured.
4143 * However, we are under lock so there are no races.
4144 */
4145 kring->nr_mode = NKR_NETMAP_ON;
4146 kring->nm_notify(kring, 0);
4147 return kring->ring->slot;
4148 }
4149
4150
4151 /*
4152 * Dispatch rx/tx interrupts to the netmap rings.
4153 *
4154 * "work_done" is non-null on the RX path, NULL for the TX path.
4155 * We rely on the OS to make sure that there is only one active
4156 * instance per queue, and that there is appropriate locking.
4157 *
4158 * The 'notify' routine depends on what the ring is attached to.
4159 * - for a netmap file descriptor, do a selwakeup on the individual
4160 * waitqueue, plus one on the global one if needed
4161 * (see netmap_notify)
4162 * - for a nic connected to a switch, call the proper forwarding routine
4163 * (see netmap_bwrap_intr_notify)
4164 */
4165 int
netmap_common_irq(struct netmap_adapter * na,u_int q,u_int * work_done)4166 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4167 {
4168 struct netmap_kring *kring;
4169 enum txrx t = (work_done ? NR_RX : NR_TX);
4170
4171 q &= NETMAP_RING_MASK;
4172
4173 if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
4174 nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX" , q);
4175 }
4176
4177 if (q >= nma_get_nrings(na, t))
4178 return NM_IRQ_PASS; // not a physical queue
4179
4180 kring = NMR(na, t)[q];
4181
4182 if (kring->nr_mode == NKR_NETMAP_OFF) {
4183 return NM_IRQ_PASS;
4184 }
4185
4186 if (t == NR_RX) {
4187 kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ?
4188 *work_done = 1; /* do not fire napi again */
4189 }
4190
4191 return kring->nm_notify(kring, 0);
4192 }
4193
4194
4195 /*
4196 * Default functions to handle rx/tx interrupts from a physical device.
4197 * "work_done" is non-null on the RX path, NULL for the TX path.
4198 *
4199 * If the card is not in netmap mode, simply return NM_IRQ_PASS,
4200 * so that the caller proceeds with regular processing.
4201 * Otherwise call netmap_common_irq().
4202 *
4203 * If the card is connected to a netmap file descriptor,
4204 * do a selwakeup on the individual queue, plus one on the global one
4205 * if needed (multiqueue card _and_ there are multiqueue listeners),
4206 * and return NR_IRQ_COMPLETED.
4207 *
4208 * Finally, if called on rx from an interface connected to a switch,
4209 * calls the proper forwarding routine.
4210 */
4211 int
netmap_rx_irq(struct ifnet * ifp,u_int q,u_int * work_done)4212 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
4213 {
4214 struct netmap_adapter *na = NA(ifp);
4215
4216 /*
4217 * XXX emulated netmap mode sets NAF_SKIP_INTR so
4218 * we still use the regular driver even though the previous
4219 * check fails. It is unclear whether we should use
4220 * nm_native_on() here.
4221 */
4222 if (!nm_netmap_on(na))
4223 return NM_IRQ_PASS;
4224
4225 if (na->na_flags & NAF_SKIP_INTR) {
4226 nm_prdis("use regular interrupt");
4227 return NM_IRQ_PASS;
4228 }
4229
4230 return netmap_common_irq(na, q, work_done);
4231 }
4232
4233 /* set/clear native flags and if_transmit/netdev_ops */
4234 void
nm_set_native_flags(struct netmap_adapter * na)4235 nm_set_native_flags(struct netmap_adapter *na)
4236 {
4237 struct ifnet *ifp = na->ifp;
4238
4239 /* We do the setup for intercepting packets only if we are the
4240 * first user of this adapter. */
4241 if (na->active_fds > 0) {
4242 return;
4243 }
4244
4245 na->na_flags |= NAF_NETMAP_ON;
4246 nm_os_onenter(ifp);
4247 nm_update_hostrings_mode(na);
4248 }
4249
4250 void
nm_clear_native_flags(struct netmap_adapter * na)4251 nm_clear_native_flags(struct netmap_adapter *na)
4252 {
4253 struct ifnet *ifp = na->ifp;
4254
4255 /* We undo the setup for intercepting packets only if we are the
4256 * last user of this adapter. */
4257 if (na->active_fds > 0) {
4258 return;
4259 }
4260
4261 nm_update_hostrings_mode(na);
4262 nm_os_onexit(ifp);
4263
4264 na->na_flags &= ~NAF_NETMAP_ON;
4265 }
4266
4267 void
netmap_krings_mode_commit(struct netmap_adapter * na,int onoff)4268 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4269 {
4270 enum txrx t;
4271
4272 for_rx_tx(t) {
4273 int i;
4274
4275 for (i = 0; i < netmap_real_rings(na, t); i++) {
4276 struct netmap_kring *kring = NMR(na, t)[i];
4277
4278 if (onoff && nm_kring_pending_on(kring))
4279 kring->nr_mode = NKR_NETMAP_ON;
4280 else if (!onoff && nm_kring_pending_off(kring))
4281 kring->nr_mode = NKR_NETMAP_OFF;
4282 }
4283 }
4284 }
4285
4286 /*
4287 * Module loader and unloader
4288 *
4289 * netmap_init() creates the /dev/netmap device and initializes
4290 * all global variables. Returns 0 on success, errno on failure
4291 * (but there is no chance)
4292 *
4293 * netmap_fini() destroys everything.
4294 */
4295
4296 static struct cdev *netmap_dev; /* /dev/netmap character device. */
4297 extern struct cdevsw netmap_cdevsw;
4298
4299
4300 void
netmap_fini(void)4301 netmap_fini(void)
4302 {
4303 if (netmap_dev)
4304 destroy_dev(netmap_dev);
4305 /* we assume that there are no longer netmap users */
4306 nm_os_ifnet_fini();
4307 netmap_uninit_bridges();
4308 netmap_mem_fini();
4309 NMG_LOCK_DESTROY();
4310 nm_prinf("netmap: unloaded module.");
4311 }
4312
4313
4314 int
netmap_init(void)4315 netmap_init(void)
4316 {
4317 int error;
4318
4319 NMG_LOCK_INIT();
4320
4321 error = netmap_mem_init();
4322 if (error != 0)
4323 goto fail;
4324 /*
4325 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
4326 * when the module is compiled in.
4327 * XXX could use make_dev_credv() to get error number
4328 */
4329 netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
4330 &netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
4331 "netmap");
4332 if (!netmap_dev)
4333 goto fail;
4334
4335 error = netmap_init_bridges();
4336 if (error)
4337 goto fail;
4338
4339 #ifdef __FreeBSD__
4340 nm_os_vi_init_index();
4341 #endif
4342
4343 error = nm_os_ifnet_init();
4344 if (error)
4345 goto fail;
4346
4347 #if !defined(__FreeBSD__) || defined(KLD_MODULE)
4348 nm_prinf("netmap: loaded module");
4349 #endif
4350 return (0);
4351 fail:
4352 netmap_fini();
4353 return (EINVAL); /* may be incorrect */
4354 }
4355