/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 *
 * Definitions of constants and the structures used by the netmap
 * framework, for the part visible to both kernel and userspace.
 * Detailed info on netmap is available with "man netmap" or at
 *
 *	http://info.iet.unipi.it/~luigi/netmap/
 *
 * This API is also used to communicate with the VALE software switch.
 */

#ifndef _NET_NETMAP_H_
#define _NET_NETMAP_H_

#define NETMAP_API	14	/* current API version */

#define NETMAP_MIN_API	14	/* min and max versions accepted */
#define NETMAP_MAX_API	15
/*
 * Some fields should be cache-aligned to reduce contention.
 * The alignment is architecture and OS dependent, but rather than
 * digging into OS headers to find the exact value we use an estimate
 * that should cover most architectures.
 */
#define NM_CACHE_ALIGN	128

/*
 * --- Netmap data structures ---
 *
 * The userspace data structures used by netmap are shown below.
 * They are allocated by the kernel and mmap()ed by userspace threads.
 * Pointers are implemented as memory offsets or indexes,
 * so that they can be easily dereferenced in kernel and userspace.
 KERNEL (opaque, obviously)

 ====================================================================
                                           |
 USERSPACE                                 |      struct netmap_ring
                                           +---->+---------------+
                                               / | head,cur,tail |
          struct netmap_if (nifp, 1 per fd)  /   | buf_ofs       |
           +----------------+               /    | other fields  |
           | ni_tx_rings    |              /     +===============+
           | ni_rx_rings    |             /      | buf_idx, len  | slot[0]
           |                |            /       | flags, ptr    |
           |                |           /        +---------------+
           +================+          /         | buf_idx, len  | slot[1]
           | txring_ofs[0]  | (rel.to nifp)--'   | flags, ptr    |
           | txring_ofs[1]  |                    +---------------+
            (tx+htx entries)                      (num_slots entries)
           | txring_ofs[t]  |                    | buf_idx, len  | slot[n-1]
           +----------------+                    | flags, ptr    |
           | rxring_ofs[0]  |                    +---------------+
           | rxring_ofs[1]  |
            (rx+hrx entries)
           | rxring_ofs[r]  |
           +----------------+

 * For each "interface" (NIC, host stack, PIPE, VALE switch port) bound to
 * a file descriptor, the mmap()ed region contains a (logically readonly)
 * struct netmap_if pointing to struct netmap_ring's.
 *
 * There is one netmap_ring per physical NIC ring, plus at least one tx/rx ring
 * pair attached to the host stack (these pairs are unused for non-NIC ports).
 *
 * All physical/host stack ports share the same memory region,
 * so that zero-copy can be implemented between them.
 * VALE switch ports instead have separate memory regions.
 *
 * The netmap_ring is the userspace-visible replica of the NIC ring.
 * Each slot has the index of a buffer (MTU-sized and residing in the
 * mmapped region), its length and some flags. An extra 64-bit pointer
 * is provided for user-supplied buffers in the tx path.
 *
 * In user space, the buffer address is computed as
 *	(char *)ring + buf_ofs + index * NETMAP_BUF_SIZE
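 *
 * For example, a hypothetical helper (a sketch of what the NETMAP_BUF()
 * macro in netmap_user.h computes; the name nm_slot_buf is made up here)
 * could locate the buffer of slot i as:
 *
 *	static inline char *
 *	nm_slot_buf(struct netmap_ring *ring, uint32_t i)
 *	{
 *		return (char *)ring + ring->buf_ofs +
 *		    (size_t)ring->slot[i].buf_idx * ring->nr_buf_size;
 *	}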
 *
 * Added in NETMAP_API 11:
 *
 * + NIOCREGIF can request the allocation of extra spare buffers from
 *   the same memory pool. The desired number of buffers must be in
 *   nr_arg3. The ioctl may return fewer buffers, depending on memory
 *   availability. nr_arg3 will return the actual value, and, once
 *   mapped, nifp->ni_bufs_head will be the index of the first buffer.
 *
 *   The buffers are linked to each other using the first uint32_t
 *   as the index. On close, ni_bufs_head must point to the list of
 *   buffers to be released.
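 *
 *   A sketch of a walk over the extra buffer list (NETMAP_BUF() is from
 *   netmap_user.h; the assumption that index 0 terminates the list is
 *   ours, not part of the ABI description above):
 *
 *	uint32_t scan = nifp->ni_bufs_head;
 *	while (scan != 0) {
 *		char *buf = NETMAP_BUF(ring, scan);
 *		... use buf ...
 *		scan = *(uint32_t *)buf;	<- index of the next buffer
 *	}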
 *
 * + NIOCREGIF can attach to PIPE rings sharing the same memory
 *   space with a parent device. The ifname indicates the parent device,
 *   which must already exist. Flags in nr_flags indicate if we want to
 *   bind the master or slave side; the index (from nr_ringid)
 *   is just a cookie and does not need to be sequential.
 *
 * + NIOCREGIF can also attach to 'monitor' rings that replicate
 *   the content of specific rings, also from the same memory space.
 *
 * Extra flags in nr_flags support the above functions.
 * Application libraries may use the following naming scheme (see the
 * nm_open() sketch after the list):
 *
 *	netmap:foo	all NIC ring pairs
 *	netmap:foo^	only host ring pairs
 *	netmap:foo^k	the k-th host ring pair
 *	netmap:foo+	all NIC ring pairs + host ring pairs
 *	netmap:foo-k	the k-th NIC ring pair
 *	netmap:foo{k	PIPE ring pair k, master side
 *	netmap:foo}k	PIPE ring pair k, slave side
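 *
 * For example, with the nm_open() helper from netmap_user.h (a sketch;
 * error handling is omitted and "em0" is just a placeholder name):
 *
 *	struct nm_desc *d = nm_open("netmap:em0-2", NULL, 0, NULL);
 *	... use the third NIC ring pair of em0 ...
 *	nm_close(d);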
 *
 * Some notes about host rings:
 *
 * + The RX host rings are used to store those packets that the host network
 *   stack is trying to transmit through a NIC queue, but only if that queue
 *   is currently in netmap mode. Netmap will not intercept host stack mbufs
 *   designated to NIC queues that are not in netmap mode. As a consequence,
 *   registering a netmap port with netmap:foo^ is not enough to intercept
 *   mbufs in the RX host rings; the netmap port should be registered with
 *   netmap:foo*, or another registration should be done to open at least a
 *   NIC TX queue in netmap mode.
 *
 * + Netmap is not currently able to deal with intercepted transmit mbufs which
 *   require offloads like TSO, UFO, checksum offloads, etc. It is the
 *   responsibility of the user to disable those offloads (e.g. using
 *   ifconfig on FreeBSD or ethtool -K on Linux) for an interface that is being
 *   used in netmap mode. If the offloads are not disabled, GSO and/or
 *   unchecksummed packets may be dropped immediately or end up in the host RX
 *   rings, and will be dropped as soon as the packet reaches another netmap
 *   adapter.
 */

/*
 * struct netmap_slot is a buffer descriptor
 */
struct netmap_slot {
	uint32_t buf_idx;	/* buffer index */
	uint16_t len;		/* length for this slot */
	uint16_t flags;		/* buf changed, etc. */
	uint64_t ptr;		/* pointer for indirect buffers */
};

/*
 * The following flags control how the slot is used
 */

#define NS_BUF_CHANGED	0x0001	/* buf_idx changed */
	/*
	 * must be set whenever buf_idx is changed (as it might be
	 * necessary to recompute the physical address and mapping)
	 *
	 * It is also set by the kernel whenever the buf_idx is
	 * changed internally (e.g., by pipes). Applications may
	 * use this information to know when they can reuse the
	 * contents of previously prepared buffers.
	 */

#define NS_REPORT	0x0002	/* ask the hardware to report results */
	/*
	 * Request notification when slot is used by the hardware.
	 * Normally transmit completions are handled lazily and
	 * may be unreported. This flag lets us know when a slot
	 * has been sent (e.g. to terminate the sender).
	 */

#define NS_FORWARD	0x0004	/* pass packet 'forward' */
	/*
	 * (Only for physical ports, rx rings with NR_FORWARD set).
	 * Slots released to the kernel (i.e. before ring->head) with
	 * this flag set are passed to the peer ring (host/NIC),
	 * thus restoring the host-NIC connection for these slots.
	 * This supports efficient traffic monitoring or firewalling.
	 */

#define NS_NO_LEARN	0x0008	/* disable bridge learning */
	/*
	 * On a VALE switch, do not 'learn' the source port for
	 * this buffer.
	 */

#define NS_INDIRECT	0x0010	/* userspace buffer */
	/*
	 * (VALE tx rings only) data is in a userspace buffer,
	 * whose address is in the 'ptr' field in the slot.
	 */

#define NS_MOREFRAG	0x0020	/* packet has more fragments */
	/*
	 * (VALE ports, ptnetmap ports and some NIC ports, e.g.
	 * ixgbe and i40e on Linux)
	 * Set on all but the last slot of a multi-segment packet.
	 * The 'len' field refers to the individual fragment.
	 */

#define NS_PORT_SHIFT	8
#define NS_PORT_MASK	(0xff << NS_PORT_SHIFT)
	/*
	 * The high 8 bits of the flags, if not zero, indicate the
	 * destination port for the VALE switch, overriding
	 * the lookup table.
	 */

#define NS_RFRAGS(_slot)	( ((_slot)->flags >> 8) & 0xff)
	/*
	 * (VALE rx rings only) the high 8 bits
	 * are the number of fragments.
	 */

#define NETMAP_MAX_FRAGS	64	/* max number of fragments */

/*
 * struct netmap_ring
 *
 * Netmap representation of a TX or RX ring (also known as "queue").
 * This is a queue implemented as a fixed-size circular array.
 * At the software level the important fields are: head, cur, tail.
 *
 * In TX rings:
 *
 *	head	first slot available for transmission.
 *	cur	wakeup point. select() and poll() will unblock
 *		when 'tail' moves past 'cur'
 *	tail	(readonly) first slot reserved to the kernel
 *
 * [head .. tail-1] can be used for new packets to send;
 * 'head' and 'cur' must be incremented as slots are filled
 * with new packets to be sent;
 * 'cur' can be moved further ahead if we need more space
 * for new transmissions. XXX todo (2014-03-12)
 *
 * In RX rings:
 *
 *	head	first valid received packet
 *	cur	wakeup point. select() and poll() will unblock
 *		when 'tail' moves past 'cur'
 *	tail	(readonly) first slot reserved to the kernel
 *
 * [head .. tail-1] contain received packets;
 * 'head' and 'cur' must be incremented as slots are consumed
 * and can be returned to the kernel;
 * 'cur' can be moved further ahead if we want to wait for
 * new packets without returning the previous ones.
 *
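 * For example, a minimal RX consumer loop (a sketch; NETMAP_BUF() comes
 * from netmap_user.h and consume() is a hypothetical application
 * function):
 *
 *	while (ring->head != ring->tail) {
 *		struct netmap_slot *slot = &ring->slot[ring->head];
 *		consume(NETMAP_BUF(ring, slot->buf_idx), slot->len);
 *		ring->head = ring->cur =
 *		    (ring->head + 1 == ring->num_slots) ? 0 : ring->head + 1;
 *	}
 *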
 * DATA OWNERSHIP/LOCKING:
 * The netmap_ring, and all slots and buffers in the range
 * [head .. tail-1] are owned by the user program;
 * the kernel only accesses them during a netmap system call
 * and in the user thread context.
 *
 * Other slots and buffers are reserved for use by the kernel
 */
struct netmap_ring {
	/*
	 * buf_ofs is meant to be used through macros.
	 * It contains the offset of the buffer region from this
	 * descriptor.
	 */
	const int64_t	buf_ofs;
	const uint32_t	num_slots;	/* number of slots in the ring. */
	const uint32_t	nr_buf_size;
	const uint16_t	ringid;
	const uint16_t	dir;		/* 0: tx, 1: rx */

	uint32_t	head;		/* (u) first user slot */
	uint32_t	cur;		/* (u) wakeup point */
	uint32_t	tail;		/* (k) first kernel slot */

	uint32_t	flags;

	struct timeval	ts;		/* (k) time of last *sync() */

	/* opaque room for a mutex or similar object */
#if !defined(_WIN32) || defined(__CYGWIN__)
	uint8_t	__attribute__((__aligned__(NM_CACHE_ALIGN))) sem[128];
#else
	uint8_t	__declspec(align(NM_CACHE_ALIGN)) sem[128];
#endif

	/* the slots follow. This struct has variable size */
	struct netmap_slot slot[0];	/* array of slots. */
};
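
/*
 * Example: a minimal TX producer (a sketch; NETMAP_BUF() is from
 * netmap_user.h, and pkt/pkt_len stand for an application-provided
 * frame). After advancing head and cur, the slots can be flushed with
 * NIOCTXSYNC or by poll()/select():
 *
 *	while (pkts_left > 0 && ring->head != ring->tail) {
 *		struct netmap_slot *slot = &ring->slot[ring->head];
 *		memcpy(NETMAP_BUF(ring, slot->buf_idx), pkt, pkt_len);
 *		slot->len = pkt_len;
 *		ring->head = ring->cur =
 *		    (ring->head + 1 == ring->num_slots) ? 0 : ring->head + 1;
 *		pkts_left--;
 *	}
 *	ioctl(fd, NIOCTXSYNC, NULL);
 */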

/*
 * RING FLAGS
 */
#define NR_TIMESTAMP	0x0002	/* set timestamp on *sync() */
	/*
	 * updates the 'ts' field on each netmap syscall. This saves
	 * a separate gettimeofday(), and is not much worse than
	 * software timestamps generated in the interrupt handler.
	 */

#define NR_FORWARD	0x0004	/* enable NS_FORWARD for ring */
	/*
	 * Enables the NS_FORWARD slot flag for the ring.
	 */

/*
 * Helper functions for kernel and userspace
 */

/*
 * Check if space is available in the ring. We use ring->head, which
 * points to the next netmap slot to be published to netmap. It is
 * possible that the application moves ring->cur ahead of ring->tail
 * (e.g., by setting ring->cur past ring->tail), if it wants more slots
 * than the ones currently available, and it wants to be notified when
 * more arrive. See netmap(4) for more details and examples.
 */
static inline int
nm_ring_empty(struct netmap_ring *ring)
{
	return (ring->head == ring->tail);
}
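
/*
 * A related helper, shown here as a sketch of nm_ring_space() from
 * netmap_user.h, counts the slots currently available to the user
 * (the [head .. tail-1] range, modulo the ring size):
 *
 *	static inline uint32_t
 *	nm_ring_space(struct netmap_ring *ring)
 *	{
 *		int ret = ring->tail - ring->head;
 *		if (ret < 0)
 *			ret += ring->num_slots;
 *		return ret;
 *	}
 */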

/*
 * Netmap representation of an interface and its queue(s).
 * This is initialized by the kernel when binding a file
 * descriptor to a port, and should be considered as readonly
 * by user programs. The kernel never uses it.
 *
 * There is one netmap_if for each file descriptor on which we want
 * to select/poll.
 * select/poll operates on one or all pairs depending on the value of
 * nmr_queueid passed on the ioctl.
 */
struct netmap_if {
	char		ni_name[IFNAMSIZ];	/* name of the interface. */
	const uint32_t	ni_version;	/* API version, currently unused */
	const uint32_t	ni_flags;	/* properties */
#define NI_PRIV_MEM	0x1	/* private memory region */

	/*
	 * The number of packet rings available in netmap mode.
	 * Physical NICs can have different numbers of tx and rx rings.
	 * Physical NICs also have at least one 'host' ring pair.
	 * Additionally, clients can request additional ring pairs to
	 * be used for internal communication.
	 */
	const uint32_t	ni_tx_rings;	/* number of HW tx rings */
	const uint32_t	ni_rx_rings;	/* number of HW rx rings */

	uint32_t	ni_bufs_head;	/* head index for extra bufs */
	const uint32_t	ni_host_tx_rings; /* number of SW tx rings */
	const uint32_t	ni_host_rx_rings; /* number of SW rx rings */
	uint32_t	ni_spare1[3];
	/*
	 * The following array contains the offset of each netmap ring
	 * from this structure, in the following order:
	 *	- NIC tx rings (ni_tx_rings);
	 *	- host tx rings (ni_host_tx_rings);
	 *	- NIC rx rings (ni_rx_rings);
	 *	- host rx rings (ni_host_rx_rings);
	 *
	 * The area is filled up by the kernel on NETMAP_REQ_REGISTER,
	 * and then only read by userspace code.
	 */
	const ssize_t	ring_ofs[0];
};

/* Legacy interface to interact with a netmap control device.
 * Included for backward compatibility. The user should not include this
 * file directly. */
#include "netmap_legacy.h"

/*
 * New API to control netmap control devices. New applications should only use
 * nmreq_xyz structs with the NIOCCTRL ioctl() command.
 *
 * NIOCCTRL takes a nmreq_header struct, which contains the required
 * API version, the name of a netmap port, a command type, and pointers
 * to request body and options.
 *
 *	nr_name (in)
 *		The name of the port (em0, valeXXX:YYY, eth0{pn1 etc.)
 *
 *	nr_version (in/out)
 *		Must match NETMAP_API as used in the kernel, error otherwise.
 *		Always returns the desired value on output.
 *
 *	nr_reqtype (in)
 *		One of the NETMAP_REQ_* command types below
 *
 *	nr_body (in)
 *		Pointer to a command-specific struct, described by one
 *		of the struct nmreq_xyz below.
 *
 *	nr_options (in)
 *		Command specific options, if any.
 *
 * A NETMAP_REQ_REGISTER command activates netmap mode on the netmap
 * port (e.g. physical interface) specified by nmreq_header.nr_name.
 * The request body (struct nmreq_register) has several arguments to
 * specify how the port is to be registered.
 *
 *	nr_tx_slots, nr_rx_slots, nr_tx_rings, nr_rx_rings,
 *	nr_host_tx_rings, nr_host_rx_rings (in/out)
 *		On input, non-zero values may be used to reconfigure the port
 *		according to the requested values, but this is not guaranteed.
 *		On output the actual values in use are reported.
 *
 *	nr_mode (in)
 *		Indicate what set of rings must be bound to the netmap
 *		device (e.g. all NIC rings, host rings only, NIC and
 *		host rings, ...). Values are in NR_REG_*.
 *
 *	nr_ringid (in)
 *		If nr_mode == NR_REG_ONE_NIC (only a single couple of TX/RX
 *		rings), indicate which NIC TX and/or RX ring is to be bound
 *		(0..nr_*x_rings-1).
 *
 *	nr_flags (in)
 *		Indicate special options for how to open the port.
 *
 *		NR_NO_TX_POLL can be OR-ed to make select()/poll() push
 *		packets on tx rings only if POLLOUT is set.
 *		The default is to push any pending packet.
 *
 *		NR_DO_RX_POLL can be OR-ed to make select()/poll() release
 *		packets on rx rings also when POLLIN is NOT set.
 *		The default is to touch the rx ring only with POLLIN.
 *		Note that this is the opposite of TX because it
 *		reflects the common usage.
 *
 *		Other options are NR_MONITOR_TX, NR_MONITOR_RX, NR_ZCOPY_MON,
 *		NR_EXCLUSIVE, NR_RX_RINGS_ONLY, NR_TX_RINGS_ONLY and
 *		NR_ACCEPT_VNET_HDR.
 *
 *	nr_mem_id (in/out)
 *		The identity of the memory region used.
 *		On input, 0 means the system decides autonomously,
 *		other values may try to select a specific region.
 *		On return the actual value is reported.
 *		Region '1' is the global allocator, normally shared
 *		by all interfaces. Other values are private regions.
 *		If two ports use the same region, zero-copy is possible.
 *
 *	nr_extra_bufs (in/out)
 *		Number of extra buffers to be allocated.
 *
 * The other NETMAP_REQ_* commands are described below.
 *
 */
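
/*
 * Example: a minimal NETMAP_REQ_REGISTER invocation (a sketch; error
 * handling is omitted, "em0" is just a placeholder port name, and fd
 * is an open netmap control device such as /dev/netmap):
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_register reg;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&reg, 0, sizeof(reg));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	strncpy(hdr.nr_name, "em0", sizeof(hdr.nr_name) - 1);
 *	hdr.nr_body = (uintptr_t)&reg;
 *	reg.nr_mode = NR_REG_ALL_NIC;
 *	if (ioctl(fd, NIOCCTRL, &hdr) < 0)
 *		... handle the error ...
 */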

/* maximum size of a request, including all options */
#define NETMAP_REQ_MAXSIZE	4096

/* Header common to all request options. */
struct nmreq_option {
	/* Pointer to the next option. */
	uint64_t	nro_next;
	/* Option type. */
	uint32_t	nro_reqtype;
	/* (out) status of the option:
	 * 0: recognized and processed
	 * !=0: errno value
	 */
	uint32_t	nro_status;
	/* Option size, used only for options that can have variable size
	 * (e.g. because they contain arrays). For fixed-size options this
	 * field should be set to zero. */
	uint64_t	nro_size;
};

/* Header common to all requests. Do not reorder these fields, as we need
 * the second one (nr_reqtype) to know how much to copy from/to userspace. */
struct nmreq_header {
	uint16_t	nr_version;	/* API version */
	uint16_t	nr_reqtype;	/* nmreq type (NETMAP_REQ_*) */
	uint32_t	nr_reserved;	/* must be zero */
#define NETMAP_REQ_IFNAMSIZ	64
	char		nr_name[NETMAP_REQ_IFNAMSIZ]; /* port name */
	uint64_t	nr_options;	/* command-specific options */
	uint64_t	nr_body;	/* ptr to nmreq_xyz struct */
};
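
/*
 * Options form a singly linked list hanging off nr_options, chained
 * through nro_next; pointers travel in the uint64_t fields. A sketch
 * of prepending one option, where opt points to the nmreq_option
 * header embedded in some nmreq_opt_xyz struct:
 *
 *	opt->nro_reqtype = NETMAP_REQ_OPT_CSB;	<- or another option type
 *	opt->nro_next = hdr.nr_options;
 *	hdr.nr_options = (uintptr_t)opt;
 */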

enum {
	/* Register a netmap port with the device. */
	NETMAP_REQ_REGISTER = 1,
	/* Get information from a netmap port. */
	NETMAP_REQ_PORT_INFO_GET,
	/* Attach a netmap port to a VALE switch. */
	NETMAP_REQ_VALE_ATTACH,
	/* Detach a netmap port from a VALE switch. */
	NETMAP_REQ_VALE_DETACH,
	/* List the ports attached to a VALE switch. */
	NETMAP_REQ_VALE_LIST,
	/* Set the port header length (was virtio-net header length). */
	NETMAP_REQ_PORT_HDR_SET,
	/* Get the port header length (was virtio-net header length). */
	NETMAP_REQ_PORT_HDR_GET,
	/* Create a new persistent VALE port. */
	NETMAP_REQ_VALE_NEWIF,
	/* Delete a persistent VALE port. */
	NETMAP_REQ_VALE_DELIF,
	/* Enable polling kernel thread(s) on an attached VALE port. */
	NETMAP_REQ_VALE_POLLING_ENABLE,
	/* Disable polling kernel thread(s) on an attached VALE port. */
	NETMAP_REQ_VALE_POLLING_DISABLE,
	/* Get info about the pools of a memory allocator. */
	NETMAP_REQ_POOLS_INFO_GET,
	/* Start an in-kernel loop that syncs the rings periodically or
	 * on notifications. The loop runs in the context of the ioctl
	 * syscall, and only stops on NETMAP_REQ_SYNC_KLOOP_STOP. */
	NETMAP_REQ_SYNC_KLOOP_START,
	/* Stops the thread executing the in-kernel loop. The thread
	 * returns from the ioctl syscall. */
	NETMAP_REQ_SYNC_KLOOP_STOP,
	/* Enable CSB mode on a registered netmap control device. */
	NETMAP_REQ_CSB_ENABLE,
};

enum {
	/* On NETMAP_REQ_REGISTER, ask netmap to use memory allocated
	 * from user-space allocated memory pools (e.g. hugepages).
	 */
	NETMAP_REQ_OPT_EXTMEM = 1,

	/* On NETMAP_REQ_SYNC_KLOOP_START, ask netmap to use eventfd-based
	 * notifications to synchronize the kernel loop with the application.
	 */
	NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS,

	/* On NETMAP_REQ_REGISTER, ask netmap to work in CSB mode, where
	 * head, cur and tail pointers are not exchanged through the
	 * struct netmap_ring header, but rather using a user-provided
	 * memory area (see struct nm_csb_atok and struct nm_csb_ktoa).
	 */
	NETMAP_REQ_OPT_CSB,

	/* An extension to NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS, which specifies
	 * if the TX and/or RX rings are synced in the context of the VM exit.
	 * This requires the 'ioeventfd' fields to be valid (cannot be < 0).
	 */
	NETMAP_REQ_OPT_SYNC_KLOOP_MODE,

	/* This is a marker to count the number of available options.
	 * New options must be added above it. */
	NETMAP_REQ_OPT_MAX,
};

/*
 * nr_reqtype: NETMAP_REQ_REGISTER
 * Bind (register) a netmap port to this control device.
 */
struct nmreq_register {
	uint64_t	nr_offset;	/* nifp offset in the shared region */
	uint64_t	nr_memsize;	/* size of the shared region */
	uint32_t	nr_tx_slots;	/* slots in tx rings */
	uint32_t	nr_rx_slots;	/* slots in rx rings */
	uint16_t	nr_tx_rings;	/* number of tx rings */
	uint16_t	nr_rx_rings;	/* number of rx rings */
	uint16_t	nr_host_tx_rings; /* number of host tx rings */
	uint16_t	nr_host_rx_rings; /* number of host rx rings */

	uint16_t	nr_mem_id;	/* id of the memory allocator */
	uint16_t	nr_ringid;	/* ring(s) we care about */
	uint32_t	nr_mode;	/* specify NR_REG_* modes */
	uint32_t	nr_extra_bufs;	/* number of requested extra buffers */

	uint64_t	nr_flags;	/* additional flags (see below) */
/* monitors use nr_ringid and nr_mode to select the rings to monitor */
#define NR_MONITOR_TX	0x100
#define NR_MONITOR_RX	0x200
#define NR_ZCOPY_MON	0x400
/* request exclusive access to the selected rings */
#define NR_EXCLUSIVE	0x800
/* 0x1000 unused */
#define NR_RX_RINGS_ONLY	0x2000
#define NR_TX_RINGS_ONLY	0x4000
/* Applications set this flag if they are able to deal with virtio-net headers,
 * that is send/receive frames that start with a virtio-net header.
 * If not set, NETMAP_REQ_REGISTER will fail with netmap ports that require
 * applications to use those headers. If the flag is set, the application can
 * use the NETMAP_VNET_HDR_GET command to figure out the header length. */
#define NR_ACCEPT_VNET_HDR	0x8000
/* The following two have the same meaning as NETMAP_NO_TX_POLL and
 * NETMAP_DO_RX_POLL. */
#define NR_DO_RX_POLL	0x10000
#define NR_NO_TX_POLL	0x20000
};

/* Valid values for nmreq_register.nr_mode (see above). */
enum {	NR_REG_DEFAULT = 0,	/* backward compat, should not be used. */
	NR_REG_ALL_NIC = 1,
	NR_REG_SW = 2,
	NR_REG_NIC_SW = 3,
	NR_REG_ONE_NIC = 4,
	NR_REG_PIPE_MASTER = 5, /* deprecated, use "x{y" port name syntax */
	NR_REG_PIPE_SLAVE = 6,	/* deprecated, use "x}y" port name syntax */
	NR_REG_NULL = 7,
	NR_REG_ONE_SW = 8,
};

/* A single ioctl number is shared by all the new API commands.
 * Demultiplexing is done using the hdr.nr_reqtype field.
 * FreeBSD uses the size value embedded in the _IOWR to determine
 * how much to copy in/out, so we define the ioctl() command
 * specifying only nmreq_header, and copyin/copyout the rest. */
#define NIOCCTRL	_IOWR('i', 151, struct nmreq_header)

/* The ioctl commands to sync TX/RX netmap rings.
 * NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues,
 * whose identity is set in NETMAP_REQ_REGISTER through nr_ringid.
 * These are non-blocking and take no argument. */
#define NIOCTXSYNC	_IO('i', 148)	/* sync tx queues */
#define NIOCRXSYNC	_IO('i', 149)	/* sync rx queues */
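
/*
 * Example: waiting for input with poll() instead of busy-waiting on
 * NIOCRXSYNC (a sketch; fd is a control device bound to the port via
 * NETMAP_REQ_REGISTER). On return from poll(), the rx rings bound to
 * fd have been updated:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	... scan the slots between ring->head and ring->tail ...
 */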

/*
 * nr_reqtype: NETMAP_REQ_PORT_INFO_GET
 * Get information about a netmap port, including number of rings,
 * slots per ring, id of the memory allocator, etc. The netmap
 * control device used for this operation does not need to be bound
 * to a netmap port.
 */
struct nmreq_port_info_get {
	uint64_t	nr_memsize;	/* size of the shared region */
	uint32_t	nr_tx_slots;	/* slots in tx rings */
	uint32_t	nr_rx_slots;	/* slots in rx rings */
	uint16_t	nr_tx_rings;	/* number of tx rings */
	uint16_t	nr_rx_rings;	/* number of rx rings */
	uint16_t	nr_host_tx_rings; /* number of host tx rings */
	uint16_t	nr_host_rx_rings; /* number of host rx rings */
	uint16_t	nr_mem_id;	/* memory allocator id (in/out) */
	uint16_t	pad[3];
};

#define	NM_BDG_NAME	"vale"	/* prefix for bridge port name */

/*
 * nr_reqtype: NETMAP_REQ_VALE_ATTACH
 * Attach a netmap port to a VALE switch. Both the name of the netmap
 * port and the VALE switch are specified through the nr_name argument.
 * The attach operation could need to register a port, so at least
 * the same arguments are available.
 * port_index will contain the index where the port has been attached.
 */
struct nmreq_vale_attach {
	struct nmreq_register reg;
	uint32_t port_index;
	uint32_t pad1;
};

/*
 * nr_reqtype: NETMAP_REQ_VALE_DETACH
 * Detach a netmap port from a VALE switch. Both the name of the netmap
 * port and the VALE switch are specified through the nr_name argument.
 * port_index will contain the index where the port was attached.
 */
struct nmreq_vale_detach {
	uint32_t port_index;
	uint32_t pad1;
};

/*
 * nr_reqtype: NETMAP_REQ_VALE_LIST
 * List the ports of a VALE switch.
 */
struct nmreq_vale_list {
	/* Name of the VALE port (valeXXX:YYY) or empty. */
	uint16_t	nr_bridge_idx;
	uint16_t	pad1;
	uint32_t	nr_port_idx;
};

/*
 * nr_reqtype: NETMAP_REQ_PORT_HDR_SET or NETMAP_REQ_PORT_HDR_GET
 * Set or get the port header length of the port identified by hdr.nr_name.
 * The control device does not need to be bound to a netmap port.
 */
struct nmreq_port_hdr {
	uint32_t	nr_hdr_len;
	uint32_t	pad1;
};

/*
 * nr_reqtype: NETMAP_REQ_VALE_NEWIF
 * Create a new persistent VALE port.
 */
struct nmreq_vale_newif {
	uint32_t	nr_tx_slots;	/* slots in tx rings */
	uint32_t	nr_rx_slots;	/* slots in rx rings */
	uint16_t	nr_tx_rings;	/* number of tx rings */
	uint16_t	nr_rx_rings;	/* number of rx rings */
	uint16_t	nr_mem_id;	/* id of the memory allocator */
	uint16_t	pad1;
};

/*
 * nr_reqtype: NETMAP_REQ_VALE_POLLING_ENABLE or NETMAP_REQ_VALE_POLLING_DISABLE
 * Enable or disable polling kthreads on a VALE port.
 */
struct nmreq_vale_polling {
	uint32_t	nr_mode;
#define NETMAP_POLLING_MODE_SINGLE_CPU 1
#define NETMAP_POLLING_MODE_MULTI_CPU 2
	uint32_t	nr_first_cpu_id;
	uint32_t	nr_num_polling_cpus;
	uint32_t	pad1;
};

/*
 * nr_reqtype: NETMAP_REQ_POOLS_INFO_GET
 * Get info about the pools of the memory allocator of the netmap
 * port specified by hdr.nr_name and nr_mem_id. The netmap control
 * device used for this operation does not need to be bound to a netmap
 * port.
 */
struct nmreq_pools_info {
	uint64_t	nr_memsize;
	uint16_t	nr_mem_id;	/* in/out argument */
	uint16_t	pad1[3];
	uint64_t	nr_if_pool_offset;
	uint32_t	nr_if_pool_objtotal;
	uint32_t	nr_if_pool_objsize;
	uint64_t	nr_ring_pool_offset;
	uint32_t	nr_ring_pool_objtotal;
	uint32_t	nr_ring_pool_objsize;
	uint64_t	nr_buf_pool_offset;
	uint32_t	nr_buf_pool_objtotal;
	uint32_t	nr_buf_pool_objsize;
};

/*
 * nr_reqtype: NETMAP_REQ_SYNC_KLOOP_START
 * Start an in-kernel loop that syncs the rings periodically or on
 * notifications. The loop runs in the context of the ioctl syscall,
 * and only stops on NETMAP_REQ_SYNC_KLOOP_STOP.
 * The registered netmap port must be open in CSB mode.
 */
struct nmreq_sync_kloop_start {
	/* Sleeping is the default synchronization method for the kloop.
	 * The 'sleep_us' field specifies how many microseconds to sleep for
	 * when there is no work to do, before doing another kloop iteration.
	 */
	uint32_t	sleep_us;
	uint32_t	pad1;
};

/* A CSB entry for the application --> kernel direction. */
struct nm_csb_atok {
	uint32_t head;		  /* AW+ KR+ the head of the appl netmap_ring */
	uint32_t cur;		  /* AW+ KR+ the cur of the appl netmap_ring */
	uint32_t appl_need_kick;  /* AW+ KR+ kern --> appl notification enable */
	uint32_t sync_flags;	  /* AW+ KR+ the flags of the appl [tx|rx]sync() */
	uint32_t pad[12];	  /* pad to a 64 bytes cacheline */
};

/* A CSB entry for the application <-- kernel direction. */
struct nm_csb_ktoa {
	uint32_t hwcur;		  /* AR+ KW+ the hwcur of the kern netmap_kring */
	uint32_t hwtail;	  /* AR+ KW+ the hwtail of the kern netmap_kring */
	uint32_t kern_need_kick;  /* AR+ KW+ appl-->kern notification enable */
	uint32_t pad[13];
};

#ifdef __linux__

#ifdef __KERNEL__
#define nm_stst_barrier smp_wmb
#define nm_ldld_barrier smp_rmb
#define nm_stld_barrier smp_mb
#else  /* !__KERNEL__ */
static inline void nm_stst_barrier(void)
{
	/* A memory barrier with release semantic has the combined
	 * effect of a store-store barrier and a load-store barrier,
	 * which is fine for us. */
	__atomic_thread_fence(__ATOMIC_RELEASE);
}
static inline void nm_ldld_barrier(void)
{
	/* A memory barrier with acquire semantic has the combined
	 * effect of a load-load barrier and a load-store barrier,
	 * which is fine for us. */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
}
#endif /* !__KERNEL__ */

#elif defined(__FreeBSD__)

#ifdef _KERNEL
#define nm_stst_barrier	atomic_thread_fence_rel
#define nm_ldld_barrier	atomic_thread_fence_acq
#define nm_stld_barrier	atomic_thread_fence_seq_cst
#else  /* !_KERNEL */
#include <stdatomic.h>
static inline void nm_stst_barrier(void)
{
	atomic_thread_fence(memory_order_release);
}
static inline void nm_ldld_barrier(void)
{
	atomic_thread_fence(memory_order_acquire);
}
#endif /* !_KERNEL */

#else  /* !__linux__ && !__FreeBSD__ */
#error "OS not supported"
#endif /* !__linux__ && !__FreeBSD__ */

/* Application side of sync-kloop: Write ring pointers (cur, head) to the CSB.
 * This routine is coupled with sync_kloop_kernel_read(). */
static inline void
nm_sync_kloop_appl_write(struct nm_csb_atok *atok, uint32_t cur,
    uint32_t head)
{
	/* Issue a first store-store barrier to make sure that writes to the
	 * netmap ring are not reordered after the updates of atok->cur and
	 * atok->head. */
	nm_stst_barrier();

	/*
	 * We need to write cur and head to the CSB but we cannot do it atomically.
	 * There is no way we can prevent the host from reading the updated value
	 * of one of the two and the old value of the other. However, if we make
	 * sure that the host never reads a value of head more recent than the
	 * value of cur we are safe. We can allow the host to read a value of cur
	 * more recent than the value of head, since in the netmap ring cur can be
	 * ahead of head, and cur cannot wrap around head because it must be behind
	 * tail. Inverting the order of the writes below could instead lead the
	 * host to think that head went ahead of cur, which would cause the sync
	 * prologue to fail.
	 *
	 * The following memory barrier scheme is used to make this happen:
	 *
	 *          Guest                Host
	 *
	 *          STORE(cur)           LOAD(head)
	 *          wmb() <----------->  rmb()
	 *          STORE(head)          LOAD(cur)
	 *
	 */
	atok->cur = cur;
	nm_stst_barrier();
	atok->head = head;
}

/* Application side of sync-kloop: Read kring pointers (hwcur, hwtail) from
 * the CSB. This routine is coupled with sync_kloop_kernel_write(). */
static inline void
nm_sync_kloop_appl_read(struct nm_csb_ktoa *ktoa, uint32_t *hwtail,
    uint32_t *hwcur)
{
	/*
	 * We place a memory barrier to make sure that the update of hwtail never
	 * overtakes the update of hwcur.
	 * (see explanation in sync_kloop_kernel_write).
	 */
	*hwtail = ktoa->hwtail;
	nm_ldld_barrier();
	*hwcur = ktoa->hwcur;

	/* Make sure that loads from ktoa->hwtail and ktoa->hwcur are not delayed
	 * after the loads from the netmap ring. */
	nm_ldld_barrier();
}

/*
 * data for NETMAP_REQ_OPT_* options
 */

struct nmreq_opt_sync_kloop_eventfds {
	struct nmreq_option	nro_opt;	/* common header */
	/* An array of N entries for bidirectional notifications between
	 * the kernel loop and the application. The number of entries and
	 * their order must agree with the CSB arrays passed in the
	 * NETMAP_REQ_OPT_CSB option. Each entry contains a file descriptor
	 * backed by an eventfd.
	 *
	 * If any of the 'ioeventfd' entries is < 0, the event loop uses
	 * the sleeping synchronization strategy (according to sleep_us),
	 * and keeps kern_need_kick always disabled.
	 * Each 'irqfd' can be < 0, and in that case the corresponding queue
	 * is never notified.
	 */
	struct {
		/* Notifier for the application --> kernel loop direction. */
		int32_t ioeventfd;
		/* Notifier for the kernel loop --> application direction. */
		int32_t irqfd;
	} eventfds[0];
};

struct nmreq_opt_sync_kloop_mode {
	struct nmreq_option	nro_opt;	/* common header */
#define NM_OPT_SYNC_KLOOP_DIRECT_TX (1 << 0)
#define NM_OPT_SYNC_KLOOP_DIRECT_RX (1 << 1)
	uint32_t mode;
};

struct nmreq_opt_extmem {
	struct nmreq_option	nro_opt;	/* common header */
	uint64_t		nro_usrptr;	/* (in) ptr to usr memory */
	struct nmreq_pools_info	nro_info;	/* (in/out) */
};

struct nmreq_opt_csb {
	struct nmreq_option	nro_opt;

	/* Array of CSB entries for application --> kernel communication
	 * (N entries). */
	uint64_t		csb_atok;

	/* Array of CSB entries for kernel --> application communication
	 * (N entries). */
	uint64_t		csb_ktoa;
};

#endif /* _NET_NETMAP_H_ */