/*
 * Copyright (C) 2011-2013 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap_kern.h 11829 2012-09-26 04:06:34Z luigi $
 *
 * The header contains the definitions of constants and function
 * prototypes used only in kernelspace.
 */

#ifndef _NET_NETMAP_KERN_H_
#define _NET_NETMAP_KERN_H_

#if defined(__FreeBSD__)
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

#define	NM_LOCK_T	struct mtx
#define	NM_SELINFO_T	struct selinfo
#define	MBUF_LEN(m)	((m)->m_pkthdr.len)
#define	NM_SEND_UP(ifp, m)	((ifp)->if_input)(ifp, m)
#elif defined (linux)
#define	NM_LOCK_T	safe_spinlock_t	// see bsd_glue.h
#define	NM_SELINFO_T	wait_queue_head_t
#define	MBUF_LEN(m)	((m)->len)
#define	NM_SEND_UP(ifp, m)	netif_rx(m)

#ifndef DEV_NETMAP
#define DEV_NETMAP
#endif

/*
 * IFCAP_NETMAP goes into net_device's priv_flags (if_capenable).
 * This was 16 bits up to linux 2.6.36, so we need a 16 bit value on older
 * platforms and tolerate the clash with IFF_DYNAMIC and IFF_BRIDGE_PORT.
 * For the 32-bit value, 0x100000 has no clashes until at least 3.5.1
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
#define IFCAP_NETMAP	0x8000
#else
#define IFCAP_NETMAP	0x100000
#endif

#elif defined (__APPLE__)
#warning apple support is incomplete.
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
#define	NM_LOCK_T	IOLock *
#define	NM_SELINFO_T	struct selinfo
#define	MBUF_LEN(m)	((m)->m_pkthdr.len)
#define	NM_SEND_UP(ifp, m)	((ifp)->if_input)(ifp, m)

#else
#error	unsupported platform
#endif

#define ND(format, ...)
#define D(format, ...)						\
	do {							\
		struct timeval __xxts;				\
		microtime(&__xxts);				\
		printf("%03d.%06d %s [%d] " format "\n",	\
		    (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec,	\
		    __FUNCTION__, __LINE__, ##__VA_ARGS__);	\
	} while (0)

/* rate limited, lps indicates how many per second */
#define RD(lps, format, ...)					\
	do {							\
		static int t0, __cnt;				\
		if (t0 != time_second) {			\
			t0 = time_second;			\
			__cnt = 0;				\
		}						\
		if (__cnt++ < lps)				\
			D(format, ##__VA_ARGS__);		\
	} while (0)
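/*
 * Usage sketch (illustrative only, not compiled): D() always prints with a
 * timestamp, ND() compiles the call away, and RD() bounds the output rate.
 * The variables q and pending below are hypothetical driver state, shown
 * only to illustrate the calling convention.
 */
#if 0
	D("configuring queue %d", q);			/* always printed */
	ND("slot %d refilled", j);			/* compiled out */
	RD(5, "%d slots still pending", pending);	/* at most 5 lines/s */
#endif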

struct netmap_adapter;

/*
 * private, kernel view of a ring. Keeps track of the status of
 * a ring across system calls.
 *
 *	nr_hwcur	index of the next buffer to refill.
 *			It corresponds to ring->cur - ring->reserved
 *
 *	nr_hwavail	the number of slots "owned" by userspace.
 *			nr_hwavail =:= ring->avail + ring->reserved
 *
 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
 * This is so that, on a reset, buffers owned by userspace are not
 * modified by the kernel. In particular:
 * RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides with
 *	the next empty buffer as known by the hardware (next_to_check or so).
 * TX rings: hwcur + hwofs coincides with next_to_send
 *
 * For received packets, slot->flags is set to nkr_slot_flags
 * so we can provide a proper initial value (e.g. set NS_FORWARD
 * when operating in 'transparent' mode).
 */
struct netmap_kring {
	struct netmap_ring *ring;
	u_int nr_hwcur;
	int nr_hwavail;
	u_int nr_kflags;	/* private driver flags */
#define NKR_PENDINTR	0x1	// Pending interrupt.
	u_int nkr_num_slots;

	uint16_t	nkr_slot_flags;	/* initial value for flags */
	int	nkr_hwofs;	/* offset between NIC and netmap ring */
	struct netmap_adapter *na;
	NM_SELINFO_T si;	/* poll/select wait queue */
	NM_LOCK_T q_lock;	/* used if no device lock available */
} __attribute__((__aligned__(64)));
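/*
 * Worked example (made-up numbers, not part of the API): for a ring with
 * nkr_num_slots = 256 where userspace currently shows cur = 10, avail = 50
 * and reserved = 2, the kernel view above holds nr_hwcur = 8
 * (cur - reserved) and nr_hwavail = 52 (avail + reserved).
 */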

/*
 * This struct extends the 'struct adapter' (or
 * equivalent) device descriptor. It contains all fields needed to
 * support netmap operation.
 */
struct netmap_adapter {
	/*
	 * On linux we do not have a good way to tell if an interface
	 * is netmap-capable. So we use the following trick:
	 * NA(ifp) points here, and the first entry (which hopefully
	 * always exists and is at least 32 bits) contains a magic
	 * value which we can use to detect that the interface is good.
	 */
	uint32_t magic;
	uint32_t na_flags;	/* future place for IFCAP_NETMAP */
#define NAF_SKIP_INTR	1	/* use the regular interrupt handler.
				 * useful during initialization
				 */
	int refcount; /* number of user-space descriptors using this
			 interface, which is equal to the number of
			 struct netmap_if objs in the mapped region. */
	/*
	 * The selwakeup in the interrupt thread can use per-ring
	 * and/or global wait queues. We track how many clients
	 * of each type we have so we can optimize the drivers,
	 * and especially avoid huge contention on the locks.
	 */
	int na_single;	/* threads attached to a single hw queue */
	int na_multi;	/* threads attached to multiple hw queues */

	int separate_locks; /* set if the interface supports different
			       locks for rx, tx and core. */

	u_int num_rx_rings; /* number of adapter receive rings */
	u_int num_tx_rings; /* number of adapter transmit rings */

	u_int num_tx_desc; /* number of descriptors in each queue */
	u_int num_rx_desc;

	/* tx_rings and rx_rings are private but allocated
	 * as a contiguous chunk of memory. Each array has
	 * N+1 entries, for the adapter queues and for the host queue.
	 */
	struct netmap_kring *tx_rings; /* array of TX rings. */
	struct netmap_kring *rx_rings; /* array of RX rings. */

	NM_SELINFO_T tx_si, rx_si;	/* global wait queues */

	/* copy of if_qflush and if_transmit pointers, to intercept
	 * packets from the network stack when netmap is active.
	 */
	int	(*if_transmit)(struct ifnet *, struct mbuf *);

	/* references to the ifnet and device routines, used by
	 * the generic netmap functions.
	 */
	struct ifnet *ifp; /* adapter is ifp->if_softc */

	NM_LOCK_T core_lock;	/* used if no device lock available */

	int (*nm_register)(struct ifnet *, int onoff);
	void (*nm_lock)(struct ifnet *, int what, u_int ringid);
	int (*nm_txsync)(struct ifnet *, u_int ring, int lock);
	int (*nm_rxsync)(struct ifnet *, u_int ring, int lock);
	/* return configuration information */
	int (*nm_config)(struct ifnet *, u_int *txr, u_int *txd,
					u_int *rxr, u_int *rxd);

	int bdg_port;
#ifdef linux
	struct net_device_ops nm_ndo;
	int if_refcount;	// XXX additions for bridge
#endif /* linux */
};
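/*
 * Example (a sketch, not taken from any driver in the tree): a hypothetical
 * driver "foo" would typically fill a netmap_adapter on the stack in its
 * attach routine and register it with netmap_attach() (declared below).
 * The foo_* names and the adapter fields used here are illustrative;
 * the second argument of netmap_attach() is the number of hardware queues.
 */
#if 0
static void
foo_netmap_attach(struct foo_adapter *adapter)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = adapter->ifp;
	na.separate_locks = 0;
	na.num_tx_desc = adapter->num_tx_desc;
	na.num_rx_desc = adapter->num_rx_desc;
	na.nm_register = foo_netmap_reg;
	na.nm_lock = foo_netmap_lock_wrapper;
	na.nm_txsync = foo_netmap_txsync;
	na.nm_rxsync = foo_netmap_rxsync;
	netmap_attach(&na, adapter->num_queues);
}
#endif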

/*
 * The combination of "enable" (ifp->if_capenable & IFCAP_NETMAP)
 * and refcount gives the status of the interface, namely:
 *
 *	enable	refcount	Status
 *
 *	FALSE	0		normal operation
 *	FALSE	!= 0		-- (impossible)
 *	TRUE	1		netmap mode
 *	TRUE	0		being deleted.
 */

#define NETMAP_DELETING(_na)  (    ((_na)->refcount == 0) &&	\
	( (_na)->ifp->if_capenable & IFCAP_NETMAP) )

/*
 * parameters for (*nm_lock)(adapter, what, index)
 */
enum {
	NETMAP_NO_LOCK = 0,
	NETMAP_CORE_LOCK, NETMAP_CORE_UNLOCK,
	NETMAP_TX_LOCK, NETMAP_TX_UNLOCK,
	NETMAP_RX_LOCK, NETMAP_RX_UNLOCK,
#ifdef __FreeBSD__
#define	NETMAP_REG_LOCK		NETMAP_CORE_LOCK
#define	NETMAP_REG_UNLOCK	NETMAP_CORE_UNLOCK
#else
	NETMAP_REG_LOCK, NETMAP_REG_UNLOCK
#endif
};

/*
 * The following are support routines used by individual drivers to
 * support netmap operation.
 *
 * netmap_attach() initializes a struct netmap_adapter, allocating the
 *	struct netmap_ring's and the struct selinfo.
 *
 * netmap_detach() frees the memory allocated by netmap_attach().
 *
 * netmap_start() replaces the if_transmit routine of the interface,
 *	and is used to intercept packets coming from the stack.
 *
 * netmap_load_map/netmap_reload_map are helper routines to set/reset
 *	the dmamap for a packet buffer
 *
 * netmap_reset() is a helper routine to be called in the driver
 *	when reinitializing a ring.
 */
int netmap_attach(struct netmap_adapter *, int);
void netmap_detach(struct ifnet *);
int netmap_start(struct ifnet *, struct mbuf *);
enum txrx { NR_RX = 0, NR_TX = 1 };
struct netmap_slot *netmap_reset(struct netmap_adapter *na,
	enum txrx tx, int n, u_int new_cur);
int netmap_ring_reinit(struct netmap_kring *);
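/*
 * Example (a sketch, not from any driver in the tree): when a driver
 * reinitializes a receive ring it can call netmap_reset() to learn whether
 * the ring is in netmap mode and, if so, attach the preallocated netmap
 * buffers to the NIC descriptors. The adapter/rxr/ptag/buffers/rx_base
 * names are hypothetical; NA(), netmap_idx_n2k(), PNMB() and
 * netmap_load_map() are defined later in this file.
 */
#if 0
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_RX, rxr->me, 0);

	if (slot != NULL) {	/* NULL if the ring is not in netmap mode */
		for (j = 0; j < na->num_rx_desc; j++) {
			int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
			uint64_t paddr;
			void *addr = PNMB(slot + sj, &paddr);

			netmap_load_map(rxr->ptag, rxr->buffers[j].pmap, addr);
			/* point the NIC descriptor to the netmap buffer */
			rxr->rx_base[j].pkt_addr = htole64(paddr);
		}
	}
#endif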

extern u_int netmap_buf_size;
#define NETMAP_BUF_SIZE	netmap_buf_size
extern int netmap_mitigate;
extern int netmap_no_pendintr;
extern u_int netmap_total_buffers;
extern char *netmap_buffer_base;
extern int netmap_verbose;	// XXX debugging
enum {                                  /* verbose flags */
	NM_VERB_ON = 1,                 /* generic verbose */
	NM_VERB_HOST = 0x2,             /* verbose host stack */
	NM_VERB_RXSYNC = 0x10,          /* verbose on rxsync/txsync */
	NM_VERB_TXSYNC = 0x20,
	NM_VERB_RXINTR = 0x100,         /* verbose on rx/tx intr (driver) */
	NM_VERB_TXINTR = 0x200,
	NM_VERB_NIC_RXSYNC = 0x1000,    /* verbose on NIC-level rxsync/txsync */
	NM_VERB_NIC_TXSYNC = 0x2000,
};

/*
 * NA returns a pointer to the struct netmap_adapter from the ifp,
 * WNA is used to write it.
 */
#ifndef WNA
#define	WNA(_ifp)	(_ifp)->if_pspare[0]
#endif
#define	NA(_ifp)	((struct netmap_adapter *)WNA(_ifp))

/*
 * Macros to determine if an interface is netmap capable or netmap enabled.
 * See the magic field in struct netmap_adapter.
 */
#ifdef __FreeBSD__
/*
 * on FreeBSD just use if_capabilities and if_capenable.
 */
#define NETMAP_CAPABLE(ifp)	(NA(ifp) &&		\
	(ifp)->if_capabilities & IFCAP_NETMAP )

#define	NETMAP_SET_CAPABLE(ifp)				\
	(ifp)->if_capabilities |= IFCAP_NETMAP

#else	/* linux */

/*
 * on linux:
 * we check if NA(ifp) is set and its first element has a related
 * magic value. The capenable is within the struct netmap_adapter.
 */
#define	NETMAP_MAGIC	0x52697a7a

#define NETMAP_CAPABLE(ifp)	(NA(ifp) &&		\
	((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC )

#define	NETMAP_SET_CAPABLE(ifp)				\
	NA(ifp)->magic = ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC

#endif	/* linux */

#ifdef __FreeBSD__
/* Callback invoked by the dma machinery after a successful dmamap_load */
static void netmap_dmamap_cb(__unused void *arg,
    __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
{
}

/* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
 * XXX can we do it without a callback ?
 */
static inline void
netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map)
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
}

/* update the map when a buffer changes. */
static inline void
netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map) {
		bus_dmamap_unload(tag, map);
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
	}
}
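/*
 * Usage sketch (not from any driver in the tree): in a txsync/rxsync loop a
 * driver reloads the DMA map of a slot whose buffer was replaced by
 * userspace, which is signalled by the NS_BUF_CHANGED slot flag. The
 * txr/txbuf/addr variables below are hypothetical driver state.
 */
#if 0
	if (slot->flags & NS_BUF_CHANGED) {
		/* buffer has changed: unload and reload the map */
		netmap_reload_map(txr->txtag, txbuf->map, addr);
		slot->flags &= ~NS_BUF_CHANGED;
	}
#endif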
#else /* linux */

/*
 * XXX How do we redefine these functions:
 *
 * on linux we need
 *	dma_map_single(&pdev->dev, virt_addr, len, direction)
 *	dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)
 * The len can be implicit (on netmap it is NETMAP_BUF_SIZE)
 * unfortunately the direction is not, so we need to change
 * something to have a cross-platform API
 */
#define netmap_load_map(_t, _m, _b)
#define netmap_reload_map(_t, _m, _b)
#if 0
	struct e1000_buffer *buffer_info =  &tx_ring->buffer_info[l];
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->mapped_as_page = false;
	buffer_info->length = len;
	//buffer_info->next_to_watch = l;
	/* reload dma map */
	dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
			NETMAP_BUF_SIZE, DMA_TO_DEVICE);
	buffer_info->dma = dma_map_single(&adapter->pdev->dev,
			addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		D("dma mapping error");
		/* goto dma_error; See e1000_put_txbuf() */
		/* XXX reset */
	}
	tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX

#endif

/*
 * The bus_dmamap_sync() can be one of wmb() or rmb() depending on direction.
 */
#define bus_dmamap_sync(_a, _b, _c)

#endif /* linux */

/*
 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
 */
static inline int
netmap_idx_n2k(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;
	idx += kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}


static inline int
netmap_idx_k2n(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;
	idx -= kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}
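/*
 * Worked example (made-up numbers, not compiled): with nkr_num_slots = 512
 * and nkr_hwofs = 3, the two mappings behave as follows; k2n is the exact
 * inverse of n2k for any valid slot index.
 */
#if 0
	netmap_idx_n2k(kr, 100);	/* -> 103 */
	netmap_idx_n2k(kr, 510);	/* -> 1 (wraps past the ring size) */
	netmap_idx_k2n(kr, 103);	/* -> 100 */
	netmap_idx_k2n(kr, 1);		/* -> 510 */
#endif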

/* Entries of the look-up table. */
struct lut_entry {
	void *vaddr;		/* virtual address. */
	vm_paddr_t paddr;	/* physical address. */
};

struct netmap_obj_pool;
extern struct lut_entry *netmap_buffer_lut;
#define NMB_VA(i)	(netmap_buffer_lut[i].vaddr)
#define NMB_PA(i)	(netmap_buffer_lut[i].paddr)

/*
 * NMB returns the virtual address of a buffer (buffer 0 on bad index)
 * PNMB also fills the physical address
 */
static inline void *
NMB(struct netmap_slot *slot)
{
	uint32_t i = slot->buf_idx;
	return (unlikely(i >= netmap_total_buffers)) ?  NMB_VA(0) : NMB_VA(i);
}

static inline void *
PNMB(struct netmap_slot *slot, uint64_t *pp)
{
	uint32_t i = slot->buf_idx;
	void *ret = (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);

	*pp = (i >= netmap_total_buffers) ? NMB_PA(0) : NMB_PA(i);
	return ret;
}

/* default functions to handle rx/tx interrupts */
int netmap_rx_irq(struct ifnet *, int, int *);
#define netmap_tx_irq(_n, _q)	netmap_rx_irq(_n, _q, NULL)

extern int netmap_copy;

#endif /* _NET_NETMAP_KERN_H_ */