/*
 * Copyright (C) 2016-2018 Vincenzo Maffione
 * Copyright (C) 2015 Stefano Garzarella
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * common headers
 */
#if defined(__FreeBSD__)
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>

#define usleep_range(_1, _2) \
        pause_sbt("sync-kloop-sleep", SBT_1US * _1, SBT_1US * 1, C_ABSOLUTE)

#elif defined(linux)
#include <bsd_glue.h>
#include <linux/file.h>
#include <linux/eventfd.h>
#endif

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>

/* Support for eventfd-based notifications. */
#if defined(linux)
#define SYNC_KLOOP_POLL
#endif

/* Write kring pointers (hwcur, hwtail) to the CSB.
 * This routine is coupled with nm_sync_kloop_appl_read(). */
static inline void
sync_kloop_kernel_write(struct nm_csb_ktoa __user *ptr, uint32_t hwcur,
                        uint32_t hwtail)
{
        /* Issue a first store-store barrier to make sure that writes to the
         * netmap ring are visible before the updates of ktoa->hwcur and
         * ktoa->hwtail below. */
        nm_stst_barrier();

        /*
         * The same scheme used in nm_sync_kloop_appl_write() applies here.
         * We allow the application to read a value of hwcur more recent than
         * the value of hwtail, since this would anyway result in a consistent
         * view of the ring state (and hwcur can never wrap around hwtail,
         * since hwcur must be behind head).
         *
         * The following memory barrier scheme is used to make this happen:
         *
         *          Application            Kernel
         *
         *          STORE(hwcur)           LOAD(hwtail)
         *          wmb() <------------->  rmb()
         *          STORE(hwtail)          LOAD(hwcur)
         */
        CSB_WRITE(ptr, hwcur, hwcur);
        nm_stst_barrier();
        CSB_WRITE(ptr, hwtail, hwtail);
}

/* Read kring pointers (head, cur, sync_flags) from the CSB.
 * This routine is coupled with nm_sync_kloop_appl_write(). */
static inline void
sync_kloop_kernel_read(struct nm_csb_atok __user *ptr,
                       struct netmap_ring *shadow_ring,
                       uint32_t num_slots)
{
        /*
         * We place a memory barrier to make sure that the update of head never
         * overtakes the update of cur
         * (see the explanation in sync_kloop_kernel_write()).
         */
        CSB_READ(ptr, head, shadow_ring->head);
        nm_ldld_barrier();
        CSB_READ(ptr, cur, shadow_ring->cur);
        CSB_READ(ptr, sync_flags, shadow_ring->flags);

        /* Make sure that loads from atok->head and atok->cur are not delayed
         * after the loads from the netmap ring. */
        nm_ldld_barrier();
}

/* Enable or disable application --> kernel kicks. */
static inline void
csb_ktoa_kick_enable(struct nm_csb_ktoa __user *csb_ktoa, uint32_t val)
{
        CSB_WRITE(csb_ktoa, kern_need_kick, val);
}

#ifdef SYNC_KLOOP_POLL
/* Are application interrupts enabled or disabled?
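 * Returns the appl_need_kick flag read from the CSB: nonzero means that the
 * application is asking to be notified (kicked) by the kloop.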
 */
static inline uint32_t
csb_atok_intr_enabled(struct nm_csb_atok __user *csb_atok)
{
        uint32_t v;

        CSB_READ(csb_atok, appl_need_kick, v);

        return v;
}
#endif /* SYNC_KLOOP_POLL */

static inline void
sync_kloop_kring_dump(const char *title, const struct netmap_kring *kring)
{
        nm_prinf("%s, kring %s, hwcur %d, rhead %d, "
                 "rcur %d, rtail %d, hwtail %d",
                 title, kring->name, kring->nr_hwcur, kring->rhead,
                 kring->rcur, kring->rtail, kring->nr_hwtail);
}

struct sync_kloop_ring_args {
        struct netmap_kring *kring;
        struct nm_csb_atok *csb_atok;
        struct nm_csb_ktoa *csb_ktoa;
#ifdef SYNC_KLOOP_POLL
        struct eventfd_ctx *irq_ctx;
#endif /* SYNC_KLOOP_POLL */
};

static void
netmap_sync_kloop_tx_ring(const struct sync_kloop_ring_args *a)
{
        struct netmap_kring *kring = a->kring;
        struct nm_csb_atok *csb_atok = a->csb_atok;
        struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa;
        struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
        bool more_txspace = false;
        uint32_t num_slots;
        int batch;

        num_slots = kring->nkr_num_slots;

        /* Disable application --> kernel notifications. */
        csb_ktoa_kick_enable(csb_ktoa, 0);
        /* Copy the application kring pointers from the CSB */
        sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);

        for (;;) {
                batch = shadow_ring.head - kring->nr_hwcur;
                if (batch < 0)
                        batch += num_slots;

#ifdef PTN_TX_BATCH_LIM
                if (batch > PTN_TX_BATCH_LIM(num_slots)) {
                        /* If application moves ahead too fast, let's cut the
                         * move so that we don't exceed our batch limit.
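                         * (PTN_TX_BATCH_LIM, when defined, bounds the number
                         * of slots that a single txsync pass may process.)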
                         */
                        uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots);

                        if (head_lim >= num_slots)
                                head_lim -= num_slots;
                        nm_prdis(1, "batch: %d head: %d head_lim: %d", batch,
                                 shadow_ring.head, head_lim);
                        shadow_ring.head = head_lim;
                        batch = PTN_TX_BATCH_LIM(num_slots);
                }
#endif /* PTN_TX_BATCH_LIM */

                if (nm_kr_txspace(kring) <= (num_slots >> 1)) {
                        shadow_ring.flags |= NAF_FORCE_RECLAIM;
                }

                /* Netmap prologue */
                shadow_ring.tail = kring->rtail;
                if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) {
                        /* Reinit ring and enable notifications. */
                        netmap_ring_reinit(kring);
                        csb_ktoa_kick_enable(csb_ktoa, 1);
                        break;
                }

                if (unlikely(netmap_debug & NM_DEBUG_TXSYNC)) {
                        sync_kloop_kring_dump("pre txsync", kring);
                }

                if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
                        /* Reenable notifications. */
                        csb_ktoa_kick_enable(csb_ktoa, 1);
                        nm_prerr("txsync() failed");
                        break;
                }

                /*
                 * Finalize
                 * Copy kernel hwcur and hwtail into the CSB for the application
                 * sync(), and do the nm_sync_finalize.
                 */
                sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur,
                                        kring->nr_hwtail);
                if (kring->rtail != kring->nr_hwtail) {
                        /* Some more room available in the parent adapter. */
                        kring->rtail = kring->nr_hwtail;
                        more_txspace = true;
                }

                if (unlikely(netmap_debug & NM_DEBUG_TXSYNC)) {
                        sync_kloop_kring_dump("post txsync", kring);
                }

                /* Interrupt the application if needed. */
#ifdef SYNC_KLOOP_POLL
                if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) {
                        /* Notify the application, and clear more_txspace to
                         * avoid sending redundant notifications. */
                        eventfd_signal(a->irq_ctx, 1);
                        more_txspace = false;
                }
#endif /* SYNC_KLOOP_POLL */

                /* Read CSB to see if there is more work to do.
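                 * The application may have moved head forward while this batch
                 * was being processed.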
                 */
                sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
                if (shadow_ring.head == kring->rhead) {
                        /*
                         * No more packets to transmit. We enable notifications
                         * and go to sleep, waiting for a kick from the
                         * application when new slots are ready for
                         * transmission.
                         */
                        /* Reenable notifications. */
                        csb_ktoa_kick_enable(csb_ktoa, 1);
                        /* Double check, with store-load memory barrier. */
                        nm_stld_barrier();
                        sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
                        if (shadow_ring.head != kring->rhead) {
                                /* We won the race condition, there are more
                                 * packets to transmit. Disable notifications
                                 * and do another cycle. */
                                csb_ktoa_kick_enable(csb_ktoa, 0);
                                continue;
                        }
                        break;
                }

                if (nm_kr_txempty(kring)) {
                        /* No more available TX slots. We stop and wait for a
                         * notification from the backend (netmap_tx_irq). */
                        nm_prdis(1, "TX ring");
                        break;
                }
        }

#ifdef SYNC_KLOOP_POLL
        if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) {
                eventfd_signal(a->irq_ctx, 1);
        }
#endif /* SYNC_KLOOP_POLL */
}

/* Max number of consecutive RX sync cycles without receiving any packet. */
#define SYNC_LOOP_RX_DRY_CYCLES_MAX 2

static inline int
sync_kloop_norxslots(struct netmap_kring *kring, uint32_t g_head)
{
        return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head,
                                kring->nkr_num_slots - 1));
}

static void
netmap_sync_kloop_rx_ring(const struct sync_kloop_ring_args *a)
{
        struct netmap_kring *kring = a->kring;
        struct nm_csb_atok *csb_atok = a->csb_atok;
        struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa;
        struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
        int dry_cycles = 0;
        bool some_recvd = false;
        uint32_t num_slots;

        num_slots = kring->nkr_num_slots;

        /* Disable notifications. */
        csb_ktoa_kick_enable(csb_ktoa, 0);
        /* Copy the application kring pointers from the CSB */
        sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);

        for (;;) {
                uint32_t hwtail;

                /* Netmap prologue */
                shadow_ring.tail = kring->rtail;
                if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) {
                        /* Reinit ring and enable notifications. */
                        netmap_ring_reinit(kring);
                        csb_ktoa_kick_enable(csb_ktoa, 1);
                        break;
                }

                if (unlikely(netmap_debug & NM_DEBUG_RXSYNC)) {
                        sync_kloop_kring_dump("pre rxsync", kring);
                }

                if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
                        /* Reenable notifications. */
                        csb_ktoa_kick_enable(csb_ktoa, 1);
                        nm_prerr("rxsync() failed");
                        break;
                }

                /*
                 * Finalize
                 * Copy kernel hwcur and hwtail into the CSB for the application
                 * sync().
                 */
                hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
                sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur, hwtail);
                if (kring->rtail != hwtail) {
                        kring->rtail = hwtail;
                        some_recvd = true;
                        dry_cycles = 0;
                } else {
                        dry_cycles++;
                }

                if (unlikely(netmap_debug & NM_DEBUG_RXSYNC)) {
                        sync_kloop_kring_dump("post rxsync", kring);
                }

#ifdef SYNC_KLOOP_POLL
                /* Interrupt the application if needed. */
                if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) {
                        /* Notify the application, and clear some_recvd to
                         * avoid sending redundant notifications. */
                        eventfd_signal(a->irq_ctx, 1);
                        some_recvd = false;
                }
#endif /* SYNC_KLOOP_POLL */

                /* Read CSB to see if there is more work to do. */
                sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
                if (sync_kloop_norxslots(kring, shadow_ring.head)) {
                        /*
                         * No more slots available for reception.
                         * We enable notifications and go to sleep, waiting for
                         * a kick from the application when new receive slots
                         * are available.
                         */
                        /* Reenable notifications. */
                        csb_ktoa_kick_enable(csb_ktoa, 1);
                        /* Double check, with store-load memory barrier. */
                        nm_stld_barrier();
                        sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
                        if (!sync_kloop_norxslots(kring, shadow_ring.head)) {
                                /* We won the race condition, more slots are
                                 * available. Disable notifications and do
                                 * another cycle. */
                                csb_ktoa_kick_enable(csb_ktoa, 0);
                                continue;
                        }
                        break;
                }

                hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
                if (unlikely(hwtail == kring->rhead ||
                             dry_cycles >= SYNC_LOOP_RX_DRY_CYCLES_MAX)) {
                        /* No more packets to be read from the backend. We stop
                         * and wait for a notification from the backend
                         * (netmap_rx_irq). */
                        nm_prdis(1, "nr_hwtail: %d rhead: %d dry_cycles: %d",
                                 hwtail, kring->rhead, dry_cycles);
                        break;
                }
        }

#ifdef SYNC_KLOOP_POLL
        /* Interrupt the application if needed. */
        if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) {
                eventfd_signal(a->irq_ctx, 1);
        }
#endif /* SYNC_KLOOP_POLL */
}

#ifdef SYNC_KLOOP_POLL
struct sync_kloop_poll_entry {
        /* Support for receiving notifications from
         * a netmap ring or from the application. */
        struct file *filp;
        wait_queue_t wait;
        wait_queue_head_t *wqh;

        /* Support for sending notifications to the application.
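         * The irq_ctx context is obtained from the irqfd file descriptor
         * passed by the application, and is signaled with eventfd_signal().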
         */
        struct eventfd_ctx *irq_ctx;
        struct file *irq_filp;
};

struct sync_kloop_poll_ctx {
        poll_table wait_table;
        unsigned int next_entry;
        unsigned int num_entries;
        struct sync_kloop_poll_entry entries[0];
};

static void
sync_kloop_poll_table_queue_proc(struct file *file, wait_queue_head_t *wqh,
                                 poll_table *pt)
{
        struct sync_kloop_poll_ctx *poll_ctx =
                container_of(pt, struct sync_kloop_poll_ctx, wait_table);
        struct sync_kloop_poll_entry *entry = poll_ctx->entries +
                poll_ctx->next_entry;

        BUG_ON(poll_ctx->next_entry >= poll_ctx->num_entries);
        entry->wqh = wqh;
        entry->filp = file;
        /* Use the default wake up function. */
        init_waitqueue_entry(&entry->wait, current);
        add_wait_queue(wqh, &entry->wait);
        poll_ctx->next_entry++;
}
#endif /* SYNC_KLOOP_POLL */

int
netmap_sync_kloop(struct netmap_priv_d *priv, struct nmreq_header *hdr)
{
        struct nmreq_sync_kloop_start *req =
                (struct nmreq_sync_kloop_start *)(uintptr_t)hdr->nr_body;
        struct nmreq_opt_sync_kloop_eventfds *eventfds_opt = NULL;
#ifdef SYNC_KLOOP_POLL
        struct sync_kloop_poll_ctx *poll_ctx = NULL;
#endif /* SYNC_KLOOP_POLL */
        int num_rx_rings, num_tx_rings, num_rings;
        struct sync_kloop_ring_args *args = NULL;
        uint32_t sleep_us = req->sleep_us;
        struct nm_csb_atok* csb_atok_base;
        struct nm_csb_ktoa* csb_ktoa_base;
        struct netmap_adapter *na;
        struct nmreq_option *opt;
        int err = 0;
        int i;

        if (sleep_us > 1000000) {
                /* We do not accept sleeping for more than a second.
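                 * Note that sleep_us is only used by the usleep_range()
                 * fallback in the main loop, when eventfd polling is not in
                 * use.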
                 */
                return EINVAL;
        }

        if (priv->np_nifp == NULL) {
                return ENXIO;
        }
        mb(); /* make sure following reads are not from cache */

        na = priv->np_na;
        if (!nm_netmap_on(na)) {
                return ENXIO;
        }

        NMG_LOCK();
        /* Make sure the application is working in CSB mode. */
        if (!priv->np_csb_atok_base || !priv->np_csb_ktoa_base) {
                NMG_UNLOCK();
                nm_prerr("sync-kloop on %s requires "
                         "NETMAP_REQ_OPT_CSB option", na->name);
                return EINVAL;
        }

        csb_atok_base = priv->np_csb_atok_base;
        csb_ktoa_base = priv->np_csb_ktoa_base;

        /* Make sure that no kloop is currently running. */
        if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
                err = EBUSY;
        }
        priv->np_kloop_state |= NM_SYNC_KLOOP_RUNNING;
        NMG_UNLOCK();
        if (err) {
                return err;
        }

        num_rx_rings = priv->np_qlast[NR_RX] - priv->np_qfirst[NR_RX];
        num_tx_rings = priv->np_qlast[NR_TX] - priv->np_qfirst[NR_TX];
        num_rings = num_tx_rings + num_rx_rings;

        args = nm_os_malloc(num_rings * sizeof(args[0]));
        if (!args) {
                err = ENOMEM;
                goto out;
        }

        /* Validate notification options. */
        opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options,
                               NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS);
        if (opt != NULL) {
                err = nmreq_checkduplicate(opt);
                if (err) {
                        opt->nro_status = err;
                        goto out;
                }
                if (opt->nro_size != sizeof(*eventfds_opt) +
                                sizeof(eventfds_opt->eventfds[0]) * num_rings) {
                        /* Option size not consistent with the number of
                         * entries.
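                         * We expect exactly one ioeventfd/irqfd pair for each
                         * TX and RX ring bound to this file descriptor.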
                         */
                        opt->nro_status = err = EINVAL;
                        goto out;
                }
#ifdef SYNC_KLOOP_POLL
                eventfds_opt = (struct nmreq_opt_sync_kloop_eventfds *)opt;
                opt->nro_status = 0;
                /* We need 2 poll entries for TX and RX notifications coming
                 * from the netmap adapter, plus one entry per ring for the
                 * notifications coming from the application. */
                poll_ctx = nm_os_malloc(sizeof(*poll_ctx) +
                                (2 + num_rings) * sizeof(poll_ctx->entries[0]));
                init_poll_funcptr(&poll_ctx->wait_table,
                                  sync_kloop_poll_table_queue_proc);
                poll_ctx->num_entries = 2 + num_rings;
                poll_ctx->next_entry = 0;
                /* Poll for notifications coming from the application through
                 * eventfds. */
                for (i = 0; i < num_rings; i++) {
                        struct eventfd_ctx *irq;
                        struct file *filp;
                        unsigned long mask;

                        filp = eventfd_fget(eventfds_opt->eventfds[i].ioeventfd);
                        if (IS_ERR(filp)) {
                                err = PTR_ERR(filp);
                                goto out;
                        }
                        mask = filp->f_op->poll(filp, &poll_ctx->wait_table);
                        if (mask & POLLERR) {
                                err = EINVAL;
                                goto out;
                        }

                        filp = eventfd_fget(eventfds_opt->eventfds[i].irqfd);
                        if (IS_ERR(filp)) {
                                err = PTR_ERR(filp);
                                goto out;
                        }
                        poll_ctx->entries[i].irq_filp = filp;
                        irq = eventfd_ctx_fileget(filp);
                        if (IS_ERR(irq)) {
                                err = PTR_ERR(irq);
                                goto out;
                        }
                        poll_ctx->entries[i].irq_ctx = irq;
                }
                /* Poll for notifications coming from the netmap rings bound to
                 * this file descriptor.
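                 * This is done by calling poll_wait() on the TX and RX wait
                 * queues (priv->np_si) of this port, which fills the last two
                 * entries of the poll context.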
                 */
                {
                        NMG_LOCK();
                        poll_wait(priv->np_filp, priv->np_si[NR_TX],
                                  &poll_ctx->wait_table);
                        poll_wait(priv->np_filp, priv->np_si[NR_RX],
                                  &poll_ctx->wait_table);
                        NMG_UNLOCK();
                }
#else /* SYNC_KLOOP_POLL */
                opt->nro_status = EOPNOTSUPP;
                goto out;
#endif /* SYNC_KLOOP_POLL */
        }

        /* Prepare the arguments for netmap_sync_kloop_tx_ring()
         * and netmap_sync_kloop_rx_ring(). */
        for (i = 0; i < num_tx_rings; i++) {
                struct sync_kloop_ring_args *a = args + i;

                a->kring = NMR(na, NR_TX)[i + priv->np_qfirst[NR_TX]];
                a->csb_atok = csb_atok_base + i;
                a->csb_ktoa = csb_ktoa_base + i;
#ifdef SYNC_KLOOP_POLL
                if (poll_ctx)
                        a->irq_ctx = poll_ctx->entries[i].irq_ctx;
#endif /* SYNC_KLOOP_POLL */
        }
        for (i = 0; i < num_rx_rings; i++) {
                struct sync_kloop_ring_args *a = args + num_tx_rings + i;

                a->kring = NMR(na, NR_RX)[i + priv->np_qfirst[NR_RX]];
                a->csb_atok = csb_atok_base + num_tx_rings + i;
                a->csb_ktoa = csb_ktoa_base + num_tx_rings + i;
#ifdef SYNC_KLOOP_POLL
                if (poll_ctx)
                        a->irq_ctx = poll_ctx->entries[num_tx_rings + i].irq_ctx;
#endif /* SYNC_KLOOP_POLL */
        }

        /* Main loop. */
        for (;;) {
                if (unlikely(NM_ACCESS_ONCE(priv->np_kloop_state) & NM_SYNC_KLOOP_STOPPING)) {
                        break;
                }

#ifdef SYNC_KLOOP_POLL
                if (poll_ctx) {
                        /* It is important to set the task state as
                         * interruptible before processing any TX/RX ring,
                         * so that if a notification on ring Y comes after
                         * we have processed ring Y, but before we call
                         * schedule(), we don't miss it. This is true because
                         * the wake up function will change the task state,
                         * and therefore the schedule_timeout() call below
                         * will observe the change.
                         */
                        set_current_state(TASK_INTERRUPTIBLE);
                }
#endif /* SYNC_KLOOP_POLL */

                /* Process all the TX rings bound to this file descriptor.
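                 * Each kring is grabbed with nm_kr_tryget(), and skipped if
                 * the call fails (e.g. because the ring is stopped).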
                 */
                for (i = 0; i < num_tx_rings; i++) {
                        struct sync_kloop_ring_args *a = args + i;

                        if (unlikely(nm_kr_tryget(a->kring, 1, NULL))) {
                                continue;
                        }
                        netmap_sync_kloop_tx_ring(a);
                        nm_kr_put(a->kring);
                }

                /* Process all the RX rings bound to this file descriptor. */
                for (i = 0; i < num_rx_rings; i++) {
                        struct sync_kloop_ring_args *a = args + num_tx_rings + i;

                        if (unlikely(nm_kr_tryget(a->kring, 1, NULL))) {
                                continue;
                        }
                        netmap_sync_kloop_rx_ring(a);
                        nm_kr_put(a->kring);
                }

#ifdef SYNC_KLOOP_POLL
                if (poll_ctx) {
                        /* If a poll context is present, yield to the scheduler
                         * waiting for a notification to come either from
                         * netmap or the application. */
                        schedule_timeout(msecs_to_jiffies(3000));
                } else
#endif /* SYNC_KLOOP_POLL */
                {
                        /* Default synchronization method: sleep for a while. */
                        usleep_range(sleep_us, sleep_us);
                }
        }
out:
#ifdef SYNC_KLOOP_POLL
        if (poll_ctx) {
                /* Stop polling from netmap and the eventfds, and deallocate
                 * the poll context. */
                __set_current_state(TASK_RUNNING);
                for (i = 0; i < poll_ctx->next_entry; i++) {
                        struct sync_kloop_poll_entry *entry =
                                poll_ctx->entries + i;

                        if (entry->wqh)
                                remove_wait_queue(entry->wqh, &entry->wait);
                        /* Release the references taken on the eventfd files.
                         * Don't do that on the netmap file descriptor, since
                         * no reference was taken on it. */
                        if (entry->filp && entry->filp != priv->np_filp)
                                fput(entry->filp);
                        if (entry->irq_ctx)
                                eventfd_ctx_put(entry->irq_ctx);
                        if (entry->irq_filp)
                                fput(entry->irq_filp);
                }
                nm_os_free(poll_ctx);
                poll_ctx = NULL;
        }
#endif /* SYNC_KLOOP_POLL */

        if (args) {
                nm_os_free(args);
                args = NULL;
        }

        /* Reset the kloop state. */
        NMG_LOCK();
        priv->np_kloop_state = 0;
        NMG_UNLOCK();

        return err;
}

int
netmap_sync_kloop_stop(struct netmap_priv_d *priv)
{
        struct netmap_adapter *na;
        bool running = true;
        int err = 0;

        if (priv->np_nifp == NULL) {
                return ENXIO;
        }
        mb(); /* make sure following reads are not from cache */

        na = priv->np_na;
        if (!nm_netmap_on(na)) {
                return ENXIO;
        }

        /* Set the kloop stopping flag. */
        NMG_LOCK();
        priv->np_kloop_state |= NM_SYNC_KLOOP_STOPPING;
        NMG_UNLOCK();

        /* Send a notification to the kloop, in case it is blocked in
         * schedule_timeout(). We can use either RX or TX, because the
         * kloop is waiting on both. */
        nm_os_selwakeup(priv->np_si[NR_RX]);

        /* Wait for the kloop to actually terminate. */
        while (running) {
                usleep_range(1000, 1500);
                NMG_LOCK();
                running = (NM_ACCESS_ONCE(priv->np_kloop_state)
                           & NM_SYNC_KLOOP_RUNNING);
                NMG_UNLOCK();
        }

        return err;
}

#ifdef WITH_PTNETMAP
/*
 * Guest ptnetmap txsync()/rxsync() routines, used in ptnet device drivers.
 * These routines are reused across the different operating systems supported
 * by netmap.
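 * The guest and the host kloop communicate through the shared CSB entries
 * (nm_csb_atok and nm_csb_ktoa), plus kicks and interrupts when notifications
 * are enabled.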
 */

/*
 * Reconcile host and guest views of the transmit ring.
 *
 * Guest user wants to transmit packets up to the one before ring->head,
 * and guest kernel knows tx_ring->hwcur is the first packet unsent
 * by the host kernel.
 *
 * We push out as many packets as possible, and possibly
 * reclaim buffers from previously completed transmissions.
 *
 * Notifications from the host are enabled only if the guest user would
 * block (no space in the ring).
 */
bool
netmap_pt_guest_txsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
                       struct netmap_kring *kring, int flags)
{
        bool notify = false;

        /* Disable notifications */
        atok->appl_need_kick = 0;

        /*
         * First part: tell the host (updating the CSB) to process the new
         * packets.
         */
        kring->nr_hwcur = ktoa->hwcur;
        nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);

        /* Ask for a kick from the guest to the host if needed. */
        if (((kring->rhead != kring->nr_hwcur || nm_kr_txempty(kring))
                && NM_ACCESS_ONCE(ktoa->kern_need_kick)) ||
                        (flags & NAF_FORCE_RECLAIM)) {
                atok->sync_flags = flags;
                notify = true;
        }

        /*
         * Second part: reclaim buffers for completed transmissions.
         */
        if (nm_kr_txempty(kring) || (flags & NAF_FORCE_RECLAIM)) {
                nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
                                        &kring->nr_hwcur);
        }

        /*
         * No more room in the ring for new transmissions. The user thread will
         * go to sleep and we need to be notified by the host when more free
         * space is available.
         */
        if (nm_kr_txempty(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
                /* Reenable notifications. */
                atok->appl_need_kick = 1;
                /* Double check, with store-load memory barrier.
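                 * Re-read hwtail/hwcur after publishing appl_need_kick, so
                 * that a completion racing with the flag update is not missed.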
                 */
                nm_stld_barrier();
                nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
                                        &kring->nr_hwcur);
                /* If there is new free space, disable notifications */
                if (unlikely(!nm_kr_txempty(kring))) {
                        atok->appl_need_kick = 0;
                }
        }

        nm_prdis(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
                 kring->name, atok->head, atok->cur, ktoa->hwtail,
                 kring->rhead, kring->rcur, kring->nr_hwtail);

        return notify;
}

/*
 * Reconcile host and guest views of the receive ring.
 *
 * Update hwcur/hwtail from host (reading from CSB).
 *
 * If guest user has released buffers up to the one before ring->head, we
 * also give them to the host.
 *
 * Notifications from the host are enabled only if the guest user would
 * block (no more completed slots in the ring).
 */
bool
netmap_pt_guest_rxsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
                       struct netmap_kring *kring, int flags)
{
        bool notify = false;

        /* Disable notifications */
        atok->appl_need_kick = 0;

        /*
         * First part: import newly received packets, by updating the kring
         * hwtail to the hwtail known from the host (read from the CSB).
         * This also updates the kring hwcur.
         */
        nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, &kring->nr_hwcur);
        kring->nr_kflags &= ~NKR_PENDINTR;

        /*
         * Second part: tell the host about the slots that guest user has
         * released, by updating cur and head in the CSB.
         */
        if (kring->rhead != kring->nr_hwcur) {
                nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);
                /* Ask for a kick from the guest to the host if needed. */
                if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
                        atok->sync_flags = flags;
                        notify = true;
                }
        }

        /*
         * No more completed RX slots.
         * The user thread will go to sleep and we need to be notified by the
         * host when more RX slots have been completed.
         */
        if (nm_kr_rxempty(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
                /* Reenable notifications. */
                atok->appl_need_kick = 1;
                /* Double check, with store-load memory barrier. */
                nm_stld_barrier();
                nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
                                        &kring->nr_hwcur);
                /* If there are new slots, disable notifications. */
                if (!nm_kr_rxempty(kring)) {
                        atok->appl_need_kick = 0;
                }
        }

        nm_prdis(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
                 kring->name, atok->head, atok->cur, ktoa->hwtail,
                 kring->rhead, kring->rcur, kring->nr_hwtail);

        return notify;
}

/*
 * Callbacks for ptnet drivers: nm_krings_create, nm_krings_delete, nm_dtor.
 */
int
ptnet_nm_krings_create(struct netmap_adapter *na)
{
        struct netmap_pt_guest_adapter *ptna =
                (struct netmap_pt_guest_adapter *)na; /* Upcast. */
        struct netmap_adapter *na_nm = &ptna->hwup.up;
        struct netmap_adapter *na_dr = &ptna->dr.up;
        int ret;

        if (ptna->backend_users) {
                return 0;
        }

        /* Create krings on the public netmap adapter. */
        ret = netmap_hw_krings_create(na_nm);
        if (ret) {
                return ret;
        }

        /* Copy krings into the netmap adapter private to the driver. */
        na_dr->tx_rings = na_nm->tx_rings;
        na_dr->rx_rings = na_nm->rx_rings;

        return 0;
}

void
ptnet_nm_krings_delete(struct netmap_adapter *na)
{
        struct netmap_pt_guest_adapter *ptna =
                (struct netmap_pt_guest_adapter *)na; /* Upcast. */
        struct netmap_adapter *na_nm = &ptna->hwup.up;
        struct netmap_adapter *na_dr = &ptna->dr.up;

        if (ptna->backend_users) {
                return;
        }

        na_dr->tx_rings = NULL;
        na_dr->rx_rings = NULL;

        netmap_hw_krings_delete(na_nm);
}

void
ptnet_nm_dtor(struct netmap_adapter *na)
{
        struct netmap_pt_guest_adapter *ptna =
                (struct netmap_pt_guest_adapter *)na;

        netmap_mem_put(ptna->dr.up.nm_mem);
        memset(&ptna->dr, 0, sizeof(ptna->dr));
        netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp);
}

int
netmap_pt_guest_attach(struct netmap_adapter *arg,
                       unsigned int nifp_offset, unsigned int memid)
{
        struct netmap_pt_guest_adapter *ptna;
        struct ifnet *ifp = arg ? arg->ifp : NULL;
        int error;

        /* get allocator */
        arg->nm_mem = netmap_mem_pt_guest_new(ifp, nifp_offset, memid);
        if (arg->nm_mem == NULL)
                return ENOMEM;
        arg->na_flags |= NAF_MEM_OWNER;
        error = netmap_attach_ext(arg, sizeof(struct netmap_pt_guest_adapter), 1);
        if (error)
                return error;

        /* get the netmap_pt_guest_adapter */
        ptna = (struct netmap_pt_guest_adapter *) NA(ifp);

        /* Initialize a separate pass-through netmap adapter that is going to
         * be used by the ptnet driver only, and so never exposed to netmap
         * applications. We only need a subset of the available fields. */
        memset(&ptna->dr, 0, sizeof(ptna->dr));
        ptna->dr.up.ifp = ifp;
        ptna->dr.up.nm_mem = netmap_mem_get(ptna->hwup.up.nm_mem);
        ptna->dr.up.nm_config = ptna->hwup.up.nm_config;

        ptna->backend_users = 0;

        return 0;
}

#endif /* WITH_PTNETMAP */