1 /*-
2 * Copyright (c) 2016, Vincenzo Maffione
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /* Driver for ptnet paravirtualized network device. */
28
29 #include <sys/cdefs.h>
30
31 #include <sys/types.h>
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/sockio.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/socket.h>
40 #include <sys/sysctl.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/taskqueue.h>
44 #include <sys/smp.h>
45 #include <sys/time.h>
46 #include <machine/smp.h>
47
48 #include <vm/uma.h>
49 #include <vm/vm.h>
50 #include <vm/pmap.h>
51
52 #include <net/ethernet.h>
53 #include <net/if.h>
54 #include <net/if_var.h>
55 #include <net/if_arp.h>
56 #include <net/if_dl.h>
57 #include <net/if_types.h>
58 #include <net/if_media.h>
59 #include <net/if_vlan_var.h>
60 #include <net/bpf.h>
61
62 #include <netinet/in_systm.h>
63 #include <netinet/in.h>
64 #include <netinet/ip.h>
65 #include <netinet/ip6.h>
66 #include <netinet6/ip6_var.h>
67 #include <netinet/udp.h>
68 #include <netinet/tcp.h>
69
70 #include <machine/bus.h>
71 #include <machine/resource.h>
72 #include <sys/bus.h>
73 #include <sys/rman.h>
74
75 #include <dev/pci/pcivar.h>
76 #include <dev/pci/pcireg.h>
77
78 #include "opt_inet.h"
79 #include "opt_inet6.h"
80
81 #include <sys/selinfo.h>
82 #include <net/netmap.h>
83 #include <dev/netmap/netmap_kern.h>
84 #include <net/netmap_virt.h>
85 #include <dev/netmap/netmap_mem2.h>
86 #include <dev/virtio/network/virtio_net.h>
87
88 #ifdef WITH_PTNETMAP
89
90 #ifndef INET
91 #error "INET not defined, cannot support offloads"
92 #endif
93
94 static uint64_t ptnet_get_counter(if_t, ift_counter);
95
96 //#define PTNETMAP_STATS
97 //#define DEBUG
98 #ifdef DEBUG
99 #define DBG(x) x
100 #else /* !DEBUG */
101 #define DBG(x)
102 #endif /* !DEBUG */
103
104 extern int ptnet_vnet_hdr; /* Tunable parameter */
105
106 struct ptnet_softc;
107
108 struct ptnet_queue_stats {
109 uint64_t packets; /* if_[io]packets */
110 uint64_t bytes; /* if_[io]bytes */
111 uint64_t errors; /* if_[io]errors */
112 uint64_t iqdrops; /* if_iqdrops */
113 uint64_t mcasts; /* if_[io]mcasts */
114 #ifdef PTNETMAP_STATS
115 uint64_t intrs;
116 uint64_t kicks;
117 #endif /* PTNETMAP_STATS */
118 };
119
120 struct ptnet_queue {
121 struct ptnet_softc *sc;
122 struct resource *irq;
123 void *cookie;
124 int kring_id;
125 struct nm_csb_atok *atok;
126 struct nm_csb_ktoa *ktoa;
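/* Offset of this queue's kick (doorbell) register within the device
 * I/O BAR (PTNET_IO_KICK_BASE + 4 * queue index, see ptnet_attach()). */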
127 unsigned int kick;
128 struct mtx lock;
129 struct buf_ring *bufring; /* for TX queues */
130 struct ptnet_queue_stats stats;
131 #ifdef PTNETMAP_STATS
132 struct ptnet_queue_stats last_stats;
133 #endif /* PTNETMAP_STATS */
134 struct taskqueue *taskq;
135 struct task task;
136 char lock_name[16];
137 };
138
139 #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock)
140 #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock)
141 #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock)
142
143 struct ptnet_softc {
144 device_t dev;
145 if_t ifp;
146 struct ifmedia media;
147 struct mtx lock;
148 char lock_name[16];
149 char hwaddr[ETHER_ADDR_LEN];
150
151 /* Mirror of PTFEAT register. */
152 uint32_t ptfeatures;
153 unsigned int vnet_hdr_len;
154
155 /* PCI BARs support. */
156 struct resource *iomem;
157 struct resource *msix_mem;
158
159 unsigned int num_rings;
160 unsigned int num_tx_rings;
161 struct ptnet_queue *queues;
162 struct ptnet_queue *rxqueues;
163 struct nm_csb_atok *csb_gh;
164 struct nm_csb_ktoa *csb_hg;
165
166 unsigned int min_tx_space;
167
168 struct netmap_pt_guest_adapter *ptna;
169
170 struct callout tick;
171 #ifdef PTNETMAP_STATS
172 struct timeval last_ts;
173 #endif /* PTNETMAP_STATS */
174 };
175
176 #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock)
177 #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock)
178
179 static int ptnet_probe(device_t);
180 static int ptnet_attach(device_t);
181 static int ptnet_detach(device_t);
182 static int ptnet_suspend(device_t);
183 static int ptnet_resume(device_t);
184 static int ptnet_shutdown(device_t);
185
186 static void ptnet_init(void *opaque);
187 static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
188 static int ptnet_init_locked(struct ptnet_softc *sc);
189 static int ptnet_stop(struct ptnet_softc *sc);
190 static int ptnet_transmit(if_t ifp, struct mbuf *m);
191 static int ptnet_drain_transmit_queue(struct ptnet_queue *pq,
192 unsigned int budget,
193 bool may_resched);
194 static void ptnet_qflush(if_t ifp);
195 static void ptnet_tx_task(void *context, int pending);
196
197 static int ptnet_media_change(if_t ifp);
198 static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
199 #ifdef PTNETMAP_STATS
200 static void ptnet_tick(void *opaque);
201 #endif
202
203 static int ptnet_irqs_init(struct ptnet_softc *sc);
204 static void ptnet_irqs_fini(struct ptnet_softc *sc);
205
206 static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd);
207 static int ptnet_nm_config(struct netmap_adapter *na,
208 struct nm_config_info *info);
209 static void ptnet_update_vnet_hdr(struct ptnet_softc *sc);
210 static int ptnet_nm_register(struct netmap_adapter *na, int onoff);
211 static int ptnet_nm_txsync(struct netmap_kring *kring, int flags);
212 static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
213 static void ptnet_nm_intr(struct netmap_adapter *na, int onoff);
214
215 static void ptnet_tx_intr(void *opaque);
216 static void ptnet_rx_intr(void *opaque);
217
218 static unsigned ptnet_rx_discard(struct netmap_kring *kring,
219 unsigned int head);
220 static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
221 bool may_resched);
222 static void ptnet_rx_task(void *context, int pending);
223
224 #ifdef DEVICE_POLLING
225 static poll_handler_t ptnet_poll;
226 #endif
227
228 static device_method_t ptnet_methods[] = {
229 DEVMETHOD(device_probe, ptnet_probe),
230 DEVMETHOD(device_attach, ptnet_attach),
231 DEVMETHOD(device_detach, ptnet_detach),
232 DEVMETHOD(device_suspend, ptnet_suspend),
233 DEVMETHOD(device_resume, ptnet_resume),
234 DEVMETHOD(device_shutdown, ptnet_shutdown),
235 DEVMETHOD_END
236 };
237
238 static driver_t ptnet_driver = {
239 "ptnet",
240 ptnet_methods,
241 sizeof(struct ptnet_softc)
242 };
243
244 /* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
245 DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, NULL, NULL,
246 SI_ORDER_MIDDLE + 2);
247
248 static int
249 ptnet_probe(device_t dev)
250 {
251 if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
252 pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
253 return (ENXIO);
254 }
255
256 device_set_desc(dev, "ptnet network adapter");
257
258 return (BUS_PROBE_DEFAULT);
259 }
260
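/* Notify (kick) the host about new work on this queue by writing to the
 * queue's doorbell register in the I/O BAR (the driver simply writes 0). */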
261 static inline void ptnet_kick(struct ptnet_queue *pq)
262 {
263 #ifdef PTNETMAP_STATS
264 pq->stats.kicks ++;
265 #endif /* PTNETMAP_STATS */
266 bus_write_4(pq->sc->iomem, pq->kick, 0);
267 }
268
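/* TX/RX budgets bound how many packets a single taskqueue/interrupt pass
 * may process before rescheduling; the BATCH values control how many
 * packets are handled between consecutive CSB updates (and possible kicks)
 * in ptnet_drain_transmit_queue() and ptnet_rx_eof(). */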
269 #define PTNET_BUF_RING_SIZE 4096
270 #define PTNET_RX_BUDGET 512
271 #define PTNET_RX_BATCH 1
272 #define PTNET_TX_BUDGET 512
273 #define PTNET_TX_BATCH 64
274 #define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf)
275 #define PTNET_MAX_PKT_SIZE 65536
276
277 #define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP)
278 #define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
279 #define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\
280 PTNET_CSUM_OFFLOAD_IPV6)
281
282 static int
283 ptnet_attach(device_t dev)
284 {
285 uint32_t ptfeatures = 0;
286 unsigned int num_rx_rings, num_tx_rings;
287 struct netmap_adapter na_arg;
288 unsigned int nifp_offset;
289 struct ptnet_softc *sc;
290 if_t ifp;
291 uint32_t macreg;
292 int err, rid;
293 int i;
294
295 sc = device_get_softc(dev);
296 sc->dev = dev;
297
298 /* Setup PCI resources. */
299 pci_enable_busmaster(dev);
300
301 rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
302 sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
303 RF_ACTIVE);
304 if (sc->iomem == NULL) {
305 device_printf(dev, "Failed to map I/O BAR\n");
306 return (ENXIO);
307 }
308
309 /* Negotiate features with the hypervisor. */
310 if (ptnet_vnet_hdr) {
311 ptfeatures |= PTNETMAP_F_VNET_HDR;
312 }
313 bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
314 ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
315 sc->ptfeatures = ptfeatures;
316
317 num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
318 num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
319 sc->num_rings = num_tx_rings + num_rx_rings;
320 sc->num_tx_rings = num_tx_rings;
321
322 if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) {
323 device_printf(dev, "CSB cannot handle that many rings (%u)\n",
324 sc->num_rings);
325 err = ENOMEM;
326 goto err_path;
327 }
328
329 /* Allocate CSB and carry out CSB allocation protocol. */
330 sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
331 (size_t)0, -1UL, PAGE_SIZE, 0);
332 if (sc->csb_gh == NULL) {
333 device_printf(dev, "Failed to allocate CSB\n");
334 err = ENOMEM;
335 goto err_path;
336 }
337 sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE);
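/* Layout: the first page (csb_gh) holds the guest-to-host blocks and the
 * second page (csb_hg) the host-to-guest blocks, one entry of each type
 * per ring (see the per-queue atok/ktoa pointers set up below). */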
338
339 {
340 /*
341 * We use uint64_t rather than vm_paddr_t since we
342 * need 64 bit addresses even on 32 bit platforms.
343 */
344 uint64_t paddr = vtophys(sc->csb_gh);
345
346 /* CSB allocation protocol: write to BAH first, then
347 * to BAL (for both GH and HG sections). */
348 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
349 (paddr >> 32) & 0xffffffff);
350 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
351 paddr & 0xffffffff);
352 paddr = vtophys(sc->csb_hg);
353 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
354 (paddr >> 32) & 0xffffffff);
355 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,
356 paddr & 0xffffffff);
357 }
358
359 /* Allocate and initialize per-queue data structures. */
360 sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
361 M_DEVBUF, M_NOWAIT | M_ZERO);
362 if (sc->queues == NULL) {
363 err = ENOMEM;
364 goto err_path;
365 }
366 sc->rxqueues = sc->queues + num_tx_rings;
367
368 for (i = 0; i < sc->num_rings; i++) {
369 struct ptnet_queue *pq = sc->queues + i;
370
371 pq->sc = sc;
372 pq->kring_id = i;
373 pq->kick = PTNET_IO_KICK_BASE + 4 * i;
374 pq->atok = sc->csb_gh + i;
375 pq->ktoa = sc->csb_hg + i;
376 snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
377 device_get_nameunit(dev), i);
378 mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
379 if (i >= num_tx_rings) {
380 /* RX queue: fix kring_id. */
381 pq->kring_id -= num_tx_rings;
382 } else {
383 /* TX queue: allocate buf_ring. */
384 pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
385 M_DEVBUF, M_NOWAIT, &pq->lock);
386 if (pq->bufring == NULL) {
387 err = ENOMEM;
388 goto err_path;
389 }
390 }
391 }
392
393 sc->min_tx_space = 64; /* Safe initial value. */
394
395 err = ptnet_irqs_init(sc);
396 if (err) {
397 goto err_path;
398 }
399
400 /* Setup Ethernet interface. */
401 sc->ifp = ifp = if_alloc(IFT_ETHER);
402 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
403 if_setbaudrate(ifp, IF_Gbps(10));
404 if_setsoftc(ifp, sc);
405 if_setflags(ifp, IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX);
406 if_setinitfn(ifp, ptnet_init);
407 if_setioctlfn(ifp, ptnet_ioctl);
408 if_setget_counter(ifp, ptnet_get_counter);
409 if_settransmitfn(ifp, ptnet_transmit);
410 if_setqflushfn(ifp, ptnet_qflush);
411
412 ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
413 ptnet_media_status);
414 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
415 ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);
416
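/* Read the MAC address from the device registers: PTNET_IO_MAC_HI holds
 * the two most significant bytes in its low 16 bits, PTNET_IO_MAC_LO the
 * remaining four bytes. */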
417 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
418 sc->hwaddr[0] = (macreg >> 8) & 0xff;
419 sc->hwaddr[1] = macreg & 0xff;
420 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
421 sc->hwaddr[2] = (macreg >> 24) & 0xff;
422 sc->hwaddr[3] = (macreg >> 16) & 0xff;
423 sc->hwaddr[4] = (macreg >> 8) & 0xff;
424 sc->hwaddr[5] = macreg & 0xff;
425
426 ether_ifattach(ifp, sc->hwaddr);
427
428 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
429 if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU, 0);
430
431 if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
432 /* Similarly to what the vtnet driver does, we can emulate
433 * VLAN offloads by inserting and removing the 802.1Q
434 * header during transmit and receive. We are then able
435 * to do checksum offloading of VLAN frames. */
436 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
437 | IFCAP_VLAN_HWCSUM
438 | IFCAP_TSO | IFCAP_LRO
439 | IFCAP_VLAN_HWTSO
440 | IFCAP_VLAN_HWTAGGING, 0);
441 }
442
443 if_setcapenable(ifp, if_getcapabilities(ifp));
444 #ifdef DEVICE_POLLING
445 /* Don't enable polling by default. */
446 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
447 #endif
448 snprintf(sc->lock_name, sizeof(sc->lock_name),
449 "%s", device_get_nameunit(dev));
450 mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
451 callout_init_mtx(&sc->tick, &sc->lock, 0);
452
453 /* Prepare a netmap_adapter struct instance to do netmap_attach(). */
454 nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
455 memset(&na_arg, 0, sizeof(na_arg));
456 na_arg.ifp = ifp;
457 na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
458 na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
459 na_arg.num_tx_rings = num_tx_rings;
460 na_arg.num_rx_rings = num_rx_rings;
461 na_arg.nm_config = ptnet_nm_config;
462 na_arg.nm_krings_create = ptnet_nm_krings_create;
463 na_arg.nm_krings_delete = ptnet_nm_krings_delete;
464 na_arg.nm_dtor = ptnet_nm_dtor;
465 na_arg.nm_intr = ptnet_nm_intr;
466 na_arg.nm_register = ptnet_nm_register;
467 na_arg.nm_txsync = ptnet_nm_txsync;
468 na_arg.nm_rxsync = ptnet_nm_rxsync;
469
470 netmap_pt_guest_attach(&na_arg, nifp_offset,
471 bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));
472
473 /* Now a netmap adapter for this ifp has been allocated, and it
474 * can be accessed through NA(ifp). We also have to initialize the CSB
475 * pointer. */
476 sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);
477
478 /* If virtio-net header was negotiated, set the virt_hdr_len field in
479 * the netmap adapter, to inform users that this netmap adapter requires
480 * the application to deal with the headers. */
481 ptnet_update_vnet_hdr(sc);
482
483 device_printf(dev, "%s() completed\n", __func__);
484
485 return (0);
486
487 err_path:
488 ptnet_detach(dev);
489 return err;
490 }
491
492 /* Stop host sync-kloop if it was running. */
493 static void
494 ptnet_device_shutdown(struct ptnet_softc *sc)
495 {
496 ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
497 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
498 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
499 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
500 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
501 }
502
503 static int
504 ptnet_detach(device_t dev)
505 {
506 struct ptnet_softc *sc = device_get_softc(dev);
507 int i;
508
509 ptnet_device_shutdown(sc);
510
511 #ifdef DEVICE_POLLING
512 if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
513 ether_poll_deregister(sc->ifp);
514 }
515 #endif
516 callout_drain(&sc->tick);
517
518 if (sc->queues) {
519 /* Drain taskqueues before calling if_detach. */
520 for (i = 0; i < sc->num_rings; i++) {
521 struct ptnet_queue *pq = sc->queues + i;
522
523 if (pq->taskq) {
524 taskqueue_drain(pq->taskq, &pq->task);
525 }
526 }
527 }
528
529 if (sc->ifp) {
530 ether_ifdetach(sc->ifp);
531
532 /* Uninitialize netmap adapters for this device. */
533 netmap_detach(sc->ifp);
534
535 ifmedia_removeall(&sc->media);
536 if_free(sc->ifp);
537 sc->ifp = NULL;
538 }
539
540 ptnet_irqs_fini(sc);
541
542 if (sc->csb_gh) {
543 contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);
544 sc->csb_gh = NULL;
545 sc->csb_hg = NULL;
546 }
547
548 if (sc->queues) {
549 for (i = 0; i < sc->num_rings; i++) {
550 struct ptnet_queue *pq = sc->queues + i;
551
552 if (mtx_initialized(&pq->lock)) {
553 mtx_destroy(&pq->lock);
554 }
555 if (pq->bufring != NULL) {
556 buf_ring_free(pq->bufring, M_DEVBUF);
557 }
558 }
559 free(sc->queues, M_DEVBUF);
560 sc->queues = NULL;
561 }
562
563 if (sc->iomem) {
564 bus_release_resource(dev, SYS_RES_IOPORT,
565 PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);
566 sc->iomem = NULL;
567 }
568
569 mtx_destroy(&sc->lock);
570
571 device_printf(dev, "%s() completed\n", __func__);
572
573 return (0);
574 }
575
576 static int
577 ptnet_suspend(device_t dev)
578 {
579 struct ptnet_softc *sc = device_get_softc(dev);
580
581 (void)sc;
582
583 return (0);
584 }
585
586 static int
587 ptnet_resume(device_t dev)
588 {
589 struct ptnet_softc *sc = device_get_softc(dev);
590
591 (void)sc;
592
593 return (0);
594 }
595
596 static int
597 ptnet_shutdown(device_t dev)
598 {
599 struct ptnet_softc *sc = device_get_softc(dev);
600
601 ptnet_device_shutdown(sc);
602
603 return (0);
604 }
605
606 static int
607 ptnet_irqs_init(struct ptnet_softc *sc)
608 {
609 int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
610 int nvecs = sc->num_rings;
611 device_t dev = sc->dev;
612 int err = ENOSPC;
613 int cpu_cur;
614 int i;
615
616 if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
617 device_printf(dev, "Could not find MSI-X capability\n");
618 return (ENXIO);
619 }
620
621 sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
622 &rid, RF_ACTIVE);
623 if (sc->msix_mem == NULL) {
624 device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
625 return (ENXIO);
626 }
627
628 if (pci_msix_count(dev) < nvecs) {
629 device_printf(dev, "Not enough MSI-X vectors\n");
630 goto err_path;
631 }
632
633 err = pci_alloc_msix(dev, &nvecs);
634 if (err) {
635 device_printf(dev, "Failed to allocate MSI-X vectors\n");
636 goto err_path;
637 }
638
639 for (i = 0; i < nvecs; i++) {
640 struct ptnet_queue *pq = sc->queues + i;
641
642 rid = i + 1;
643 pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
644 RF_ACTIVE);
645 if (pq->irq == NULL) {
646 device_printf(dev, "Failed to allocate interrupt "
647 "for queue #%d\n", i);
648 err = ENOSPC;
649 goto err_path;
650 }
651 }
652
653 cpu_cur = CPU_FIRST();
654 for (i = 0; i < nvecs; i++) {
655 struct ptnet_queue *pq = sc->queues + i;
656 void (*handler)(void *) = ptnet_tx_intr;
657
658 if (i >= sc->num_tx_rings) {
659 handler = ptnet_rx_intr;
660 }
661 err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
662 NULL /* intr_filter */, handler,
663 pq, &pq->cookie);
664 if (err) {
665 device_printf(dev, "Failed to register intr handler "
666 "for queue #%d\n", i);
667 goto err_path;
668 }
669
670 bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
671 #if 0
672 bus_bind_intr(sc->dev, pq->irq, cpu_cur);
673 #endif
674 cpu_cur = CPU_NEXT(cpu_cur);
675 }
676
677 device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);
678
679 cpu_cur = CPU_FIRST();
680 for (i = 0; i < nvecs; i++) {
681 struct ptnet_queue *pq = sc->queues + i;
682
683 if (i < sc->num_tx_rings)
684 TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
685 else
686 NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);
687
688 pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
689 taskqueue_thread_enqueue, &pq->taskq);
690 taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
691 device_get_nameunit(sc->dev), cpu_cur);
692 cpu_cur = CPU_NEXT(cpu_cur);
693 }
694
695 return 0;
696 err_path:
697 ptnet_irqs_fini(sc);
698 return err;
699 }
700
701 static void
702 ptnet_irqs_fini(struct ptnet_softc *sc)
703 {
704 device_t dev = sc->dev;
705 int i;
706
707 for (i = 0; i < sc->num_rings; i++) {
708 struct ptnet_queue *pq = sc->queues + i;
709
710 if (pq->taskq) {
711 taskqueue_free(pq->taskq);
712 pq->taskq = NULL;
713 }
714
715 if (pq->cookie) {
716 bus_teardown_intr(dev, pq->irq, pq->cookie);
717 pq->cookie = NULL;
718 }
719
720 if (pq->irq) {
721 bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
722 pq->irq = NULL;
723 }
724 }
725
726 if (sc->msix_mem) {
727 pci_release_msi(dev);
728
729 bus_release_resource(dev, SYS_RES_MEMORY,
730 PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
731 sc->msix_mem);
732 sc->msix_mem = NULL;
733 }
734 }
735
736 static void
737 ptnet_init(void *opaque)
738 {
739 struct ptnet_softc *sc = opaque;
740
741 PTNET_CORE_LOCK(sc);
742 ptnet_init_locked(sc);
743 PTNET_CORE_UNLOCK(sc);
744 }
745
746 static int
747 ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
748 {
749 struct ptnet_softc *sc = if_getsoftc(ifp);
750 device_t dev = sc->dev;
751 struct ifreq *ifr = (struct ifreq *)data;
752 int mask __unused, err = 0;
753
754 switch (cmd) {
755 case SIOCSIFFLAGS:
756 device_printf(dev, "SIOCSIFFLAGS %x\n", if_getflags(ifp));
757 PTNET_CORE_LOCK(sc);
758 if (if_getflags(ifp) & IFF_UP) {
759 /* Network stack wants the iff to be up. */
760 err = ptnet_init_locked(sc);
761 } else {
762 /* Network stack wants the iff to be down. */
763 err = ptnet_stop(sc);
764 }
765 /* We don't need to do anything to support IFF_PROMISC,
766 * since that is managed by the backend port. */
767 PTNET_CORE_UNLOCK(sc);
768 break;
769
770 case SIOCSIFCAP:
771 device_printf(dev, "SIOCSIFCAP %x %x\n",
772 ifr->ifr_reqcap, if_getcapenable(ifp));
773 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
774 #ifdef DEVICE_POLLING
775 if (mask & IFCAP_POLLING) {
776 struct ptnet_queue *pq;
777 int i;
778
779 if (ifr->ifr_reqcap & IFCAP_POLLING) {
780 err = ether_poll_register(ptnet_poll, ifp);
781 if (err) {
782 break;
783 }
784 /* Stop queues and sync with taskqueues. */
785 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
786 for (i = 0; i < sc->num_rings; i++) {
787 pq = sc->queues + i;
788 /* Make sure the worker sees that
789 * IFF_DRV_RUNNING has been cleared. */
790 PTNET_Q_LOCK(pq);
791 pq->atok->appl_need_kick = 0;
792 PTNET_Q_UNLOCK(pq);
793 /* Wait for rescheduling to finish. */
794 if (pq->taskq) {
795 taskqueue_drain(pq->taskq,
796 &pq->task);
797 }
798 }
799 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
800 } else {
801 err = ether_poll_deregister(ifp);
802 for (i = 0; i < sc->num_rings; i++) {
803 pq = sc->queues + i;
804 PTNET_Q_LOCK(pq);
805 pq->atok->appl_need_kick = 1;
806 PTNET_Q_UNLOCK(pq);
807 }
808 }
809 }
810 #endif /* DEVICE_POLLING */
811 if_setcapenable(ifp, ifr->ifr_reqcap);
812 break;
813
814 case SIOCSIFMTU:
815 /* We support any reasonable MTU. */
816 if (ifr->ifr_mtu < ETHERMIN ||
817 ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
818 err = EINVAL;
819 } else {
820 PTNET_CORE_LOCK(sc);
821 if_setmtu(ifp, ifr->ifr_mtu);
822 PTNET_CORE_UNLOCK(sc);
823 }
824 break;
825
826 case SIOCSIFMEDIA:
827 case SIOCGIFMEDIA:
828 err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
829 break;
830
831 default:
832 err = ether_ioctl(ifp, cmd, data);
833 break;
834 }
835
836 return err;
837 }
838
839 static int
840 ptnet_init_locked(struct ptnet_softc *sc)
841 {
842 if_t ifp = sc->ifp;
843 struct netmap_adapter *na_dr = &sc->ptna->dr.up;
844 struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
845 unsigned int nm_buf_size;
846 int ret;
847
848 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
849 return 0; /* nothing to do */
850 }
851
852 device_printf(sc->dev, "%s\n", __func__);
853
854 /* Translate offload capabilities according to if_capenable. */
855 if_sethwassist(ifp, 0);
856 if (if_getcapenable(ifp) & IFCAP_TXCSUM)
857 if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD, 0);
858 if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
859 if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD_IPV6, 0);
860 if (if_getcapenable(ifp) & IFCAP_TSO4)
861 if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
862 if (if_getcapenable(ifp) & IFCAP_TSO6)
863 if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
864
865 /*
866 * Prepare the interface for netmap mode access.
867 */
868 netmap_update_config(na_dr);
869
870 ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
871 if (ret) {
872 device_printf(sc->dev, "netmap_mem_finalize() failed\n");
873 return ret;
874 }
875
876 if (sc->ptna->backend_users == 0) {
877 ret = ptnet_nm_krings_create(na_nm);
878 if (ret) {
879 device_printf(sc->dev, "ptnet_nm_krings_create() "
880 "failed\n");
881 goto err_mem_finalize;
882 }
883
884 ret = netmap_mem_rings_create(na_dr);
885 if (ret) {
886 device_printf(sc->dev, "netmap_mem_rings_create() "
887 "failed\n");
888 goto err_rings_create;
889 }
890
891 ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
892 if (ret) {
893 device_printf(sc->dev, "netmap_mem_get_lut() "
894 "failed\n");
895 goto err_get_lut;
896 }
897 }
898
899 ret = ptnet_nm_register(na_dr, 1 /* on */);
900 if (ret) {
901 goto err_register;
902 }
903
904 nm_buf_size = NETMAP_BUF_SIZE(na_dr);
905
906 KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
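/* Minimum number of free TX slots required to accept a new packet:
 * roughly the slots taken by a maximum-sized packet, plus some slack.
 * Checked by PTNET_TX_NOSPACE in the transmit path. */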
907 sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
908 device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
909 sc->min_tx_space);
910 #ifdef PTNETMAP_STATS
911 callout_reset(&sc->tick, hz, ptnet_tick, sc);
912 #endif
913
914 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
915
916 return 0;
917
918 err_register:
919 memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
920 err_get_lut:
921 netmap_mem_rings_delete(na_dr);
922 err_rings_create:
923 ptnet_nm_krings_delete(na_nm);
924 err_mem_finalize:
925 netmap_mem_deref(na_dr->nm_mem, na_dr);
926
927 return ret;
928 }
929
930 /* To be called under core lock. */
931 static int
932 ptnet_stop(struct ptnet_softc *sc)
933 {
934 if_t ifp = sc->ifp;
935 struct netmap_adapter *na_dr = &sc->ptna->dr.up;
936 struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
937 int i;
938
939 device_printf(sc->dev, "%s\n", __func__);
940
941 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
942 return 0; /* nothing to do */
943 }
944
945 /* Clear the driver-ready flag, and synchronize with all the queues,
946 * so that after this loop we are sure nobody is working anymore with
947 * the device. This scheme is taken from the vtnet driver. */
948 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
949 callout_stop(&sc->tick);
950 for (i = 0; i < sc->num_rings; i++) {
951 PTNET_Q_LOCK(sc->queues + i);
952 PTNET_Q_UNLOCK(sc->queues + i);
953 }
954
955 ptnet_nm_register(na_dr, 0 /* off */);
956
957 if (sc->ptna->backend_users == 0) {
958 netmap_mem_rings_delete(na_dr);
959 ptnet_nm_krings_delete(na_nm);
960 }
961 netmap_mem_deref(na_dr->nm_mem, na_dr);
962
963 return 0;
964 }
965
966 static void
967 ptnet_qflush(if_t ifp)
968 {
969 struct ptnet_softc *sc = if_getsoftc(ifp);
970 int i;
971
972 /* Flush all the bufrings and do the interface flush. */
973 for (i = 0; i < sc->num_rings; i++) {
974 struct ptnet_queue *pq = sc->queues + i;
975 struct mbuf *m;
976
977 PTNET_Q_LOCK(pq);
978 if (pq->bufring) {
979 while ((m = buf_ring_dequeue_sc(pq->bufring))) {
980 m_freem(m);
981 }
982 }
983 PTNET_Q_UNLOCK(pq);
984 }
985
986 if_qflush(ifp);
987 }
988
989 static int
990 ptnet_media_change(if_t ifp)
991 {
992 struct ptnet_softc *sc = if_getsoftc(ifp);
993 struct ifmedia *ifm = &sc->media;
994
995 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
996 return EINVAL;
997 }
998
999 return 0;
1000 }
1001
1002 static uint64_t
1003 ptnet_get_counter(if_t ifp, ift_counter cnt)
1004 {
1005 struct ptnet_softc *sc = if_getsoftc(ifp);
1006 struct ptnet_queue_stats stats[2];
1007 int i;
1008
1009 /* Accumulate statistics over the queues. */
1010 memset(stats, 0, sizeof(stats));
1011 for (i = 0; i < sc->num_rings; i++) {
1012 struct ptnet_queue *pq = sc->queues + i;
1013 int idx = (i < sc->num_tx_rings) ? 0 : 1;
1014
1015 stats[idx].packets += pq->stats.packets;
1016 stats[idx].bytes += pq->stats.bytes;
1017 stats[idx].errors += pq->stats.errors;
1018 stats[idx].iqdrops += pq->stats.iqdrops;
1019 stats[idx].mcasts += pq->stats.mcasts;
1020 }
1021
1022 switch (cnt) {
1023 case IFCOUNTER_IPACKETS:
1024 return (stats[1].packets);
1025 case IFCOUNTER_IQDROPS:
1026 return (stats[1].iqdrops);
1027 case IFCOUNTER_IERRORS:
1028 return (stats[1].errors);
1029 case IFCOUNTER_OPACKETS:
1030 return (stats[0].packets);
1031 case IFCOUNTER_OBYTES:
1032 return (stats[0].bytes);
1033 case IFCOUNTER_OMCASTS:
1034 return (stats[0].mcasts);
1035 default:
1036 return (if_get_counter_default(ifp, cnt));
1037 }
1038 }
1039
1040
1041 #ifdef PTNETMAP_STATS
1042 /* Called under core lock. */
1043 static void
1044 ptnet_tick(void *opaque)
1045 {
1046 struct ptnet_softc *sc = opaque;
1047 int i;
1048
1049 for (i = 0; i < sc->num_rings; i++) {
1050 struct ptnet_queue *pq = sc->queues + i;
1051 struct ptnet_queue_stats cur = pq->stats;
1052 struct timeval now;
1053 unsigned int delta;
1054
1055 microtime(&now);
1056 delta = now.tv_usec - sc->last_ts.tv_usec +
1057 (now.tv_sec - sc->last_ts.tv_sec) * 1000000;
1058 delta /= 1000; /* in milliseconds */
1059
1060 if (delta == 0)
1061 continue;
1062
1063 device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
1064 "intr %lu\n", i, delta,
1065 (cur.packets - pq->last_stats.packets),
1066 (cur.kicks - pq->last_stats.kicks),
1067 (cur.intrs - pq->last_stats.intrs));
1068 pq->last_stats = cur;
1069 }
1070 microtime(&sc->last_ts);
1071 callout_schedule(&sc->tick, hz);
1072 }
1073 #endif /* PTNETMAP_STATS */
1074
1075 static void
1076 ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
1077 {
1078 /* We are always active, as the backend netmap port is
1079 * always open in netmap mode. */
1080 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1081 ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
1082 }
1083
1084 static uint32_t
1085 ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd)
1086 {
1087 /*
1088 * Write a command and read back error status,
1089 * with zero meaning success.
1090 */
1091 bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
1092 return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
1093 }
1094
1095 static int
1096 ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
1097 {
1098 struct ptnet_softc *sc = if_getsoftc(na->ifp);
1099
1100 info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
1101 info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
1102 info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
1103 info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
1104 info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);
1105
1106 device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n",
1107 info->num_tx_rings, info->num_rx_rings,
1108 info->num_tx_descs, info->num_rx_descs,
1109 info->rx_buf_maxsize);
1110
1111 return 0;
1112 }
1113
1114 static void
1115 ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
1116 {
1117 int i;
1118
1119 /* Sync krings from the host, reading from
1120 * CSB. */
1121 for (i = 0; i < sc->num_rings; i++) {
1122 struct nm_csb_atok *atok = sc->queues[i].atok;
1123 struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa;
1124 struct netmap_kring *kring;
1125
1126 if (i < na->num_tx_rings) {
1127 kring = na->tx_rings[i];
1128 } else {
1129 kring = na->rx_rings[i - na->num_tx_rings];
1130 }
1131 kring->rhead = kring->ring->head = atok->head;
1132 kring->rcur = kring->ring->cur = atok->cur;
1133 kring->nr_hwcur = ktoa->hwcur;
1134 kring->nr_hwtail = kring->rtail =
1135 kring->ring->tail = ktoa->hwtail;
1136
1137 nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
1138 ktoa->hwcur, atok->head, atok->cur,
1139 ktoa->hwtail);
1140 nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
1141 t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
1142 kring->ring->head, kring->ring->cur, kring->nr_hwtail,
1143 kring->rtail, kring->ring->tail);
1144 }
1145 }
1146
1147 static void
1148 ptnet_update_vnet_hdr(struct ptnet_softc *sc)
1149 {
1150 unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;
1151
1152 bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
1153 sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
1154 sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
1155 }
1156
1157 static int
1158 ptnet_nm_register(struct netmap_adapter *na, int onoff)
1159 {
1160 /* device-specific */
1161 if_t ifp = na->ifp;
1162 struct ptnet_softc *sc = if_getsoftc(ifp);
1163 int native = (na == &sc->ptna->hwup.up);
1164 struct ptnet_queue *pq;
1165 int ret = 0;
1166 int i;
1167
1168 if (!onoff) {
1169 sc->ptna->backend_users--;
1170 }
1171
1172 /* If this is the last netmap client, guest interrupt enable flags may
1173 * be in arbitrary state. Since these flags are going to be used also
1174 * by the netdevice driver, we have to make sure to start with
1175 * notifications enabled. Also, schedule NAPI to flush pending packets
1176 * in the RX rings, since we will not receive further interrupts
1177 * until these will be processed. */
1178 if (native && !onoff && na->active_fds == 0) {
1179 nm_prinf("Exit netmap mode, re-enable interrupts");
1180 for (i = 0; i < sc->num_rings; i++) {
1181 pq = sc->queues + i;
1182 pq->atok->appl_need_kick = 1;
1183 }
1184 }
1185
1186 if (onoff) {
1187 if (sc->ptna->backend_users == 0) {
1188 /* Initialize notification enable fields in the CSB. */
1189 for (i = 0; i < sc->num_rings; i++) {
1190 pq = sc->queues + i;
1191 pq->ktoa->kern_need_kick = 1;
1192 pq->atok->appl_need_kick =
1193 (!(if_getcapenable(ifp) & IFCAP_POLLING)
1194 && i >= sc->num_tx_rings);
1195 }
1196
1197 /* Set the virtio-net header length. */
1198 ptnet_update_vnet_hdr(sc);
1199
1200 /* Make sure the host adapter passed through is ready
1201 * for txsync/rxsync. */
1202 ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE);
1203 if (ret) {
1204 return ret;
1205 }
1206
1207 /* Align the guest krings and rings to the state stored
1208 * in the CSB. */
1209 ptnet_sync_from_csb(sc, na);
1210 }
1211
1212 /* If not native, don't call nm_set_native_flags, since we don't want
1213 * to replace if_transmit method, nor set NAF_NETMAP_ON */
1214 if (native) {
1215 netmap_krings_mode_commit(na, onoff);
1216 nm_set_native_flags(na);
1217 }
1218
1219 } else {
1220 if (native) {
1221 nm_clear_native_flags(na);
1222 netmap_krings_mode_commit(na, onoff);
1223 }
1224
1225 if (sc->ptna->backend_users == 0) {
1226 ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
1227 }
1228 }
1229
1230 if (onoff) {
1231 sc->ptna->backend_users++;
1232 }
1233
1234 return ret;
1235 }
1236
1237 static int
1238 ptnet_nm_txsync(struct netmap_kring *kring, int flags)
1239 {
1240 struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
1241 struct ptnet_queue *pq = sc->queues + kring->ring_id;
1242 bool notify;
1243
1244 notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);
1245 if (notify) {
1246 ptnet_kick(pq);
1247 }
1248
1249 return 0;
1250 }
1251
1252 static int
1253 ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
1254 {
1255 struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
1256 struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
1257 bool notify;
1258
1259 notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags);
1260 if (notify) {
1261 ptnet_kick(pq);
1262 }
1263
1264 return 0;
1265 }
1266
1267 static void
1268 ptnet_nm_intr(struct netmap_adapter *na, int onoff)
1269 {
1270 struct ptnet_softc *sc = if_getsoftc(na->ifp);
1271 int i;
1272
1273 for (i = 0; i < sc->num_rings; i++) {
1274 struct ptnet_queue *pq = sc->queues + i;
1275 pq->atok->appl_need_kick = onoff;
1276 }
1277 }
1278
1279 static void
1280 ptnet_tx_intr(void *opaque)
1281 {
1282 struct ptnet_queue *pq = opaque;
1283 struct ptnet_softc *sc = pq->sc;
1284
1285 DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
1286 #ifdef PTNETMAP_STATS
1287 pq->stats.intrs ++;
1288 #endif /* PTNETMAP_STATS */
1289
1290 if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
1291 return;
1292 }
1293
1294 /* Schedule the taskqueue to process pending transmit requests.
1295 * However, vtnet, if_em and if_igb just call the transmit routine
1296 * directly here, at least when using MSI-X interrupts. The if_em
1297 * driver, instead, schedules the taskqueue when using legacy interrupts. */
1298 taskqueue_enqueue(pq->taskq, &pq->task);
1299 }
1300
1301 static void
1302 ptnet_rx_intr(void *opaque)
1303 {
1304 struct ptnet_queue *pq = opaque;
1305 struct ptnet_softc *sc = pq->sc;
1306 unsigned int unused;
1307
1308 DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
1309 #ifdef PTNETMAP_STATS
1310 pq->stats.intrs ++;
1311 #endif /* PTNETMAP_STATS */
1312
1313 if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
1314 return;
1315 }
1316
1317 /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts,
1318 * receive-side processing is executed directly in the interrupt
1319 * service routine. Alternatively, we may schedule the taskqueue. */
1320 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
1321 }
1322
1323 static void
1324 ptnet_vlan_tag_remove(struct mbuf *m)
1325 {
1326 struct ether_vlan_header *evh;
1327
1328 evh = mtod(m, struct ether_vlan_header *);
1329 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
1330 m->m_flags |= M_VLANTAG;
1331
1332 /* Strip the 802.1Q header. */
1333 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
1334 ETHER_HDR_LEN - ETHER_TYPE_LEN);
1335 m_adj(m, ETHER_VLAN_ENCAP_LEN);
1336 }
1337
1338 static void
1339 ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
1340 unsigned int head, unsigned int sync_flags)
1341 {
1342 struct netmap_ring *ring = kring->ring;
1343 struct nm_csb_atok *atok = pq->atok;
1344 struct nm_csb_ktoa *ktoa = pq->ktoa;
1345
1346 /* Some packets have been pushed to the netmap ring. We have
1347 * to tell the host to process the new packets, updating cur
1348 * and head in the CSB. */
1349 ring->head = ring->cur = head;
1350
1351 /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
1352 kring->rcur = kring->rhead = head;
1353
1354 nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);
1355
1356 /* Kick the host if needed. */
1357 if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
1358 atok->sync_flags = sync_flags;
1359 ptnet_kick(pq);
1360 }
1361 }
1362
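/* True when fewer than _min slots are available between head _h and the
 * known tail _k->rtail; nkr_num_slots is added when _h has wrapped past
 * rtail, so that the difference stays non-negative. */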
1363 #define PTNET_TX_NOSPACE(_h, _k, _min) \
1364 ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
1365 (_k)->rtail - (_h)) < (_min)
1366
1367 /* This function may be called by the network stack, or by
1368 * the taskqueue thread. */
1369 static int
1370 ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
1371 bool may_resched)
1372 {
1373 struct ptnet_softc *sc = pq->sc;
1374 bool have_vnet_hdr = sc->vnet_hdr_len;
1375 struct netmap_adapter *na = &sc->ptna->dr.up;
1376 if_t ifp = sc->ifp;
1377 unsigned int batch_count = 0;
1378 struct nm_csb_atok *atok;
1379 struct nm_csb_ktoa *ktoa;
1380 struct netmap_kring *kring;
1381 struct netmap_ring *ring;
1382 struct netmap_slot *slot;
1383 unsigned int count = 0;
1384 unsigned int minspace;
1385 unsigned int head;
1386 unsigned int lim;
1387 struct mbuf *mhead;
1388 struct mbuf *mf;
1389 int nmbuf_bytes;
1390 uint8_t *nmbuf;
1391
1392 if (!PTNET_Q_TRYLOCK(pq)) {
1393 /* We failed to acquire the lock, schedule the taskqueue. */
1394 nm_prlim(1, "Deferring TX work");
1395 if (may_resched) {
1396 taskqueue_enqueue(pq->taskq, &pq->task);
1397 }
1398
1399 return 0;
1400 }
1401
1402 if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
1403 PTNET_Q_UNLOCK(pq);
1404 nm_prlim(1, "Interface is down");
1405 return ENETDOWN;
1406 }
1407
1408 atok = pq->atok;
1409 ktoa = pq->ktoa;
1410 kring = na->tx_rings[pq->kring_id];
1411 ring = kring->ring;
1412 lim = kring->nkr_num_slots - 1;
1413 head = ring->head;
1414 minspace = sc->min_tx_space;
1415
1416 while (count < budget) {
1417 if (PTNET_TX_NOSPACE(head, kring, minspace)) {
1418 /* We ran out of slots; let's see if the host has
1419 * freed up some, by reading hwcur and hwtail from
1420 * the CSB. */
1421 ptnet_sync_tail(ktoa, kring);
1422
1423 if (PTNET_TX_NOSPACE(head, kring, minspace)) {
1424 /* Still no slots available. Reactivate the
1425 * interrupts so that we can be notified
1426 * when some free slots are made available by
1427 * the host. */
1428 atok->appl_need_kick = 1;
1429
1430 /* Double check. We need a full barrier to
1431 * prevent the store to atok->appl_need_kick
1432 * to be reordered with the load from
1433 * ktoa->hwcur and ktoa->hwtail (store-load
1434 * barrier). */
1435 nm_stld_barrier();
1436 ptnet_sync_tail(ktoa, kring);
1437 if (likely(PTNET_TX_NOSPACE(head, kring,
1438 minspace))) {
1439 break;
1440 }
1441
1442 nm_prlim(1, "Found more slots by doublecheck");
1443 /* More slots were freed before reactivating
1444 * the interrupts. */
1445 atok->appl_need_kick = 0;
1446 }
1447 }
1448
1449 mhead = drbr_peek(ifp, pq->bufring);
1450 if (!mhead) {
1451 break;
1452 }
1453
1454 /* Initialize transmission state variables. */
1455 slot = ring->slot + head;
1456 nmbuf = NMB(na, slot);
1457 nmbuf_bytes = 0;
1458
1459 /* If needed, prepare the virtio-net header at the beginning
1460 * of the first slot. */
1461 if (have_vnet_hdr) {
1462 struct virtio_net_hdr *vh =
1463 (struct virtio_net_hdr *)nmbuf;
1464
1465 /* For performance, we could replace this memset() with
1466 * two 8-bytes-wide writes. */
1467 memset(nmbuf, 0, PTNET_HDR_SIZE);
1468 if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
1469 mhead = virtio_net_tx_offload(ifp, mhead, false,
1470 vh);
1471 if (unlikely(!mhead)) {
1472 /* Packet dropped because errors
1473 * occurred while preparing the vnet
1474 * header. Let's go ahead with the next
1475 * packet. */
1476 pq->stats.errors ++;
1477 drbr_advance(ifp, pq->bufring);
1478 continue;
1479 }
1480 }
1481 nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
1482 "csum_start %u csum_ofs %u hdr_len = %u "
1483 "gso_size %u gso_type %x", __func__,
1484 mhead->m_pkthdr.csum_flags, vh->flags,
1485 vh->csum_start, vh->csum_offset, vh->hdr_len,
1486 vh->gso_size, vh->gso_type);
1487
1488 nmbuf += PTNET_HDR_SIZE;
1489 nmbuf_bytes += PTNET_HDR_SIZE;
1490 }
1491
1492 for (mf = mhead; mf; mf = mf->m_next) {
1493 uint8_t *mdata = mf->m_data;
1494 int mlen = mf->m_len;
1495
1496 for (;;) {
1497 int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;
1498
1499 if (mlen < copy) {
1500 copy = mlen;
1501 }
1502 memcpy(nmbuf, mdata, copy);
1503
1504 mdata += copy;
1505 mlen -= copy;
1506 nmbuf += copy;
1507 nmbuf_bytes += copy;
1508
1509 if (!mlen) {
1510 break;
1511 }
1512
1513 slot->len = nmbuf_bytes;
1514 slot->flags = NS_MOREFRAG;
1515
1516 head = nm_next(head, lim);
1517 KASSERT(head != ring->tail,
1518 ("Unexpectedly run out of TX space"));
1519 slot = ring->slot + head;
1520 nmbuf = NMB(na, slot);
1521 nmbuf_bytes = 0;
1522 }
1523 }
1524
1525 /* Complete last slot and update head. */
1526 slot->len = nmbuf_bytes;
1527 slot->flags = 0;
1528 head = nm_next(head, lim);
1529
1530 /* Consume the packet just processed. */
1531 drbr_advance(ifp, pq->bufring);
1532
1533 /* Copy the packet to listeners. */
1534 ETHER_BPF_MTAP(ifp, mhead);
1535
1536 pq->stats.packets ++;
1537 pq->stats.bytes += mhead->m_pkthdr.len;
1538 if (mhead->m_flags & M_MCAST) {
1539 pq->stats.mcasts ++;
1540 }
1541
1542 m_freem(mhead);
1543
1544 count ++;
1545 if (++batch_count == PTNET_TX_BATCH) {
1546 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
1547 batch_count = 0;
1548 }
1549 }
1550
1551 if (batch_count) {
1552 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
1553 }
1554
1555 if (count >= budget && may_resched) {
1556 DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n",
1557 drbr_inuse(ifp, pq->bufring)));
1558 taskqueue_enqueue(pq->taskq, &pq->task);
1559 }
1560
1561 PTNET_Q_UNLOCK(pq);
1562
1563 return count;
1564 }
1565
1566 static int
1567 ptnet_transmit(if_t ifp, struct mbuf *m)
1568 {
1569 struct ptnet_softc *sc = if_getsoftc(ifp);
1570 struct ptnet_queue *pq;
1571 unsigned int queue_idx;
1572 int err;
1573
1574 DBG(device_printf(sc->dev, "transmit %p\n", m));
1575
1576 /* Insert 802.1Q header if needed. */
1577 if (m->m_flags & M_VLANTAG) {
1578 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
1579 if (m == NULL) {
1580 return ENOBUFS;
1581 }
1582 m->m_flags &= ~M_VLANTAG;
1583 }
1584
1585 /* Get the flow-id if available. */
1586 queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
1587 m->m_pkthdr.flowid : curcpu;
1588
1589 if (unlikely(queue_idx >= sc->num_tx_rings)) {
1590 queue_idx %= sc->num_tx_rings;
1591 }
1592
1593 pq = sc->queues + queue_idx;
1594
1595 err = drbr_enqueue(ifp, pq->bufring, m);
1596 if (err) {
1597 /* ENOBUFS when the bufring is full */
1598 nm_prlim(1, "%s: drbr_enqueue() failed %d\n",
1599 __func__, err);
1600 pq->stats.errors ++;
1601 return err;
1602 }
1603
1604 if (if_getcapenable(ifp) & IFCAP_POLLING) {
1605 /* If polling is on, the transmit queues will be
1606 * drained by the poller. */
1607 return 0;
1608 }
1609
1610 err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
1611
1612 return (err < 0) ? err : 0;
1613 }
1614
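/* Drop the packet starting at 'head': skip its slots until one without
 * NS_MOREFRAG (or the ring tail) is reached, and return the index of the
 * first slot past the discarded packet. */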
1615 static unsigned int
1616 ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
1617 {
1618 struct netmap_ring *ring = kring->ring;
1619 struct netmap_slot *slot = ring->slot + head;
1620
1621 for (;;) {
1622 head = nm_next(head, kring->nkr_num_slots - 1);
1623 if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
1624 break;
1625 }
1626 slot = ring->slot + head;
1627 }
1628
1629 return head;
1630 }
1631
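/* Copy nmbuf_len bytes from the netmap buffer 'nmbuf' into the mbuf chain
 * ending at 'mtail', allocating additional clusters as needed. Return the
 * new chain tail, or NULL if a cluster allocation fails (in which case the
 * caller frees the whole chain). */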
1632 static inline struct mbuf *
1633 ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
1634 {
1635 uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;
1636
1637 do {
1638 unsigned int copy;
1639
1640 if (mtail->m_len == MCLBYTES) {
1641 struct mbuf *mf;
1642
1643 mf = m_getcl(M_NOWAIT, MT_DATA, 0);
1644 if (unlikely(!mf)) {
1645 return NULL;
1646 }
1647
1648 mtail->m_next = mf;
1649 mtail = mf;
1650 mdata = mtod(mtail, uint8_t *);
1651 mtail->m_len = 0;
1652 }
1653
1654 copy = MCLBYTES - mtail->m_len;
1655 if (nmbuf_len < copy) {
1656 copy = nmbuf_len;
1657 }
1658
1659 memcpy(mdata, nmbuf, copy);
1660
1661 nmbuf += copy;
1662 nmbuf_len -= copy;
1663 mdata += copy;
1664 mtail->m_len += copy;
1665 } while (nmbuf_len);
1666
1667 return mtail;
1668 }
1669
1670 static int
1671 ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
1672 {
1673 struct ptnet_softc *sc = pq->sc;
1674 bool have_vnet_hdr = sc->vnet_hdr_len;
1675 struct nm_csb_atok *atok = pq->atok;
1676 struct nm_csb_ktoa *ktoa = pq->ktoa;
1677 struct netmap_adapter *na = &sc->ptna->dr.up;
1678 struct netmap_kring *kring = na->rx_rings[pq->kring_id];
1679 struct netmap_ring *ring = kring->ring;
1680 unsigned int const lim = kring->nkr_num_slots - 1;
1681 unsigned int batch_count = 0;
1682 if_t ifp = sc->ifp;
1683 unsigned int count = 0;
1684 uint32_t head;
1685
1686 PTNET_Q_LOCK(pq);
1687
1688 if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
1689 goto unlock;
1690 }
1691
1692 kring->nr_kflags &= ~NKR_PENDINTR;
1693
1694 head = ring->head;
1695 while (count < budget) {
1696 uint32_t prev_head = head;
1697 struct mbuf *mhead, *mtail;
1698 struct virtio_net_hdr *vh;
1699 struct netmap_slot *slot;
1700 unsigned int nmbuf_len;
1701 uint8_t *nmbuf;
1702 int deliver = 1; /* the mbuf to the network stack. */
1703 host_sync:
1704 if (head == ring->tail) {
1705 /* We ran out of slots; let's see if the host has
1706 * added some, by reading hwcur and hwtail from
1707 * the CSB. */
1708 ptnet_sync_tail(ktoa, kring);
1709
1710 if (head == ring->tail) {
1711 /* Still no slots available. Reactivate
1712 * interrupts as they were disabled by the
1713 * host thread right before issuing the
1714 * last interrupt. */
1715 atok->appl_need_kick = 1;
1716
1717 /* Double check for more completed RX slots.
1718 * We need a full barrier to prevent the store
1719 * to atok->appl_need_kick to be reordered with
1720 * the load from ktoa->hwcur and ktoa->hwtail
1721 * (store-load barrier). */
1722 nm_stld_barrier();
1723 ptnet_sync_tail(ktoa, kring);
1724 if (likely(head == ring->tail)) {
1725 break;
1726 }
1727 atok->appl_need_kick = 0;
1728 }
1729 }
1730
1731 /* Initialize ring state variables, possibly grabbing the
1732 * virtio-net header. */
1733 slot = ring->slot + head;
1734 nmbuf = NMB(na, slot);
1735 nmbuf_len = slot->len;
1736
1737 vh = (struct virtio_net_hdr *)nmbuf;
1738 if (have_vnet_hdr) {
1739 if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
1740 /* There is no good reason why the host should
1741 * put the header in multiple netmap slots.
1742 * If this is the case, discard. */
1743 nm_prlim(1, "Fragmented vnet-hdr: dropping");
1744 head = ptnet_rx_discard(kring, head);
1745 pq->stats.iqdrops ++;
1746 deliver = 0;
1747 goto skip;
1748 }
1749 nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
1750 "csum_ofs %u hdr_len = %u gso_size %u "
1751 "gso_type %x", __func__, vh->flags,
1752 vh->csum_start, vh->csum_offset, vh->hdr_len,
1753 vh->gso_size, vh->gso_type);
1754 nmbuf += PTNET_HDR_SIZE;
1755 nmbuf_len -= PTNET_HDR_SIZE;
1756 }
1757
1758 /* Allocate the head of a new mbuf chain.
1759 * We use m_getcl() to allocate an mbuf with standard cluster
1760 * size (MCLBYTES). In the future we could use m_getjcl()
1761 * to choose different sizes. */
1762 mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1763 if (unlikely(mhead == NULL)) {
1764 device_printf(sc->dev, "%s: failed to allocate mbuf "
1765 "head\n", __func__);
1766 pq->stats.errors ++;
1767 break;
1768 }
1769
1770 /* Initialize the mbuf state variables. */
1771 mhead->m_pkthdr.len = nmbuf_len;
1772 mtail->m_len = 0;
1773
1774 /* Scan all the netmap slots containing the current packet. */
1775 for (;;) {
1776 DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
1777 "len %u, flags %u\n", __func__,
1778 head, ring->tail, slot->len,
1779 slot->flags));
1780
1781 mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
1782 if (unlikely(!mtail)) {
1783 /* Ouch. We ran out of memory while processing
1784 * a packet. We have to restore the previous
1785 * head position, free the mbuf chain, and
1786 * schedule the taskqueue to give the packet
1787 * another chance. */
1788 device_printf(sc->dev, "%s: failed to allocate"
1789 " mbuf frag, reset head %u --> %u\n",
1790 __func__, head, prev_head);
1791 head = prev_head;
1792 m_freem(mhead);
1793 pq->stats.errors ++;
1794 if (may_resched) {
1795 taskqueue_enqueue(pq->taskq,
1796 &pq->task);
1797 }
1798 goto escape;
1799 }
1800
1801 /* We have to increment head irrespective of the
1802 * NS_MOREFRAG being set or not. */
1803 head = nm_next(head, lim);
1804
1805 if (!(slot->flags & NS_MOREFRAG)) {
1806 break;
1807 }
1808
1809 if (unlikely(head == ring->tail)) {
1810 /* The very last slot prepared by the host has
1811 * the NS_MOREFRAG set. Drop it and continue
1812 * the outer cycle (to do the double-check). */
1813 nm_prlim(1, "Incomplete packet: dropping");
1814 m_freem(mhead);
1815 pq->stats.iqdrops ++;
1816 goto host_sync;
1817 }
1818
1819 slot = ring->slot + head;
1820 nmbuf = NMB(na, slot);
1821 nmbuf_len = slot->len;
1822 mhead->m_pkthdr.len += nmbuf_len;
1823 }
1824
1825 mhead->m_pkthdr.rcvif = ifp;
1826 mhead->m_pkthdr.csum_flags = 0;
1827
1828 /* Store the queue idx in the packet header. */
1829 mhead->m_pkthdr.flowid = pq->kring_id;
1830 M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);
1831
1832 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
1833 struct ether_header *eh;
1834
1835 eh = mtod(mhead, struct ether_header *);
1836 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1837 ptnet_vlan_tag_remove(mhead);
1838 /*
1839 * With the 802.1Q header removed, update the
1840 * checksum starting location accordingly.
1841 */
1842 if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1843 vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
1844 }
1845 }
1846
1847 if (unlikely(have_vnet_hdr && virtio_net_rx_csum(mhead, vh))) {
1848 m_freem(mhead);
1849 nm_prlim(1, "Csum offload error: dropping");
1850 pq->stats.iqdrops ++;
1851 deliver = 0;
1852 }
1853
1854 skip:
1855 count ++;
1856 if (++batch_count >= PTNET_RX_BATCH) {
1857 /* Some packets have been (or will be) pushed to the network
1858 * stack. We need to update the CSB to tell the host about
1859 * the new ring->cur and ring->head (RX buffer refill). */
1860 ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
1861 batch_count = 0;
1862 }
1863
1864 if (likely(deliver)) {
1865 pq->stats.packets ++;
1866 pq->stats.bytes += mhead->m_pkthdr.len;
1867
1868 PTNET_Q_UNLOCK(pq);
1869 if_input(ifp, mhead);
1870 PTNET_Q_LOCK(pq);
1871 /* The ring->head index (and related indices) are
1872 * updated under pq lock by ptnet_ring_update().
1873 * Since we dropped the lock to call if_input(), we
1874 * must reload ring->head and restart processing the
1875 * ring from there. */
1876 head = ring->head;
1877
1878 if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
1879 /* The interface has gone down while we didn't
1880 * have the lock. Stop any processing and exit. */
1881 goto unlock;
1882 }
1883 }
1884 }
1885 escape:
1886 if (batch_count) {
1887 ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
1888
1889 }
1890
1891 if (count >= budget && may_resched) {
1892 /* If we ran out of budget or the double-check found new
1893 * slots to process, schedule the taskqueue. */
1894 DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
1895 head, ring->tail));
1896 taskqueue_enqueue(pq->taskq, &pq->task);
1897 }
1898 unlock:
1899 PTNET_Q_UNLOCK(pq);
1900
1901 return count;
1902 }
1903
1904 static void
1905 ptnet_rx_task(void *context, int pending)
1906 {
1907 struct ptnet_queue *pq = context;
1908
1909 DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
1910 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
1911 }
1912
1913 static void
1914 ptnet_tx_task(void *context, int pending)
1915 {
1916 struct ptnet_queue *pq = context;
1917
1918 DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
1919 ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
1920 }
1921
1922 #ifdef DEVICE_POLLING
1923 /* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY
1924 * differently, since we don't have an Interrupt Status Register. */
1925 static int
1926 ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
1927 {
1928 struct ptnet_softc *sc = if_getsoftc(ifp);
1929 unsigned int queue_budget;
1930 unsigned int count = 0;
1931 bool borrow = false;
1932 int i;
1933
1934 KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
1935 queue_budget = MAX(budget / sc->num_rings, 1);
1936 nm_prlim(1, "Per-queue budget is %d", queue_budget);
1937
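/* First scan: each queue gets an equal share of the budget. Subsequent
 * scans ("borrow" mode): queues may consume whatever budget is left over,
 * until the budget is exhausted or a full scan makes no progress. */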
1938 while (budget) {
1939 unsigned int rcnt = 0;
1940
1941 for (i = 0; i < sc->num_rings; i++) {
1942 struct ptnet_queue *pq = sc->queues + i;
1943
1944 if (borrow) {
1945 queue_budget = MIN(queue_budget, budget);
1946 if (queue_budget == 0) {
1947 break;
1948 }
1949 }
1950
1951 if (i < sc->num_tx_rings) {
1952 rcnt += ptnet_drain_transmit_queue(pq,
1953 queue_budget, false);
1954 } else {
1955 rcnt += ptnet_rx_eof(pq, queue_budget,
1956 false);
1957 }
1958 }
1959
1960 if (!rcnt) {
1961 /* A scan of the queues gave no result, we can
1962 * stop here. */
1963 break;
1964 }
1965
1966 if (rcnt > budget) {
1967 /* This may happen when initial budget < sc->num_rings,
1968 * since one packet budget is given to each queue
1969 * anyway. Just pretend we didn't eat "so much". */
1970 rcnt = budget;
1971 }
1972 count += rcnt;
1973 budget -= rcnt;
1974 borrow = true;
1975 }
1976
1977
1978 return count;
1979 }
1980 #endif /* DEVICE_POLLING */
1981 #endif /* WITH_PTNETMAP */
1982