/*
 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

/*
 * Return 1 if the queue identified by 't' and 'idx' is in netmap mode.
 */
static int
vtnet_netmap_queue_on(struct vtnet_softc *sc, enum txrx t, int idx)
{
	struct netmap_adapter *na = NA(sc->vtnet_ifp);

	if (!nm_native_on(na))
		return 0;

	if (t == NR_RX)
		return !!(idx < na->num_rx_rings &&
			na->rx_rings[idx]->nr_mode == NKR_NETMAP_ON);

	return !!(idx < na->num_tx_rings &&
		na->tx_rings[idx]->nr_mode == NKR_NETMAP_ON);
}

static void
vtnet_free_used(struct virtqueue *vq, int netmap_bufs, enum txrx t, int idx)
{
	void *cookie;
	int deq = 0;

	while ((cookie = virtqueue_dequeue(vq, NULL)) != NULL) {
		if (netmap_bufs) {
			/* These are netmap buffers: there is nothing to do. */
		} else {
			/* These are mbufs that we need to free. */
			struct mbuf *m;

			if (t == NR_TX) {
				struct vtnet_tx_header *txhdr = cookie;
				m = txhdr->vth_mbuf;
				m_freem(m);
				uma_zfree(vtnet_tx_header_zone, txhdr);
			} else {
				m = cookie;
				m_freem(m);
			}
		}
		deq++;
	}

	if (deq)
		nm_prinf("%d sgs dequeued from %s-%d (netmap=%d)",
		    deq, nm_txrx2str(t), idx, netmap_bufs);
}

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int state)
{
	struct ifnet *ifp = na->ifp;
	struct vtnet_softc *sc = ifp->if_softc;
	int success;
	int i;

	/* Drain the taskqueues to make sure that there are no worker threads
	 * accessing the virtqueues. */
	vtnet_drain_taskqueues(sc);

	VTNET_CORE_LOCK(sc);

	/* We need nm_netmap_on() to return true when called by
	 * vtnet_init_locked() below. */
	if (state)
		nm_set_native_flags(na);

	/* We need to trigger a device reset in order to unexpose guest buffers
	 * published to the host. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	/* Collect the pending used buffers. The way they are freed depends on
	 * whether they are netmap buffers or mbufs. We can tell the two cases
	 * apart by looking at kring->nr_mode, before it is possibly updated
	 * in the loop below. */
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct netmap_kring *kring;

		VTNET_TXQ_LOCK(txq);
		kring = NMR(na, NR_TX)[i];
		vtnet_free_used(txq->vtntx_vq,
		    kring->nr_mode == NKR_NETMAP_ON, NR_TX, i);
		VTNET_TXQ_UNLOCK(txq);

		VTNET_RXQ_LOCK(rxq);
		kring = NMR(na, NR_RX)[i];
		vtnet_free_used(rxq->vtnrx_vq,
		    kring->nr_mode == NKR_NETMAP_ON, NR_RX, i);
		VTNET_RXQ_UNLOCK(rxq);
	}
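	/* Re-initialize the device. With the native flags set above, the RX
	 * virtqueues get populated with netmap buffers (see
	 * vtnet_netmap_rxq_populate()) rather than with mbufs. */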
	vtnet_init_locked(sc);
	success = (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 0 : ENXIO;

	if (state) {
		netmap_krings_mode_commit(na, state);
	} else {
		nm_clear_native_flags(na);
		netmap_krings_mode_commit(na, state);
	}

	VTNET_CORE_UNLOCK(sc);

	return success;
}


/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
	u_int n;

	/*
	 * First part: process new packets to send.
	 */
	rmb();
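	/*
	 * The read barrier above orders the read of kring->rhead (done at
	 * function entry) before the reads of the netmap slots below, so
	 * the slot contents published by userspace are visible to this
	 * thread.
	 */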

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
			/* we use an empty header here */
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int err;

			NM_CHECK_ADDR_LEN(na, addr, len);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist, expose it to the
			 * hypervisor, and kick the hypervisor (if necessary).
			 */
			sglist_reset(sg);	/* cheap */
			err = sglist_append(sg, &txq->vtntx_shrhdr, sc->vtnet_hdr_size);
			err |= sglist_append_phys(sg, paddr, len);
			KASSERT(err == 0, ("%s: cannot append to sglist %d",
			    __func__, err));
			err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
			    /*readable=*/sg->sg_nseg,
			    /*writeable=*/0);
			if (unlikely(err)) {
				if (err != ENOSPC)
					nm_prerr("virtqueue_enqueue(%s) failed: %d",
					    kring->name, err);
				break;
			}
		}

		virtqueue_notify(vq);

		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i;	/* note we might break early */
	}

	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token we passed to virtqueue_enqueue.
	 */
	n = 0;
	for (;;) {
		void *token = virtqueue_dequeue(vq, NULL);
		if (token == NULL)
			break;
		if (unlikely(token != (void *)txq))
			nm_prerr("BUG: TX token mismatch");
		else
			n++;
	}
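	/* Advance hwtail by the number of reclaimed slots, wrapping modulo
	 * the ring size (lim + 1). */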
	if (n > 0) {
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;
	}

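	/* If the virtqueue is running low on free descriptors, ask the host
	 * for a deferred TX completion interrupt, so that used buffers are
	 * eventually reclaimed even if txsync is not called again soon. */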
	if (interrupts && virtqueue_nfree(vq) < 32)
		virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);

	return 0;
}

static int
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int nm_i, u_int head)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* Use a local sglist; the default one might be too short. Two
	 * segments suffice: one for the virtio-net header and one for the
	 * netmap buffer. */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };

	for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err;

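		/* PNMB() maps an invalid buffer index to the base netmap
		 * buffer address, which thus acts as a bad-slot sentinel. */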
		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			if (netmap_ring_reinit(kring))
				return -1;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg);
		err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size);
		err |= sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
		KASSERT(err == 0, ("%s: cannot append to sglist %d",
		    __func__, err));
		/* writable for the host */
		err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
		    /*readable=*/0, /*writeable=*/sg.sg_nseg);
		if (unlikely(err)) {
			if (err != ENOSPC)
				nm_prerr("virtqueue_enqueue(%s) failed: %d",
				    kring->name, err);
			break;
		}
	}

	return nm_i;
}

/*
 * Publish netmap buffers on a RX virtqueue.
 * Returns -1 if this virtqueue is not being opened in netmap mode.
 * If the virtqueue is being opened in netmap mode, return 0 on success and
 * a positive error code on failure.
 */
static int
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
{
	struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
	struct netmap_kring *kring;
	int error;

	if (!nm_native_on(na) || rxq->vtnrx_id >= na->num_rx_rings)
		return -1;

	kring = na->rx_rings[rxq->vtnrx_id];
	if (!(nm_kring_pending_on(kring) ||
	    kring->nr_pending_mode == NKR_NETMAP_ON))
		return -1;

	/* Expose all the RX netmap buffers. Note that the number of netmap
	 * slots in the RX ring matches the maximum number of 2-element
	 * sglists that the RX virtqueue can accommodate. */
	error = vtnet_netmap_kring_refill(kring, 0, na->num_rx_desc);
	virtqueue_notify(rxq->vtnrx_vq);

	return error < 0 ? ENXIO : 0;
}

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
	    (kring->nr_kflags & NKR_PENDINTR);
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	rmb();
	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token); we should not
	 * see any others. We may need to stop early to prevent hwtail from
	 * overrunning hwcur.
	 */
	if (netmap_no_pendintr || force_update) {
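		/* hwtail may advance up to, but not including,
		 * nm_prev(hwcur): hwtail == hwcur would be indistinguishable
		 * from an empty ring. */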
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		void *token;

		vtnet_rxq_disable_intr(rxq);

		nm_i = kring->nr_hwtail;
		while (nm_i != hwtail_lim) {
			int len;
			token = virtqueue_dequeue(vq, &len);
			if (token == NULL) {
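				/* The virtqueue is empty: re-enable
				 * interrupts and dequeue again, to close the
				 * race with concurrently arriving packets. */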
				if (interrupts && vtnet_rxq_enable_intr(rxq)) {
					vtnet_rxq_disable_intr(rxq);
					continue;
				}
				break;
			}
			if (unlikely(token != (void *)rxq)) {
				nm_prerr("BUG: RX token mismatch");
			} else {
				/* Skip the virtio-net header. */
				len -= sc->vtnet_hdr_size;
				if (unlikely(len < 0)) {
					nm_prlim(1, "Truncated virtio-net-header, "
					    "missing %d bytes", -len);
					len = 0;
				}
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = 0;
				nm_i = nm_next(nm_i, lim);
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	nm_prdis("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur,
	    kring->nr_hwcur, kring->nr_hwtail);

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int nm_j = vtnet_netmap_kring_refill(kring, nm_i, head);
		if (nm_j < 0)
			return nm_j;
		kring->nr_hwcur = nm_j;
		virtqueue_notify(vq);
	}

	nm_prdis("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur,
	    ring->tail, kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}


/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int state)
{
	struct vtnet_softc *sc = na->ifp->if_softc;
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct virtqueue *txvq = txq->vtntx_vq;

		if (state) {
			vtnet_rxq_enable_intr(rxq);
			virtqueue_enable_intr(txvq);
		} else {
			vtnet_rxq_disable_intr(rxq);
			virtqueue_disable_intr(txvq);
		}
	}
}

static int
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * transmitted, therefore each virtqueue_enqueue() call passes a
	 * sglist with 2 elements.
	 * TX virtqueues use indirect descriptors if the feature was
	 * negotiated with the host, and if sc->vtnet_tx_nsegs > 1. With
	 * indirect descriptors, a single virtio descriptor is sufficient to
	 * reference each TX sglist. Without them, we need two separate
	 * virtio descriptors for each TX sglist. We therefore compute the
	 * number of netmap TX slots according to these assumptions.
	 */
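	/* Example (hypothetical size): a 256-descriptor TX virtqueue yields
	 * 256 netmap TX slots with indirect descriptors (div = 1) and 128
	 * without (div = 2). */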
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
}

static int
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * received, therefore each virtqueue_enqueue() call passes a sglist
	 * with 2 elements.
	 * RX virtqueues use indirect descriptors if the feature was
	 * negotiated with the host, and if sc->vtnet_rx_nsegs > 1. With
	 * indirect descriptors, a single virtio descriptor is sufficient to
	 * reference each RX sglist. Without them, we need two separate
	 * virtio descriptors for each RX sglist. We therefore compute the
	 * number of netmap RX slots according to these assumptions.
	 */
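	/* Same computation as for TX: the virtqueue size divided by the
	 * number of virtio descriptors consumed by each 2-element sglist. */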
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
}

static int
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct vtnet_softc *sc = na->ifp->if_softc;

	info->num_tx_rings = sc->vtnet_act_vq_pairs;
	info->num_rx_rings = sc->vtnet_act_vq_pairs;
	info->num_tx_descs = vtnet_netmap_tx_slots(sc);
	info->num_rx_descs = vtnet_netmap_rx_slots(sc);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	return 0;
}

static void
vtnet_netmap_attach(struct vtnet_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->vtnet_ifp;
	na.na_flags = 0;
	na.num_tx_desc = vtnet_netmap_tx_slots(sc);
	na.num_rx_desc = vtnet_netmap_rx_slots(sc);
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	na.rx_buf_maxsize = 0;
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_intr = vtnet_netmap_intr;
	na.nm_config = vtnet_netmap_config;

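	/* netmap_attach() makes its own copy of the adapter descriptor, so
	 * the stack-allocated 'na' does not need to outlive this function. */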
	netmap_attach(&na);

	nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d",
	    na.num_tx_rings, na.num_tx_desc,
	    na.num_rx_rings, na.num_rx_desc);
}
/* end of file */