/*
 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

/*
 * Return 1 if the queue identified by 't' and 'idx' is in netmap mode.
 */
static int
vtnet_netmap_queue_on(struct vtnet_softc *sc, enum txrx t, int idx)
{
	struct netmap_adapter *na = NA(sc->vtnet_ifp);

	if (!nm_native_on(na))
		return 0;

	if (t == NR_RX)
		return !!(idx < na->num_rx_rings &&
			na->rx_rings[idx]->nr_mode == NKR_NETMAP_ON);

	return !!(idx < na->num_tx_rings &&
		na->tx_rings[idx]->nr_mode == NKR_NETMAP_ON);
}
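
/*
 * Usage sketch (hypothetical caller, for illustration only): the driver
 * interrupt path can use this helper to decide whether an event on a
 * given queue belongs to netmap rather than to the regular mbuf path:
 *
 *	if (vtnet_netmap_queue_on(sc, NR_RX, rxq->vtnrx_id)) {
 *		// hand the event to netmap (e.g. via netmap_rx_irq())
 *	}
 */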

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int state)
{
	struct ifnet *ifp = na->ifp;
	struct vtnet_softc *sc = ifp->if_softc;
	int success;
	int i;

	/* Drain the taskqueues to make sure that there are no worker threads
	 * accessing the virtqueues. */
	vtnet_drain_taskqueues(sc);

	VTNET_CORE_LOCK(sc);

	/* We need nm_netmap_on() to return true when called by
	 * vtnet_init_locked() below. */
	if (state)
		nm_set_native_flags(na);

	/* We need to trigger a device reset in order to unexpose guest buffers
	 * published to the host. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	/* Get pending used buffers. The way they are freed depends on whether
	 * they are netmap buffers or mbufs. We can tell the two cases apart
	 * by looking at kring->nr_mode, before it is possibly updated in the
	 * loop below. */
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];

		VTNET_TXQ_LOCK(txq);
		vtnet_txq_free_mbufs(txq);
		VTNET_TXQ_UNLOCK(txq);

		VTNET_RXQ_LOCK(rxq);
		vtnet_rxq_free_mbufs(rxq);
		VTNET_RXQ_UNLOCK(rxq);
	}
	vtnet_init_locked(sc);
	success = (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 0 : ENXIO;

	if (state) {
		netmap_krings_mode_commit(na, state);
	} else {
		nm_clear_native_flags(na);
		netmap_krings_mode_commit(na, state);
	}

	VTNET_CORE_UNLOCK(sc);

	return success;
}
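
/*
 * Note (sketch of the surrounding contract, as implemented by the netmap
 * core): nm_register callbacks such as vtnet_netmap_reg() are invoked
 * with state != 0 when an application opens the port in native netmap
 * mode, and with state == 0 on the last close; a non-zero return value
 * makes the register/unregister request fail.
 */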


/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
	u_int n;

	/*
	 * First part: process new packets to send.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
			/* we use an empty header here */
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int err;

			NM_CHECK_ADDR_LEN(na, addr, len);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist, expose it to the
			 * hypervisor, and kick the hypervisor (if necessary).
			 */
			sglist_reset(sg);	/* cheap */
			err = sglist_append(sg, &txq->vtntx_shrhdr, sc->vtnet_hdr_size);
			err |= sglist_append_phys(sg, paddr, len);
			KASSERT(err == 0, ("%s: cannot append to sglist %d",
						__func__, err));
			err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
						/*readable=*/sg->sg_nseg,
						/*writeable=*/0);
			if (unlikely(err)) {
				if (err != ENOSPC)
					nm_prerr("virtqueue_enqueue(%s) failed: %d",
							kring->name, err);
				break;
			}
		}

		virtqueue_notify(vq);

		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i; /* note we might break early */
	}

	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token we passed to virtqueue_enqueue.
	 */
	n = 0;
	for (;;) {
		void *token = virtqueue_dequeue(vq, NULL);
		if (token == NULL)
			break;
		if (unlikely(token != (void *)txq))
			nm_prerr("BUG: TX token mismatch");
		else
			n++;
	}
	if (n > 0) {
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;
	}
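
	/*
	 * Worked example of the wraparound above (numbers are only
	 * illustrative): with a 512-slot ring (lim = 511), nr_hwtail = 509
	 * and n = 5 give 514, which exceeds lim, so we subtract
	 * lim + 1 = 512 and the new nr_hwtail is 2.
	 */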

	if (interrupts && virtqueue_nfree(vq) < 32)
		virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);

	return 0;
}

/*
 * Publish (up to) num netmap receive buffers to the host,
 * starting from the first one that the user made available
 * (kring->nr_hwcur). Return the index of the slot right after the last
 * published one, or -1 if a bad netmap buffer was found and the ring
 * could not be reinitialized.
 */
static int
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int num)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i = kring->nr_hwcur;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, default might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };
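	/*
	 * The positional initializer above is equivalent to the following
	 * sketch with designated initializers (assuming the field layout of
	 * struct sglist in <sys/sglist.h>):
	 *
	 *	struct sglist sg = {
	 *		.sg_segs = ss,	// segment array storage
	 *		.sg_refs = 0,	// no refcounting for a stack object
	 *		.sg_nseg = 0,	// currently empty
	 *		.sg_maxseg = 2,	// room for header + payload
	 *	};
	 */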

	for (; num > 0; nm_i = nm_next(nm_i, lim), num--) {
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err;

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			if (netmap_ring_reinit(kring))
				return -1;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg);
		err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size);
		err |= sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
		KASSERT(err == 0, ("%s: cannot append to sglist %d",
					__func__, err));
		/* writable for the host */
		err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
				/*readable=*/0, /*writeable=*/sg.sg_nseg);
		if (unlikely(err)) {
			if (err != ENOSPC)
				nm_prerr("virtqueue_enqueue(%s) failed: %d",
					kring->name, err);
			break;
		}
	}

	return nm_i;
}

/*
 * Publish netmap buffers on a RX virtqueue.
 * Returns -1 if this virtqueue is not being opened in netmap mode.
 * If the virtqueue is being opened in netmap mode, return 0 on success and
 * a positive error code on failure.
 */
static int
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
{
	struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
	struct netmap_kring *kring;
	int error;

	if (!nm_native_on(na) || rxq->vtnrx_id >= na->num_rx_rings)
		return -1;

	kring = na->rx_rings[rxq->vtnrx_id];
	if (!(nm_kring_pending_on(kring) ||
			kring->nr_pending_mode == NKR_NETMAP_ON))
		return -1;
	/* Expose all the RX netmap buffers we can. If indirect descriptors
	 * are not used, the number of netmap slots in the RX ring matches
	 * the maximum number of 2-element sglists that the RX virtqueue can
	 * accommodate (minus 1 to avoid netmap ring wraparound). */
	error = vtnet_netmap_kring_refill(kring, na->num_rx_desc - 1);
	virtqueue_notify(rxq->vtnrx_vq);

	return error < 0 ? ENXIO : 0;
}
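
/*
 * Sketch of the intended caller contract (the caller lives in
 * if_vtnet.c; the fallback name below is hypothetical):
 *
 *	error = vtnet_netmap_rxq_populate(rxq);
 *	if (error < 0)
 *		error = vtnet_rxq_populate_mbufs(rxq);	// non-netmap path
 *	return (error);
 *
 * i.e. -1 means "this queue is not in netmap mode, use the regular mbuf
 * path", while 0/ENXIO report the outcome of the netmap refill.
 */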

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
				(kring->nr_kflags & NKR_PENDINTR);
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;
	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token); we should only
	 * ever see matching buffers. We may need to stop early to prevent
	 * hwtail from overrunning hwcur.
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		void *token;

		vtnet_rxq_disable_intr(rxq);

		nm_i = kring->nr_hwtail;
		while (nm_i != hwtail_lim) {
			int len;
			token = virtqueue_dequeue(vq, &len);
			if (token == NULL) {
				if (interrupts && vtnet_rxq_enable_intr(rxq)) {
					vtnet_rxq_disable_intr(rxq);
					continue;
				}
				break;
			}
			if (unlikely(token != (void *)rxq)) {
				nm_prerr("BUG: RX token mismatch");
			} else {
				/* Skip the virtio-net header. */
				len -= sc->vtnet_hdr_size;
				if (unlikely(len < 0)) {
					nm_prlim(1, "Truncated virtio-net header, "
						"missing %d bytes", -len);
					len = 0;
				}
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = 0;
				nm_i = nm_next(nm_i, lim);
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	nm_prdis("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur,
				kring->nr_hwcur, kring->nr_hwtail);

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int howmany = head - nm_i;
		int nm_j;

		if (howmany < 0)
			howmany += kring->nkr_num_slots;
		nm_j = vtnet_netmap_kring_refill(kring, howmany);
		if (nm_j < 0)
			return nm_j;
		kring->nr_hwcur = nm_j;
		virtqueue_notify(vq);
	}

	nm_prdis("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur,
		ring->tail, kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}


/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int state)
{
	struct vtnet_softc *sc = na->ifp->if_softc;
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct virtqueue *txvq = txq->vtntx_vq;

		if (state) {
			vtnet_rxq_enable_intr(rxq);
			virtqueue_enable_intr(txvq);
		} else {
			vtnet_rxq_disable_intr(rxq);
			virtqueue_disable_intr(txvq);
		}
	}
}

static int
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * transmitted, so virtqueue_enqueue() is called with a 2-element
	 * sglist. TX virtqueues use indirect descriptors if the feature was
	 * negotiated with the host and sc->vtnet_tx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each TX sglist; without them, we need two separate virtio
	 * descriptors per TX sglist. We compute the number of netmap TX
	 * slots accordingly.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
}
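
/*
 * Worked example (illustrative numbers only): with a 256-descriptor TX
 * virtqueue and no indirect descriptors, every packet consumes two
 * chained descriptors (one for the virtio-net header, one for the
 * payload), so the netmap TX ring gets 256 / 2 = 128 slots; with
 * indirect descriptors it gets all 256. The same arithmetic applies to
 * vtnet_netmap_rx_slots() below.
 */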

static int
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * received, so virtqueue_enqueue() is called with a 2-element
	 * sglist. RX virtqueues use indirect descriptors if the feature was
	 * negotiated with the host and sc->vtnet_rx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each RX sglist; without them, we need two separate virtio
	 * descriptors per RX sglist. We compute the number of netmap RX
	 * slots accordingly.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
}

static int
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct vtnet_softc *sc = na->ifp->if_softc;

	info->num_tx_rings = sc->vtnet_act_vq_pairs;
	info->num_rx_rings = sc->vtnet_act_vq_pairs;
	info->num_tx_descs = vtnet_netmap_tx_slots(sc);
	info->num_rx_descs = vtnet_netmap_rx_slots(sc);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	return 0;
}

static void
vtnet_netmap_attach(struct vtnet_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->vtnet_ifp;
	na.na_flags = 0;
	na.num_tx_desc = vtnet_netmap_tx_slots(sc);
	na.num_rx_desc = vtnet_netmap_rx_slots(sc);
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	na.rx_buf_maxsize = 0;
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_intr = vtnet_netmap_intr;
	na.nm_config = vtnet_netmap_config;

	netmap_attach(&na);

	nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d",
			na.num_tx_rings, na.num_tx_desc,
			na.num_rx_rings, na.num_rx_desc);
}
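
/*
 * Usage note (a sketch based on how netmap-enabled FreeBSD drivers are
 * typically wired up, not a statement about the exact call sites, which
 * live in if_vtnet.c): the driver is expected to call
 * vtnet_netmap_attach(sc) near the end of device attach, once the
 * virtqueues have been allocated, and netmap_detach(sc->vtnet_ifp) on
 * device detach.
 */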
/* end of file */