xref: /freebsd-14.2/sys/dev/netmap/if_re_netmap.h (revision 5644ccec)
/*
 * Copyright (C) 2011 Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: if_re_netmap.h 10075 2011-12-25 22:55:48Z luigi $
 *
 * netmap support for if_re
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

static int re_netmap_reg(struct ifnet *, int onoff);
static int re_netmap_txsync(struct ifnet *, u_int, int);
static int re_netmap_rxsync(struct ifnet *, u_int, int);
static void re_netmap_lock_wrapper(struct ifnet *, int, u_int);

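/*
 * Describe this adapter to netmap. The rl(4) hardware has a single
 * tx/rx ring pair, hence the second argument to netmap_attach().
 */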
static void
re_netmap_attach(struct rl_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->rl_ifp;
	na.separate_locks = 0;
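	/* a single core lock protects both tx and rx (see the wrapper below) */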
	na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
	na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
	na.nm_txsync = re_netmap_txsync;
	na.nm_rxsync = re_netmap_rxsync;
	na.nm_lock = re_netmap_lock_wrapper;
	na.nm_register = re_netmap_reg;
	netmap_attach(&na, 1);
}
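
/*
 * re_netmap_attach() is expected to be called once from the driver
 * attach path (e.g. from re_attach() in if_re.c), after the descriptor
 * counts in rl_ldata have been initialized.
 */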


/*
 * Wrapper to export locks to the generic netmap code.
 * This driver has a single core lock, so the per-queue
 * tx/rx locks must never be requested.
 */
static void
re_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
{
	struct rl_softc *adapter = ifp->if_softc;

	switch (what) {
	case NETMAP_CORE_LOCK:
		RL_LOCK(adapter);
		break;
	case NETMAP_CORE_UNLOCK:
		RL_UNLOCK(adapter);
		break;

	case NETMAP_TX_LOCK:
	case NETMAP_RX_LOCK:
	case NETMAP_TX_UNLOCK:
	case NETMAP_RX_UNLOCK:
		D("invalid lock call %d, no tx/rx locks here", what);
		break;
	}
}


/*
 * Support for netmap register/unregister. We are already under core lock.
 * Only called on the first register or the last unregister.
 */
static int
re_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct rl_softc *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (na == NULL)
		return EINVAL;
	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	re_stop(adapter);

	if (onoff) {
		ifp->if_capenable |= IFCAP_NETMAP;

		/* save if_transmit to restore it later */
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_start;
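		/*
		 * netmap_start() diverts packets coming from the host
		 * stack into the netmap "host" ring, so the stack keeps
		 * working while netmap owns the hardware rings.
		 */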

		re_init_locked(adapter);

		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			error = ENOMEM;
			goto fail;
		}
	} else {
fail:
		/* restore if_transmit */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		re_init_locked(adapter);	/* also enables intr */
	}
	return (error);
}


/*
 * Reconcile kernel and user view of the transmit ring.
 */
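/*
 * In this netmap API, userspace advances ring->cur past the slots it
 * wants transmitted, while the kernel remembers its own position in
 * kring->nr_hwcur; txsync() first reclaims descriptors the NIC has
 * completed, then hands the slots in [nr_hwcur, cur) to the NIC.
 */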
static int
re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct rl_softc *sc = ifp->if_softc;
	struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, k, l, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		RL_LOCK(sc);

	/* Sync the TX descriptor list */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* XXX move after the transmissions */
	/* record completed transmissions */
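	/*
	 * Descriptors from considx up to prodidx are in flight; the NIC
	 * clears RL_TDESC_STAT_OWN in each descriptor it has completed,
	 * so stop at the first one that is still owned.
	 */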
	for (n = 0, l = sc->rl_ldata.rl_tx_considx;
	    l != sc->rl_ldata.rl_tx_prodidx;
	    n++, l = RL_TX_DESC_NXT(sc, l)) {
		uint32_t cmdstat =
			le32toh(sc->rl_ldata.rl_tx_list[l].rl_cmdstat);
		if (cmdstat & RL_TDESC_STAT_OWN)
			break;
	}
	if (n > 0) {
		sc->rl_ldata.rl_tx_considx = l;
		sc->rl_ldata.rl_tx_free += n;
		kring->nr_hwavail += n;
	}

	/* update avail to what the hardware knows */
	ring->avail = kring->nr_hwavail;

	j = kring->nr_hwcur;
	if (j != k) {	/* we have new packets to send */
		n = 0;
		l = sc->rl_ldata.rl_tx_prodidx;
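		/*
		 * j walks the netmap ring from nr_hwcur, l walks the NIC
		 * ring from prodidx; both advance in lockstep modulo the
		 * ring size.
		 */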
		while (j != k) {
			struct netmap_slot *slot = &ring->slot[j];
			struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[l];
			int cmd = slot->len | RL_TDESC_CMD_EOF |
				RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);
			int len = slot->len;

			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
				if (do_lock)
					RL_UNLOCK(sc);
				// XXX what about prodidx ?
				return netmap_ring_reinit(kring);
			}

			if (l == lim)	/* mark end of ring */
				cmd |= RL_TDESC_CMD_EOR;

			if (slot->flags & NS_BUF_CHANGED) {
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				/* buffer has changed, unload and reload map */
				netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
					txd[l].tx_dmamap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			slot->flags &= ~NS_REPORT;
			desc->rl_cmdstat = htole32(cmd);
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
				txd[l].tx_dmamap, BUS_DMASYNC_PREWRITE);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
			n++;
		}
		sc->rl_ldata.rl_tx_prodidx = l;
		kring->nr_hwcur = k;

		/* decrease avail by number of sent packets */
		ring->avail -= n;
		kring->nr_hwavail = ring->avail;

		bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
		    sc->rl_ldata.rl_tx_list_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

		/* start ? */
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	}
	if (do_lock)
		RL_UNLOCK(sc);
	return 0;
}


/*
 * Reconcile kernel and user view of the receive ring.
 */
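/*
 * Here nr_hwavail counts packets that have been received but not yet
 * consumed by userspace; rxsync() grows it with new arrivals and then
 * returns the slots in [nr_hwcur, cur) to the NIC for reuse.
 */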
static int
re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct rl_softc *sc = ifp->if_softc;
	struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, k, l, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		RL_LOCK(sc);
	/* XXX check sync modes */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * The device uses all the buffers in the ring, so we need
	 * a termination condition in addition to RL_RDESC_STAT_OWN
	 * being cleared (all buffers could have it cleared). The
	 * easiest one is to limit the amount of data reported up
	 * to 'lim'.
	 */
	l = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
	j = netmap_ridx_n2k(na, ring_nr, l); /* the kring index */
	for (n = kring->nr_hwavail; n < lim; n++) {
		struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[l];
		uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
		uint32_t total_len;

		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		/* XXX subtract crc */
		total_len = (total_len < 4) ? 0 : total_len - 4;
		kring->ring->slot[j].len = total_len;
		/* sync was in re_newbuf() */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
		    rxd[l].rx_dmamap, BUS_DMASYNC_POSTREAD);
		j = (j == lim) ? 0 : j + 1;
		l = (l == lim) ? 0 : l + 1;
	}
	if (n != kring->nr_hwavail) {
		sc->rl_ldata.rl_rx_prodidx = l;
		sc->rl_ifp->if_ipackets += n - kring->nr_hwavail;
		kring->nr_hwavail = n;
	}

	/* skip past packets that userspace has already processed,
	 * making them available for reception.
	 * advance nr_hwcur and issue a bus_dmamap_sync on the
	 * buffers so it is safe to write to them.
	 * Also decrease nr_hwavail accordingly.
	 */
	j = kring->nr_hwcur;
	if (j != k) {	/* userspace has read some packets. */
		n = 0;
		l = netmap_ridx_k2n(na, ring_nr, j); /* the NIC index */
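		/*
		 * On the rx side the netmap ring and the NIC ring may be
		 * offset (e.g. after a reset), so translate the kring
		 * index j to the NIC index l explicitly.
		 */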
		while (j != k) {
			struct netmap_slot *slot = ring->slot + j;
			struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[l];
			int cmd = na->buff_size | RL_RDESC_CMD_OWN;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			if (addr == netmap_buffer_base) { /* bad buf */
				if (do_lock)
					RL_UNLOCK(sc);
				return netmap_ring_reinit(kring);
			}

			if (l == lim)	/* mark end of ring */
				cmd |= RL_RDESC_CMD_EOR;

			desc->rl_cmdstat = htole32(cmd);
			slot->flags &= ~NS_REPORT;
			if (slot->flags & NS_BUF_CHANGED) {
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
					rxd[l].rx_dmamap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
				rxd[l].rx_dmamap, BUS_DMASYNC_PREREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
			n++;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
		/* Flush the RX DMA ring */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail;
	if (do_lock)
		RL_UNLOCK(sc);
	return 0;
}

/*
 * Additional routines to init the tx and rx rings.
 * In other drivers we do that inline in the main code.
 */
static void
re_netmap_tx_init(struct rl_softc *sc)
{
	struct rl_txdesc *txd;
	struct rl_desc *desc;
	int i, n;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);

	/* slot is NULL if we are not in netmap mode */
	if (!slot)
		return;
	/* in netmap mode, overwrite addresses and maps */
	txd = sc->rl_ldata.rl_tx_desc;
	desc = sc->rl_ldata.rl_tx_list;
	n = sc->rl_ldata.rl_tx_desc_cnt;

	/* l points in the netmap ring, i points in the NIC ring */
	for (i = 0; i < n; i++) {
		void *addr;
		uint64_t paddr;
		int l = netmap_tidx_n2k(na, 0, i);

		addr = PNMB(slot + l, &paddr);
		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		netmap_load_map(sc->rl_ldata.rl_tx_mtag,
			txd[i].tx_dmamap, addr);
	}
}

static void
re_netmap_rx_init(struct rl_softc *sc)
{
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
	struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
	uint32_t cmdstat;
	int i, n, max_avail;

	if (!slot)
		return;
	n = sc->rl_ldata.rl_rx_desc_cnt;
	/*
	 * Userspace still owned nr_hwavail packets before the reset,
	 * so the NIC may only take ownership of the remaining
	 * descriptors of the ring (keeping one slot always empty).
	 */
	max_avail = n - 1 - na->rx_rings[0].nr_hwavail;
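	/*
	 * For example, with n = 256 descriptors and nr_hwavail = 10
	 * packets not yet consumed, the NIC gets ownership of at most
	 * max_avail = 256 - 1 - 10 = 245 descriptors.
	 */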
	for (i = 0; i < n; i++) {
		void *addr;
		uint64_t paddr;
		int l = netmap_ridx_n2k(na, 0, i);

		addr = PNMB(slot + l, &paddr);

		netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
		    sc->rl_ldata.rl_rx_desc[i].rx_dmamap, addr);
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
		    sc->rl_ldata.rl_rx_desc[i].rx_dmamap, BUS_DMASYNC_PREREAD);
		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		cmdstat = na->buff_size;
		if (i == n - 1) /* mark the end of ring */
			cmdstat |= RL_RDESC_CMD_EOR;
		if (i < max_avail)
			cmdstat |= RL_RDESC_CMD_OWN;
		desc[i].rl_cmdstat = htole32(cmdstat);
	}
}
420