xref: /f-stack/freebsd/sys/buf_ring.h (revision 22ce4aff)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007-2009 Kip Macy <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef	_SYS_BUF_RING_H_
#define	_SYS_BUF_RING_H_

#include <machine/cpu.h>

#ifdef DEBUG_BUFRING
#include <sys/lock.h>
#include <sys/mutex.h>
#endif

struct buf_ring {
	volatile uint32_t	br_prod_head;
	volatile uint32_t	br_prod_tail;
	int			br_prod_size;
	int			br_prod_mask;
	uint64_t		br_drops;
	volatile uint32_t	br_cons_head __aligned(CACHE_LINE_SIZE);
	volatile uint32_t	br_cons_tail;
	int			br_cons_size;
	int			br_cons_mask;
#ifdef DEBUG_BUFRING
	struct mtx		*br_lock;
#endif
	void			*br_ring[0] __aligned(CACHE_LINE_SIZE);
};
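
/*
 * Sizing note (an editor's informational sketch, not upstream
 * documentation): the masks are assumed to be size - 1 with a
 * power-of-two ring size, so advancing an index is a single AND
 * rather than a modulo, e.g.:
 *
 *	next = (head + 1) & br->br_prod_mask;	(wraps 7 -> 0 for size 8)
 */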

/*
 * multi-producer safe lock-free ring buffer enqueue
 *
 */
static __inline int
buf_ring_enqueue(struct buf_ring *br, void *buf)
{
	uint32_t prod_head, prod_next, cons_tail;
#ifdef DEBUG_BUFRING
	int i;

	/*
	 * Note: It is possible to encounter an mbuf that was removed
	 * via drbr_peek(), and then re-added via drbr_putback() and
	 * trigger a spurious panic.
	 */
	for (i = br->br_cons_head; i != br->br_prod_head;
	     i = ((i + 1) & br->br_cons_mask))
		if (br->br_ring[i] == buf)
			panic("buf=%p already enqueued at %d prod=%d cons=%d",
			    buf, i, br->br_prod_tail, br->br_cons_tail);
#endif
	critical_enter();
	do {
		prod_head = br->br_prod_head;
		prod_next = (prod_head + 1) & br->br_prod_mask;
		cons_tail = br->br_cons_tail;

		if (prod_next == cons_tail) {
			rmb();
			if (prod_head == br->br_prod_head &&
			    cons_tail == br->br_cons_tail) {
				br->br_drops++;
				critical_exit();
				return (ENOBUFS);
			}
			continue;
		}
	} while (!atomic_cmpset_acq_int(&br->br_prod_head, prod_head, prod_next));
#ifdef DEBUG_BUFRING
	if (br->br_ring[prod_head] != NULL)
		panic("dangling value in enqueue");
#endif
	br->br_ring[prod_head] = buf;

	/*
	 * If there are other enqueues in progress
	 * that preceded us, we need to wait for them
	 * to complete.
	 */
	while (br->br_prod_tail != prod_head)
		cpu_spinwait();
	atomic_store_rel_int(&br->br_prod_tail, prod_next);
	critical_exit();
	return (0);
}
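
/*
 * Usage sketch (illustrative, not part of this header): on ENOBUFS the
 * drop has already been counted in br_drops, so a transmit path
 * typically just frees the buffer:
 *
 *	if (buf_ring_enqueue(br, m) == ENOBUFS) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 */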

/*
 * multi-consumer safe dequeue
 *
 */
static __inline void *
buf_ring_dequeue_mc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	void *buf;

	critical_enter();
	do {
		cons_head = br->br_cons_head;
		cons_next = (cons_head + 1) & br->br_cons_mask;

		if (cons_head == br->br_prod_tail) {
			critical_exit();
			return (NULL);
		}
	} while (!atomic_cmpset_acq_int(&br->br_cons_head, cons_head, cons_next));

	buf = br->br_ring[cons_head];
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	/*
	 * If there are other dequeues in progress
	 * that preceded us, we need to wait for them
	 * to complete.
	 */
	while (br->br_cons_tail != cons_head)
		cpu_spinwait();

	atomic_store_rel_int(&br->br_cons_tail, cons_next);
	critical_exit();

	return (buf);
}
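
/*
 * Usage sketch (illustrative): any context may drain the ring with the
 * multi-consumer variant, e.g.
 *
 *	while ((m = buf_ring_dequeue_mc(br)) != NULL)
 *		m_freem(m);
 */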

/*
 * single-consumer dequeue
 * use where dequeue is protected by a lock
 * e.g. a network driver's tx queue lock
 */
static __inline void *
buf_ring_dequeue_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
#ifdef PREFETCH_DEFINED
	uint32_t cons_next_next;
#endif
	uint32_t prod_tail;
	void *buf;

	/*
	 * This is a workaround to allow using buf_ring on ARM and ARM64.
	 * ARM64TODO: Fix buf_ring in a generic way.
	 * REMARKS: It is suspected that br_cons_head does not require a
	 *   load_acq operation, but this change was extensively tested
	 *   and confirmed to be working. To be reviewed once again in
	 *   FreeBSD-12.
	 *
	 * This prevents the following situation:
	 *
	 * Core(0) - buf_ring_enqueue()                   Core(1) - buf_ring_dequeue_sc()
	 * --------------------------------------------   --------------------------------------------
	 *
	 *                                                cons_head = br->br_cons_head;
	 * atomic_cmpset_acq_32(&br->br_prod_head, ...);
	 *                                                buf = br->br_ring[cons_head];   <see <1>>
	 * br->br_ring[prod_head] = buf;
	 * atomic_store_rel_32(&br->br_prod_tail, ...);
	 *                                                prod_tail = br->br_prod_tail;
	 *                                                if (cons_head == prod_tail)
	 *                                                        return (NULL);
	 *                                                <condition is false and the code
	 *                                                 uses the invalid (old) buf>
	 *
	 * <1> The load (on core 1) from br->br_ring[cons_head] can be
	 *     reordered (speculatively read) by the CPU.
	 */
#if defined(__arm__) || defined(__aarch64__)
	cons_head = atomic_load_acq_32(&br->br_cons_head);
#else
	cons_head = br->br_cons_head;
#endif
	prod_tail = atomic_load_acq_32(&br->br_prod_tail);

	cons_next = (cons_head + 1) & br->br_cons_mask;
#ifdef PREFETCH_DEFINED
	cons_next_next = (cons_head + 2) & br->br_cons_mask;
#endif

	if (cons_head == prod_tail)
		return (NULL);

#ifdef PREFETCH_DEFINED
	if (cons_next != prod_tail) {
		prefetch(br->br_ring[cons_next]);
		if (cons_next_next != prod_tail)
			prefetch(br->br_ring[cons_next_next]);
	}
#endif
	br->br_cons_head = cons_next;
	buf = br->br_ring[cons_head];

#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
	if (!mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
	if (br->br_cons_tail != cons_head)
		panic("inconsistent list cons_tail=%d cons_head=%d",
		    br->br_cons_tail, cons_head);
#endif
	br->br_cons_tail = cons_next;
	return (buf);
}
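
/*
 * Usage sketch (illustrative; txq and drv_xmit() are hypothetical): the
 * single-consumer variants assume the caller serializes consumers,
 * typically with the driver's tx queue lock:
 *
 *	mtx_lock(&txq->lock);
 *	while ((m = buf_ring_dequeue_sc(br)) != NULL)
 *		drv_xmit(sc, m);
 *	mtx_unlock(&txq->lock);
 */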

/*
 * single-consumer advance after a peek
 * use where it is protected by a lock
 * e.g. a network driver's tx queue lock
 */
static __inline void
buf_ring_advance_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	uint32_t prod_tail;

	cons_head = br->br_cons_head;
	prod_tail = br->br_prod_tail;

	cons_next = (cons_head + 1) & br->br_cons_mask;
	if (cons_head == prod_tail)
		return;
	br->br_cons_head = cons_next;
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	br->br_cons_tail = cons_next;
}

/*
 * Used to return a buffer (most likely already there)
 * to the top of the ring. The caller should *not*
 * have used any dequeue to pull it out of the ring
 * but instead should have used the peek() function.
 * This is normally used where the transmit queue
 * of a driver is full, and an mbuf must be returned.
 * Most likely what's in the ring-buffer is what
 * is being put back (since it was not removed), but
 * sometimes the lower transmit function may have
 * done a pullup or other function that will have
 * changed it. As an optimization we always put it
 * back (since jhb says the store is probably cheaper);
 * if we have to do a multi-queue version we will need
 * the compare and an atomic.
 */
static __inline void
buf_ring_putback_sc(struct buf_ring *br, void *new)
{
	KASSERT(br->br_cons_head != br->br_prod_tail,
	    ("Buf-Ring has none in putback"));
	br->br_ring[br->br_cons_head] = new;
}
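
/*
 * Usage sketch (illustrative; if_start_locked() is hypothetical): the
 * peek/putback/advance pattern, under the same lock as the sc dequeues:
 *
 *	while ((m = buf_ring_peek(br)) != NULL) {
 *		if (if_start_locked(sc, &m) != 0) {
 *			if (m != NULL)
 *				buf_ring_putback_sc(br, m);
 *			break;
 *		}
 *		buf_ring_advance_sc(br);
 *	}
 */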

/*
 * return a pointer to the first entry in the ring
 * without modifying it, or NULL if the ring is empty
 * race-prone if not protected by a lock
 */
static __inline void *
buf_ring_peek(struct buf_ring *br)
{

#ifdef DEBUG_BUFRING
	if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif
	/*
	 * I believe it is safe to not have a memory barrier
	 * here because we control cons, and tail is at worst
	 * a lagging indicator, so the worst case is that we
	 * return NULL immediately after a buffer has been enqueued.
	 */
	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

	return (br->br_ring[br->br_cons_head]);
}

static __inline void *
buf_ring_peek_clear_sc(struct buf_ring *br)
{
#ifdef DEBUG_BUFRING
	void *ret;

	if (!mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif

	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

#if defined(__arm__) || defined(__aarch64__)
	/*
	 * The barrier is required here on ARM and ARM64 to ensure that
	 * br->br_ring[br->br_cons_head] will not be fetched before the
	 * above condition is checked.
	 * Without the barrier, it is possible that the buffer is fetched
	 * before the enqueue puts the mbuf into br; the enqueue then
	 * updates the array and br_prod_tail in the meantime, the
	 * conditional check becomes true, and we return the previously
	 * fetched (and invalid) buffer.
	 */
	atomic_thread_fence_acq();
#endif

#ifdef DEBUG_BUFRING
	/*
	 * Single consumer, i.e. cons_head will not move while we are
	 * running, so atomic_swap_ptr() is not necessary here.
	 */
	ret = br->br_ring[br->br_cons_head];
	br->br_ring[br->br_cons_head] = NULL;
	return (ret);
#else
	return (br->br_ring[br->br_cons_head]);
#endif
}

static __inline int
buf_ring_full(struct buf_ring *br)
{

	return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
}

static __inline int
buf_ring_empty(struct buf_ring *br)
{

	return (br->br_cons_head == br->br_prod_tail);
}

static __inline int
buf_ring_count(struct buf_ring *br)
{

	return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
	    & br->br_prod_mask);
}
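
/*
 * Worked example (informational): with size 8 (mask 7), prod_tail = 2
 * after a wrap and cons_tail = 6, buf_ring_count() yields
 * (8 + 2 - 6) & 7 = 4 occupied entries; adding br_prod_size keeps the
 * intermediate value non-negative across the wrap.
 */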

struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
    struct mtx *);
void buf_ring_free(struct buf_ring *br, struct malloc_type *type);

#endif