xref: /linux-6.15/include/net/netdev_queues.h (revision 23fa6a23)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NET_QUEUES_H
#define _LINUX_NET_QUEUES_H

#include <linux/netdevice.h>

/**
 * struct netdev_config - queue-related configuration for a netdev
 * @hds_thresh:		Header-data split (HDS) threshold value.
 * @hds_config:		HDS setting requested from userspace.
 */
struct netdev_config {
	u32	hds_thresh;
	u8	hds_config;
};

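/*
 * Example (illustrative sketch, not part of the original header): a driver
 * checking whether header-data split was requested for the device. This
 * assumes the config is reachable via dev->cfg and that @hds_config holds
 * an ETHTOOL_TCP_DATA_SPLIT_* value; foo_hds_enabled() is hypothetical.
 *
 *	static bool foo_hds_enabled(struct net_device *dev)
 *	{
 *		const struct netdev_config *cfg = dev->cfg;
 *
 *		return cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
 *	}
 */
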
/* See the netdev.yaml spec for definition of each statistic */
struct netdev_queue_stats_rx {
	u64 bytes;
	u64 packets;
	u64 alloc_fail;

	u64 hw_drops;
	u64 hw_drop_overruns;

	u64 csum_complete;
	u64 csum_unnecessary;
	u64 csum_none;
	u64 csum_bad;

	u64 hw_gro_packets;
	u64 hw_gro_bytes;
	u64 hw_gro_wire_packets;
	u64 hw_gro_wire_bytes;

	u64 hw_drop_ratelimits;
};

struct netdev_queue_stats_tx {
	u64 bytes;
	u64 packets;

	u64 hw_drops;
	u64 hw_drop_errors;

	u64 csum_none;
	u64 needs_csum;

	u64 hw_gso_packets;
	u64 hw_gso_bytes;
	u64 hw_gso_wire_packets;
	u64 hw_gso_wire_bytes;

	u64 hw_drop_ratelimits;

	u64 stop;
	u64 wake;
};

/**
 * struct netdev_stat_ops - netdev ops for fine-grained stats
 * @get_queue_stats_rx:	get stats for a given Rx queue
 * @get_queue_stats_tx:	get stats for a given Tx queue
 * @get_base_stats:	get base stats (not belonging to any live instance)
 *
 * Query stats for a given object. The values of the statistics are undefined
 * on entry (specifically they are *not* zero-initialized). Drivers should
 * assign values only to the statistics they collect. Statistics which are not
 * collected must be left undefined.
 *
 * Queue objects are not necessarily persistent, and only currently active
 * queues are queried by the per-queue callbacks. This means that per-queue
 * statistics will not generally add up to the total number of events for
 * the device. The @get_base_stats callback allows filling in the delta
 * between events for currently live queues and overall device history.
 * @get_base_stats can also be used to report any miscellaneous packets
 * transferred outside of the main set of queues used by the networking stack.
 * When the statistics for the entire device are queried, @get_base_stats
 * is issued first to collect the delta, followed by a series of per-queue
 * callbacks. Only statistics which are set in @get_base_stats will be
 * reported at the device level, meaning that unlike in queue callbacks,
 * setting a statistic to zero in @get_base_stats is a legitimate thing to do.
 * This is because @get_base_stats has a second function of designating which
 * statistics are in fact correct for the entire device (e.g. when history
 * for some of the events is not maintained, and a reliable "total" cannot
 * be provided).
 *
 * Device drivers can assume that when collecting total device stats,
 * the @get_base_stats and subsequent per-queue calls are performed
 * "atomically" (without releasing the rtnl_lock).
 *
 * Device drivers are encouraged to reset the per-queue statistics when
 * the number of queues changes. This is because the primary use case for
 * per-queue statistics is currently to detect traffic imbalance.
 */
struct netdev_stat_ops {
	void (*get_queue_stats_rx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats);
	void (*get_queue_stats_tx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_tx *stats);
	void (*get_base_stats)(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx);
};

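/*
 * Example (hedged sketch, not part of the original header): a driver which
 * only tracks bytes, packets and alloc_fail on Rx. It assigns just those
 * fields per queue, and sets the same fields in @get_base_stats so that
 * only they are reported for the whole device. All foo_* names are
 * hypothetical.
 *
 *	static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
 *					   struct netdev_queue_stats_rx *stats)
 *	{
 *		struct foo_priv *fp = netdev_priv(dev);
 *		struct foo_rx_ring *ring = &fp->rx_rings[idx];
 *
 *		stats->bytes = ring->bytes;
 *		stats->packets = ring->packets;
 *		stats->alloc_fail = ring->alloc_fail;
 *		// other fields are deliberately left undefined
 *	}
 *
 *	static void foo_get_base_stats(struct net_device *dev,
 *				       struct netdev_queue_stats_rx *rx,
 *				       struct netdev_queue_stats_tx *tx)
 *	{
 *		struct foo_priv *fp = netdev_priv(dev);
 *
 *		// history of queues that no longer exist; 0 is legitimate here
 *		rx->bytes = fp->rx_old.bytes;
 *		rx->packets = fp->rx_old.packets;
 *		rx->alloc_fail = fp->rx_old.alloc_fail;
 *	}
 *
 *	static const struct netdev_stat_ops foo_stat_ops = {
 *		.get_queue_stats_rx	= foo_get_queue_stats_rx,
 *		.get_base_stats		= foo_get_base_stats,
 *	};
 */
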
void netdev_stat_queue_sum(struct net_device *netdev,
			   int rx_start, int rx_end,
			   struct netdev_queue_stats_rx *rx_sum,
			   int tx_start, int tx_end,
			   struct netdev_queue_stats_tx *tx_sum);

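/*
 * Example (hedged sketch): summing per-queue stats over all currently
 * configured queues. Assumes the start/end pairs describe [start, end)
 * index ranges.
 *
 *	struct netdev_queue_stats_rx rx_sum;
 *	struct netdev_queue_stats_tx tx_sum;
 *
 *	netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum,
 *			      0, netdev->real_num_tx_queues, &tx_sum);
 */
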
/**
 * struct netdev_queue_mgmt_ops - netdev ops for queue management
 *
 * @ndo_queue_mem_size: Size of the struct that describes a queue's memory.
 *
 * @ndo_queue_mem_alloc: Allocate memory for an RX queue at the specified index.
 *			 The new memory is written at the specified address.
 *
 * @ndo_queue_mem_free:	Free memory from an RX queue.
 *
 * @ndo_queue_start:	Start an RX queue with the specified memory and at the
 *			specified index.
 *
 * @ndo_queue_stop:	Stop the RX queue at the specified index. The stopped
 *			queue's memory is written at the specified address.
 *
 * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
 * the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
 * be called for an interface which is open.
 */
struct netdev_queue_mgmt_ops {
	size_t			ndo_queue_mem_size;
	int			(*ndo_queue_mem_alloc)(struct net_device *dev,
						       void *per_queue_mem,
						       int idx);
	void			(*ndo_queue_mem_free)(struct net_device *dev,
						      void *per_queue_mem);
	int			(*ndo_queue_start)(struct net_device *dev,
						   void *per_queue_mem,
						   int idx);
	int			(*ndo_queue_stop)(struct net_device *dev,
						  void *per_queue_mem,
						  int idx);
};

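/*
 * Example (illustrative sketch): the restart sequence the core can build
 * on top of these ops -- allocate replacement memory before disturbing the
 * live queue, so an allocation failure leaves the queue running. Assumes
 * the ops are reachable via dev->queue_mgmt_ops; error handling is trimmed.
 *
 *	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
 *	void *new_mem = kzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
 *	void *old_mem = kzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
 *
 *	if (!qops->ndo_queue_mem_alloc(dev, new_mem, idx)) {
 *		qops->ndo_queue_stop(dev, old_mem, idx);
 *		qops->ndo_queue_start(dev, new_mem, idx);
 *		qops->ndo_queue_mem_free(dev, old_mem);
 *	}
 *	kfree(old_mem);
 *	kfree(new_mem);
 */
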
/**
 * DOC: Lockless queue stopping / waking helpers.
 *
 * The netif_txq_maybe_stop() and __netif_txq_completed_wake()
 * macros are designed to safely implement stopping
 * and waking netdev queues without full lock protection.
 *
 * We assume that there can be no concurrent stop attempts and no concurrent
 * wake attempts. The try-stop should happen from the xmit handler,
 * while wake-ups should be triggered from NAPI poll context.
 * The two may run concurrently (single producer, single consumer).
 *
 * The try-stop side is expected to run from the xmit handler and therefore
 * it does not reschedule Tx (netif_tx_start_queue() instead of
 * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit
 * handler may lead to the xmit queue being enabled but not run.
 * The waking side does not have similar context restrictions.
 *
 * The macros guarantee that rings will not remain stopped if there's
 * space available, but they do *not* prevent false wake-ups when
 * the ring is full! Drivers should check for ring full at the start
 * of the xmit handler.
 *
 * All descriptor ring indexes (and other relevant shared state) must
 * be updated before invoking the macros.
 */

#define netif_txq_try_stop(txq, get_desc, start_thrs)			\
	({								\
		int _res;						\
									\
		netif_tx_stop_queue(txq);				\
		/* Producer index and stop bit must be visible		\
		 * to consumer before we recheck.			\
		 * Pairs with a barrier in __netif_txq_completed_wake(). \
		 */							\
		smp_mb__after_atomic();					\
									\
		/* We need to check again in case another		\
		 * CPU has just made room available.			\
		 */							\
		_res = 0;						\
		if (unlikely(get_desc >= start_thrs)) {			\
			netif_tx_start_queue(txq);			\
			_res = -1;					\
		}							\
		_res;							\
	})

/**
 * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @stop_thrs:	minimal number of available descriptors for queue to be left
 *		enabled
 * @start_thrs:	minimal number of descriptors to re-enable the queue, can be
 *		equal to @stop_thrs or higher to avoid frequent waking
 *
 * All arguments may be evaluated multiple times; beware of side effects.
 * @get_desc must be a formula or a function call, and it must always
 * return up-to-date information when evaluated!
 * Expected to be used from ndo_start_xmit; see the comment at the top of
 * the file.
 *
 * Returns:
 *	 0 if the queue was stopped
 *	 1 if the queue was left enabled
 *	-1 if the queue was re-enabled (raced with waking)
 */
#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs)	\
	({								\
		int _res;						\
									\
		_res = 1;						\
		if (unlikely(get_desc < stop_thrs))			\
			_res = netif_txq_try_stop(txq, get_desc, start_thrs); \
		_res;							\
	})

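/*
 * Example (hedged sketch): typical use from an ndo_start_xmit handler,
 * after the descriptors for the skb have been written. foo_tx_avail() is
 * a hypothetical helper returning the free descriptor count; using
 * MAX_SKB_FRAGS + 1 as both thresholds is just one plausible choice.
 *
 *	if (!netif_txq_maybe_stop(txq, foo_tx_avail(ring),
 *				  MAX_SKB_FRAGS + 1, MAX_SKB_FRAGS + 1))
 *		ring->stats.stop++;	// returned 0: queue was stopped
 */
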
/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if
 * @bytes != 0, regardless of kernel config.
 */
static inline void
netdev_txq_completed_mb(struct netdev_queue *dev_queue,
			unsigned int pkts, unsigned int bytes)
{
	if (IS_ENABLED(CONFIG_BQL))
		netdev_tx_completed_queue(dev_queue, pkts, bytes);
	else if (bytes)
		smp_mb();
}

/**
 * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @pkts:	number of packets completed
 * @bytes:	number of bytes completed
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @start_thrs:	minimal number of descriptors to re-enable the queue
 * @down_cond:	down condition, predicate indicating that the queue should
 *		not be woken up even if descriptors are available
 *
 * All arguments may be evaluated multiple times.
 * @get_desc must be a formula or a function call, and it must always
 * return up-to-date information when evaluated!
 * Reports completed pkts/bytes to BQL.
 *
 * Returns:
 *	 0 if the queue was woken up
 *	 1 if the queue was already enabled (or disabled but @down_cond is true)
 *	-1 if the queue was left unchanged (@start_thrs not reached)
 */
#define __netif_txq_completed_wake(txq, pkts, bytes,			\
				   get_desc, start_thrs, down_cond)	\
	({								\
		int _res;						\
									\
		/* Report to BQL and piggyback on its barrier.		\
		 * The barrier makes sure that anybody stopping the	\
		 * queue after this point sees the new consumer index.	\
		 * Pairs with barrier in netif_txq_try_stop().		\
		 */							\
		netdev_txq_completed_mb(txq, pkts, bytes);		\
									\
		_res = -1;						\
		if (pkts && likely(get_desc >= start_thrs)) {		\
			_res = 1;					\
			if (unlikely(netif_tx_queue_stopped(txq)) &&	\
			    !(down_cond)) {				\
				netif_tx_wake_queue(txq);		\
				_res = 0;				\
			}						\
		}							\
		_res;							\
	})

#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \
	__netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false)

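/*
 * Example (hedged sketch): typical use at the end of a driver's Tx
 * completion handling, in NAPI poll context. done_pkts/done_bytes,
 * foo_tx_avail() and FOO_TX_WAKE_THRESH are hypothetical names.
 *
 *	if (!netif_txq_completed_wake(txq, done_pkts, done_bytes,
 *				      foo_tx_avail(ring),
 *				      FOO_TX_WAKE_THRESH))
 *		ring->stats.wake++;	// returned 0: queue was woken
 */
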
/* subqueue variants follow */

#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs)		\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_try_stop(txq, get_desc, start_thrs);		\
	})

#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \
	})

#define netif_subqueue_completed_wake(dev, idx, pkts, bytes,		\
				      get_desc, start_thrs)		\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_completed_wake(txq, pkts, bytes,		\
					 get_desc, start_thrs);		\
	})

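/*
 * Example (hedged sketch): multi-queue drivers which track queues by index
 * can use the subqueue variants directly, e.g. from ndo_start_xmit:
 *
 *	netif_subqueue_maybe_stop(dev, skb_get_queue_mapping(skb),
 *				  foo_tx_avail(ring),
 *				  FOO_TX_STOP_THRESH, FOO_TX_WAKE_THRESH);
 *
 * foo_tx_avail() and the FOO_* thresholds are hypothetical.
 */
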
#endif