xref: /linux-6.15/include/net/libeth/tx.h (revision 080d72f4)
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2024 Intel Corporation */

#ifndef __LIBETH_TX_H
#define __LIBETH_TX_H

#include <linux/skbuff.h>

#include <net/libeth/types.h>

/* Tx buffer completion */

/**
 * enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
 * @LIBETH_SQE_EMPTY: unused/empty, no action required
 * @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
 * @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
 * @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
 * @LIBETH_SQE_SKB: &sk_buff, unmap and napi_consume_skb(), update stats
 */
enum libeth_sqe_type {
	LIBETH_SQE_EMPTY		= 0U,
	LIBETH_SQE_CTX,
	LIBETH_SQE_SLAB,
	LIBETH_SQE_FRAG,
	LIBETH_SQE_SKB,
};

/**
 * struct libeth_sqe - represents a Send Queue Element / Tx buffer
 * @type: type of the buffer, see the enum above
 * @rs_idx: index of the last buffer from the batch this one was sent in
 * @raw: slab buffer to free via kfree()
 * @skb: &sk_buff to consume
 * @dma: DMA address to unmap
 * @len: length of the mapped region to unmap
 * @nr_frags: number of frags in the frame this buffer belongs to
 * @packets: number of physical packets sent for this frame
 * @bytes: number of physical bytes sent for this frame
 * @priv: driver-private scratchpad
 */
struct libeth_sqe {
	enum libeth_sqe_type		type:32;
	u32				rs_idx;

	union {
		void				*raw;
		struct sk_buff			*skb;
	};

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);

	u32				nr_frags;
	u32				packets;
	u32				bytes;

	unsigned long			priv;
} __aligned_largest;
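
/*
 * Example (not part of the original header): how a driver might fill SQEs on
 * the send path, one per mapped piece of the frame. This is a hedged sketch:
 * the ring layout (sq->sqes, sq->dev, the indices @i and @last) and the
 * "last buffer owns the skb" convention are assumptions of this illustration,
 * not requirements imposed by libeth itself.
 *
 *	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 *	struct libeth_sqe *sqe = &sq->sqes[i];
 *	dma_addr_t dma;
 *
 *	dma = skb_frag_dma_map(sq->dev, frag, 0, skb_frag_size(frag),
 *			       DMA_TO_DEVICE);
 *	if (dma_mapping_error(sq->dev, dma))
 *		return -ENOMEM;
 *
 *	dma_unmap_addr_set(sqe, dma, dma);
 *	dma_unmap_len_set(sqe, len, skb_frag_size(frag));
 *	sqe->type = LIBETH_SQE_FRAG;	// completion will only unmap it
 *
 *	// ... map the rest of the frame the same way, then let the SQE of
 *	// the last mapped buffer carry the skb and the per-frame stats:
 *	sqe = &sq->sqes[last];
 *	sqe->type = LIBETH_SQE_SKB;
 *	sqe->skb = skb;
 *	sqe->nr_frags = skb_shinfo(skb)->nr_frags + 1;
 *	sqe->packets = 1;		// or gso_segs for a GSO frame
 *	sqe->bytes = skb->len;
 */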

/**
 * LIBETH_SQE_CHECK_PRIV - check the driver's private SQE data
 * @p: type or name of the object the driver wants to fit into &libeth_sqe
 *
 * Make sure the driver's private data fits into libeth_sqe::priv. To be used
 * right after its declaration.
 */
#define LIBETH_SQE_CHECK_PRIV(p)					  \
	static_assert(sizeof(p) <= sizeof_field(struct libeth_sqe, priv))
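
/*
 * Example (not part of the original header): a hypothetical driver type
 * stashed in libeth_sqe::priv, with the size check placed right after its
 * declaration. The struct and field names are illustrative only.
 *
 *	struct mydrv_tx_priv {
 *		u16	gen;
 *	};
 *	LIBETH_SQE_CHECK_PRIV(struct mydrv_tx_priv);
 */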

/**
 * struct libeth_cq_pp - completion queue poll params
 * @dev: &device to perform DMA unmapping
 * @ss: onstack NAPI stats to fill
 * @napi: whether it's called from the NAPI context
 *
 * libeth uses this structure to access the objects needed to perform a full
 * Tx completion without passing lots of arguments and changing the
 * prototypes each time a new one is added.
 */
struct libeth_cq_pp {
	struct device			*dev;
	struct libeth_sq_napi_stats	*ss;

	bool				napi;
};
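
/*
 * Example (not part of the original header): building the poll params on the
 * stack before walking the completion queue. The queue field (cq->dev) and
 * the @budget variable are hypothetical names used for illustration.
 *
 *	struct libeth_sq_napi_stats ss = { };
 *	struct libeth_cq_pp cp = {
 *		.dev	= cq->dev,
 *		.ss	= &ss,
 *		.napi	= budget > 0,	// cleaning from NAPI poll
 *	};
 */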

/**
 * libeth_tx_complete - perform Tx completion for one SQE
 * @sqe: SQE to complete
 * @cp: poll params
 *
 * Perform Tx completion for any type of buffer: unmap the DMA mapping,
 * free or consume the buffer, update the stats, etc.
 */
static inline void libeth_tx_complete(struct libeth_sqe *sqe,
				      const struct libeth_cq_pp *cp)
{
	/* First pass: unmap the DMA mapping of every mapped buffer type */
	switch (sqe->type) {
	case LIBETH_SQE_EMPTY:
		return;
	case LIBETH_SQE_SKB:
	case LIBETH_SQE_FRAG:
	case LIBETH_SQE_SLAB:
		dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
			       dma_unmap_len(sqe, len), DMA_TO_DEVICE);
		break;
	default:
		break;
	}

	/* Second pass: release the buffer and, for skbs, account the frame */
	switch (sqe->type) {
	case LIBETH_SQE_SKB:
		cp->ss->packets += sqe->packets;
		cp->ss->bytes += sqe->bytes;

		napi_consume_skb(sqe->skb, cp->napi);
		break;
	case LIBETH_SQE_SLAB:
		kfree(sqe->raw);
		break;
	default:
		break;
	}

	/* Mark the SQE as free so it is skipped if completed again */
	sqe->type = LIBETH_SQE_EMPTY;
}
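
/*
 * Example (not part of the original header): a completion routine walking the
 * SQE ring with the poll params built as in the sketch above. The ring fields
 * (cq->sqes, cq->desc_count, cq->next_to_clean) and @done_idx are
 * hypothetical, and reporting @ss to the netdev stats is driver-specific.
 *
 *	u32 i = cq->next_to_clean;
 *
 *	while (i != done_idx) {
 *		libeth_tx_complete(&cq->sqes[i], &cp);
 *
 *		if (unlikely(++i == cq->desc_count))
 *			i = 0;
 *	}
 *	cq->next_to_clean = i;
 */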

#endif /* __LIBETH_TX_H */