xref: /f-stack/dpdk/drivers/net/qede/qede_rxtx.h (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6 
7 
8 #ifndef _QEDE_RXTX_H_
9 #define _QEDE_RXTX_H_
10 
11 #include "qede_ethdev.h"
12 
/* Ring Descriptors.
 * Ring sizes are powers of two, so (size - 1) doubles as the index mask
 * used by TX_CONS()/TX_PROD() below.
 * Fix: macro parameters parenthesized (CERT PRE01-C) so expressions such
 * as NUM_RX_BDS(cond ? a : b) expand correctly; no change for existing
 * callers that pass a plain identifier.
 */
#define RX_RING_SIZE_POW        16	/* 64K */
#define RX_RING_SIZE            (1ULL << RX_RING_SIZE_POW)
#define NUM_RX_BDS_MAX          (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN          128
#define NUM_RX_BDS_DEF          NUM_RX_BDS_MAX
/* Usable BD count (and index mask) for RX queue q */
#define NUM_RX_BDS(q)           ((q)->nb_rx_desc - 1)

#define TX_RING_SIZE_POW        16	/* 64K */
#define TX_RING_SIZE            (1ULL << TX_RING_SIZE_POW)
#define NUM_TX_BDS_MAX          (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN          128
#define NUM_TX_BDS_DEF          NUM_TX_BDS_MAX
/* Usable BD count (and index mask) for TX queue q */
#define NUM_TX_BDS(q)           ((q)->nb_tx_desc - 1)

/* Software ring indices wrapped to the ring size
 * (requires nb_tx_desc to be a power of two)
 */
#define TX_CONS(txq)            ((txq)->sw_tx_cons & NUM_TX_BDS(txq))
#define TX_PROD(txq)            ((txq)->sw_tx_prod & NUM_TX_BDS(txq))
30 
/* Default TX completion/free threshold (in descriptors) */
#define QEDE_DEFAULT_TX_FREE_THRESH	32

/* Per-packet checksum status flag bits (driver-internal encoding) */
#define QEDE_CSUM_ERROR			(1 << 0)
#define QEDE_CSUM_UNNECESSARY		(1 << 1)
#define QEDE_TUNN_CSUM_UNNECESSARY	(1 << 2)

/* Program a buffer descriptor: split the 64-bit DMA address into
 * little-endian hi/lo words and set the byte count.
 */
#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \
	do { \
		(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
		(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
		(bd)->nbytes = rte_cpu_to_le_16(len); \
	} while (0)
43 
/* Non-zero when the CQE parsing flags report an 802.1Q tag present */
#define CQE_HAS_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))

/* Non-zero when the CQE parsing flags report a tunnel (outer) 802.1Q tag */
#define CQE_HAS_OUTER_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
51 
/* RX buffer sizing constants (bytes) */
#define QEDE_MIN_RX_BUFF_SIZE		(1024)
#define QEDE_VLAN_TAG_SIZE		(4)
#define QEDE_LLC_SNAP_HDR_LEN		(8)

/* Max supported alignment is 256 (8 shift)
 * minimal alignment shift 6 is optimal for 57xxx HW performance
 */
#define QEDE_L1_CACHE_SHIFT	6
#define QEDE_RX_ALIGN_SHIFT	(RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END	(1UL << QEDE_RX_ALIGN_SHIFT)
/* Round n up / down to a multiple of the RX alignment (a power of two) */
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
					~(QEDE_FW_RX_ALIGN_END - 1))
#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \
							 QEDE_FW_RX_ALIGN_END)

/* Note: QEDE_LLC_SNAP_HDR_LEN is optional,
 * +2 is for padding in front of L2 header
 */
#define QEDE_ETH_OVERHEAD	(((2 * QEDE_VLAN_TAG_SIZE)) \
				 + (QEDE_LLC_SNAP_HDR_LEN) + 2)

/* Worst-case L2 header length the driver budgets for */
#define QEDE_MAX_ETHER_HDR_LEN	(RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
74 
/* RSS hash types supported by this PMD */
#define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4			|\
				 ETH_RSS_NONFRAG_IPV4_TCP	|\
				 ETH_RSS_NONFRAG_IPV4_UDP	|\
				 ETH_RSS_IPV6			|\
				 ETH_RSS_NONFRAG_IPV6_TCP	|\
				 ETH_RSS_NONFRAG_IPV6_UDP	|\
				 ETH_RSS_VXLAN			|\
				 ETH_RSS_GENEVE)

/* Larger of the RX and TX queue counts.
 * Fix: qdev parenthesized (CERT PRE01-C); no change for callers passing
 * a plain identifier.
 */
#define QEDE_RXTX_MAX(qdev) \
	(RTE_MAX((qdev)->num_rx_queues, (qdev)->num_tx_queues))
86 
/* Macros for non-tunnel packet types lkup table.
 * Encoding (deduced from the values below): bit 0 = IPv4, bit 1 = IPv6,
 * +0x4 = TCP, +0x8 = UDP, bit 4 (0x10) = fragment, bit 5 (0x20) = VLAN.
 */
#define QEDE_PKT_TYPE_UNKNOWN				0x0
#define QEDE_PKT_TYPE_MAX				0x3f

#define QEDE_PKT_TYPE_IPV4				0x1
#define QEDE_PKT_TYPE_IPV6				0x2
#define QEDE_PKT_TYPE_IPV4_TCP				0x5
#define QEDE_PKT_TYPE_IPV6_TCP				0x6
#define QEDE_PKT_TYPE_IPV4_UDP				0x9
#define QEDE_PKT_TYPE_IPV6_UDP				0xa

/* For frag pkts, the corresponding IP bit is set along with bit 4 */
#define QEDE_PKT_TYPE_IPV4_FRAG				0x11
#define QEDE_PKT_TYPE_IPV6_FRAG				0x12

/* VLAN-tagged variants: bit 5 set */
#define QEDE_PKT_TYPE_IPV4_VLAN				0x21
#define QEDE_PKT_TYPE_IPV6_VLAN				0x22
#define QEDE_PKT_TYPE_IPV4_TCP_VLAN			0x25
#define QEDE_PKT_TYPE_IPV6_TCP_VLAN			0x26
#define QEDE_PKT_TYPE_IPV4_UDP_VLAN			0x29
#define QEDE_PKT_TYPE_IPV6_UDP_VLAN			0x2a

#define QEDE_PKT_TYPE_IPV4_VLAN_FRAG			0x31
#define QEDE_PKT_TYPE_IPV6_VLAN_FRAG			0x32

/* Macros for tunneled packets with next protocol lkup table */
#define QEDE_PKT_TYPE_TUNN_GENEVE			0x1
#define QEDE_PKT_TYPE_TUNN_GRE				0x2
#define QEDE_PKT_TYPE_TUNN_VXLAN			0x3

/* Bit 2 is don't care bit */
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE	0x9
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE		0xa
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN	0xb

#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE	0xd
#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE		0xe
#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN		0xf


#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE    0x11
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE       0x12
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN     0x13

#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE	0x15
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE		0x16
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN	0x17


#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE    0x19
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE       0x1a
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN     0x1b

#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE      0x1d
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE		0x1e
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN       0x1f

/* Tunnel lookup-table size: one slot per 5-bit type value */
#define QEDE_PKT_TYPE_TUNN_MAX_TYPE			0x20 /* 2^5 */
145 
/* mbuf ol_flags handled by the TX checksum/TSO offload path */
#define QEDE_TX_CSUM_OFFLOAD_MASK (PKT_TX_IP_CKSUM              | \
				   PKT_TX_TCP_CKSUM             | \
				   PKT_TX_UDP_CKSUM             | \
				   PKT_TX_OUTER_IP_CKSUM        | \
				   PKT_TX_TCP_SEG		| \
				   PKT_TX_IPV4			| \
				   PKT_TX_IPV6)

/* All TX ol_flags this PMD accepts (checksum/TSO + VLAN + tunnel) */
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
			      PKT_TX_VLAN_PKT		| \
			      PKT_TX_TUNNEL_MASK)

/* TX offload bits in PKT_TX_OFFLOAD_MASK that this PMD does not support */
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
160 
/*
 * RX BD descriptor ring
 */
/* Software context for one RX buffer descriptor */
struct qede_rx_entry {
	struct rte_mbuf *mbuf;	/* mbuf backing this RX BD */
	uint32_t page_offset;	/* data offset; presumably into the mbuf buffer — confirm */
	/* allows expansion .. */
};
169 
/* TPA related structures */
/* State of one in-progress TPA aggregation (see rxq->tpa_info) */
struct qede_agg_info {
	struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */
	struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */
};
175 
/*
 * Structure associated with each RX queue.
 */
struct qede_rx_queue {
	/* Always keep qdev as first member */
	struct qede_dev *qdev;
	struct rte_mempool *mb_pool;	/* pool RX mbufs are allocated from */
	struct ecore_chain rx_bd_ring;	/* RX buffer descriptor ring */
	struct ecore_chain rx_comp_ring;	/* RX completion ring */
	uint16_t *hw_cons_ptr;		/* consumer index; presumably updated by HW — confirm */
	void OSAL_IOMEM *hw_rxq_prod_addr;	/* mapped address for RX producer update */
	struct qede_rx_entry *sw_rx_ring;	/* per-BD software context (mbufs) */
	struct ecore_sb_info *sb_info;	/* status block for this queue */
	uint16_t sw_rx_cons;		/* software consumer index */
	uint16_t sw_rx_prod;		/* software producer index */
	uint16_t nb_rx_desc;		/* ring size; see NUM_RX_BDS() */
	uint16_t queue_id;
	uint16_t port_id;
	uint16_t rx_buf_size;		/* per-buffer size in bytes */
	uint16_t rx_alloc_count;	/* mbufs pending (re)allocation — NOTE(review): confirm */
	uint16_t unused;		/* padding / reserved */
	uint64_t rcv_pkts;		/* received packet counter */
	uint64_t rx_segs;		/* received segment counter */
	uint64_t rx_hw_errors;		/* packets dropped for HW-reported errors */
	uint64_t rx_alloc_errors;	/* mbuf allocation failure counter */
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];	/* per-aggregation TPA state */
	void *handle;
};
204 
/*
 * TX BD descriptor ring
 */
/* Software context for one TX buffer descriptor */
struct qede_tx_entry {
	struct rte_mbuf *mbuf;	/* mbuf associated with this TX BD */
	uint8_t flags;
};
212 
/* Doorbell data viewed either as structured fields or as one raw 32-bit
 * word (presumably for a single register write — confirm against users).
 */
union db_prod {
	struct eth_db_data data;
	uint32_t raw;
};
217 
/* Structure associated with each TX queue */
struct qede_tx_queue {
	/* Always keep qdev as first member */
	struct qede_dev *qdev;
	struct ecore_chain tx_pbl;	/* TX buffer descriptor chain */
	struct qede_tx_entry *sw_tx_ring;	/* per-BD software context (mbufs) */
	uint16_t nb_tx_desc;		/* ring size; see NUM_TX_BDS() */
	uint16_t nb_tx_avail;		/* BDs currently available */
	uint16_t tx_free_thresh;	/* threshold for freeing completed mbufs */
	uint16_t queue_id;
	uint16_t *hw_cons_ptr;		/* consumer index; presumably updated by HW — confirm */
	uint16_t sw_tx_cons;		/* software consumer index; see TX_CONS() */
	uint16_t sw_tx_prod;		/* software producer index; see TX_PROD() */
	void OSAL_IOMEM *doorbell_addr;	/* mapped doorbell register address */
	volatile union db_prod tx_db;	/* doorbell data written to doorbell_addr */
	uint16_t port_id;
	uint64_t xmit_pkts;		/* transmitted packet counter */
	bool is_legacy;			/* legacy mode flag — NOTE(review): semantics defined in qede_rxtx.c */
	void *handle;
};
237 
/* One fast-path unit: the status block plus its RX and TX queues */
struct qede_fastpath {
	struct ecore_sb_info *sb_info;
	struct qede_rx_queue *rxq;
	struct qede_tx_queue *txq;
};
243 
/* This structure holds the information of fast path queues
 * belonging to individual engines in CMT mode.
 */
struct qede_fastpath_cmt {
	/* Always keep this a first element */
	struct qede_dev *qdev;
	/* fastpath info of engine 0 */
	struct qede_fastpath *fp0;
	/* fastpath info of engine 1 */
	struct qede_fastpath *fp1;
};
255 
/*
 * RX/TX function prototypes
 */

/* Queue setup/teardown, plugged into the rte_eth_dev ops table */
int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			uint16_t nb_desc, unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp);

int qede_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf);

void qede_rx_queue_release(void *rx_queue);

void qede_tx_queue_release(void *tx_queue);

/* Burst transmit handlers; the _cmt variant serves CMT mode
 * (see struct qede_fastpath_cmt)
 */
uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t qede_xmit_pkts_cmt(void *p_txq, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts);

/* tx_pkt_prepare callback: validates/preps offload requests before xmit */
uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
			     uint16_t nb_pkts);

/* Burst receive handlers; the _cmt variant serves CMT mode */
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t qede_recv_pkts_cmt(void *p_rxq, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t
qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts);
/* No-op burst handler installed while queues are stopped */
uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
			      struct rte_mbuf **pkts,
			      uint16_t nb_pkts);

int qede_start_queues(struct rte_eth_dev *eth_dev);

void qede_stop_queues(struct rte_eth_dev *eth_dev);
/* Compute the RX buffer size for a given mbuf size and max frame size */
int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
			  uint16_t max_frame_size);
int
qede_rx_descriptor_status(void *rxq, uint16_t offset);

/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev);
307 
308 #endif /* _QEDE_RXTX_H_ */
309