/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
9
10 #ifndef _SFC_DP_RX_H
11 #define _SFC_DP_RX_H
12
13 #include <rte_mempool.h>
14 #include <rte_ethdev_driver.h>
15
16 #include "sfc_dp.h"
17
18 #ifdef __cplusplus
19 extern "C" {
20 #endif
21
/**
 * Generic receive queue information used on data path.
 * It must be kept as small as possible since it is built into
 * the structure used on datapath.
 */
struct sfc_dp_rxq {
	/** Generic datapath queue identification (port/queue/PCI address) */
	struct sfc_dp_queue dpq;
};
30
/** Datapath receive queue descriptor number limitations */
struct sfc_dp_rx_hw_limits {
	/** Maximum number of Rx queue descriptors */
	unsigned int rxq_max_entries;
	/** Minimum number of Rx queue descriptors */
	unsigned int rxq_min_entries;
	/** Maximum number of associated event queue descriptors */
	unsigned int evq_max_entries;
	/** Minimum number of associated event queue descriptors */
	unsigned int evq_min_entries;
};
38
/**
 * Datapath receive queue creation information.
 *
 * The structure is used just to pass information from control path to
 * datapath. It could be just function arguments, but it would be hardly
 * readable.
 */
struct sfc_dp_rx_qcreate_info {
	/** Memory pool to allocate Rx buffers from */
	struct rte_mempool *refill_mb_pool;
	/** Maximum number of pushed Rx descriptors in the queue */
	unsigned int max_fill_level;
	/** Minimum number of unused Rx descriptors to do refill */
	unsigned int refill_threshold;
	/**
	 * Usable mbuf data space in accordance with alignment and
	 * padding requirements imposed by HW.
	 */
	unsigned int buf_size;

	/**
	 * Maximum number of Rx descriptors completed in one Rx event.
	 * Just for sanity checks if datapath would like to do.
	 */
	unsigned int batch_max;

	/** Pseudo-header size */
	unsigned int prefix_size;

	/** Receive queue flags initializer (SFC_RXQ_FLAG_*) */
	unsigned int flags;
#define SFC_RXQ_FLAG_RSS_HASH	0x1

	/** Rx queue size */
	unsigned int rxq_entries;
	/** DMA-mapped Rx descriptors ring */
	void *rxq_hw_ring;

	/** Event queue index in hardware */
	unsigned int evq_hw_index;
	/** Associated event queue size */
	unsigned int evq_entries;
	/** Hardware event ring */
	void *evq_hw_ring;

	/** The queue index in hardware (required to push right doorbell) */
	unsigned int hw_index;
	/**
	 * Virtual address of the memory-mapped BAR to push Rx refill
	 * doorbell
	 */
	volatile void *mem_bar;
	/** Function control window offset */
	efsys_dma_addr_t fcw_offset;
	/** VI window size shift */
	unsigned int vi_window_shift;
};
96
/**
 * Get Rx datapath specific device info.
 *
 * @param dev_info	Device info to be adjusted by the datapath
 */
typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
103
/**
 * Test if an Rx datapath supports specific mempool ops.
 *
 * @param pool		The name of the pool operations to test
 *
 * @return Check status.
 * @retval 0		Best mempool ops choice.
 * @retval 1		Mempool ops are supported.
 * @retval -ENOTSUP	Mempool ops not supported.
 */
typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool);
115
/**
 * Get size of receive and event queue rings by the number of Rx
 * descriptors and mempool configuration.
 *
 * @param nb_rx_desc		Number of Rx descriptors
 * @param limits		Rx and event queue descriptor number limits
 * @param mb_pool		mbuf pool with Rx buffers
 * @param rxq_entries		Location for number of Rx ring entries
 * @param evq_entries		Location for number of event ring entries
 * @param rxq_max_fill_level	Location for maximum Rx ring fill level
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
					 struct sfc_dp_rx_hw_limits *limits,
					 struct rte_mempool *mb_pool,
					 unsigned int *rxq_entries,
					 unsigned int *evq_entries,
					 unsigned int *rxq_max_fill_level);
134
/**
 * Allocate and initialize datapath receive queue.
 *
 * @param port_id	The port identifier
 * @param queue_id	The queue identifier
 * @param pci_addr	PCI function address
 * @param socket_id	Socket identifier to allocate memory
 * @param info		Receive queue information
 * @param dp_rxqp	Location for generic datapath receive queue pointer
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
				  const struct rte_pci_addr *pci_addr,
				  int socket_id,
				  const struct sfc_dp_rx_qcreate_info *info,
				  struct sfc_dp_rxq **dp_rxqp);
152
/**
 * Free resources allocated for datapath receive queue.
 */
typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);
157
/**
 * Receive queue start callback.
 *
 * It hands over the EvQ (at the given read pointer) to the datapath.
 */
typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq,
				 unsigned int evq_read_ptr,
				 const efx_rx_prefix_layout_t *pinfo);
166
/**
 * Receive queue stop function called before flush.
 *
 * Returns the current EvQ read pointer via @p evq_read_ptr.
 */
typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
				 unsigned int *evq_read_ptr);
172
/**
 * Receive event handler used during queue flush only.
 */
typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
177
/**
 * Packed stream receive event handler used during queue flush only.
 */
typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
				     unsigned int id);
183
/**
 * Receive queue purge function called after queue flush.
 *
 * Should be used to free unused receive buffers.
 */
typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
190
/** Get packet types recognized/classified for the given tunnel encaps mask */
typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(
				uint32_t tunnel_encaps);
194
/** Get number of pending Rx descriptors */
typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
197
/** Check Rx descriptor status at the given offset from the read pointer */
typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq,
				       uint16_t offset);
/** Enable Rx interrupts */
typedef int (sfc_dp_rx_intr_enable_t)(struct sfc_dp_rxq *dp_rxq);
203
/** Disable Rx interrupts */
typedef int (sfc_dp_rx_intr_disable_t)(struct sfc_dp_rxq *dp_rxq);
206
/** Receive datapath definition */
struct sfc_dp_rx {
	/** Generic datapath description (used for registration/lookup) */
	struct sfc_dp dp;

	/** Rx datapath feature flags, SFC_DP_RX_FEAT_* */
	unsigned int features;
#define SFC_DP_RX_FEAT_MULTI_PROCESS	0x1
#define SFC_DP_RX_FEAT_FLOW_FLAG	0x2
#define SFC_DP_RX_FEAT_FLOW_MARK	0x4
#define SFC_DP_RX_FEAT_INTR		0x8
	/**
	 * Rx offload capabilities supported by the datapath on device
	 * level only if HW/FW supports it.
	 */
	uint64_t dev_offload_capa;
	/**
	 * Rx offload capabilities supported by the datapath per-queue
	 * if HW/FW supports it.
	 */
	uint64_t queue_offload_capa;
	/* Callbacks; semantics are documented on the corresponding
	 * typedefs above. Optional callbacks may be NULL — TODO confirm
	 * which are mandatory against the callers.
	 */
	sfc_dp_rx_get_dev_info_t *get_dev_info;
	sfc_dp_rx_pool_ops_supported_t *pool_ops_supported;
	sfc_dp_rx_qsize_up_rings_t *qsize_up_rings;
	sfc_dp_rx_qcreate_t *qcreate;
	sfc_dp_rx_qdestroy_t *qdestroy;
	sfc_dp_rx_qstart_t *qstart;
	sfc_dp_rx_qstop_t *qstop;
	sfc_dp_rx_qrx_ev_t *qrx_ev;
	sfc_dp_rx_qrx_ps_ev_t *qrx_ps_ev;
	sfc_dp_rx_qpurge_t *qpurge;
	sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
	sfc_dp_rx_qdesc_npending_t *qdesc_npending;
	sfc_dp_rx_qdesc_status_t *qdesc_status;
	sfc_dp_rx_intr_enable_t *intr_enable;
	sfc_dp_rx_intr_disable_t *intr_disable;
	/** Packet receive burst entry point */
	eth_rx_burst_t pkt_burst;
};
243
244 static inline struct sfc_dp_rx *
sfc_dp_find_rx_by_name(struct sfc_dp_list * head,const char * name)245 sfc_dp_find_rx_by_name(struct sfc_dp_list *head, const char *name)
246 {
247 struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_RX, name);
248
249 return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
250 }
251
252 static inline struct sfc_dp_rx *
sfc_dp_find_rx_by_caps(struct sfc_dp_list * head,unsigned int avail_caps)253 sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
254 {
255 struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_RX, avail_caps);
256
257 return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
258 }
259
260 static inline uint64_t
sfc_dp_rx_offload_capa(const struct sfc_dp_rx * dp_rx)261 sfc_dp_rx_offload_capa(const struct sfc_dp_rx *dp_rx)
262 {
263 return dp_rx->dev_offload_capa | dp_rx->queue_offload_capa;
264 }
265
/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);

/* Available Rx datapath implementations (defined in the respective
 * datapath source files).
 */
extern struct sfc_dp_rx sfc_efx_rx;
extern struct sfc_dp_rx sfc_ef10_rx;
extern struct sfc_dp_rx sfc_ef10_essb_rx;
extern struct sfc_dp_rx sfc_ef100_rx;
273
274 #ifdef __cplusplus
275 }
276 #endif
277 #endif /* _SFC_DP_RX_H */
278