/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_DP_RX_H
#define _SFC_DP_RX_H

#include <rte_mempool.h>
#include <ethdev_driver.h>

#include "sfc_dp.h"
#include "sfc_nic_dma_dp.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Generic receive queue information used on the data path.
 * It must be kept as small as possible since it is built into
 * the structure used on the datapath.
 */
struct sfc_dp_rxq {
	struct sfc_dp_queue	dpq;
};

/** Datapath receive queue descriptor number limitations */
struct sfc_dp_rx_hw_limits {
	unsigned int rxq_max_entries;
	unsigned int rxq_min_entries;
	unsigned int evq_max_entries;
	unsigned int evq_min_entries;
};

/**
 * Datapath receive queue creation information.
 *
 * The structure is used just to pass information from the control path
 * to the datapath. It could be just function arguments, but that would
 * be hardly readable.
 */
struct sfc_dp_rx_qcreate_info {
	/** Memory pool to allocate Rx buffers from */
	struct rte_mempool	*refill_mb_pool;
	/** Maximum number of pushed Rx descriptors in the queue */
	unsigned int		max_fill_level;
	/** Minimum number of unused Rx descriptors required to do a refill */
	unsigned int		refill_threshold;
	/**
	 * Usable mbuf data space in accordance with alignment and
	 * padding requirements imposed by HW.
	 */
	unsigned int		buf_size;

	/**
	 * Maximum number of Rx descriptors completed in one Rx event.
	 * Used only for sanity checks, if the datapath chooses to do them.
	 */
	unsigned int		batch_max;

	/** Pseudo-header size */
	unsigned int		prefix_size;

	/** Receive queue flags initializer */
	unsigned int		flags;
#define SFC_RXQ_FLAG_RSS_HASH	0x1

	/** Rx queue size */
	unsigned int		rxq_entries;
	/** DMA-mapped Rx descriptors ring */
	void			*rxq_hw_ring;

	/** Event queue index in hardware */
	unsigned int		evq_hw_index;
	/** Associated event queue size */
	unsigned int		evq_entries;
	/** Hardware event ring */
	void			*evq_hw_ring;

	/** The queue index in hardware (required to push the right doorbell) */
	unsigned int		hw_index;
	/**
	 * Virtual address of the memory-mapped BAR to push the Rx refill
	 * doorbell
	 */
	volatile void		*mem_bar;
	/** Function control window offset */
	efsys_dma_addr_t	fcw_offset;
	/** VI window size shift */
	unsigned int		vi_window_shift;

	/** Mask to extract user bits from the Rx prefix mark field */
	uint32_t		user_mark_mask;

	/** NIC's DMA mapping information */
	const struct sfc_nic_dma_info	*nic_dma_info;
};

/**
 * Get Rx datapath specific device info.
 *
 * @param dev_info		Device info to be adjusted
 */
typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);

/**
 * Test if an Rx datapath supports specific mempool ops.
 *
 * @param pool			The name of the pool operations to test
 *
 * @return Check status.
 * @retval	0		Best mempool ops choice
 * @retval	1		Mempool ops are supported
 * @retval	-ENOTSUP	Mempool ops not supported
 */
typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool);
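
/*
 * Illustrative sketch (not part of the driver): one way a datapath could
 * implement sfc_dp_rx_pool_ops_supported_t, following the return value
 * convention documented above. The function name and the particular
 * mempool ops names checked here are assumptions of this sketch, not
 * taken from the real datapaths. Kept under "#if 0" so it is never built.
 */
#if 0
#include <string.h>
#include <errno.h>

static int
sfc_example_pool_ops_supported(const char *pool)
{
	/* Preferred ops: report as the best choice */
	if (strcmp(pool, "ring_mp_mc") == 0)
		return 0;
	/* Known to work, but not the best choice */
	if (strcmp(pool, "stack") == 0)
		return 1;
	/* Anything else is not supported by this hypothetical datapath */
	return -ENOTSUP;
}
#endif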

/**
 * Get size of receive and event queue rings by the number of Rx
 * descriptors and the mempool configuration.
 *
 * @param nb_rx_desc		Number of Rx descriptors
 * @param limits		Datapath receive queue HW limits
 * @param mb_pool		mbuf pool with Rx buffers
 * @param rxq_entries		Location for number of Rx ring entries
 * @param evq_entries		Location for number of event ring entries
 * @param rxq_max_fill_level	Location for maximum Rx ring fill level
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
					 struct sfc_dp_rx_hw_limits *limits,
					 struct rte_mempool *mb_pool,
					 unsigned int *rxq_entries,
					 unsigned int *evq_entries,
					 unsigned int *rxq_max_fill_level);

/**
 * Allocate and initialize datapath receive queue.
 *
 * @param port_id	The port identifier
 * @param queue_id	The queue identifier
 * @param pci_addr	PCI function address
 * @param socket_id	Socket identifier to allocate memory
 * @param info		Receive queue information
 * @param dp_rxqp	Location for generic datapath receive queue pointer
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
				  const struct rte_pci_addr *pci_addr,
				  int socket_id,
				  const struct sfc_dp_rx_qcreate_info *info,
				  struct sfc_dp_rxq **dp_rxqp);

/**
 * Free resources allocated for datapath receive queue.
 */
typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);

/**
 * Receive queue start callback.
 *
 * It hands the EvQ over to the datapath.
 */
typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq,
				 unsigned int evq_read_ptr,
				 const efx_rx_prefix_layout_t *pinfo);

/**
 * Receive queue stop function called before flush.
 */
typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
				 unsigned int *evq_read_ptr);

/**
 * Receive event handler used during queue flush only.
 */
typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);

/**
 * Packed stream receive event handler used during queue flush only.
 */
typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
				     unsigned int id);

/**
 * Receive queue purge function called after queue flush.
 *
 * Should be used to free unused receive buffers.
 */
typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
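
/*
 * Illustrative sketch (not part of the driver): a minimal
 * sfc_dp_rx_qsize_up_rings_t implementation. The 1:1 RxQ/EvQ sizing and
 * the "one descriptor kept unused" fill level are assumptions of this
 * sketch, not a description of the real datapaths. Kept under "#if 0"
 * so it is never built.
 */
#if 0
static int
sfc_example_qsize_up_rings(uint16_t nb_rx_desc,
			   struct sfc_dp_rx_hw_limits *limits,
			   struct rte_mempool *mb_pool,
			   unsigned int *rxq_entries,
			   unsigned int *evq_entries,
			   unsigned int *rxq_max_fill_level)
{
	RTE_SET_USED(mb_pool);	/* this sketch ignores mempool geometry */

	if (nb_rx_desc > limits->rxq_max_entries)
		return EINVAL;	/* positive errno, as documented above */

	/* Round the Rx ring up to the HW minimum */
	*rxq_entries = RTE_MAX((unsigned int)nb_rx_desc,
			       limits->rxq_min_entries);
	/* Size the event ring to cover the Rx ring */
	*evq_entries = RTE_MAX(*rxq_entries, limits->evq_min_entries);
	/* Keep one descriptor unused to tell a full ring from an empty one */
	*rxq_max_fill_level = *rxq_entries - 1;

	return 0;
}
#endif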

/** Get packet types recognized/classified */
typedef const uint32_t *(sfc_dp_rx_supported_ptypes_get_t)(
		uint32_t tunnel_encaps);

/** Get number of pending Rx descriptors */
typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);

/** Check Rx descriptor status */
typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq,
				       uint16_t offset);

/** Enable Rx interrupts */
typedef int (sfc_dp_rx_intr_enable_t)(struct sfc_dp_rxq *dp_rxq);

/** Disable Rx interrupts */
typedef int (sfc_dp_rx_intr_disable_t)(struct sfc_dp_rxq *dp_rxq);

/** Get number of pushed Rx buffers */
typedef unsigned int (sfc_dp_rx_get_pushed_t)(struct sfc_dp_rxq *dp_rxq);

/** Receive datapath definition */
struct sfc_dp_rx {
	struct sfc_dp			dp;

	unsigned int			features;
#define SFC_DP_RX_FEAT_MULTI_PROCESS	0x1
#define SFC_DP_RX_FEAT_FLOW_FLAG	0x2
#define SFC_DP_RX_FEAT_FLOW_MARK	0x4
#define SFC_DP_RX_FEAT_INTR		0x8
#define SFC_DP_RX_FEAT_STATS		0x10
	/**
	 * Rx offload capabilities supported by the datapath at the device
	 * level, only if HW/FW supports them.
	 */
	uint64_t			dev_offload_capa;
	/**
	 * Rx offload capabilities supported by the datapath per queue,
	 * only if HW/FW supports them.
	 */
	uint64_t			queue_offload_capa;
	sfc_dp_rx_get_dev_info_t	*get_dev_info;
	sfc_dp_rx_pool_ops_supported_t	*pool_ops_supported;
	sfc_dp_rx_qsize_up_rings_t	*qsize_up_rings;
	sfc_dp_rx_qcreate_t		*qcreate;
	sfc_dp_rx_qdestroy_t		*qdestroy;
	sfc_dp_rx_qstart_t		*qstart;
	sfc_dp_rx_qstop_t		*qstop;
	sfc_dp_rx_qrx_ev_t		*qrx_ev;
	sfc_dp_rx_qrx_ps_ev_t		*qrx_ps_ev;
	sfc_dp_rx_qpurge_t		*qpurge;
	sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
	sfc_dp_rx_qdesc_npending_t	*qdesc_npending;
	sfc_dp_rx_qdesc_status_t	*qdesc_status;
	sfc_dp_rx_intr_enable_t		*intr_enable;
	sfc_dp_rx_intr_disable_t	*intr_disable;
	sfc_dp_rx_get_pushed_t		*get_pushed;
	eth_rx_burst_t			pkt_burst;
};

static inline struct sfc_dp_rx *
sfc_dp_find_rx_by_name(struct sfc_dp_list *head, const char *name)
{
	struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_RX, name);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
}

static inline struct sfc_dp_rx *
sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
{
	struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_RX, avail_caps);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
}

/** Get combined device-level and per-queue Rx offload capabilities */
static inline uint64_t
sfc_dp_rx_offload_capa(const struct sfc_dp_rx *dp_rx)
{
	return dp_rx->dev_offload_capa | dp_rx->queue_offload_capa;
}

/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);

extern struct sfc_dp_rx sfc_efx_rx;
extern struct sfc_dp_rx sfc_ef10_rx;
extern struct sfc_dp_rx sfc_ef10_essb_rx;
extern struct sfc_dp_rx sfc_ef100_rx;

#ifdef __cplusplus
}
#endif
#endif /* _SFC_DP_RX_H */
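
/*
 * Illustrative sketch (not part of the driver): using the lookup helper
 * and sfc_dp_rx_offload_capa() declared above to query the combined
 * offload capabilities of a registered Rx datapath. The datapath name
 * "ef100", the function name, and the list-head parameter are
 * assumptions of this sketch. Kept under "#if 0" so it is never built.
 */
#if 0
static uint64_t
sfc_example_rx_offload_capa(struct sfc_dp_list *head)
{
	struct sfc_dp_rx *dp_rx;

	/* Look up an Rx datapath registered under the given name */
	dp_rx = sfc_dp_find_rx_by_name(head, "ef100");
	if (dp_rx == NULL)
		return 0;

	/* Device-level and per-queue capabilities are reported together */
	return sfc_dp_rx_offload_capa(dp_rx);
}
#endif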