/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2018 Netronome Systems, Inc.
 * All rights reserved.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_net_pmd.h
 *
 * Netronome NFP_NET PMD
 */

#ifndef _NFP_COMMON_H_
#define _NFP_COMMON_H_

#define NFP_NET_PMD_VERSION "0.1"
#define PCI_VENDOR_ID_NETRONOME         0x19ee
#define PCI_DEVICE_ID_NFP4000_PF_NIC    0x4000
#define PCI_DEVICE_ID_NFP6000_PF_NIC    0x6000
#define PCI_DEVICE_ID_NFP6000_VF_NIC    0x6003

/* Forward declaration */
struct nfp_net_adapter;

#define NFP_TX_MAX_SEG     UINT8_MAX
#define NFP_TX_MAX_MTU_SEG 8

/* Bar allocation */
#define NFP_NET_CRTL_BAR        0
#define NFP_NET_TX_BAR          2
#define NFP_NET_RX_BAR          2
#define NFP_QCP_QUEUE_AREA_SZ   0x80000

/* Macros for accessing the Queue Controller Peripheral 'CSRs' */
#define NFP_QCP_QUEUE_OFF(_x)              ((_x) * 0x800)
#define NFP_QCP_QUEUE_ADD_RPTR             0x0000
#define NFP_QCP_QUEUE_ADD_WPTR             0x0004
#define NFP_QCP_QUEUE_STS_LO               0x0008
#define NFP_QCP_QUEUE_STS_LO_READPTR_mask  (0x3ffff)
#define NFP_QCP_QUEUE_STS_HI               0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask (0x3ffff)

/* The offset of the queue controller queues in the PCIe Target */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))

/* Maximum value which can be added to a queue with one transaction */
#define NFP_QCP_MAX_ADD 0x7f
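/*
 * Illustrative note, not part of the driver API: the per-queue CSRs above
 * compose with NFP_QCP_QUEUE_OFF(). For example, the write-pointer "add"
 * register of queue 3 inside a mapped queue area would sit at
 *
 *	NFP_QCP_QUEUE_OFF(3) + NFP_QCP_QUEUE_ADD_WPTR = 0x1800 + 0x4 = 0x1804
 *
 * and the nfp_qcp_ptr_add()/nfp_qcp_read() helpers further down expect a
 * pointer to the start of such a per-queue window.
 */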
/* Interrupt definitions */
#define NFP_NET_IRQ_LSC_IDX             0

/* Default values for RX/TX configuration */
#define DEFAULT_RX_FREE_THRESH  32
#define DEFAULT_RX_PTHRESH      8
#define DEFAULT_RX_HTHRESH      8
#define DEFAULT_RX_WTHRESH      0

#define DEFAULT_TX_RS_THRESH    32
#define DEFAULT_TX_FREE_THRESH  32
#define DEFAULT_TX_PTHRESH      32
#define DEFAULT_TX_HTHRESH      0
#define DEFAULT_TX_WTHRESH      0
#define DEFAULT_TX_RSBIT_THRESH 32

/* Alignment for DMA zones */
#define NFP_MEMZONE_ALIGN       128

/*
 * Used by the reconfig protocol: the maximum time to wait, in milliseconds,
 * before a reconfig is considered to have timed out.
 */
#define NFP_NET_POLL_TIMEOUT    5000

#define NFP_QCP_QUEUE_ADDR_SZ   (0x800)

#define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define NFP_NET_LINK_UP_CHECK_TIMEOUT   1000 /* ms */

/* Version number helper defines */
#define NFD_CFG_CLASS_VER_msk       0xff
#define NFD_CFG_CLASS_VER_shf       24
#define NFD_CFG_CLASS_VER(x)        (((x) & 0xff) << 24)
#define NFD_CFG_CLASS_VER_of(x)     (((x) >> 24) & 0xff)
#define NFD_CFG_CLASS_TYPE_msk      0xff
#define NFD_CFG_CLASS_TYPE_shf      16
#define NFD_CFG_CLASS_TYPE(x)       (((x) & 0xff) << 16)
#define NFD_CFG_CLASS_TYPE_of(x)    (((x) >> 16) & 0xff)
#define NFD_CFG_MAJOR_VERSION_msk   0xff
#define NFD_CFG_MAJOR_VERSION_shf   8
#define NFD_CFG_MAJOR_VERSION(x)    (((x) & 0xff) << 8)
#define NFD_CFG_MAJOR_VERSION_of(x) (((x) >> 8) & 0xff)
#define NFD_CFG_MINOR_VERSION_msk   0xff
#define NFD_CFG_MINOR_VERSION_shf   0
#define NFD_CFG_MINOR_VERSION(x)    (((x) & 0xff) << 0)
#define NFD_CFG_MINOR_VERSION_of(x) (((x) >> 0) & 0xff)
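/*
 * Illustrative sketch, not part of the driver API: the NFD_CFG_*_of()
 * helpers unpack the 32-bit firmware version word (for instance the 'ver'
 * field of struct nfp_net_hw below). A hypothetical major-version check
 * could look like:
 *
 *	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
 *		return -ENOTSUP;
 *
 * with NFD_CFG_MINOR_VERSION_of(hw->ver) yielding the minor number.
 */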
/* Number of supported physical ports */
#define NFP_MAX_PHYPORTS	12

#include <linux/types.h>
#include <rte_io.h>

/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};

struct nfp_pf_dev {
	/* Backpointer to associated pci device */
	struct rte_pci_device *pci_dev;

	/* Array of physical ports belonging to this PF */
	struct nfp_net_hw *ports[NFP_MAX_PHYPORTS];

	/* Current values for control */
	uint32_t ctrl;

	uint8_t *ctrl_bar;
	uint8_t *tx_bar;
	uint8_t *rx_bar;

	uint8_t *qcp_cfg;
	rte_spinlock_t reconfig_lock;

	uint16_t flbufsz;
	uint16_t device_id;
	uint16_t vendor_id;
	uint16_t subsystem_device_id;
	uint16_t subsystem_vendor_id;
#if defined(DSTQ_SELECTION)
#if DSTQ_SELECTION
	uint16_t device_function;
#endif
#endif

	struct nfp_cpp *cpp;
	struct nfp_cpp_area *ctrl_area;
	struct nfp_cpp_area *hwqueues_area;
	struct nfp_cpp_area *msix_area;

	uint8_t *hw_queues;
	uint8_t total_phyports;
	bool multiport;

	union eth_table_entry *eth_table;

	struct nfp_hwinfo *hwinfo;
	struct nfp_rtsym_table *sym_tbl;
	uint32_t nfp_cpp_service_id;
};

struct nfp_net_hw {
	/* Backpointer to the PF this port belongs to */
	struct nfp_pf_dev *pf_dev;

	/* Backpointer to the eth_dev of this port */
	struct rte_eth_dev *eth_dev;

	/* Info from the firmware */
	uint32_t ver;
	uint32_t cap;
	uint32_t max_mtu;
	uint32_t mtu;
	uint32_t rx_offset;

	/* Current values for control */
	uint32_t ctrl;

	uint8_t *ctrl_bar;
	uint8_t *tx_bar;
	uint8_t *rx_bar;

	int stride_rx;
	int stride_tx;

	uint8_t *qcp_cfg;
	rte_spinlock_t reconfig_lock;

	uint32_t max_tx_queues;
	uint32_t max_rx_queues;
	uint16_t flbufsz;
	uint16_t device_id;
	uint16_t vendor_id;
	uint16_t subsystem_device_id;
	uint16_t subsystem_vendor_id;
#if defined(DSTQ_SELECTION)
#if DSTQ_SELECTION
	uint16_t device_function;
#endif
#endif

	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];

	/* Records starting point for counters */
	struct rte_eth_stats eth_stats_base;

	struct nfp_cpp *cpp;
	struct nfp_cpp_area *ctrl_area;
	struct nfp_cpp_area *hwqueues_area;
	struct nfp_cpp_area *msix_area;

	uint8_t *hw_queues;
	/* Sequential physical port number */
	uint8_t idx;
	/* Internal port number as seen from NFP */
	uint8_t nfp_idx;
	bool is_phyport;

	union eth_table_entry *eth_table;

	uint32_t nfp_cpp_service_id;
};

struct nfp_net_adapter {
	struct nfp_net_hw hw;
};

/* Raw BAR accessors built on the rte_io read/write primitives */
static inline uint8_t nn_readb(volatile const void *addr)
{
	return rte_read8(addr);
}

static inline void nn_writeb(uint8_t val, volatile void *addr)
{
	rte_write8(val, addr);
}

static inline uint32_t nn_readl(volatile const void *addr)
{
	return rte_read32(addr);
}

static inline void nn_writel(uint32_t val, volatile void *addr)
{
	rte_write32(val, addr);
}

static inline void nn_writew(uint16_t val, volatile void *addr)
{
	rte_write16(val, addr);
}

/* 64-bit registers are accessed as two 32-bit transactions, high word first */
static inline uint64_t nn_readq(volatile void *addr)
{
	const volatile uint32_t *p = addr;
	uint32_t low, high;

	high = nn_readl((volatile const void *)(p + 1));
	low = nn_readl((volatile const void *)p);

	return low + ((uint64_t)high << 32);
}

static inline void nn_writeq(uint64_t val, volatile void *addr)
{
	nn_writel(val >> 32, (volatile char *)addr + 4);
	nn_writel(val, addr);
}

/*
 * Functions to read/write from/to the Config BAR.
 * Perform any endian conversion necessary.
 */
static inline uint8_t
nn_cfg_readb(struct nfp_net_hw *hw, int off)
{
	return nn_readb(hw->ctrl_bar + off);
}

static inline void
nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
{
	nn_writeb(val, hw->ctrl_bar + off);
}

static inline uint32_t
nn_cfg_readl(struct nfp_net_hw *hw, int off)
{
	return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
{
	nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
}

static inline uint64_t
nn_cfg_readq(struct nfp_net_hw *hw, int off)
{
	return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
{
	nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
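/*
 * Illustrative sketch, not part of the driver API: the nn_cfg_*() helpers
 * above are how the rest of the PMD touches the config BAR; callers work in
 * CPU byte order and the helpers handle the little-endian conversion.
 * Reading a capability word or a 64-bit counter could look like this, with
 * the offset names assumed to come from nfp_net_ctrl.h:
 *
 *	uint32_t cap       = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 *	uint64_t rx_octets = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
 */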
/*
 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
 * @q: Base address for queue structure
 * @ptr: Add to the Read or Write pointer
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than NFP_QCP_MAX_ADD, multiple writes are performed.
 */
static inline void
nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
{
	uint32_t off;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_ADD_RPTR;
	else
		off = NFP_QCP_QUEUE_ADD_WPTR;

	while (val > NFP_QCP_MAX_ADD) {
		nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
		val -= NFP_QCP_MAX_ADD;
	}

	nn_writel(rte_cpu_to_le_32(val), q + off);
}

/*
 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
 * @q: Base address for queue structure
 * @ptr: Read or Write pointer
 */
static inline uint32_t
nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
{
	uint32_t off;
	uint32_t val;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_STS_LO;
	else
		off = NFP_QCP_QUEUE_STS_HI;

	val = rte_le_to_cpu_32(nn_readl(q + off));

	if (ptr == NFP_QCP_READ_PTR)
		return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
	else
		return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
}
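/*
 * Illustrative sketch, not part of the driver API: a TX path built on these
 * helpers would publish newly filled descriptors and then poll the hardware
 * read pointer roughly as follows, assuming txq->qcp_q points at the queue's
 * QCP window as set up by the queue setup code:
 *
 *	nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, nb_pkts);
 *
 *	uint32_t hw_rptr = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
 *
 * The returned pointer values are bounded by the 18-bit READPTR/WRITEPTR
 * masks defined above.
 */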
/* Prototypes for common NFP functions */
int nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update);
int nfp_net_configure(struct rte_eth_dev *dev);
void nfp_net_enable_queues(struct rte_eth_dev *dev);
void nfp_net_disable_queues(struct rte_eth_dev *dev);
void nfp_net_params_setup(struct nfp_net_hw *hw);
void nfp_net_cfg_queue_setup(struct nfp_net_hw *hw);
void nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src);
void nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac);
int nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
int nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
			       struct rte_intr_handle *intr_handle);
uint32_t nfp_check_offloads(struct rte_eth_dev *dev);
int nfp_net_promisc_enable(struct rte_eth_dev *dev);
int nfp_net_promisc_disable(struct rte_eth_dev *dev);
int nfp_net_link_update(struct rte_eth_dev *dev,
			__rte_unused int wait_to_complete);
int nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int nfp_net_stats_reset(struct rte_eth_dev *dev);
int nfp_net_infos_get(struct rte_eth_dev *dev,
		      struct rte_eth_dev_info *dev_info);
const uint32_t *nfp_net_supported_ptypes_get(struct rte_eth_dev *dev);
int nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void nfp_net_dev_interrupt_handler(void *param);
void nfp_net_dev_interrupt_delayed_handler(void *param);
int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
int nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask);
int nfp_net_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
int nfp_net_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size);
int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf);
int nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
			      struct rte_eth_rss_conf *rss_conf);
int nfp_net_rss_config_default(struct rte_eth_dev *dev);

#define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\
	(&((struct nfp_net_adapter *)adapter)->hw)

#define NFP_NET_DEV_PRIVATE_TO_PF(dev_priv)\
	(((struct nfp_net_hw *)dev_priv)->pf_dev)

#endif /* _NFP_COMMON_H_ */
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */