/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_kvargs.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "base/ixgbe_osdep.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI 0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO 0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

/* Default value of Max Rx Queue */
#define IXGBE_MAX_RX_QUEUE_NUM 128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT 0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14
#define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */

/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH 32
#define IXGBE_DEFAULT_RX_PTHRESH 8
#define IXGBE_DEFAULT_RX_HTHRESH 8
#define IXGBE_DEFAULT_RX_WTHRESH 0

#define IXGBE_DEFAULT_TX_FREE_THRESH 32
#define IXGBE_DEFAULT_TX_PTHRESH 32
#define IXGBE_DEFAULT_TX_HTHRESH 0
#define IXGBE_DEFAULT_TX_WTHRESH 0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH CHAR_BIT
#define IXGBE_8_BIT_MASK UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
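/*
 * NOTE (descriptive assumption, not from the original sources): judging by
 * their names and the timesync ops declared further down, the IXGBE_INCVAL_*
 * and IXGBE_INCVAL_SHIFT_* constants below appear to be the per-tick SYSTIME
 * increment (TIMINCA) settings used for IEEE 1588 timestamping, one pair per
 * link speed (10G/1G/100M), plus 82599-specific shift/period values.
 */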
/* Additional timesync values. */
#define NSEC_PER_SEC 1000000000L
#define IXGBE_INCVAL_10GB 0x66666666
#define IXGBE_INCVAL_1GB 0x40000000
#define IXGBE_INCVAL_100 0x50000000
#define IXGBE_INCVAL_SHIFT_10GB 28
#define IXGBE_INCVAL_SHIFT_1GB 24
#define IXGBE_INCVAL_SHIFT_100 21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
#define IXGBE_ETAG_ETYPE 0x00005084
#define IXGBE_ETAG_ETYPE_MASK 0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID 0x80000000
#define IXGBE_RAH_ADTYPE 0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff
#define IXGBE_VMVIR_TAGA_MASK 0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG 0x00000004
#define IXGBE_VTEICR_MASK 0x07

#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000

#define IXGBEVF_DEVARG_PFLINK_FULLCHK "pflink_fullchk"

static const char * const ixgbevf_valid_arguments[] = {
	IXGBEVF_DEVARG_PFLINK_FULLCHK,
	NULL
};

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static int ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int ixgbe_dev_close(struct rte_eth_dev *dev);
static int ixgbe_dev_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	const uint64_t *ids,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
		uint16_t queue_id,
		uint8_t stat_idx,
		uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		size_t fw_size);
static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
		int on);
static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
		int mask);
static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void *ixgbe_dev_setup_link_thread_handler(void *param);
static int ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev,
		uint32_t timeout_ms);

static int ixgbe_add_rar(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
		struct rte_pci_driver *drv);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int ixgbevf_dev_stop(struct rte_eth_dev *dev);
static int ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
		uint16_t queue, int on);
static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
		uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
		rte_ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
		uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
		struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
		struct ixgbe_5tuple_filter *filter);
static int ixgbe_dev_flow_ops_get(struct rte_eth_dev *dev,
		const struct rte_flow_ops **ops);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
		struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);

static int ixgbe_get_module_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_module_info *modinfo);
static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *info);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp,
		uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
		struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
		const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw);

/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
	uint32_t latest = IXGBE_READ_REG(hw, reg);              \
	cur += (latest - last) & UINT_MAX;                      \
	last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
	u64 latest = ((new_msb << 32) | new_lsb);                \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest;                                           \
}

#define IXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
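/*
 * Illustrative usage of the hwstrip bitmap helpers above (a sketch, not a
 * verbatim excerpt from this driver): per-queue VLAN strip state is recorded
 * roughly as
 *
 *	if (on)
 *		IXGBE_SET_HWSTRIP(hwstrip, queue);
 *	else
 *		IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
 *
 * presumably so the strip configuration can be re-applied to the hardware
 * later (see ixgbe_vlan_hw_strip_bitmap_set() declared earlier).
 */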
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
	{ .vendor_id = 0, /* sentinel */ },
};
/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_TXD_ALIGN,
	.nb_seg_max = IXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure = ixgbe_dev_configure,
	.dev_start = ixgbe_dev_start,
	.dev_stop = ixgbe_dev_stop,
	.dev_set_link_up = ixgbe_dev_set_link_up,
	.dev_set_link_down = ixgbe_dev_set_link_down,
	.dev_close = ixgbe_dev_close,
	.dev_reset = ixgbe_dev_reset,
	.promiscuous_enable = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update = ixgbe_dev_link_update,
	.stats_get = ixgbe_dev_stats_get,
	.xstats_get = ixgbe_dev_xstats_get,
	.xstats_get_by_id = ixgbe_dev_xstats_get_by_id,
	.stats_reset = ixgbe_dev_stats_reset,
	.xstats_reset = ixgbe_dev_xstats_reset,
	.xstats_get_names = ixgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
	.fw_version_get = ixgbe_fw_version_get,
	.dev_infos_get = ixgbe_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set = ixgbe_dev_mtu_set,
	.vlan_filter_set = ixgbe_vlan_filter_set,
	.vlan_tpid_set = ixgbe_vlan_tpid_set,
	.vlan_offload_set = ixgbe_vlan_offload_set,
	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
	.rx_queue_start = ixgbe_dev_rx_queue_start,
	.rx_queue_stop = ixgbe_dev_rx_queue_stop,
	.tx_queue_start = ixgbe_dev_tx_queue_start,
	.tx_queue_stop = ixgbe_dev_tx_queue_stop,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
	.rx_queue_release = ixgbe_dev_rx_queue_release,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.tx_queue_release = ixgbe_dev_tx_queue_release,
	.dev_led_on = ixgbe_dev_led_on,
	.dev_led_off = ixgbe_dev_led_off,
	.flow_ctrl_get = ixgbe_flow_ctrl_get,
	.flow_ctrl_set = ixgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
	.mac_addr_add = ixgbe_add_rar,
	.mac_addr_remove = ixgbe_remove_rar,
	.mac_addr_set = ixgbe_set_default_mac_addr,
	.uc_hash_table_set = ixgbe_uc_hash_table_set,
	.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
	.reta_update = ixgbe_dev_rss_reta_update,
	.reta_query = ixgbe_dev_rss_reta_query,
	.rss_hash_update = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
	.flow_ops_get = ixgbe_dev_flow_ops_get,
	.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get = ixgbe_rxq_info_get,
	.txq_info_get = ixgbe_txq_info_get,
	.timesync_enable = ixgbe_timesync_enable,
	.timesync_disable = ixgbe_timesync_disable,
	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
	.get_reg = ixgbe_get_regs,
	.get_eeprom_length = ixgbe_get_eeprom_length,
	.get_eeprom = ixgbe_get_eeprom,
	.set_eeprom = ixgbe_set_eeprom,
	.get_module_info = ixgbe_get_module_info,
	.get_module_eeprom = ixgbe_get_module_eeprom,
	.get_dcb_info = ixgbe_dev_get_dcb_info,
	.timesync_adjust_time = ixgbe_timesync_adjust_time,
	.timesync_read_time = ixgbe_timesync_read_time,
	.timesync_write_time = ixgbe_timesync_write_time,
	.udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del,
	.tm_ops_get = ixgbe_tm_ops_get,
	.tx_done_cleanup = ixgbe_dev_tx_done_cleanup,
	.get_monitor_addr = ixgbe_get_monitor_addr,
};

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure = ixgbevf_dev_configure,
	.dev_start = ixgbevf_dev_start,
	.dev_stop = ixgbevf_dev_stop,
	.link_update = ixgbevf_dev_link_update,
	.stats_get = ixgbevf_dev_stats_get,
	.xstats_get = ixgbevf_dev_xstats_get,
	.stats_reset = ixgbevf_dev_stats_reset,
	.xstats_reset = ixgbevf_dev_stats_reset,
	.xstats_get_names = ixgbevf_dev_xstats_get_names,
	.dev_close = ixgbevf_dev_close,
	.dev_reset = ixgbevf_dev_reset,
	.promiscuous_enable = ixgbevf_dev_promiscuous_enable,
	.promiscuous_disable = ixgbevf_dev_promiscuous_disable,
	.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
	.dev_infos_get = ixgbevf_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set = ixgbevf_dev_set_mtu,
	.vlan_filter_set = ixgbevf_vlan_filter_set,
	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
	.vlan_offload_set = ixgbevf_vlan_offload_set,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.rx_queue_release = ixgbe_dev_rx_queue_release,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.tx_queue_release = ixgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
	.mac_addr_add = ixgbevf_add_mac_addr,
	.mac_addr_remove = ixgbevf_remove_mac_addr,
	.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get = ixgbe_rxq_info_get,
	.txq_info_get = ixgbe_txq_info_get,
	.mac_addr_set = ixgbevf_set_default_mac_addr,
	.get_reg = ixgbevf_get_regs,
	.reta_update = ixgbe_dev_rss_reta_update,
	.reta_query = ixgbe_dev_rss_reta_query,
	.rss_hash_update = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
	.tx_done_cleanup = ixgbe_dev_tx_done_cleanup,
	.get_monitor_addr = ixgbe_get_monitor_addr,
};
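/*
 * The tables below pair an xstats display name with an offsetof() into the
 * corresponding statistics structure. A minimal sketch of how such a table
 * is typically consumed, assuming a populated struct ixgbe_hw_stats *hw_stats:
 *
 *	uint64_t val = *(uint64_t *)(((char *)hw_stats) +
 *			rte_ixgbe_stats_strings[i].offset);
 *
 * The actual xstats_get()/xstats_get_names() handlers are implemented later
 * in the driver.
 */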
/* store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_add)},
	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_remove)},
	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fadd)},
	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fremove)},
	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
		fdirmatch)},
	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
		fdirmiss)},

	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
		fclast)},
	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
		fcoe_noddp)},
	{"rx_fcoe_no_direct_data_placement_ext_buff",
		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxontxc)},
	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxonrxc)},
	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxofftxc)},
	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxoffrxc)},
	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
			   sizeof(rte_ixgbe_stats_strings[0]))

/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		out_pkts_untagged)},
	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_pkts_encrypted)},
	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
		out_pkts_protected)},
	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_octets_encrypted)},
	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
		out_octets_protected)},
	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		in_pkts_untagged)},
	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
		in_pkts_badtag)},
	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_nosci)},
	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unknownsci)},
	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
		in_octets_decrypted)},
	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
		in_octets_validated)},
	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unchecked)},
	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
		in_pkts_delayed)},
	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
		in_pkts_late)},
	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
		in_pkts_ok)},
	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_invalid)},
	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notvalid)},
	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unusedsa)},
	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
			   sizeof(rte_ixgbe_macsec_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
			   sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8
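/*
 * NOTE (descriptive assumption): the rte_ixgbe_rxq_strings table above and
 * the rte_ixgbe_txq_strings table below hold per-priority counters; the
 * IXGBE_NB_RXQ_PRIO_VALUES/IXGBE_NB_TXQ_PRIO_VALUES value of 8 appears to
 * match the eight priority/packet-buffer register instances (e.g.
 * PXONRXC[0..7]), so each entry is expanded once per priority when the
 * xstats list is built.
 */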
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
		pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
			   sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
		sizeof(rte_ixgbevf_stats_strings[0]))

/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		/* x550em devices may be SFP, check media type */
		switch (hw->mac.type) {
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			switch (ixgbe_get_media_type(hw)) {
			case ixgbe_media_type_fiber:
			case ixgbe_media_type_fiber_qsfp:
				return 1;
			default:
				break;
			}
		default:
			break;
		}
		return 0;
	}
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = ixgbe_reset_hw(hw);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
		status = IXGBE_SUCCESS;
	return status;
}

static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}
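/*
 * NOTE (descriptive assumption): ixgbe_enable_intr()/ixgbe_disable_intr()
 * above program the EIMS/EIMC interrupt mask registers; on MACs newer than
 * the 82598 the extended EIMC_EX registers are cleared as well, since those
 * parts expose more interrupt cause bits than fit in the base register.
 */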
/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}


static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if ((hw->mac.type != ixgbe_mac_82599EB) &&
		(hw->mac.type != ixgbe_mac_X540) &&
		(hw->mac.type != ixgbe_mac_X550) &&
		(hw->mac.type != ixgbe_mac_X550EM_x) &&
		(hw->mac.type != ixgbe_mac_X550EM_a))
		return -ENOSYS;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsmr[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsmr[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

	/* Now write the mapping in the appropriate register */
	if (is_rx) {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
	} else {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
	}
	return 0;
}

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i;

	/* write whatever was in stat mapping table to the NIC */
	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
		/* rx */
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

		/* tx */
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
	}
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
	uint8_t i;
	struct ixgbe_dcb_tc_config *tc;
	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
	for (i = 0; i < dcb_max_tc; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
			(uint8_t)(100/dcb_max_tc + (i & 1));
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
			(uint8_t)(100/dcb_max_tc + (i & 1));
		tc->pfc = ixgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
	}
	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities in 82599 */
	dcb_config->support.capabilities = 0xFF;

	/* We only support 4 TCs for X540, X550 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x ||
	    hw->mac.type == ixgbe_mac_X550EM_a) {
		dcb_config->num_tcs.pg_tcs = 4;
		dcb_config->num_tcs.pfc_tcs = 4;
	}
}
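/*
 * Note on the DCB defaults set in ixgbe_dcb_init() above: bandwidth is split
 * evenly across the traffic classes; with 8 TCs, 100/8 truncates to 12, and
 * the "+ (i & 1)" term gives the remaining 4 percent to every other TC so
 * the per-TC shares still sum to 100.
 */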
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * Phy lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock. Release of common lock
	 * is done automatically by swfw_sync function.
	 */
	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
	}
	ixgbe_release_swfw_semaphore(hw, mask);

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
	}
	ixgbe_release_swfw_semaphore(hw, mask);
}

/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct ixgbe_adapter *ad = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_bw_conf *bw_conf =
		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
	uint32_t ctrl_ext;
	uint16_t csum;
	int diag, i, ret;

	PMD_INIT_FUNC_TRACE();

	ixgbe_dev_macsec_setting_reset(eth_dev);

	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_queue_count = ixgbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not initialized by primary process
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
			ixgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_atomic32_clear(&ad->link_thread_running);
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Initialize the shared code (base driver) */
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_shared_code(hw);
#else
	diag = ixgbe_init_shared_code(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
		PMD_INIT_LOG(ERR, "\nERROR: "
			"Firmware recovery mode detected. Limiting functionality.\n"
			"Refer to the Intel(R) Ethernet Adapters and Devices "
			"User Guide for details on firmware recovery mode.");
		return -EIO;
	}

	/* pick up the PCI bus settings for reporting later */
	ixgbe_get_bus_info(hw);

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process*/
	if (ixgbe_ipsec_ctx_create(eth_dev))
		return -ENOMEM;
#endif

	/* Initialize DCB configuration*/
	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
	ixgbe_dcb_init(hw, dcb_config);
	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_none;
	hw->fc.current_mode = ixgbe_fc_none;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		hw->fc.low_water[i] = IXGBE_FC_LO;
		hw->fc.high_water[i] = IXGBE_FC_HI;
	}
	hw->fc.send_xon = 1;

	/* Make sure we have a good EEPROM before we read from it */
	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
		return -EIO;
	}

#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_hw(hw);
#else
	diag = ixgbe_init_hw(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	/*
	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
	 * also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the BIOS. A delay of 100ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
		rte_delay_ms(200);
		diag = ixgbe_init_hw(hw);
	}

	if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
		diag = IXGBE_SUCCESS;
	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your Intel or hardware representative "
			     "who provided you with this hardware.");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	if (diag) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
		return -EIO;
	}

	/* Reset the hw statistics */
	ixgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ixgbe_disable_intr(hw);

	/* reset mappings for queue statistics hw counters*/
	ixgbe_reset_qstat_mappings(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc(
		"ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap*/
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ixgbe_pf_host_init(eth_dev);
	if (ret)
		goto err_pf_host_init;

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* let hardware know driver is loaded */
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int) hw->mac.type, (int) hw->phy.type,
			     (int) hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int) hw->mac.type, (int) hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ixgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ixgbe_enable_intr(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct ixgbe_filter_info));

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* initialize flow director filter list & hash */
	ret = ixgbe_fdir_filter_init(eth_dev);
	if (ret)
		goto err_fdir_filter_init;

	/* initialize l2 tunnel filter list & hash */
	ret = ixgbe_l2_tn_filter_init(eth_dev);
	if (ret)
		goto err_l2_tn_filter_init;

	/* initialize flow filter lists */
	ixgbe_filterlist_init();

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));

	/* initialize Traffic Manager configuration */
	ixgbe_tm_conf_init(eth_dev);

	return 0;

err_l2_tn_filter_init:
	ixgbe_fdir_filter_uninit(eth_dev);
err_fdir_filter_init:
	ixgbe_disable_intr(hw);
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
		ixgbe_dev_interrupt_handler, eth_dev);
	ixgbe_pf_host_uninit(eth_dev);
err_pf_host_init:
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	rte_free(eth_dev->data->hash_mac_addrs);
	eth_dev->data->hash_mac_addrs = NULL;
	return ret;
}

static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ixgbe_dev_close(eth_dev);

	return 0;
}

static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple,
			     entries);
		rte_free(p_5tuple);
	}
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);

	return 0;
}

static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	struct ixgbe_fdir_filter *fdir_filter;

	rte_free(fdir_info->hash_map);
	rte_hash_free(fdir_info->hash_handle);

	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	return 0;
}

static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;

	rte_free(l2_tn_info->hash_map);
	rte_hash_free(l2_tn_info->hash_handle);

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
			     l2_tn_filter,
			     entries);
		rte_free(l2_tn_filter);
	}

	return 0;
}
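/*
 * The flow director and L2 tunnel filter init routines below keep each
 * filter both on a TAILQ (for iteration and restore) and in an rte_hash
 * keyed by the filter key, with the hash_map[] array holding the filter
 * pointer for each hash slot. The matching *_uninit() routines above free
 * both views.
 */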
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(union ixgbe_atr_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", eth_dev->device->name);
	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ixgbe",
					  sizeof(struct ixgbe_fdir_filter *) *
					  IXGBE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		rte_hash_free(fdir_info->hash_handle);
		return -ENOMEM;
	}
	fdir_info->mask_added = FALSE;

	return 0;
}

static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters l2_tn_hash_params = {
		.name = l2_tn_hash_name,
		.entries = IXGBE_MAX_L2_TN_FILTER_NUM,
		.key_len = sizeof(struct ixgbe_l2_tn_key),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&l2_tn_info->l2_tn_list);
	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
		 "l2_tn_%s", eth_dev->device->name);
	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
	if (!l2_tn_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
		return -EINVAL;
	}
	l2_tn_info->hash_map = rte_zmalloc("ixgbe",
					   sizeof(struct ixgbe_l2_tn_filter *) *
					   IXGBE_MAX_L2_TN_FILTER_NUM,
					   0);
	if (!l2_tn_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for L2 TN hash map!");
		rte_hash_free(l2_tn_info->hash_handle);
		return -ENOMEM;
	}
	l2_tn_info->e_tag_en = FALSE;
	l2_tn_info->e_tag_fwd_en = FALSE;
	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;

	return 0;
}

/*
 * Negotiate mailbox API version with the PF.
 * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
 * Then we try to negotiate starting with the most recent one.
 * If all negotiation attempts fail, then we will proceed with
 * the default one (ixgbe_mbox_api_10).
 */
static void
ixgbevf_negotiate_api(struct ixgbe_hw *hw)
{
	int32_t i;

	/* start with highest supported, proceed down */
	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
		ixgbe_mbox_api_13,
		ixgbe_mbox_api_12,
		ixgbe_mbox_api_11,
		ixgbe_mbox_api_10,
	};

	for (i = 0;
			i != RTE_DIM(sup_ver) &&
			ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
			i++)
		;
}

static void
generate_random_mac_addr(struct rte_ether_addr *mac_addr)
{
	uint64_t random;

	/* Set Organizationally Unique Identifier (OUI) prefix. */
	mac_addr->addr_bytes[0] = 0x00;
	mac_addr->addr_bytes[1] = 0x09;
	mac_addr->addr_bytes[2] = 0xC0;
	/* Force indication of locally assigned MAC address. */
	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
	/* Generate the last 3 bytes of the MAC address with a random number. */
	random = rte_rand();
	memcpy(&mac_addr->addr_bytes[3], &random, 3);
}
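/*
 * Devargs handling for the VF (descriptive assumption): devarg_handle_int()
 * below parses an integer option value and is used for the
 * IXGBEVF_DEVARG_PFLINK_FULLCHK option, which, when set to 1, presumably
 * makes link_update perform the full PF link check rather than the
 * lightweight default.
 */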
*/ 1499 random = rte_rand(); 1500 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1501 } 1502 1503 static int 1504 devarg_handle_int(__rte_unused const char *key, const char *value, 1505 void *extra_args) 1506 { 1507 uint16_t *n = extra_args; 1508 1509 if (value == NULL || extra_args == NULL) 1510 return -EINVAL; 1511 1512 *n = (uint16_t)strtoul(value, NULL, 0); 1513 if (*n == USHRT_MAX && errno == ERANGE) 1514 return -1; 1515 1516 return 0; 1517 } 1518 1519 static void 1520 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, 1521 struct rte_devargs *devargs) 1522 { 1523 struct rte_kvargs *kvlist; 1524 uint16_t pflink_fullchk; 1525 1526 if (devargs == NULL) 1527 return; 1528 1529 kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); 1530 if (kvlist == NULL) 1531 return; 1532 1533 if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && 1534 rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, 1535 devarg_handle_int, &pflink_fullchk) == 0 && 1536 pflink_fullchk == 1) 1537 adapter->pflink_fullchk = 1; 1538 1539 rte_kvargs_free(kvlist); 1540 } 1541 1542 /* 1543 * Virtual Function device init 1544 */ 1545 static int 1546 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1547 { 1548 int diag; 1549 uint32_t tc, tcs; 1550 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1551 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1552 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1553 struct ixgbe_hw *hw = 1554 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1555 struct ixgbe_vfta *shadow_vfta = 1556 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1557 struct ixgbe_hwstrip *hwstrip = 1558 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1559 struct rte_ether_addr *perm_addr = 1560 (struct rte_ether_addr *)hw->mac.perm_addr; 1561 1562 PMD_INIT_FUNC_TRACE(); 1563 1564 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1565 eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status; 1566 eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status; 1567 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1568 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1569 1570 /* for secondary processes, we don't initialise any further as primary 1571 * has already done this work. Only check we don't need a different 1572 * RX function 1573 */ 1574 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1575 struct ixgbe_tx_queue *txq; 1576 /* TX queue function in primary, set by last queue initialized 1577 * Tx queue may not initialized by primary process 1578 */ 1579 if (eth_dev->data->tx_queues) { 1580 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1581 ixgbe_set_tx_function(eth_dev, txq); 1582 } else { 1583 /* Use default TX function if we get here */ 1584 PMD_INIT_LOG(NOTICE, 1585 "No TX queues configured yet. 
Using default TX function."); 1586 } 1587 1588 ixgbe_set_rx_function(eth_dev); 1589 1590 return 0; 1591 } 1592 1593 rte_atomic32_clear(&ad->link_thread_running); 1594 ixgbevf_parse_devargs(eth_dev->data->dev_private, 1595 pci_dev->device.devargs); 1596 1597 rte_eth_copy_pci_info(eth_dev, pci_dev); 1598 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1599 1600 hw->device_id = pci_dev->id.device_id; 1601 hw->vendor_id = pci_dev->id.vendor_id; 1602 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1603 1604 /* initialize the vfta */ 1605 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1606 1607 /* initialize the hw strip bitmap*/ 1608 memset(hwstrip, 0, sizeof(*hwstrip)); 1609 1610 /* Initialize the shared code (base driver) */ 1611 diag = ixgbe_init_shared_code(hw); 1612 if (diag != IXGBE_SUCCESS) { 1613 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1614 return -EIO; 1615 } 1616 1617 /* init_mailbox_params */ 1618 hw->mbx.ops.init_params(hw); 1619 1620 /* Reset the hw statistics */ 1621 ixgbevf_dev_stats_reset(eth_dev); 1622 1623 /* Disable the interrupts for VF */ 1624 ixgbevf_intr_disable(eth_dev); 1625 1626 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1627 diag = hw->mac.ops.reset_hw(hw); 1628 1629 /* 1630 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1631 * the underlying PF driver has not assigned a MAC address to the VF. 1632 * In this case, assign a random MAC address. 1633 */ 1634 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1635 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1636 /* 1637 * This error code will be propagated to the app by 1638 * rte_eth_dev_reset, so use a public error code rather than 1639 * the internal-only IXGBE_ERR_RESET_FAILED 1640 */ 1641 return -EAGAIN; 1642 } 1643 1644 /* negotiate mailbox API version to use with the PF. */ 1645 ixgbevf_negotiate_api(hw); 1646 1647 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1648 ixgbevf_get_queues(hw, &tcs, &tc); 1649 1650 /* Allocate memory for storing MAC addresses */ 1651 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * 1652 hw->mac.num_rar_entries, 0); 1653 if (eth_dev->data->mac_addrs == NULL) { 1654 PMD_INIT_LOG(ERR, 1655 "Failed to allocate %u bytes needed to store " 1656 "MAC addresses", 1657 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1658 return -ENOMEM; 1659 } 1660 1661 /* Generate a random MAC address, if none was assigned by PF. 
*/ 1662 if (rte_is_zero_ether_addr(perm_addr)) { 1663 generate_random_mac_addr(perm_addr); 1664 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); 1665 if (diag) { 1666 rte_free(eth_dev->data->mac_addrs); 1667 eth_dev->data->mac_addrs = NULL; 1668 return diag; 1669 } 1670 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 1671 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 1672 RTE_ETHER_ADDR_PRT_FMT, 1673 RTE_ETHER_ADDR_BYTES(perm_addr)); 1674 } 1675 1676 /* Copy the permanent MAC address */ 1677 rte_ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]); 1678 1679 /* reset the hardware with the new settings */ 1680 diag = hw->mac.ops.start_hw(hw); 1681 switch (diag) { 1682 case 0: 1683 break; 1684 1685 default: 1686 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1687 rte_free(eth_dev->data->mac_addrs); 1688 eth_dev->data->mac_addrs = NULL; 1689 return -EIO; 1690 } 1691 1692 rte_intr_callback_register(intr_handle, 1693 ixgbevf_dev_interrupt_handler, eth_dev); 1694 rte_intr_enable(intr_handle); 1695 ixgbevf_intr_enable(eth_dev); 1696 1697 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", 1698 eth_dev->data->port_id, pci_dev->id.vendor_id, 1699 pci_dev->id.device_id, "ixgbe_mac_82599_vf"); 1700 1701 return 0; 1702 } 1703 1704 /* Virtual Function device uninit */ 1705 1706 static int 1707 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) 1708 { 1709 PMD_INIT_FUNC_TRACE(); 1710 1711 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1712 return 0; 1713 1714 ixgbevf_dev_close(eth_dev); 1715 1716 return 0; 1717 } 1718 1719 static int 1720 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1721 struct rte_pci_device *pci_dev) 1722 { 1723 char name[RTE_ETH_NAME_MAX_LEN]; 1724 struct rte_eth_dev *pf_ethdev; 1725 struct rte_eth_devargs eth_da; 1726 int i, retval; 1727 1728 if (pci_dev->device.devargs) { 1729 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, 1730 ð_da); 1731 if (retval) 1732 return retval; 1733 } else 1734 memset(ð_da, 0, sizeof(eth_da)); 1735 1736 if (eth_da.nb_representor_ports > 0 && 1737 eth_da.type != RTE_ETH_REPRESENTOR_VF) { 1738 PMD_DRV_LOG(ERR, "unsupported representor type: %s\n", 1739 pci_dev->device.devargs->args); 1740 return -ENOTSUP; 1741 } 1742 1743 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 1744 sizeof(struct ixgbe_adapter), 1745 eth_dev_pci_specific_init, pci_dev, 1746 eth_ixgbe_dev_init, NULL); 1747 1748 if (retval || eth_da.nb_representor_ports < 1) 1749 return retval; 1750 1751 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1752 if (pf_ethdev == NULL) 1753 return -ENODEV; 1754 1755 /* probe VF representor ports */ 1756 for (i = 0; i < eth_da.nb_representor_ports; i++) { 1757 struct ixgbe_vf_info *vfinfo; 1758 struct ixgbe_vf_representor representor; 1759 1760 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( 1761 pf_ethdev->data->dev_private); 1762 if (vfinfo == NULL) { 1763 PMD_DRV_LOG(ERR, 1764 "no virtual functions supported by PF"); 1765 break; 1766 } 1767 1768 representor.vf_id = eth_da.representor_ports[i]; 1769 representor.switch_domain_id = vfinfo->switch_domain_id; 1770 representor.pf_ethdev = pf_ethdev; 1771 1772 /* representor port net_bdf_port */ 1773 snprintf(name, sizeof(name), "net_%s_representor_%d", 1774 pci_dev->device.name, 1775 eth_da.representor_ports[i]); 1776 1777 retval = rte_eth_dev_create(&pci_dev->device, name, 1778 sizeof(struct ixgbe_vf_representor), NULL, NULL, 1779 ixgbe_vf_representor_init, &representor); 1780 
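/*
 * Illustrative example (hypothetical PCI address): representor ports are
 * requested through the generic ethdev devargs parsed above, e.g.
 *
 *     dpdk-testpmd -a 0000:01:00.0,representor=[0-3] -- -i
 *
 * which, in addition to the PF port, creates one
 * "net_0000:01:00.0_representor_<vf>" port per listed VF using the name
 * format built above.
 */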
1781 if (retval) 1782 PMD_DRV_LOG(ERR, "failed to create ixgbe vf " 1783 "representor %s.", name); 1784 } 1785 1786 return 0; 1787 } 1788 1789 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) 1790 { 1791 struct rte_eth_dev *ethdev; 1792 1793 ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1794 if (!ethdev) 1795 return 0; 1796 1797 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 1798 return rte_eth_dev_pci_generic_remove(pci_dev, 1799 ixgbe_vf_representor_uninit); 1800 else 1801 return rte_eth_dev_pci_generic_remove(pci_dev, 1802 eth_ixgbe_dev_uninit); 1803 } 1804 1805 static struct rte_pci_driver rte_ixgbe_pmd = { 1806 .id_table = pci_id_ixgbe_map, 1807 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1808 .probe = eth_ixgbe_pci_probe, 1809 .remove = eth_ixgbe_pci_remove, 1810 }; 1811 1812 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1813 struct rte_pci_device *pci_dev) 1814 { 1815 return rte_eth_dev_pci_generic_probe(pci_dev, 1816 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); 1817 } 1818 1819 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) 1820 { 1821 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); 1822 } 1823 1824 /* 1825 * virtual function driver struct 1826 */ 1827 static struct rte_pci_driver rte_ixgbevf_pmd = { 1828 .id_table = pci_id_ixgbevf_map, 1829 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 1830 .probe = eth_ixgbevf_pci_probe, 1831 .remove = eth_ixgbevf_pci_remove, 1832 }; 1833 1834 static int 1835 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1836 { 1837 struct ixgbe_hw *hw = 1838 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1839 struct ixgbe_vfta *shadow_vfta = 1840 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1841 uint32_t vfta; 1842 uint32_t vid_idx; 1843 uint32_t vid_bit; 1844 1845 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1846 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1847 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1848 if (on) 1849 vfta |= vid_bit; 1850 else 1851 vfta &= ~vid_bit; 1852 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1853 1854 /* update local VFTA copy */ 1855 shadow_vfta->vfta[vid_idx] = vfta; 1856 1857 return 0; 1858 } 1859 1860 static void 1861 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1862 { 1863 if (on) 1864 ixgbe_vlan_hw_strip_enable(dev, queue); 1865 else 1866 ixgbe_vlan_hw_strip_disable(dev, queue); 1867 } 1868 1869 static int 1870 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1871 enum rte_vlan_type vlan_type, 1872 uint16_t tpid) 1873 { 1874 struct ixgbe_hw *hw = 1875 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1876 int ret = 0; 1877 uint32_t reg; 1878 uint32_t qinq; 1879 1880 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1881 qinq &= IXGBE_DMATXCTL_GDV; 1882 1883 switch (vlan_type) { 1884 case RTE_ETH_VLAN_TYPE_INNER: 1885 if (qinq) { 1886 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1887 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1888 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1889 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1890 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1891 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1892 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1893 } else { 1894 ret = -ENOTSUP; 1895 PMD_DRV_LOG(ERR, "Inner type is not supported" 1896 " by single VLAN"); 1897 } 1898 break; 1899 case RTE_ETH_VLAN_TYPE_OUTER: 1900 if (qinq) { 1901 /* Only the high 16-bits is valid */ 1902 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 
1903 IXGBE_EXVET_VET_EXT_SHIFT); 1904 } else { 1905 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1906 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1907 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1908 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1909 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1910 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1911 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1912 } 1913 1914 break; 1915 default: 1916 ret = -EINVAL; 1917 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1918 break; 1919 } 1920 1921 return ret; 1922 } 1923 1924 void 1925 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1926 { 1927 struct ixgbe_hw *hw = 1928 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1929 uint32_t vlnctrl; 1930 1931 PMD_INIT_FUNC_TRACE(); 1932 1933 /* Filter Table Disable */ 1934 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1935 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1936 1937 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1938 } 1939 1940 void 1941 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1942 { 1943 struct ixgbe_hw *hw = 1944 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1945 struct ixgbe_vfta *shadow_vfta = 1946 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1947 uint32_t vlnctrl; 1948 uint16_t i; 1949 1950 PMD_INIT_FUNC_TRACE(); 1951 1952 /* Filter Table Enable */ 1953 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1954 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1955 vlnctrl |= IXGBE_VLNCTRL_VFE; 1956 1957 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1958 1959 /* write whatever is in local vfta copy */ 1960 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1961 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 1962 } 1963 1964 static void 1965 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 1966 { 1967 struct ixgbe_hwstrip *hwstrip = 1968 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 1969 struct ixgbe_rx_queue *rxq; 1970 1971 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 1972 return; 1973 1974 if (on) 1975 IXGBE_SET_HWSTRIP(hwstrip, queue); 1976 else 1977 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 1978 1979 if (queue >= dev->data->nb_rx_queues) 1980 return; 1981 1982 rxq = dev->data->rx_queues[queue]; 1983 1984 if (on) { 1985 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 1986 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 1987 } else { 1988 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN; 1989 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 1990 } 1991 } 1992 1993 static void 1994 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 1995 { 1996 struct ixgbe_hw *hw = 1997 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1998 uint32_t ctrl; 1999 2000 PMD_INIT_FUNC_TRACE(); 2001 2002 if (hw->mac.type == ixgbe_mac_82598EB) { 2003 /* No queue level support */ 2004 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 2005 return; 2006 } 2007 2008 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 2009 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2010 ctrl &= ~IXGBE_RXDCTL_VME; 2011 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2012 2013 /* record those setting for HW strip per queue */ 2014 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 2015 } 2016 2017 static void 2018 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 2019 { 2020 struct ixgbe_hw *hw = 2021 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2022 uint32_t ctrl; 2023 2024 PMD_INIT_FUNC_TRACE(); 2025 2026 if (hw->mac.type == ixgbe_mac_82598EB) { 2027 /* No queue level supported */ 2028 
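/* 82598 only has the global VLNCTRL.VME strip control, which is handled
 * in ixgbe_vlan_hw_strip_config(); log a notice and bail out here.
 */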
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 2029 return; 2030 } 2031 2032 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 2033 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2034 ctrl |= IXGBE_RXDCTL_VME; 2035 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2036 2037 /* record those setting for HW strip per queue */ 2038 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); 2039 } 2040 2041 static void 2042 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2043 { 2044 struct ixgbe_hw *hw = 2045 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2046 uint32_t ctrl; 2047 2048 PMD_INIT_FUNC_TRACE(); 2049 2050 /* DMATXCTRL: Geric Double VLAN Disable */ 2051 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2052 ctrl &= ~IXGBE_DMATXCTL_GDV; 2053 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2054 2055 /* CTRL_EXT: Global Double VLAN Disable */ 2056 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2057 ctrl &= ~IXGBE_EXTENDED_VLAN; 2058 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2059 2060 } 2061 2062 static void 2063 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2064 { 2065 struct ixgbe_hw *hw = 2066 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2067 uint32_t ctrl; 2068 2069 PMD_INIT_FUNC_TRACE(); 2070 2071 /* DMATXCTRL: Geric Double VLAN Enable */ 2072 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2073 ctrl |= IXGBE_DMATXCTL_GDV; 2074 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2075 2076 /* CTRL_EXT: Global Double VLAN Enable */ 2077 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2078 ctrl |= IXGBE_EXTENDED_VLAN; 2079 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2080 2081 /* Clear pooling mode of PFVTCTL. It's required by X550. */ 2082 if (hw->mac.type == ixgbe_mac_X550 || 2083 hw->mac.type == ixgbe_mac_X550EM_x || 2084 hw->mac.type == ixgbe_mac_X550EM_a) { 2085 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 2086 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 2087 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 2088 } 2089 2090 /* 2091 * VET EXT field in the EXVET register = 0x8100 by default 2092 * So no need to change. 
Same to VT field of DMATXCTL register 2093 */ 2094 } 2095 2096 void 2097 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) 2098 { 2099 struct ixgbe_hw *hw = 2100 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2101 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 2102 uint32_t ctrl; 2103 uint16_t i; 2104 struct ixgbe_rx_queue *rxq; 2105 bool on; 2106 2107 PMD_INIT_FUNC_TRACE(); 2108 2109 if (hw->mac.type == ixgbe_mac_82598EB) { 2110 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { 2111 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2112 ctrl |= IXGBE_VLNCTRL_VME; 2113 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2114 } else { 2115 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2116 ctrl &= ~IXGBE_VLNCTRL_VME; 2117 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2118 } 2119 } else { 2120 /* 2121 * Other 10G NIC, the VLAN strip can be setup 2122 * per queue in RXDCTL 2123 */ 2124 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2125 rxq = dev->data->rx_queues[i]; 2126 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 2127 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { 2128 ctrl |= IXGBE_RXDCTL_VME; 2129 on = TRUE; 2130 } else { 2131 ctrl &= ~IXGBE_RXDCTL_VME; 2132 on = FALSE; 2133 } 2134 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 2135 2136 /* record those setting for HW strip per queue */ 2137 ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); 2138 } 2139 } 2140 } 2141 2142 static void 2143 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) 2144 { 2145 uint16_t i; 2146 struct rte_eth_rxmode *rxmode; 2147 struct ixgbe_rx_queue *rxq; 2148 2149 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2150 rxmode = &dev->data->dev_conf.rxmode; 2151 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 2152 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2153 rxq = dev->data->rx_queues[i]; 2154 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 2155 } 2156 else 2157 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2158 rxq = dev->data->rx_queues[i]; 2159 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 2160 } 2161 } 2162 } 2163 2164 static int 2165 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2166 { 2167 struct rte_eth_rxmode *rxmode; 2168 rxmode = &dev->data->dev_conf.rxmode; 2169 2170 if (mask & RTE_ETH_VLAN_STRIP_MASK) 2171 ixgbe_vlan_hw_strip_config(dev); 2172 2173 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2174 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 2175 ixgbe_vlan_hw_filter_enable(dev); 2176 else 2177 ixgbe_vlan_hw_filter_disable(dev); 2178 } 2179 2180 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2181 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2182 ixgbe_vlan_hw_extend_enable(dev); 2183 else 2184 ixgbe_vlan_hw_extend_disable(dev); 2185 } 2186 2187 return 0; 2188 } 2189 2190 static int 2191 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2192 { 2193 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 2194 2195 ixgbe_vlan_offload_config(dev, mask); 2196 2197 return 0; 2198 } 2199 2200 static void 2201 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2202 { 2203 struct ixgbe_hw *hw = 2204 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2205 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 2206 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2207 2208 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 2209 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 2210 } 2211 2212 static int 2213 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 2214 { 2215 struct rte_pci_device *pci_dev 
= RTE_ETH_DEV_TO_PCI(dev); 2216 2217 switch (nb_rx_q) { 2218 case 1: 2219 case 2: 2220 RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS; 2221 break; 2222 case 4: 2223 RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS; 2224 break; 2225 default: 2226 return -EINVAL; 2227 } 2228 2229 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 2230 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2231 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 2232 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2233 return 0; 2234 } 2235 2236 static int 2237 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 2238 { 2239 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 2240 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2241 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2242 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2243 2244 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 2245 /* check multi-queue mode */ 2246 switch (dev_conf->rxmode.mq_mode) { 2247 case RTE_ETH_MQ_RX_VMDQ_DCB: 2248 PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); 2249 break; 2250 case RTE_ETH_MQ_RX_VMDQ_DCB_RSS: 2251 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ 2252 PMD_INIT_LOG(ERR, "SRIOV active," 2253 " unsupported mq_mode rx %d.", 2254 dev_conf->rxmode.mq_mode); 2255 return -EINVAL; 2256 case RTE_ETH_MQ_RX_RSS: 2257 case RTE_ETH_MQ_RX_VMDQ_RSS: 2258 dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS; 2259 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 2260 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 2261 PMD_INIT_LOG(ERR, "SRIOV is active," 2262 " invalid queue number" 2263 " for VMDQ RSS, allowed" 2264 " value are 1, 2 or 4."); 2265 return -EINVAL; 2266 } 2267 break; 2268 case RTE_ETH_MQ_RX_VMDQ_ONLY: 2269 case RTE_ETH_MQ_RX_NONE: 2270 /* if nothing mq mode configure, use default scheme */ 2271 dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY; 2272 break; 2273 default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/ 2274 /* SRIOV only works in VMDq enable mode */ 2275 PMD_INIT_LOG(ERR, "SRIOV is active," 2276 " wrong mq_mode rx %d.", 2277 dev_conf->rxmode.mq_mode); 2278 return -EINVAL; 2279 } 2280 2281 switch (dev_conf->txmode.mq_mode) { 2282 case RTE_ETH_MQ_TX_VMDQ_DCB: 2283 PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); 2284 dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB; 2285 break; 2286 default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */ 2287 dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY; 2288 break; 2289 } 2290 2291 /* check valid queue number */ 2292 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 2293 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 2294 PMD_INIT_LOG(ERR, "SRIOV is active," 2295 " nb_rx_q=%d nb_tx_q=%d queue number" 2296 " must be less than or equal to %d.", 2297 nb_rx_q, nb_tx_q, 2298 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2299 return -EINVAL; 2300 } 2301 } else { 2302 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) { 2303 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2304 " not supported."); 2305 return -EINVAL; 2306 } 2307 /* check configuration for vmdb+dcb mode */ 2308 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) { 2309 const struct rte_eth_vmdq_dcb_conf *conf; 2310 2311 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2312 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2313 IXGBE_VMDQ_DCB_NB_QUEUES); 2314 return -EINVAL; 2315 } 2316 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2317 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || 2318 
conf->nb_queue_pools == RTE_ETH_32_POOLS)) { 2319 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2320 " nb_queue_pools must be %d or %d.", 2321 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); 2322 return -EINVAL; 2323 } 2324 } 2325 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) { 2326 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2327 2328 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2329 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2330 IXGBE_VMDQ_DCB_NB_QUEUES); 2331 return -EINVAL; 2332 } 2333 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2334 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || 2335 conf->nb_queue_pools == RTE_ETH_32_POOLS)) { 2336 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2337 " nb_queue_pools != %d and" 2338 " nb_queue_pools != %d.", 2339 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); 2340 return -EINVAL; 2341 } 2342 } 2343 2344 /* For DCB mode check our configuration before we go further */ 2345 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) { 2346 const struct rte_eth_dcb_rx_conf *conf; 2347 2348 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2349 if (!(conf->nb_tcs == RTE_ETH_4_TCS || 2350 conf->nb_tcs == RTE_ETH_8_TCS)) { 2351 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2352 " and nb_tcs != %d.", 2353 RTE_ETH_4_TCS, RTE_ETH_8_TCS); 2354 return -EINVAL; 2355 } 2356 } 2357 2358 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) { 2359 const struct rte_eth_dcb_tx_conf *conf; 2360 2361 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2362 if (!(conf->nb_tcs == RTE_ETH_4_TCS || 2363 conf->nb_tcs == RTE_ETH_8_TCS)) { 2364 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2365 " and nb_tcs != %d.", 2366 RTE_ETH_4_TCS, RTE_ETH_8_TCS); 2367 return -EINVAL; 2368 } 2369 } 2370 2371 /* 2372 * When DCB/VT is off, maximum number of queues changes, 2373 * except for 82598EB, which remains constant. 2374 */ 2375 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE && 2376 hw->mac.type != ixgbe_mac_82598EB) { 2377 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2378 PMD_INIT_LOG(ERR, 2379 "Neither VT nor DCB are enabled, " 2380 "nb_tx_q > %d.", 2381 IXGBE_NONE_MODE_TX_NB_QUEUES); 2382 return -EINVAL; 2383 } 2384 } 2385 } 2386 return 0; 2387 } 2388 2389 static int 2390 ixgbe_dev_configure(struct rte_eth_dev *dev) 2391 { 2392 struct ixgbe_interrupt *intr = 2393 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2394 struct ixgbe_adapter *adapter = dev->data->dev_private; 2395 int ret; 2396 2397 PMD_INIT_FUNC_TRACE(); 2398 2399 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 2400 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2401 2402 /* multiple queue mode checking */ 2403 ret = ixgbe_check_mq_mode(dev); 2404 if (ret != 0) { 2405 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2406 ret); 2407 return ret; 2408 } 2409 2410 /* set flag to update link status after init */ 2411 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2412 2413 /* 2414 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 2415 * allocation or vector Rx preconditions we will reset it. 
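 * The Rx queue setup code is expected to clear these flags again if any
 * queue fails the corresponding precondition checks.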
2416 */ 2417 adapter->rx_bulk_alloc_allowed = true; 2418 adapter->rx_vec_allowed = true; 2419 2420 return 0; 2421 } 2422 2423 static void 2424 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2425 { 2426 struct ixgbe_hw *hw = 2427 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2428 struct ixgbe_interrupt *intr = 2429 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2430 uint32_t gpie; 2431 2432 /* only set up it on X550EM_X */ 2433 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2434 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2435 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2436 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2437 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2438 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2439 } 2440 } 2441 2442 int 2443 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 2444 uint16_t tx_rate, uint64_t q_msk) 2445 { 2446 struct ixgbe_hw *hw; 2447 struct ixgbe_vf_info *vfinfo; 2448 struct rte_eth_link link; 2449 uint8_t nb_q_per_pool; 2450 uint32_t queue_stride; 2451 uint32_t queue_idx, idx = 0, vf_idx; 2452 uint32_t queue_end; 2453 uint16_t total_rate = 0; 2454 struct rte_pci_device *pci_dev; 2455 int ret; 2456 2457 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2458 ret = rte_eth_link_get_nowait(dev->data->port_id, &link); 2459 if (ret < 0) 2460 return ret; 2461 2462 if (vf >= pci_dev->max_vfs) 2463 return -EINVAL; 2464 2465 if (tx_rate > link.link_speed) 2466 return -EINVAL; 2467 2468 if (q_msk == 0) 2469 return 0; 2470 2471 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2472 vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 2473 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2474 queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2475 queue_idx = vf * queue_stride; 2476 queue_end = queue_idx + nb_q_per_pool - 1; 2477 if (queue_end >= hw->mac.max_tx_queues) 2478 return -EINVAL; 2479 2480 if (vfinfo) { 2481 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { 2482 if (vf_idx == vf) 2483 continue; 2484 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 2485 idx++) 2486 total_rate += vfinfo[vf_idx].tx_rate[idx]; 2487 } 2488 } else { 2489 return -EINVAL; 2490 } 2491 2492 /* Store tx_rate for this vf. */ 2493 for (idx = 0; idx < nb_q_per_pool; idx++) { 2494 if (((uint64_t)0x1 << idx) & q_msk) { 2495 if (vfinfo[vf].tx_rate[idx] != tx_rate) 2496 vfinfo[vf].tx_rate[idx] = tx_rate; 2497 total_rate += tx_rate; 2498 } 2499 } 2500 2501 if (total_rate > dev->data->dev_link.link_speed) { 2502 /* Reset stored TX rate of the VF if it causes exceed 2503 * link speed. 
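 * The check covers the sum of the stored per-queue rates of all VFs;
 * on failure the stored rates for this VF are cleared and -EINVAL
 * is returned.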
2504 */ 2505 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 2506 return -EINVAL; 2507 } 2508 2509 /* Set RTTBCNRC of each queue/pool for vf X */ 2510 for (; queue_idx <= queue_end; queue_idx++) { 2511 if (0x1 & q_msk) 2512 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 2513 q_msk = q_msk >> 1; 2514 } 2515 2516 return 0; 2517 } 2518 2519 static int 2520 ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw) 2521 { 2522 struct ixgbe_adapter *adapter = dev->data->dev_private; 2523 int err; 2524 uint32_t mflcn; 2525 2526 ixgbe_setup_fc(hw); 2527 2528 err = ixgbe_fc_enable(hw); 2529 2530 /* Not negotiated is not an error case */ 2531 if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) { 2532 /* 2533 *check if we want to forward MAC frames - driver doesn't 2534 *have native capability to do that, 2535 *so we'll write the registers ourselves 2536 */ 2537 2538 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2539 2540 /* set or clear MFLCN.PMCF bit depending on configuration */ 2541 if (adapter->mac_ctrl_frame_fwd != 0) 2542 mflcn |= IXGBE_MFLCN_PMCF; 2543 else 2544 mflcn &= ~IXGBE_MFLCN_PMCF; 2545 2546 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 2547 IXGBE_WRITE_FLUSH(hw); 2548 2549 return 0; 2550 } 2551 return err; 2552 } 2553 2554 /* 2555 * Configure device link speed and setup link. 2556 * It returns 0 on success. 2557 */ 2558 static int 2559 ixgbe_dev_start(struct rte_eth_dev *dev) 2560 { 2561 struct ixgbe_hw *hw = 2562 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2563 struct ixgbe_vf_info *vfinfo = 2564 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2565 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2566 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2567 uint32_t intr_vector = 0; 2568 int err; 2569 bool link_up = false, negotiate = 0; 2570 uint32_t speed = 0; 2571 uint32_t allowed_speeds = 0; 2572 int mask = 0; 2573 int status; 2574 uint16_t vf, idx; 2575 uint32_t *link_speeds; 2576 struct ixgbe_tm_conf *tm_conf = 2577 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2578 struct ixgbe_macsec_setting *macsec_setting = 2579 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 2580 2581 PMD_INIT_FUNC_TRACE(); 2582 2583 /* Stop the link setup handler before resetting the HW. 
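 * A setup-link thread launched by an earlier start or link update may
 * still be running; wait for it so it does not race with the reset below.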
*/ 2584 ixgbe_dev_wait_setup_link_complete(dev, 0); 2585 2586 /* disable uio/vfio intr/eventfd mapping */ 2587 rte_intr_disable(intr_handle); 2588 2589 /* stop adapter */ 2590 hw->adapter_stopped = 0; 2591 ixgbe_stop_adapter(hw); 2592 2593 /* reinitialize adapter 2594 * this calls reset and start 2595 */ 2596 status = ixgbe_pf_reset_hw(hw); 2597 if (status != 0) 2598 return -1; 2599 hw->mac.ops.start_hw(hw); 2600 hw->mac.get_link_status = true; 2601 2602 /* configure PF module if SRIOV enabled */ 2603 ixgbe_pf_host_configure(dev); 2604 2605 ixgbe_dev_phy_intr_setup(dev); 2606 2607 /* check and configure queue intr-vector mapping */ 2608 if ((rte_intr_cap_multiple(intr_handle) || 2609 !RTE_ETH_DEV_SRIOV(dev).active) && 2610 dev->data->dev_conf.intr_conf.rxq != 0) { 2611 intr_vector = dev->data->nb_rx_queues; 2612 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2613 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2614 IXGBE_MAX_INTR_QUEUE_NUM); 2615 return -ENOTSUP; 2616 } 2617 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2618 return -1; 2619 } 2620 2621 if (rte_intr_dp_is_en(intr_handle)) { 2622 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 2623 dev->data->nb_rx_queues)) { 2624 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2625 " intr_vec", dev->data->nb_rx_queues); 2626 return -ENOMEM; 2627 } 2628 } 2629 2630 /* configure MSI-X for sleep until Rx interrupt */ 2631 ixgbe_configure_msix(dev); 2632 2633 /* initialize transmission unit */ 2634 ixgbe_dev_tx_init(dev); 2635 2636 /* This can fail when allocating mbufs for descriptor rings */ 2637 err = ixgbe_dev_rx_init(dev); 2638 if (err) { 2639 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2640 goto error; 2641 } 2642 2643 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | 2644 RTE_ETH_VLAN_EXTEND_MASK; 2645 err = ixgbe_vlan_offload_config(dev, mask); 2646 if (err) { 2647 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 2648 goto error; 2649 } 2650 2651 if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) { 2652 /* Enable vlan filtering for VMDq */ 2653 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2654 } 2655 2656 /* Configure DCB hw */ 2657 ixgbe_configure_dcb(dev); 2658 2659 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { 2660 err = ixgbe_fdir_configure(dev); 2661 if (err) 2662 goto error; 2663 } 2664 2665 /* Restore vf rate limit */ 2666 if (vfinfo != NULL) { 2667 for (vf = 0; vf < pci_dev->max_vfs; vf++) 2668 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2669 if (vfinfo[vf].tx_rate[idx] != 0) 2670 ixgbe_set_vf_rate_limit( 2671 dev, vf, 2672 vfinfo[vf].tx_rate[idx], 2673 1 << idx); 2674 } 2675 2676 ixgbe_restore_statistics_mapping(dev); 2677 2678 err = ixgbe_flow_ctrl_enable(dev, hw); 2679 if (err < 0) { 2680 PMD_INIT_LOG(ERR, "enable flow ctrl err"); 2681 goto error; 2682 } 2683 2684 err = ixgbe_dev_rxtx_start(dev); 2685 if (err < 0) { 2686 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2687 goto error; 2688 } 2689 2690 /* Skip link setup if loopback mode is enabled. 
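 * ixgbe_check_supported_loopback_mode() below still validates that the
 * requested lpbk_mode is supported on this MAC type; unsupported modes
 * abort the start.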
*/ 2691 if (dev->data->dev_conf.lpbk_mode != 0) { 2692 err = ixgbe_check_supported_loopback_mode(dev); 2693 if (err < 0) { 2694 PMD_INIT_LOG(ERR, "Unsupported loopback mode"); 2695 goto error; 2696 } else { 2697 goto skip_link_setup; 2698 } 2699 } 2700 2701 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2702 err = hw->mac.ops.setup_sfp(hw); 2703 if (err) 2704 goto error; 2705 } 2706 2707 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2708 /* Turn on the copper */ 2709 ixgbe_set_phy_power(hw, true); 2710 } else { 2711 /* Turn on the laser */ 2712 ixgbe_enable_tx_laser(hw); 2713 } 2714 2715 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2716 if (err) 2717 goto error; 2718 dev->data->dev_link.link_status = link_up; 2719 2720 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2721 if (err) 2722 goto error; 2723 2724 switch (hw->mac.type) { 2725 case ixgbe_mac_X550: 2726 case ixgbe_mac_X550EM_x: 2727 case ixgbe_mac_X550EM_a: 2728 allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G | 2729 RTE_ETH_LINK_SPEED_2_5G | RTE_ETH_LINK_SPEED_5G | 2730 RTE_ETH_LINK_SPEED_10G; 2731 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 2732 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 2733 allowed_speeds = RTE_ETH_LINK_SPEED_10M | 2734 RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G; 2735 break; 2736 default: 2737 allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G | 2738 RTE_ETH_LINK_SPEED_10G; 2739 } 2740 2741 link_speeds = &dev->data->dev_conf.link_speeds; 2742 2743 /* Ignore autoneg flag bit and check the validity of 2744 * link_speed 2745 */ 2746 if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) { 2747 PMD_INIT_LOG(ERR, "Invalid link setting"); 2748 goto error; 2749 } 2750 2751 speed = 0x0; 2752 if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) { 2753 switch (hw->mac.type) { 2754 case ixgbe_mac_82598EB: 2755 speed = IXGBE_LINK_SPEED_82598_AUTONEG; 2756 break; 2757 case ixgbe_mac_82599EB: 2758 case ixgbe_mac_X540: 2759 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2760 break; 2761 case ixgbe_mac_X550: 2762 case ixgbe_mac_X550EM_x: 2763 case ixgbe_mac_X550EM_a: 2764 speed = IXGBE_LINK_SPEED_X550_AUTONEG; 2765 break; 2766 default: 2767 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2768 } 2769 } else { 2770 if (*link_speeds & RTE_ETH_LINK_SPEED_10G) 2771 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2772 if (*link_speeds & RTE_ETH_LINK_SPEED_5G) 2773 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2774 if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G) 2775 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2776 if (*link_speeds & RTE_ETH_LINK_SPEED_1G) 2777 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2778 if (*link_speeds & RTE_ETH_LINK_SPEED_100M) 2779 speed |= IXGBE_LINK_SPEED_100_FULL; 2780 if (*link_speeds & RTE_ETH_LINK_SPEED_10M) 2781 speed |= IXGBE_LINK_SPEED_10_FULL; 2782 } 2783 2784 err = ixgbe_setup_link(hw, speed, link_up); 2785 if (err) 2786 goto error; 2787 2788 skip_link_setup: 2789 2790 if (rte_intr_allow_others(intr_handle)) { 2791 /* check if lsc interrupt is enabled */ 2792 if (dev->data->dev_conf.intr_conf.lsc != 0) 2793 ixgbe_dev_lsc_interrupt_setup(dev, TRUE); 2794 else 2795 ixgbe_dev_lsc_interrupt_setup(dev, FALSE); 2796 ixgbe_dev_macsec_interrupt_setup(dev); 2797 } else { 2798 rte_intr_callback_unregister(intr_handle, 2799 ixgbe_dev_interrupt_handler, dev); 2800 if (dev->data->dev_conf.intr_conf.lsc != 0) 2801 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2802 " no intr multiplex"); 2803 } 2804 2805 /* check if rxq interrupt is enabled */ 2806 if (dev->data->dev_conf.intr_conf.rxq 
!= 0 && 2807 rte_intr_dp_is_en(intr_handle)) 2808 ixgbe_dev_rxq_interrupt_setup(dev); 2809 2810 /* enable uio/vfio intr/eventfd mapping */ 2811 rte_intr_enable(intr_handle); 2812 2813 /* resume enabled intr since hw reset */ 2814 ixgbe_enable_intr(dev); 2815 ixgbe_l2_tunnel_conf(dev); 2816 ixgbe_filter_restore(dev); 2817 2818 if (tm_conf->root && !tm_conf->committed) 2819 PMD_DRV_LOG(WARNING, 2820 "please call hierarchy_commit() " 2821 "before starting the port"); 2822 2823 /* wait for the controller to acquire link */ 2824 err = ixgbe_wait_for_link_up(hw); 2825 if (err) 2826 goto error; 2827 2828 /* 2829 * Update link status right before return, because it may 2830 * start link configuration process in a separate thread. 2831 */ 2832 ixgbe_dev_link_update(dev, 0); 2833 2834 /* setup the macsec setting register */ 2835 if (macsec_setting->offload_en) 2836 ixgbe_dev_macsec_register_enable(dev, macsec_setting); 2837 2838 return 0; 2839 2840 error: 2841 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2842 ixgbe_dev_clear_queues(dev); 2843 return -EIO; 2844 } 2845 2846 /* 2847 * Stop device: disable rx and tx functions to allow for reconfiguring. 2848 */ 2849 static int 2850 ixgbe_dev_stop(struct rte_eth_dev *dev) 2851 { 2852 struct rte_eth_link link; 2853 struct ixgbe_adapter *adapter = dev->data->dev_private; 2854 struct ixgbe_hw *hw = 2855 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2856 struct ixgbe_vf_info *vfinfo = 2857 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2858 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2859 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2860 int vf; 2861 struct ixgbe_tm_conf *tm_conf = 2862 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2863 2864 if (hw->adapter_stopped) 2865 return 0; 2866 2867 PMD_INIT_FUNC_TRACE(); 2868 2869 ixgbe_dev_wait_setup_link_complete(dev, 0); 2870 2871 /* disable interrupts */ 2872 ixgbe_disable_intr(hw); 2873 2874 /* reset the NIC */ 2875 ixgbe_pf_reset_hw(hw); 2876 hw->adapter_stopped = 0; 2877 2878 /* stop adapter */ 2879 ixgbe_stop_adapter(hw); 2880 2881 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) 2882 vfinfo[vf].clear_to_send = false; 2883 2884 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2885 /* Turn off the copper */ 2886 ixgbe_set_phy_power(hw, false); 2887 } else { 2888 /* Turn off the laser */ 2889 ixgbe_disable_tx_laser(hw); 2890 } 2891 2892 ixgbe_dev_clear_queues(dev); 2893 2894 /* Clear stored conf */ 2895 dev->data->scattered_rx = 0; 2896 dev->data->lro = 0; 2897 2898 /* Clear recorded link status */ 2899 memset(&link, 0, sizeof(link)); 2900 rte_eth_linkstatus_set(dev, &link); 2901 2902 if (!rte_intr_allow_others(intr_handle)) 2903 /* resume to the default handler */ 2904 rte_intr_callback_register(intr_handle, 2905 ixgbe_dev_interrupt_handler, 2906 (void *)dev); 2907 2908 /* Clean datapath event and queue/vec mapping */ 2909 rte_intr_efd_disable(intr_handle); 2910 rte_intr_vec_list_free(intr_handle); 2911 2912 /* reset hierarchy commit */ 2913 tm_conf->committed = false; 2914 2915 adapter->rss_reta_updated = 0; 2916 2917 hw->adapter_stopped = true; 2918 dev->data->dev_started = 0; 2919 2920 return 0; 2921 } 2922 2923 /* 2924 * Set device link up: enable tx. 
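 * For copper media the PHY is powered back up; for fiber the Tx laser is
 * re-enabled and the link status refreshed.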
2925 */ 2926 static int 2927 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2928 { 2929 struct ixgbe_hw *hw = 2930 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2931 if (hw->mac.type == ixgbe_mac_82599EB) { 2932 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2933 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2934 /* Not supported in bypass mode */ 2935 PMD_INIT_LOG(ERR, "Set link up is not supported " 2936 "by device id 0x%x", hw->device_id); 2937 return -ENOTSUP; 2938 } 2939 #endif 2940 } 2941 2942 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2943 /* Turn on the copper */ 2944 ixgbe_set_phy_power(hw, true); 2945 } else { 2946 /* Turn on the laser */ 2947 ixgbe_enable_tx_laser(hw); 2948 ixgbe_dev_link_update(dev, 0); 2949 } 2950 2951 return 0; 2952 } 2953 2954 /* 2955 * Set device link down: disable tx. 2956 */ 2957 static int 2958 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 2959 { 2960 struct ixgbe_hw *hw = 2961 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2962 if (hw->mac.type == ixgbe_mac_82599EB) { 2963 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2964 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2965 /* Not supported in bypass mode */ 2966 PMD_INIT_LOG(ERR, "Set link down is not supported " 2967 "by device id 0x%x", hw->device_id); 2968 return -ENOTSUP; 2969 } 2970 #endif 2971 } 2972 2973 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2974 /* Turn off the copper */ 2975 ixgbe_set_phy_power(hw, false); 2976 } else { 2977 /* Turn off the laser */ 2978 ixgbe_disable_tx_laser(hw); 2979 ixgbe_dev_link_update(dev, 0); 2980 } 2981 2982 return 0; 2983 } 2984 2985 /* 2986 * Reset and stop device. 2987 */ 2988 static int 2989 ixgbe_dev_close(struct rte_eth_dev *dev) 2990 { 2991 struct ixgbe_hw *hw = 2992 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2993 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2994 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2995 int retries = 0; 2996 int ret; 2997 2998 PMD_INIT_FUNC_TRACE(); 2999 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3000 return 0; 3001 3002 ixgbe_pf_reset_hw(hw); 3003 3004 ret = ixgbe_dev_stop(dev); 3005 3006 ixgbe_dev_free_queues(dev); 3007 3008 ixgbe_disable_pcie_master(hw); 3009 3010 /* reprogram the RAR[0] in case user changed it. 
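 * This restores the default MAC address (hw->mac.addr) in hardware before
 * the port is released.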
*/ 3011 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 3012 3013 /* Unlock any pending hardware semaphore */ 3014 ixgbe_swfw_lock_reset(hw); 3015 3016 /* disable uio intr before callback unregister */ 3017 rte_intr_disable(intr_handle); 3018 3019 do { 3020 ret = rte_intr_callback_unregister(intr_handle, 3021 ixgbe_dev_interrupt_handler, dev); 3022 if (ret >= 0 || ret == -ENOENT) { 3023 break; 3024 } else if (ret != -EAGAIN) { 3025 PMD_INIT_LOG(ERR, 3026 "intr callback unregister failed: %d", 3027 ret); 3028 } 3029 rte_delay_ms(100); 3030 } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); 3031 3032 /* cancel the delay handler before remove dev */ 3033 rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev); 3034 3035 /* uninitialize PF if max_vfs not zero */ 3036 ixgbe_pf_host_uninit(dev); 3037 3038 /* remove all the fdir filters & hash */ 3039 ixgbe_fdir_filter_uninit(dev); 3040 3041 /* remove all the L2 tunnel filters & hash */ 3042 ixgbe_l2_tn_filter_uninit(dev); 3043 3044 /* Remove all ntuple filters of the device */ 3045 ixgbe_ntuple_filter_uninit(dev); 3046 3047 /* clear all the filters list */ 3048 ixgbe_filterlist_flush(); 3049 3050 /* Remove all Traffic Manager configuration */ 3051 ixgbe_tm_conf_uninit(dev); 3052 3053 #ifdef RTE_LIB_SECURITY 3054 rte_free(dev->security_ctx); 3055 dev->security_ctx = NULL; 3056 #endif 3057 3058 return ret; 3059 } 3060 3061 /* 3062 * Reset PF device. 3063 */ 3064 static int 3065 ixgbe_dev_reset(struct rte_eth_dev *dev) 3066 { 3067 int ret; 3068 3069 /* When a DPDK PMD PF begin to reset PF port, it should notify all 3070 * its VF to make them align with it. The detailed notification 3071 * mechanism is PMD specific. As to ixgbe PF, it is rather complex. 3072 * To avoid unexpected behavior in VF, currently reset of PF with 3073 * SR-IOV activation is not supported. It might be supported later. 3074 */ 3075 if (dev->data->sriov.active) 3076 return -ENOTSUP; 3077 3078 ret = eth_ixgbe_dev_uninit(dev); 3079 if (ret) 3080 return ret; 3081 3082 ret = eth_ixgbe_dev_init(dev, NULL); 3083 3084 return ret; 3085 } 3086 3087 static void 3088 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 3089 struct ixgbe_hw_stats *hw_stats, 3090 struct ixgbe_macsec_stats *macsec_stats, 3091 uint64_t *total_missed_rx, uint64_t *total_qbrc, 3092 uint64_t *total_qprc, uint64_t *total_qprdc) 3093 { 3094 uint32_t bprc, lxon, lxoff, total; 3095 uint32_t delta_gprc = 0; 3096 unsigned i; 3097 /* Workaround for RX byte count not including CRC bytes when CRC 3098 * strip is enabled. CRC bytes are removed from counters when crc_strip 3099 * is disabled. 
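 * When stripping is disabled, the per-queue and total byte counters read
 * below are therefore reduced by RTE_ETHER_CRC_LEN for each packet counted
 * in this read, so both configurations report the same byte totals.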
3100 */ 3101 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 3102 IXGBE_HLREG0_RXCRCSTRP); 3103 3104 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 3105 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 3106 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 3107 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 3108 3109 for (i = 0; i < 8; i++) { 3110 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 3111 3112 /* global total per queue */ 3113 hw_stats->mpc[i] += mp; 3114 /* Running comprehensive total for stats display */ 3115 *total_missed_rx += hw_stats->mpc[i]; 3116 if (hw->mac.type == ixgbe_mac_82598EB) { 3117 hw_stats->rnbc[i] += 3118 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3119 hw_stats->pxonrxc[i] += 3120 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 3121 hw_stats->pxoffrxc[i] += 3122 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 3123 } else { 3124 hw_stats->pxonrxc[i] += 3125 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 3126 hw_stats->pxoffrxc[i] += 3127 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 3128 hw_stats->pxon2offc[i] += 3129 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 3130 } 3131 hw_stats->pxontxc[i] += 3132 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 3133 hw_stats->pxofftxc[i] += 3134 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 3135 } 3136 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3137 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3138 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3139 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3140 3141 delta_gprc += delta_qprc; 3142 3143 hw_stats->qprc[i] += delta_qprc; 3144 hw_stats->qptc[i] += delta_qptc; 3145 3146 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 3147 hw_stats->qbrc[i] += 3148 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 3149 if (crc_strip == 0) 3150 hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; 3151 3152 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 3153 hw_stats->qbtc[i] += 3154 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 3155 3156 hw_stats->qprdc[i] += delta_qprdc; 3157 *total_qprdc += hw_stats->qprdc[i]; 3158 3159 *total_qprc += hw_stats->qprc[i]; 3160 *total_qbrc += hw_stats->qbrc[i]; 3161 } 3162 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 3163 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 3164 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3165 3166 /* 3167 * An errata states that gprc actually counts good + missed packets: 3168 * Workaround to set gprc to summated queue packet receives 3169 */ 3170 hw_stats->gprc = *total_qprc; 3171 3172 if (hw->mac.type != ixgbe_mac_82598EB) { 3173 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 3174 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 3175 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 3176 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 3177 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 3178 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 3179 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 3180 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 3181 } else { 3182 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3183 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3184 /* 82598 only has a counter in the high register */ 3185 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3186 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3187 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3188 } 3189 uint64_t old_tpr = hw_stats->tpr; 3190 3191 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 
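/* old_tpr captured above lets the TOR adjustment below subtract CRC bytes
 * only for the packets received since the previous read.
 */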
3192 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3193 3194 if (crc_strip == 0) 3195 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; 3196 3197 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 3198 hw_stats->gptc += delta_gptc; 3199 hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; 3200 hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 3201 3202 /* 3203 * Workaround: mprc hardware is incorrectly counting 3204 * broadcasts, so for now we subtract those. 3205 */ 3206 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3207 hw_stats->bprc += bprc; 3208 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3209 if (hw->mac.type == ixgbe_mac_82598EB) 3210 hw_stats->mprc -= bprc; 3211 3212 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3213 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3214 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3215 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3216 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3217 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3218 3219 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3220 hw_stats->lxontxc += lxon; 3221 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3222 hw_stats->lxofftxc += lxoff; 3223 total = lxon + lxoff; 3224 3225 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3226 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3227 hw_stats->gptc -= total; 3228 hw_stats->mptc -= total; 3229 hw_stats->ptc64 -= total; 3230 hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; 3231 3232 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3233 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3234 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3235 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3236 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3237 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3238 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3239 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3240 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3241 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3242 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3243 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3244 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3245 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3246 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3247 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3248 /* Only read FCOE on 82599 */ 3249 if (hw->mac.type != ixgbe_mac_82598EB) { 3250 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3251 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3252 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3253 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3254 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3255 } 3256 3257 /* Flow Director Stats registers */ 3258 if (hw->mac.type != ixgbe_mac_82598EB) { 3259 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 3260 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 3261 hw_stats->fdirustat_add += IXGBE_READ_REG(hw, 3262 IXGBE_FDIRUSTAT) & 0xFFFF; 3263 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, 3264 IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; 3265 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, 3266 IXGBE_FDIRFSTAT) & 0xFFFF; 3267 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, 3268 IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; 3269 } 3270 /* MACsec Stats registers */ 3271 macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); 3272 macsec_stats->out_pkts_encrypted += 
3273 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); 3274 macsec_stats->out_pkts_protected += 3275 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); 3276 macsec_stats->out_octets_encrypted += 3277 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); 3278 macsec_stats->out_octets_protected += 3279 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); 3280 macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); 3281 macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); 3282 macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); 3283 macsec_stats->in_pkts_unknownsci += 3284 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); 3285 macsec_stats->in_octets_decrypted += 3286 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); 3287 macsec_stats->in_octets_validated += 3288 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); 3289 macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); 3290 macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); 3291 macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); 3292 for (i = 0; i < 2; i++) { 3293 macsec_stats->in_pkts_ok += 3294 IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); 3295 macsec_stats->in_pkts_invalid += 3296 IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); 3297 macsec_stats->in_pkts_notvalid += 3298 IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); 3299 } 3300 macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); 3301 macsec_stats->in_pkts_notusingsa += 3302 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); 3303 } 3304 3305 /* 3306 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 3307 */ 3308 static int 3309 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3310 { 3311 struct ixgbe_hw *hw = 3312 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3313 struct ixgbe_hw_stats *hw_stats = 3314 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3315 struct ixgbe_macsec_stats *macsec_stats = 3316 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3317 dev->data->dev_private); 3318 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3319 unsigned i; 3320 3321 total_missed_rx = 0; 3322 total_qbrc = 0; 3323 total_qprc = 0; 3324 total_qprdc = 0; 3325 3326 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3327 &total_qbrc, &total_qprc, &total_qprdc); 3328 3329 if (stats == NULL) 3330 return -EINVAL; 3331 3332 /* Fill out the rte_eth_stats statistics structure */ 3333 stats->ipackets = total_qprc; 3334 stats->ibytes = total_qbrc; 3335 stats->opackets = hw_stats->gptc; 3336 stats->obytes = hw_stats->gotc; 3337 3338 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3339 stats->q_ipackets[i] = hw_stats->qprc[i]; 3340 stats->q_opackets[i] = hw_stats->qptc[i]; 3341 stats->q_ibytes[i] = hw_stats->qbrc[i]; 3342 stats->q_obytes[i] = hw_stats->qbtc[i]; 3343 stats->q_errors[i] = hw_stats->qprdc[i]; 3344 } 3345 3346 /* Rx Errors */ 3347 stats->imissed = total_missed_rx; 3348 stats->ierrors = hw_stats->crcerrs + 3349 hw_stats->mspdc + 3350 hw_stats->rlec + 3351 hw_stats->ruc + 3352 hw_stats->roc + 3353 hw_stats->illerrc + 3354 hw_stats->errbc + 3355 hw_stats->rfc + 3356 hw_stats->fccrc + 3357 hw_stats->fclast; 3358 3359 /* 3360 * 82599 errata, UDP frames with a 0 checksum can be marked as checksum 3361 * errors. 
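 * The XEC checksum-error counter is therefore added to ierrors only for
 * MAC types other than 82599.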
3362 */ 3363 if (hw->mac.type != ixgbe_mac_82599EB) 3364 stats->ierrors += hw_stats->xec; 3365 3366 /* Tx Errors */ 3367 stats->oerrors = 0; 3368 return 0; 3369 } 3370 3371 static int 3372 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 3373 { 3374 struct ixgbe_hw_stats *stats = 3375 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3376 3377 /* HW registers are cleared on read */ 3378 ixgbe_dev_stats_get(dev, NULL); 3379 3380 /* Reset software totals */ 3381 memset(stats, 0, sizeof(*stats)); 3382 3383 return 0; 3384 } 3385 3386 /* This function calculates the number of xstats based on the current config */ 3387 static unsigned 3388 ixgbe_xstats_calc_num(void) { 3389 return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + 3390 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 3391 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 3392 } 3393 3394 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3395 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) 3396 { 3397 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 3398 unsigned stat, i, count; 3399 3400 if (xstats_names != NULL) { 3401 count = 0; 3402 3403 /* Note: limit >= cnt_stats checked upstream 3404 * in rte_eth_xstats_names() 3405 */ 3406 3407 /* Extended stats from ixgbe_hw_stats */ 3408 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3409 strlcpy(xstats_names[count].name, 3410 rte_ixgbe_stats_strings[i].name, 3411 sizeof(xstats_names[count].name)); 3412 count++; 3413 } 3414 3415 /* MACsec Stats */ 3416 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3417 strlcpy(xstats_names[count].name, 3418 rte_ixgbe_macsec_strings[i].name, 3419 sizeof(xstats_names[count].name)); 3420 count++; 3421 } 3422 3423 /* RX Priority Stats */ 3424 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3425 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3426 snprintf(xstats_names[count].name, 3427 sizeof(xstats_names[count].name), 3428 "rx_priority%u_%s", i, 3429 rte_ixgbe_rxq_strings[stat].name); 3430 count++; 3431 } 3432 } 3433 3434 /* TX Priority Stats */ 3435 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3436 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3437 snprintf(xstats_names[count].name, 3438 sizeof(xstats_names[count].name), 3439 "tx_priority%u_%s", i, 3440 rte_ixgbe_txq_strings[stat].name); 3441 count++; 3442 } 3443 } 3444 } 3445 return cnt_stats; 3446 } 3447 3448 static int ixgbe_dev_xstats_get_names_by_id( 3449 struct rte_eth_dev *dev, 3450 const uint64_t *ids, 3451 struct rte_eth_xstat_name *xstats_names, 3452 unsigned int limit) 3453 { 3454 if (!ids) { 3455 const unsigned int cnt_stats = ixgbe_xstats_calc_num(); 3456 unsigned int stat, i, count; 3457 3458 if (xstats_names != NULL) { 3459 count = 0; 3460 3461 /* Note: limit >= cnt_stats checked upstream 3462 * in rte_eth_xstats_names() 3463 */ 3464 3465 /* Extended stats from ixgbe_hw_stats */ 3466 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3467 strlcpy(xstats_names[count].name, 3468 rte_ixgbe_stats_strings[i].name, 3469 sizeof(xstats_names[count].name)); 3470 count++; 3471 } 3472 3473 /* MACsec Stats */ 3474 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3475 strlcpy(xstats_names[count].name, 3476 rte_ixgbe_macsec_strings[i].name, 3477 sizeof(xstats_names[count].name)); 3478 count++; 3479 } 3480 3481 /* RX Priority Stats */ 3482 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3483 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3484 snprintf(xstats_names[count].name, 3485 sizeof(xstats_names[count].name), 3486 
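/*
 * The xstats name callbacks above and the value callbacks further down
 * follow the usual ethdev contract: when the caller's buffer is absent or
 * too small they only report the required count, so an application fetches
 * extended stats in two passes.  Illustrative sketch only (dump_xstats()
 * and its error handling are assumptions, not driver code):
 *
 *     #include <stdio.h>
 *     #include <stdlib.h>
 *     #include <inttypes.h>
 *     #include <rte_ethdev.h>
 *
 *     static int
 *     dump_xstats(uint16_t port_id)
 *     {
 *         int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *         struct rte_eth_xstat_name *names;
 *         struct rte_eth_xstat *vals;
 *         int i, ret = -1;
 *
 *         if (n <= 0)
 *             return n;
 *         names = calloc(n, sizeof(*names));
 *         vals = calloc(n, sizeof(*vals));
 *         if (names == NULL || vals == NULL)
 *             goto out;
 *         if (rte_eth_xstats_get_names(port_id, names, n) != n ||
 *             rte_eth_xstats_get(port_id, vals, n) != n)
 *             goto out;
 *         for (i = 0; i < n; i++)
 *             printf("%s: %" PRIu64 "\n",
 *                    names[vals[i].id].name, vals[i].value);
 *         ret = 0;
 *     out:
 *         free(names);
 *         free(vals);
 *         return ret;
 *     }
 */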
"rx_priority%u_%s", i, 3487 rte_ixgbe_rxq_strings[stat].name); 3488 count++; 3489 } 3490 } 3491 3492 /* TX Priority Stats */ 3493 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3494 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3495 snprintf(xstats_names[count].name, 3496 sizeof(xstats_names[count].name), 3497 "tx_priority%u_%s", i, 3498 rte_ixgbe_txq_strings[stat].name); 3499 count++; 3500 } 3501 } 3502 } 3503 return cnt_stats; 3504 } 3505 3506 uint16_t i; 3507 uint16_t size = ixgbe_xstats_calc_num(); 3508 struct rte_eth_xstat_name xstats_names_copy[size]; 3509 3510 ixgbe_dev_xstats_get_names_by_id(dev, NULL, xstats_names_copy, 3511 size); 3512 3513 for (i = 0; i < limit; i++) { 3514 if (ids[i] >= size) { 3515 PMD_INIT_LOG(ERR, "id value isn't valid"); 3516 return -1; 3517 } 3518 strcpy(xstats_names[i].name, 3519 xstats_names_copy[ids[i]].name); 3520 } 3521 return limit; 3522 } 3523 3524 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3525 struct rte_eth_xstat_name *xstats_names, unsigned limit) 3526 { 3527 unsigned i; 3528 3529 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 3530 return -ENOMEM; 3531 3532 if (xstats_names != NULL) 3533 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 3534 strlcpy(xstats_names[i].name, 3535 rte_ixgbevf_stats_strings[i].name, 3536 sizeof(xstats_names[i].name)); 3537 return IXGBEVF_NB_XSTATS; 3538 } 3539 3540 static int 3541 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3542 unsigned n) 3543 { 3544 struct ixgbe_hw *hw = 3545 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3546 struct ixgbe_hw_stats *hw_stats = 3547 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3548 struct ixgbe_macsec_stats *macsec_stats = 3549 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3550 dev->data->dev_private); 3551 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3552 unsigned i, stat, count = 0; 3553 3554 count = ixgbe_xstats_calc_num(); 3555 3556 if (n < count) 3557 return count; 3558 3559 total_missed_rx = 0; 3560 total_qbrc = 0; 3561 total_qprc = 0; 3562 total_qprdc = 0; 3563 3564 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3565 &total_qbrc, &total_qprc, &total_qprdc); 3566 3567 /* If this is a reset xstats is NULL, and we have cleared the 3568 * registers by reading them. 
3569 */ 3570 if (!xstats) 3571 return 0; 3572 3573 /* Extended stats from ixgbe_hw_stats */ 3574 count = 0; 3575 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3576 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3577 rte_ixgbe_stats_strings[i].offset); 3578 xstats[count].id = count; 3579 count++; 3580 } 3581 3582 /* MACsec Stats */ 3583 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3584 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + 3585 rte_ixgbe_macsec_strings[i].offset); 3586 xstats[count].id = count; 3587 count++; 3588 } 3589 3590 /* RX Priority Stats */ 3591 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3592 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3593 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3594 rte_ixgbe_rxq_strings[stat].offset + 3595 (sizeof(uint64_t) * i)); 3596 xstats[count].id = count; 3597 count++; 3598 } 3599 } 3600 3601 /* TX Priority Stats */ 3602 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3603 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3604 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3605 rte_ixgbe_txq_strings[stat].offset + 3606 (sizeof(uint64_t) * i)); 3607 xstats[count].id = count; 3608 count++; 3609 } 3610 } 3611 return count; 3612 } 3613 3614 static int 3615 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 3616 uint64_t *values, unsigned int n) 3617 { 3618 if (!ids) { 3619 struct ixgbe_hw *hw = 3620 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3621 struct ixgbe_hw_stats *hw_stats = 3622 IXGBE_DEV_PRIVATE_TO_STATS( 3623 dev->data->dev_private); 3624 struct ixgbe_macsec_stats *macsec_stats = 3625 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3626 dev->data->dev_private); 3627 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3628 unsigned int i, stat, count = 0; 3629 3630 count = ixgbe_xstats_calc_num(); 3631 3632 if (!ids && n < count) 3633 return count; 3634 3635 total_missed_rx = 0; 3636 total_qbrc = 0; 3637 total_qprc = 0; 3638 total_qprdc = 0; 3639 3640 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, 3641 &total_missed_rx, &total_qbrc, &total_qprc, 3642 &total_qprdc); 3643 3644 /* If this is a reset xstats is NULL, and we have cleared the 3645 * registers by reading them. 
3646 */ 3647 if (!ids && !values) 3648 return 0; 3649 3650 /* Extended stats from ixgbe_hw_stats */ 3651 count = 0; 3652 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3653 values[count] = *(uint64_t *)(((char *)hw_stats) + 3654 rte_ixgbe_stats_strings[i].offset); 3655 count++; 3656 } 3657 3658 /* MACsec Stats */ 3659 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3660 values[count] = *(uint64_t *)(((char *)macsec_stats) + 3661 rte_ixgbe_macsec_strings[i].offset); 3662 count++; 3663 } 3664 3665 /* RX Priority Stats */ 3666 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3667 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3668 values[count] = 3669 *(uint64_t *)(((char *)hw_stats) + 3670 rte_ixgbe_rxq_strings[stat].offset + 3671 (sizeof(uint64_t) * i)); 3672 count++; 3673 } 3674 } 3675 3676 /* TX Priority Stats */ 3677 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3678 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3679 values[count] = 3680 *(uint64_t *)(((char *)hw_stats) + 3681 rte_ixgbe_txq_strings[stat].offset + 3682 (sizeof(uint64_t) * i)); 3683 count++; 3684 } 3685 } 3686 return count; 3687 } 3688 3689 uint16_t i; 3690 uint16_t size = ixgbe_xstats_calc_num(); 3691 uint64_t values_copy[size]; 3692 3693 ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); 3694 3695 for (i = 0; i < n; i++) { 3696 if (ids[i] >= size) { 3697 PMD_INIT_LOG(ERR, "id value isn't valid"); 3698 return -1; 3699 } 3700 values[i] = values_copy[ids[i]]; 3701 } 3702 return n; 3703 } 3704 3705 static int 3706 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) 3707 { 3708 struct ixgbe_hw_stats *stats = 3709 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3710 struct ixgbe_macsec_stats *macsec_stats = 3711 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3712 dev->data->dev_private); 3713 3714 unsigned count = ixgbe_xstats_calc_num(); 3715 3716 /* HW registers are cleared on read */ 3717 ixgbe_dev_xstats_get(dev, NULL, count); 3718 3719 /* Reset software totals */ 3720 memset(stats, 0, sizeof(*stats)); 3721 memset(macsec_stats, 0, sizeof(*macsec_stats)); 3722 3723 return 0; 3724 } 3725 3726 static void 3727 ixgbevf_update_stats(struct rte_eth_dev *dev) 3728 { 3729 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3730 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3731 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3732 3733 /* Good Rx packet, include VF loopback */ 3734 UPDATE_VF_STAT(IXGBE_VFGPRC, 3735 hw_stats->last_vfgprc, hw_stats->vfgprc); 3736 3737 /* Good Rx octets, include VF loopback */ 3738 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 3739 hw_stats->last_vfgorc, hw_stats->vfgorc); 3740 3741 /* Good Tx packet, include VF loopback */ 3742 UPDATE_VF_STAT(IXGBE_VFGPTC, 3743 hw_stats->last_vfgptc, hw_stats->vfgptc); 3744 3745 /* Good Tx octets, include VF loopback */ 3746 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 3747 hw_stats->last_vfgotc, hw_stats->vfgotc); 3748 3749 /* Rx Multicst Packet */ 3750 UPDATE_VF_STAT(IXGBE_VFMPRC, 3751 hw_stats->last_vfmprc, hw_stats->vfmprc); 3752 } 3753 3754 static int 3755 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3756 unsigned n) 3757 { 3758 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3759 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3760 unsigned i; 3761 3762 if (n < IXGBEVF_NB_XSTATS) 3763 return IXGBEVF_NB_XSTATS; 3764 3765 ixgbevf_update_stats(dev); 3766 3767 if (!xstats) 3768 return 0; 3769 3770 /* Extended stats */ 3771 for (i = 0; i < 
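/*
 * For repeated polling of a single counter, the by-id path above avoids
 * copying and matching every name on each call.  A sketch of the
 * application side, assuming the port is started; the counter name
 * "rx_good_packets" is one of the generic ethdev xstats and is used here
 * only as an example:
 *
 *     #include <stdio.h>
 *     #include <inttypes.h>
 *     #include <rte_ethdev.h>
 *
 *     static int
 *     poll_one_xstat(uint16_t port_id)
 *     {
 *         uint64_t id, value;
 *
 *         if (rte_eth_xstats_get_id_by_name(port_id,
 *                 "rx_good_packets", &id) != 0)
 *             return -1;
 *         if (rte_eth_xstats_get_by_id(port_id, &id, &value, 1) != 1)
 *             return -1;
 *         printf("rx_good_packets = %" PRIu64 "\n", value);
 *         return 0;
 *     }
 */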
IXGBEVF_NB_XSTATS; i++) { 3772 xstats[i].id = i; 3773 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 3774 rte_ixgbevf_stats_strings[i].offset); 3775 } 3776 3777 return IXGBEVF_NB_XSTATS; 3778 } 3779 3780 static int 3781 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3782 { 3783 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3784 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3785 3786 ixgbevf_update_stats(dev); 3787 3788 if (stats == NULL) 3789 return -EINVAL; 3790 3791 stats->ipackets = hw_stats->vfgprc; 3792 stats->ibytes = hw_stats->vfgorc; 3793 stats->opackets = hw_stats->vfgptc; 3794 stats->obytes = hw_stats->vfgotc; 3795 return 0; 3796 } 3797 3798 static int 3799 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 3800 { 3801 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3802 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3803 3804 /* Sync HW register to the last stats */ 3805 ixgbevf_dev_stats_get(dev, NULL); 3806 3807 /* reset HW current stats*/ 3808 hw_stats->vfgprc = 0; 3809 hw_stats->vfgorc = 0; 3810 hw_stats->vfgptc = 0; 3811 hw_stats->vfgotc = 0; 3812 hw_stats->vfmprc = 0; 3813 3814 return 0; 3815 } 3816 3817 static int 3818 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 3819 { 3820 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3821 u16 eeprom_verh, eeprom_verl; 3822 u32 etrack_id; 3823 int ret; 3824 3825 ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); 3826 ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); 3827 3828 etrack_id = (eeprom_verh << 16) | eeprom_verl; 3829 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); 3830 if (ret < 0) 3831 return -EINVAL; 3832 3833 ret += 1; /* add the size of '\0' */ 3834 if (fw_size < (size_t)ret) 3835 return ret; 3836 else 3837 return 0; 3838 } 3839 3840 static int 3841 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3842 { 3843 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3844 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3845 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3846 3847 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3848 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3849 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3850 /* 3851 * When DCB/VT is off, maximum number of queues changes, 3852 * except for 82598EB, which remains constant. 
3853 */ 3854 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE && 3855 hw->mac.type != ixgbe_mac_82598EB) 3856 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3857 } 3858 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3859 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3860 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3861 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3862 dev_info->max_vfs = pci_dev->max_vfs; 3863 if (hw->mac.type == ixgbe_mac_82598EB) 3864 dev_info->max_vmdq_pools = RTE_ETH_16_POOLS; 3865 else 3866 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; 3867 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3868 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3869 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3870 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3871 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3872 dev_info->rx_queue_offload_capa); 3873 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3874 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3875 3876 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3877 .rx_thresh = { 3878 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3879 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3880 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3881 }, 3882 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3883 .rx_drop_en = 0, 3884 .offloads = 0, 3885 }; 3886 3887 dev_info->default_txconf = (struct rte_eth_txconf) { 3888 .tx_thresh = { 3889 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3890 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3891 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3892 }, 3893 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3894 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3895 .offloads = 0, 3896 }; 3897 3898 dev_info->rx_desc_lim = rx_desc_lim; 3899 dev_info->tx_desc_lim = tx_desc_lim; 3900 3901 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3902 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3903 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3904 3905 dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G; 3906 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3907 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 3908 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M | 3909 RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G; 3910 3911 if (hw->mac.type == ixgbe_mac_X540 || 3912 hw->mac.type == ixgbe_mac_X540_vf || 3913 hw->mac.type == ixgbe_mac_X550 || 3914 hw->mac.type == ixgbe_mac_X550_vf) { 3915 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M; 3916 } 3917 if (hw->mac.type == ixgbe_mac_X550) { 3918 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G; 3919 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G; 3920 } 3921 3922 /* Driver-preferred Rx/Tx parameters */ 3923 dev_info->default_rxportconf.burst_size = 32; 3924 dev_info->default_txportconf.burst_size = 32; 3925 dev_info->default_rxportconf.nb_queues = 1; 3926 dev_info->default_txportconf.nb_queues = 1; 3927 dev_info->default_rxportconf.ring_size = 256; 3928 dev_info->default_txportconf.ring_size = 256; 3929 3930 return 0; 3931 } 3932 3933 static const uint32_t * 3934 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 3935 { 3936 static const uint32_t ptypes[] = { 3937 /* For non-vec functions, 3938 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 3939 * for vec functions, 3940 * refers to _recv_raw_pkts_vec(). 
3941 */ 3942 RTE_PTYPE_L2_ETHER, 3943 RTE_PTYPE_L3_IPV4, 3944 RTE_PTYPE_L3_IPV4_EXT, 3945 RTE_PTYPE_L3_IPV6, 3946 RTE_PTYPE_L3_IPV6_EXT, 3947 RTE_PTYPE_L4_SCTP, 3948 RTE_PTYPE_L4_TCP, 3949 RTE_PTYPE_L4_UDP, 3950 RTE_PTYPE_TUNNEL_IP, 3951 RTE_PTYPE_INNER_L3_IPV6, 3952 RTE_PTYPE_INNER_L3_IPV6_EXT, 3953 RTE_PTYPE_INNER_L4_TCP, 3954 RTE_PTYPE_INNER_L4_UDP, 3955 RTE_PTYPE_UNKNOWN 3956 }; 3957 3958 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 3959 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 3960 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 3961 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) 3962 return ptypes; 3963 3964 #if defined(RTE_ARCH_X86) || defined(__ARM_NEON) 3965 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || 3966 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) 3967 return ptypes; 3968 #endif 3969 return NULL; 3970 } 3971 3972 static int 3973 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 3974 struct rte_eth_dev_info *dev_info) 3975 { 3976 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3977 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3978 3979 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3980 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3981 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 3982 dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ 3983 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3984 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3985 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3986 dev_info->max_vfs = pci_dev->max_vfs; 3987 if (hw->mac.type == ixgbe_mac_82598EB) 3988 dev_info->max_vmdq_pools = RTE_ETH_16_POOLS; 3989 else 3990 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; 3991 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3992 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3993 dev_info->rx_queue_offload_capa); 3994 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3995 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3996 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3997 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3998 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3999 4000 dev_info->default_rxconf = (struct rte_eth_rxconf) { 4001 .rx_thresh = { 4002 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 4003 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 4004 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 4005 }, 4006 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 4007 .rx_drop_en = 0, 4008 .offloads = 0, 4009 }; 4010 4011 dev_info->default_txconf = (struct rte_eth_txconf) { 4012 .tx_thresh = { 4013 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 4014 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 4015 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 4016 }, 4017 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 4018 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 4019 .offloads = 0, 4020 }; 4021 4022 dev_info->rx_desc_lim = rx_desc_lim; 4023 dev_info->tx_desc_lim = tx_desc_lim; 4024 4025 return 0; 4026 } 4027 4028 static int 4029 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 4030 bool *link_up, int wait_to_complete) 4031 { 4032 struct ixgbe_adapter *adapter = container_of(hw, 4033 struct ixgbe_adapter, hw); 4034 struct ixgbe_mbx_info *mbx = &hw->mbx; 4035 struct ixgbe_mac_info *mac = &hw->mac; 4036 uint32_t links_reg, in_msg; 4037 int ret_val = 0; 4038 4039 /* If we were hit with a reset drop the link */ 4040 if (!mbx->ops.check_for_rst(hw, 0) || 
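/*
 * The ptype table above is what an application sees through
 * rte_eth_dev_get_supported_ptypes(); which table applies depends on the
 * Rx burst function selected at queue setup time.  A sketch of the query
 * (print_ptypes() is an illustrative helper, not part of this PMD):
 *
 *     #include <stdio.h>
 *     #include <rte_common.h>
 *     #include <rte_ethdev.h>
 *     #include <rte_mbuf_ptype.h>
 *
 *     static void
 *     print_ptypes(uint16_t port_id)
 *     {
 *         uint32_t ptypes[32];
 *         int i, num;
 *
 *         num = rte_eth_dev_get_supported_ptypes(port_id,
 *                 RTE_PTYPE_ALL_MASK, ptypes, RTE_DIM(ptypes));
 *         for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++)
 *             printf("supported ptype 0x%08x\n", ptypes[i]);
 *     }
 */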
!mbx->timeout) 4041 mac->get_link_status = true; 4042 4043 if (!mac->get_link_status) 4044 goto out; 4045 4046 /* if link status is down no point in checking to see if pf is up */ 4047 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4048 if (!(links_reg & IXGBE_LINKS_UP)) 4049 goto out; 4050 4051 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs 4052 * before the link status is correct 4053 */ 4054 if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { 4055 int i; 4056 4057 for (i = 0; i < 5; i++) { 4058 rte_delay_us(100); 4059 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4060 4061 if (!(links_reg & IXGBE_LINKS_UP)) 4062 goto out; 4063 } 4064 } 4065 4066 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 4067 case IXGBE_LINKS_SPEED_10G_82599: 4068 *speed = IXGBE_LINK_SPEED_10GB_FULL; 4069 if (hw->mac.type >= ixgbe_mac_X550) { 4070 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4071 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 4072 } 4073 break; 4074 case IXGBE_LINKS_SPEED_1G_82599: 4075 *speed = IXGBE_LINK_SPEED_1GB_FULL; 4076 break; 4077 case IXGBE_LINKS_SPEED_100_82599: 4078 *speed = IXGBE_LINK_SPEED_100_FULL; 4079 if (hw->mac.type == ixgbe_mac_X550) { 4080 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4081 *speed = IXGBE_LINK_SPEED_5GB_FULL; 4082 } 4083 break; 4084 case IXGBE_LINKS_SPEED_10_X550EM_A: 4085 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4086 /* Since Reserved in older MAC's */ 4087 if (hw->mac.type >= ixgbe_mac_X550) 4088 *speed = IXGBE_LINK_SPEED_10_FULL; 4089 break; 4090 default: 4091 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4092 } 4093 4094 if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) { 4095 if (*speed == IXGBE_LINK_SPEED_UNKNOWN) 4096 mac->get_link_status = true; 4097 else 4098 mac->get_link_status = false; 4099 4100 goto out; 4101 } 4102 4103 /* if the read failed it could just be a mailbox collision, best wait 4104 * until we are called again and don't report an error 4105 */ 4106 if (mbx->ops.read(hw, &in_msg, 1, 0)) 4107 goto out; 4108 4109 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { 4110 /* msg is not CTS and is NACK we must have lost CTS status */ 4111 if (in_msg & IXGBE_VT_MSGTYPE_NACK) 4112 mac->get_link_status = false; 4113 goto out; 4114 } 4115 4116 /* the pf is talking, if we timed out in the past we reinit */ 4117 if (!mbx->timeout) { 4118 ret_val = -1; 4119 goto out; 4120 } 4121 4122 /* if we passed all the tests above then the link is up and we no 4123 * longer need to check for link 4124 */ 4125 mac->get_link_status = false; 4126 4127 out: 4128 *link_up = !mac->get_link_status; 4129 return ret_val; 4130 } 4131 4132 /* 4133 * If @timeout_ms was 0, it means that it will not return until link complete. 4134 * It returns 1 on complete, return 0 on timeout. 4135 */ 4136 static int 4137 ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms) 4138 { 4139 #define WARNING_TIMEOUT 9000 /* 9s in total */ 4140 struct ixgbe_adapter *ad = dev->data->dev_private; 4141 uint32_t timeout = timeout_ms ? 
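/*
 * ixgbevf_check_link() above and ixgbe_dev_link_update_share() further down
 * ultimately feed rte_eth_link_get()/rte_eth_link_get_nowait() on the
 * application side.  A minimal sketch, assuming a started port; the helper
 * name and the use of rte_eth_link_to_str(), available in recent DPDK
 * releases, are assumptions:
 *
 *     #include <stdio.h>
 *     #include <rte_ethdev.h>
 *
 *     static void
 *     show_link(uint16_t port_id)
 *     {
 *         struct rte_eth_link link;
 *         char buf[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *         if (rte_eth_link_get_nowait(port_id, &link) != 0)
 *             return;
 *         rte_eth_link_to_str(buf, sizeof(buf), &link);
 *         printf("port %u: %s\n", port_id, buf);
 *     }
 *
 * rte_eth_link_get() differs only in that it may block (up to several
 * seconds) waiting for link-up when the driver honours wait_to_complete.
 */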
timeout_ms : WARNING_TIMEOUT; 4142 4143 while (rte_atomic32_read(&ad->link_thread_running)) { 4144 msec_delay(1); 4145 timeout--; 4146 4147 if (timeout_ms) { 4148 if (!timeout) 4149 return 0; 4150 } else if (!timeout) { 4151 /* It will not return until link complete */ 4152 timeout = WARNING_TIMEOUT; 4153 PMD_DRV_LOG(ERR, "IXGBE link thread not complete too long time!"); 4154 } 4155 } 4156 4157 return 1; 4158 } 4159 4160 static void * 4161 ixgbe_dev_setup_link_thread_handler(void *param) 4162 { 4163 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4164 struct ixgbe_adapter *ad = dev->data->dev_private; 4165 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4166 struct ixgbe_interrupt *intr = 4167 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4168 u32 speed; 4169 bool autoneg = false; 4170 4171 pthread_detach(pthread_self()); 4172 speed = hw->phy.autoneg_advertised; 4173 if (!speed) 4174 ixgbe_get_link_capabilities(hw, &speed, &autoneg); 4175 4176 ixgbe_setup_link(hw, speed, true); 4177 4178 intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 4179 rte_atomic32_clear(&ad->link_thread_running); 4180 return NULL; 4181 } 4182 4183 /* 4184 * In freebsd environment, nic_uio drivers do not support interrupts, 4185 * rte_intr_callback_register() will fail to register interrupts. 4186 * We can not make link status to change from down to up by interrupt 4187 * callback. So we need to wait for the controller to acquire link 4188 * when ports start. 4189 * It returns 0 on link up. 4190 */ 4191 static int 4192 ixgbe_wait_for_link_up(struct ixgbe_hw *hw) 4193 { 4194 #ifdef RTE_EXEC_ENV_FREEBSD 4195 int err, i; 4196 bool link_up = false; 4197 uint32_t speed = 0; 4198 const int nb_iter = 25; 4199 4200 for (i = 0; i < nb_iter; i++) { 4201 err = ixgbe_check_link(hw, &speed, &link_up, 0); 4202 if (err) 4203 return err; 4204 if (link_up) 4205 return 0; 4206 msec_delay(200); 4207 } 4208 4209 return 0; 4210 #else 4211 RTE_SET_USED(hw); 4212 return 0; 4213 #endif 4214 } 4215 4216 /* return 0 means link status changed, -1 means not changed */ 4217 int 4218 ixgbe_dev_link_update_share(struct rte_eth_dev *dev, 4219 int wait_to_complete, int vf) 4220 { 4221 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4222 struct ixgbe_adapter *ad = dev->data->dev_private; 4223 struct rte_eth_link link; 4224 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4225 struct ixgbe_interrupt *intr = 4226 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4227 bool link_up; 4228 int diag; 4229 int wait = 1; 4230 u32 esdp_reg; 4231 4232 memset(&link, 0, sizeof(link)); 4233 link.link_status = RTE_ETH_LINK_DOWN; 4234 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 4235 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 4236 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 4237 RTE_ETH_LINK_SPEED_FIXED); 4238 4239 hw->mac.get_link_status = true; 4240 4241 if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) 4242 return rte_eth_linkstatus_set(dev, &link); 4243 4244 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 4245 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 4246 wait = 0; 4247 4248 /* BSD has no interrupt mechanism, so force NIC status synchronization. 
*/ 4249 #ifdef RTE_EXEC_ENV_FREEBSD 4250 wait = 1; 4251 #endif 4252 4253 if (vf) 4254 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); 4255 else 4256 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); 4257 4258 if (diag != 0) { 4259 link.link_speed = RTE_ETH_SPEED_NUM_100M; 4260 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 4261 return rte_eth_linkstatus_set(dev, &link); 4262 } 4263 4264 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4265 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 4266 if ((esdp_reg & IXGBE_ESDP_SDP3)) 4267 link_up = 0; 4268 } 4269 4270 if (link_up == 0) { 4271 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4272 ixgbe_dev_wait_setup_link_complete(dev, 0); 4273 if (rte_atomic32_test_and_set(&ad->link_thread_running)) { 4274 /* To avoid race condition between threads, set 4275 * the IXGBE_FLAG_NEED_LINK_CONFIG flag only 4276 * when there is no link thread running. 4277 */ 4278 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 4279 if (rte_ctrl_thread_create(&ad->link_thread_tid, 4280 "ixgbe-link-handler", 4281 NULL, 4282 ixgbe_dev_setup_link_thread_handler, 4283 dev) < 0) { 4284 PMD_DRV_LOG(ERR, 4285 "Create link thread failed!"); 4286 rte_atomic32_clear(&ad->link_thread_running); 4287 } 4288 } else { 4289 PMD_DRV_LOG(ERR, 4290 "Other link thread is running now!"); 4291 } 4292 } 4293 return rte_eth_linkstatus_set(dev, &link); 4294 } 4295 4296 link.link_status = RTE_ETH_LINK_UP; 4297 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 4298 4299 switch (link_speed) { 4300 default: 4301 case IXGBE_LINK_SPEED_UNKNOWN: 4302 link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; 4303 break; 4304 4305 case IXGBE_LINK_SPEED_10_FULL: 4306 link.link_speed = RTE_ETH_SPEED_NUM_10M; 4307 break; 4308 4309 case IXGBE_LINK_SPEED_100_FULL: 4310 link.link_speed = RTE_ETH_SPEED_NUM_100M; 4311 break; 4312 4313 case IXGBE_LINK_SPEED_1GB_FULL: 4314 link.link_speed = RTE_ETH_SPEED_NUM_1G; 4315 break; 4316 4317 case IXGBE_LINK_SPEED_2_5GB_FULL: 4318 link.link_speed = RTE_ETH_SPEED_NUM_2_5G; 4319 break; 4320 4321 case IXGBE_LINK_SPEED_5GB_FULL: 4322 link.link_speed = RTE_ETH_SPEED_NUM_5G; 4323 break; 4324 4325 case IXGBE_LINK_SPEED_10GB_FULL: 4326 link.link_speed = RTE_ETH_SPEED_NUM_10G; 4327 break; 4328 } 4329 4330 return rte_eth_linkstatus_set(dev, &link); 4331 } 4332 4333 static int 4334 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4335 { 4336 return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); 4337 } 4338 4339 static int 4340 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4341 { 4342 return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); 4343 } 4344 4345 static int 4346 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 4347 { 4348 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4349 uint32_t fctrl; 4350 4351 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4352 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4353 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4354 4355 return 0; 4356 } 4357 4358 static int 4359 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 4360 { 4361 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4362 uint32_t fctrl; 4363 4364 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4365 fctrl &= (~IXGBE_FCTRL_UPE); 4366 if (dev->data->all_multicast == 1) 4367 fctrl |= IXGBE_FCTRL_MPE; 4368 else 4369 fctrl &= (~IXGBE_FCTRL_MPE); 4370 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4371 4372 return 0; 4373 } 4374 4375 static int 4376 ixgbe_dev_allmulticast_enable(struct 
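/*
 * The FCTRL updates in the promiscuous and allmulticast handlers around
 * this point are reached through the generic ethdev API.  A sketch of
 * typical application usage, assuming port_id is valid (the helper name and
 * error handling shown are illustrative):
 *
 *     #include <rte_ethdev.h>
 *
 *     static int
 *     enable_promisc(uint16_t port_id)
 *     {
 *         int ret = rte_eth_promiscuous_enable(port_id);
 *
 *         if (ret != 0)
 *             return ret;
 *         return rte_eth_promiscuous_get(port_id) == 1 ? 0 : -1;
 *     }
 *
 * rte_eth_allmulticast_enable()/rte_eth_allmulticast_disable() map to the
 * allmulticast handlers that follow; note that allmulticast cannot be
 * cleared while promiscuous mode is on, which is exactly the early return
 * in ixgbe_dev_allmulticast_disable() below.
 */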
rte_eth_dev *dev) 4377 { 4378 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4379 uint32_t fctrl; 4380 4381 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4382 fctrl |= IXGBE_FCTRL_MPE; 4383 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4384 4385 return 0; 4386 } 4387 4388 static int 4389 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 4390 { 4391 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4392 uint32_t fctrl; 4393 4394 if (dev->data->promiscuous == 1) 4395 return 0; /* must remain in all_multicast mode */ 4396 4397 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4398 fctrl &= (~IXGBE_FCTRL_MPE); 4399 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4400 4401 return 0; 4402 } 4403 4404 /** 4405 * It clears the interrupt causes and enables the interrupt. 4406 * It will be called once only during nic initialized. 4407 * 4408 * @param dev 4409 * Pointer to struct rte_eth_dev. 4410 * @param on 4411 * Enable or Disable. 4412 * 4413 * @return 4414 * - On success, zero. 4415 * - On failure, a negative value. 4416 */ 4417 static int 4418 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 4419 { 4420 struct ixgbe_interrupt *intr = 4421 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4422 4423 ixgbe_dev_link_status_print(dev); 4424 if (on) 4425 intr->mask |= IXGBE_EICR_LSC; 4426 else 4427 intr->mask &= ~IXGBE_EICR_LSC; 4428 4429 return 0; 4430 } 4431 4432 /** 4433 * It clears the interrupt causes and enables the interrupt. 4434 * It will be called once only during nic initialized. 4435 * 4436 * @param dev 4437 * Pointer to struct rte_eth_dev. 4438 * 4439 * @return 4440 * - On success, zero. 4441 * - On failure, a negative value. 4442 */ 4443 static int 4444 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) 4445 { 4446 struct ixgbe_interrupt *intr = 4447 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4448 4449 intr->mask |= IXGBE_EICR_RTX_QUEUE; 4450 4451 return 0; 4452 } 4453 4454 /** 4455 * It clears the interrupt causes and enables the interrupt. 4456 * It will be called once only during nic initialized. 4457 * 4458 * @param dev 4459 * Pointer to struct rte_eth_dev. 4460 * 4461 * @return 4462 * - On success, zero. 4463 * - On failure, a negative value. 4464 */ 4465 static int 4466 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) 4467 { 4468 struct ixgbe_interrupt *intr = 4469 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4470 4471 intr->mask |= IXGBE_EICR_LINKSEC; 4472 4473 return 0; 4474 } 4475 4476 /* 4477 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. 4478 * 4479 * @param dev 4480 * Pointer to struct rte_eth_dev. 4481 * 4482 * @return 4483 * - On success, zero. 4484 * - On failure, a negative value. 
4485 */ 4486 static int 4487 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 4488 { 4489 uint32_t eicr; 4490 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4491 struct ixgbe_interrupt *intr = 4492 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4493 4494 /* clear all cause mask */ 4495 ixgbe_disable_intr(hw); 4496 4497 /* read-on-clear nic registers here */ 4498 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4499 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 4500 4501 intr->flags = 0; 4502 4503 /* set flag for async link update */ 4504 if (eicr & IXGBE_EICR_LSC) 4505 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4506 4507 if (eicr & IXGBE_EICR_MAILBOX) 4508 intr->flags |= IXGBE_FLAG_MAILBOX; 4509 4510 if (eicr & IXGBE_EICR_LINKSEC) 4511 intr->flags |= IXGBE_FLAG_MACSEC; 4512 4513 if (hw->mac.type == ixgbe_mac_X550EM_x && 4514 hw->phy.type == ixgbe_phy_x550em_ext_t && 4515 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) 4516 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; 4517 4518 return 0; 4519 } 4520 4521 /** 4522 * It gets and then prints the link status. 4523 * 4524 * @param dev 4525 * Pointer to struct rte_eth_dev. 4526 * 4527 * @return 4528 * - On success, zero. 4529 * - On failure, a negative value. 4530 */ 4531 static void 4532 ixgbe_dev_link_status_print(struct rte_eth_dev *dev) 4533 { 4534 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4535 struct rte_eth_link link; 4536 4537 rte_eth_linkstatus_get(dev, &link); 4538 4539 if (link.link_status) { 4540 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 4541 (int)(dev->data->port_id), 4542 (unsigned)link.link_speed, 4543 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? 4544 "full-duplex" : "half-duplex"); 4545 } else { 4546 PMD_INIT_LOG(INFO, " Port %d: Link Down", 4547 (int)(dev->data->port_id)); 4548 } 4549 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 4550 pci_dev->addr.domain, 4551 pci_dev->addr.bus, 4552 pci_dev->addr.devid, 4553 pci_dev->addr.function); 4554 } 4555 4556 /* 4557 * It executes link_update after knowing an interrupt occurred. 4558 * 4559 * @param dev 4560 * Pointer to struct rte_eth_dev. 4561 * 4562 * @return 4563 * - On success, zero. 4564 * - On failure, a negative value. 
4565 */ 4566 static int 4567 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) 4568 { 4569 struct ixgbe_interrupt *intr = 4570 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4571 int64_t timeout; 4572 struct ixgbe_hw *hw = 4573 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4574 4575 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); 4576 4577 if (intr->flags & IXGBE_FLAG_MAILBOX) { 4578 ixgbe_pf_mbx_process(dev); 4579 intr->flags &= ~IXGBE_FLAG_MAILBOX; 4580 } 4581 4582 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4583 ixgbe_handle_lasi(hw); 4584 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4585 } 4586 4587 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4588 struct rte_eth_link link; 4589 4590 /* get the link status before link update, for predicting later */ 4591 rte_eth_linkstatus_get(dev, &link); 4592 4593 ixgbe_dev_link_update(dev, 0); 4594 4595 /* likely to up */ 4596 if (!link.link_status) 4597 /* handle it 1 sec later, wait it being stable */ 4598 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; 4599 /* likely to down */ 4600 else 4601 /* handle it 4 sec later, wait it being stable */ 4602 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; 4603 4604 ixgbe_dev_link_status_print(dev); 4605 if (rte_eal_alarm_set(timeout * 1000, 4606 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) 4607 PMD_DRV_LOG(ERR, "Error setting alarm"); 4608 else { 4609 /* remember original mask */ 4610 intr->mask_original = intr->mask; 4611 /* only disable lsc interrupt */ 4612 intr->mask &= ~IXGBE_EIMS_LSC; 4613 } 4614 } 4615 4616 PMD_DRV_LOG(DEBUG, "enable intr immediately"); 4617 ixgbe_enable_intr(dev); 4618 4619 return 0; 4620 } 4621 4622 /** 4623 * Interrupt handler which shall be registered for alarm callback for delayed 4624 * handling specific interrupt to wait for the stable nic state. As the 4625 * NIC interrupt state is not stable for ixgbe after link is just down, 4626 * it needs to wait 4 seconds to get the stable status. 4627 * 4628 * @param handle 4629 * Pointer to interrupt handle. 4630 * @param param 4631 * The address of parameter (struct rte_eth_dev *) registered before. 
4632 * 4633 * @return 4634 * void 4635 */ 4636 static void 4637 ixgbe_dev_interrupt_delayed_handler(void *param) 4638 { 4639 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4640 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4641 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 4642 struct ixgbe_interrupt *intr = 4643 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4644 struct ixgbe_hw *hw = 4645 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4646 uint32_t eicr; 4647 4648 ixgbe_disable_intr(hw); 4649 4650 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4651 if (eicr & IXGBE_EICR_MAILBOX) 4652 ixgbe_pf_mbx_process(dev); 4653 4654 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4655 ixgbe_handle_lasi(hw); 4656 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4657 } 4658 4659 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4660 ixgbe_dev_link_update(dev, 0); 4661 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 4662 ixgbe_dev_link_status_print(dev); 4663 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 4664 } 4665 4666 if (intr->flags & IXGBE_FLAG_MACSEC) { 4667 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, NULL); 4668 intr->flags &= ~IXGBE_FLAG_MACSEC; 4669 } 4670 4671 /* restore original mask */ 4672 intr->mask = intr->mask_original; 4673 intr->mask_original = 0; 4674 4675 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 4676 ixgbe_enable_intr(dev); 4677 rte_intr_ack(intr_handle); 4678 } 4679 4680 /** 4681 * Interrupt handler triggered by NIC for handling 4682 * specific interrupt. 4683 * 4684 * @param handle 4685 * Pointer to interrupt handle. 4686 * @param param 4687 * The address of parameter (struct rte_eth_dev *) registered before. 4688 * 4689 * @return 4690 * void 4691 */ 4692 static void 4693 ixgbe_dev_interrupt_handler(void *param) 4694 { 4695 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4696 4697 ixgbe_dev_interrupt_get_status(dev); 4698 ixgbe_dev_interrupt_action(dev); 4699 } 4700 4701 static int 4702 ixgbe_dev_led_on(struct rte_eth_dev *dev) 4703 { 4704 struct ixgbe_hw *hw; 4705 4706 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4707 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4708 } 4709 4710 static int 4711 ixgbe_dev_led_off(struct rte_eth_dev *dev) 4712 { 4713 struct ixgbe_hw *hw; 4714 4715 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4716 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4717 } 4718 4719 static int 4720 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4721 { 4722 struct ixgbe_hw *hw; 4723 uint32_t mflcn_reg; 4724 uint32_t fccfg_reg; 4725 int rx_pause; 4726 int tx_pause; 4727 4728 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4729 4730 fc_conf->pause_time = hw->fc.pause_time; 4731 fc_conf->high_water = hw->fc.high_water[0]; 4732 fc_conf->low_water = hw->fc.low_water[0]; 4733 fc_conf->send_xon = hw->fc.send_xon; 4734 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 4735 4736 /* 4737 * Return rx_pause status according to actual setting of 4738 * MFLCN register. 4739 */ 4740 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4741 if (mflcn_reg & IXGBE_MFLCN_PMCF) 4742 fc_conf->mac_ctrl_frame_fwd = 1; 4743 else 4744 fc_conf->mac_ctrl_frame_fwd = 0; 4745 4746 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) 4747 rx_pause = 1; 4748 else 4749 rx_pause = 0; 4750 4751 /* 4752 * Return tx_pause status according to actual setting of 4753 * FCCFG register. 
4754 */ 4755 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4756 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 4757 tx_pause = 1; 4758 else 4759 tx_pause = 0; 4760 4761 if (rx_pause && tx_pause) 4762 fc_conf->mode = RTE_ETH_FC_FULL; 4763 else if (rx_pause) 4764 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 4765 else if (tx_pause) 4766 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 4767 else 4768 fc_conf->mode = RTE_ETH_FC_NONE; 4769 4770 return 0; 4771 } 4772 4773 static int 4774 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4775 { 4776 struct ixgbe_hw *hw; 4777 struct ixgbe_adapter *adapter = dev->data->dev_private; 4778 int err; 4779 uint32_t rx_buf_size; 4780 uint32_t max_high_water; 4781 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4782 ixgbe_fc_none, 4783 ixgbe_fc_rx_pause, 4784 ixgbe_fc_tx_pause, 4785 ixgbe_fc_full 4786 }; 4787 4788 PMD_INIT_FUNC_TRACE(); 4789 4790 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4791 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 4792 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4793 4794 /* 4795 * At least reserve one Ethernet frame for watermark 4796 * high_water/low_water in kilo bytes for ixgbe 4797 */ 4798 max_high_water = (rx_buf_size - 4799 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4800 if ((fc_conf->high_water > max_high_water) || 4801 (fc_conf->high_water < fc_conf->low_water)) { 4802 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4803 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4804 return -EINVAL; 4805 } 4806 4807 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 4808 hw->fc.pause_time = fc_conf->pause_time; 4809 hw->fc.high_water[0] = fc_conf->high_water; 4810 hw->fc.low_water[0] = fc_conf->low_water; 4811 hw->fc.send_xon = fc_conf->send_xon; 4812 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 4813 adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 4814 4815 err = ixgbe_flow_ctrl_enable(dev, hw); 4816 if (err < 0) { 4817 PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err); 4818 return -EIO; 4819 } 4820 return err; 4821 } 4822 4823 /** 4824 * ixgbe_pfc_enable_generic - Enable flow control 4825 * @hw: pointer to hardware structure 4826 * @tc_num: traffic class number 4827 * Enable flow control according to the current settings. 
4828 */ 4829 static int 4830 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) 4831 { 4832 int ret_val = 0; 4833 uint32_t mflcn_reg, fccfg_reg; 4834 uint32_t reg; 4835 uint32_t fcrtl, fcrth; 4836 uint8_t i; 4837 uint8_t nb_rx_en; 4838 4839 /* Validate the water mark configuration */ 4840 if (!hw->fc.pause_time) { 4841 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4842 goto out; 4843 } 4844 4845 /* Low water mark of zero causes XOFF floods */ 4846 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 4847 /* High/Low water can not be 0 */ 4848 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { 4849 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4850 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4851 goto out; 4852 } 4853 4854 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { 4855 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4856 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4857 goto out; 4858 } 4859 } 4860 /* Negotiate the fc mode to use */ 4861 ixgbe_fc_autoneg(hw); 4862 4863 /* Disable any previous flow control settings */ 4864 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4865 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); 4866 4867 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4868 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 4869 4870 switch (hw->fc.current_mode) { 4871 case ixgbe_fc_none: 4872 /* 4873 * If the count of enabled RX Priority Flow control >1, 4874 * and the TX pause can not be disabled 4875 */ 4876 nb_rx_en = 0; 4877 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4878 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4879 if (reg & IXGBE_FCRTH_FCEN) 4880 nb_rx_en++; 4881 } 4882 if (nb_rx_en > 1) 4883 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4884 break; 4885 case ixgbe_fc_rx_pause: 4886 /* 4887 * Rx Flow control is enabled and Tx Flow control is 4888 * disabled by software override. Since there really 4889 * isn't a way to advertise that we are capable of RX 4890 * Pause ONLY, we will advertise that we support both 4891 * symmetric and asymmetric Rx PAUSE. Later, we will 4892 * disable the adapter's ability to send PAUSE frames. 4893 */ 4894 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4895 /* 4896 * If the count of enabled RX Priority Flow control >1, 4897 * and the TX pause can not be disabled 4898 */ 4899 nb_rx_en = 0; 4900 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4901 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4902 if (reg & IXGBE_FCRTH_FCEN) 4903 nb_rx_en++; 4904 } 4905 if (nb_rx_en > 1) 4906 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4907 break; 4908 case ixgbe_fc_tx_pause: 4909 /* 4910 * Tx Flow control is enabled, and Rx Flow control is 4911 * disabled by software override. 4912 */ 4913 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4914 break; 4915 case ixgbe_fc_full: 4916 /* Flow control (both Rx and Tx) is enabled by SW override. */ 4917 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4918 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4919 break; 4920 default: 4921 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); 4922 ret_val = IXGBE_ERR_CONFIG; 4923 goto out; 4924 } 4925 4926 /* Set 802.3x based flow control settings. */ 4927 mflcn_reg |= IXGBE_MFLCN_DPF; 4928 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 4929 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 4930 4931 /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ 4932 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 4933 hw->fc.high_water[tc_num]) { 4934 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 4935 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 4936 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 4937 } else { 4938 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 4939 /* 4940 * In order to prevent Tx hangs when the internal Tx 4941 * switch is enabled we must set the high water mark 4942 * to the maximum FCRTH value. This allows the Tx 4943 * switch to function even under heavy Rx workloads. 4944 */ 4945 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 4946 } 4947 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 4948 4949 /* Configure pause time (2 TCs per register) */ 4950 reg = hw->fc.pause_time * 0x00010001; 4951 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 4952 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 4953 4954 /* Configure flow control refresh threshold value */ 4955 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 4956 4957 out: 4958 return ret_val; 4959 } 4960 4961 static int 4962 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 4963 { 4964 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4965 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 4966 4967 if (hw->mac.type != ixgbe_mac_82598EB) { 4968 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 4969 } 4970 return ret_val; 4971 } 4972 4973 static int 4974 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 4975 { 4976 int err; 4977 uint32_t rx_buf_size; 4978 uint32_t max_high_water; 4979 uint8_t tc_num; 4980 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 4981 struct ixgbe_hw *hw = 4982 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4983 struct ixgbe_dcb_config *dcb_config = 4984 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 4985 4986 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4987 ixgbe_fc_none, 4988 ixgbe_fc_rx_pause, 4989 ixgbe_fc_tx_pause, 4990 ixgbe_fc_full 4991 }; 4992 4993 PMD_INIT_FUNC_TRACE(); 4994 4995 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 4996 tc_num = map[pfc_conf->priority]; 4997 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 4998 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4999 /* 5000 * At least reserve one Ethernet frame for watermark 5001 * high_water/low_water in kilo bytes for ixgbe 5002 */ 5003 max_high_water = (rx_buf_size - 5004 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 5005 if ((pfc_conf->fc.high_water > max_high_water) || 5006 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 5007 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 5008 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 5009 return -EINVAL; 5010 } 5011 5012 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 5013 hw->fc.pause_time = pfc_conf->fc.pause_time; 5014 hw->fc.send_xon = pfc_conf->fc.send_xon; 5015 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 5016 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 5017 5018 err = ixgbe_dcb_pfc_enable(dev, tc_num); 5019 5020 /* Not negotiated is not an error case */ 5021 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 5022 return 0; 5023 5024 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 5025 return -EIO; 5026 } 5027 5028 static int 5029 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 5030 struct rte_eth_rss_reta_entry64 *reta_conf, 5031 uint16_t 
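/*
 * The redirection table update/query pair that follows expects callers to
 * go through rte_eth_dev_rss_reta_update()/_query() with reta_size taken
 * from rte_eth_dev_info_get().  A sketch that spreads the table evenly over
 * nb_queues Rx queues (spread_reta() is illustrative, not driver code, and
 * assumes the port is configured and started):
 *
 *     #include <string.h>
 *     #include <rte_ethdev.h>
 *
 *     static int
 *     spread_reta(uint16_t port_id, uint16_t nb_queues)
 *     {
 *         struct rte_eth_dev_info info;
 *         struct rte_eth_rss_reta_entry64 conf[8];
 *         uint16_t i;
 *         int ret;
 *
 *         ret = rte_eth_dev_info_get(port_id, &info);
 *         if (ret != 0 || nb_queues == 0 ||
 *             info.reta_size > 8 * RTE_ETH_RETA_GROUP_SIZE)
 *             return -1;
 *         memset(conf, 0, sizeof(conf));
 *         for (i = 0; i < info.reta_size; i++) {
 *             conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *                 1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
 *             conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[
 *                 i % RTE_ETH_RETA_GROUP_SIZE] = i % nb_queues;
 *         }
 *         return rte_eth_dev_rss_reta_update(port_id, conf,
 *                                            info.reta_size);
 *     }
 *
 * Note that this driver rejects the update unless the port has been started
 * and reta_size matches the hardware table size exactly, as the checks at
 * the top of the function show.
 */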
reta_size) 5032 { 5033 uint16_t i, sp_reta_size; 5034 uint8_t j, mask; 5035 uint32_t reta, r; 5036 uint16_t idx, shift; 5037 struct ixgbe_adapter *adapter = dev->data->dev_private; 5038 struct rte_eth_dev_data *dev_data = dev->data; 5039 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5040 uint32_t reta_reg; 5041 5042 PMD_INIT_FUNC_TRACE(); 5043 5044 if (!dev_data->dev_started) { 5045 PMD_DRV_LOG(ERR, 5046 "port %d must be started before rss reta update", 5047 dev_data->port_id); 5048 return -EIO; 5049 } 5050 5051 if (!ixgbe_rss_update_sp(hw->mac.type)) { 5052 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 5053 "NIC."); 5054 return -ENOTSUP; 5055 } 5056 5057 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5058 if (reta_size != sp_reta_size) { 5059 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5060 "(%d) doesn't match the number hardware can supported " 5061 "(%d)", reta_size, sp_reta_size); 5062 return -EINVAL; 5063 } 5064 5065 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5066 idx = i / RTE_ETH_RETA_GROUP_SIZE; 5067 shift = i % RTE_ETH_RETA_GROUP_SIZE; 5068 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5069 IXGBE_4_BIT_MASK); 5070 if (!mask) 5071 continue; 5072 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5073 if (mask == IXGBE_4_BIT_MASK) 5074 r = 0; 5075 else 5076 r = IXGBE_READ_REG(hw, reta_reg); 5077 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5078 if (mask & (0x1 << j)) 5079 reta |= reta_conf[idx].reta[shift + j] << 5080 (CHAR_BIT * j); 5081 else 5082 reta |= r & (IXGBE_8_BIT_MASK << 5083 (CHAR_BIT * j)); 5084 } 5085 IXGBE_WRITE_REG(hw, reta_reg, reta); 5086 } 5087 adapter->rss_reta_updated = 1; 5088 5089 return 0; 5090 } 5091 5092 static int 5093 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 5094 struct rte_eth_rss_reta_entry64 *reta_conf, 5095 uint16_t reta_size) 5096 { 5097 uint16_t i, sp_reta_size; 5098 uint8_t j, mask; 5099 uint32_t reta; 5100 uint16_t idx, shift; 5101 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5102 uint32_t reta_reg; 5103 5104 PMD_INIT_FUNC_TRACE(); 5105 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5106 if (reta_size != sp_reta_size) { 5107 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5108 "(%d) doesn't match the number hardware can supported " 5109 "(%d)", reta_size, sp_reta_size); 5110 return -EINVAL; 5111 } 5112 5113 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5114 idx = i / RTE_ETH_RETA_GROUP_SIZE; 5115 shift = i % RTE_ETH_RETA_GROUP_SIZE; 5116 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5117 IXGBE_4_BIT_MASK); 5118 if (!mask) 5119 continue; 5120 5121 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5122 reta = IXGBE_READ_REG(hw, reta_reg); 5123 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5124 if (mask & (0x1 << j)) 5125 reta_conf[idx].reta[shift + j] = 5126 ((reta >> (CHAR_BIT * j)) & 5127 IXGBE_8_BIT_MASK); 5128 } 5129 } 5130 5131 return 0; 5132 } 5133 5134 static int 5135 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 5136 uint32_t index, uint32_t pool) 5137 { 5138 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5139 uint32_t enable_addr = 1; 5140 5141 return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, 5142 pool, enable_addr); 5143 } 5144 5145 static void 5146 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 5147 { 5148 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5149 5150 ixgbe_clear_rar(hw, index); 5151 } 5152 5153 static int 
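/*
 * ixgbe_add_rar()/ixgbe_remove_rar() above and the default-MAC handler that
 * follows back the rte_eth_dev_mac_addr_add()/_remove() and
 * rte_eth_dev_default_mac_addr_set() calls.  A small application-side
 * sketch, assuming new_addr holds a valid unicast address (the helper name
 * is an assumption):
 *
 *     #include <stdio.h>
 *     #include <rte_ethdev.h>
 *     #include <rte_ether.h>
 *
 *     static int
 *     replace_default_mac(uint16_t port_id, struct rte_ether_addr *new_addr)
 *     {
 *         char buf[RTE_ETHER_ADDR_FMT_SIZE];
 *         int ret = rte_eth_dev_default_mac_addr_set(port_id, new_addr);
 *
 *         if (ret != 0)
 *             return ret;
 *         rte_ether_format_addr(buf, sizeof(buf), new_addr);
 *         printf("port %u default MAC set to %s\n", port_id, buf);
 *         return 0;
 *     }
 */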
5154 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 5155 { 5156 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5157 5158 ixgbe_remove_rar(dev, 0); 5159 ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); 5160 5161 return 0; 5162 } 5163 5164 static bool 5165 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 5166 { 5167 if (strcmp(dev->device->driver->name, drv->driver.name)) 5168 return false; 5169 5170 return true; 5171 } 5172 5173 bool 5174 is_ixgbe_supported(struct rte_eth_dev *dev) 5175 { 5176 return is_device_supported(dev, &rte_ixgbe_pmd); 5177 } 5178 5179 static int 5180 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 5181 { 5182 uint32_t hlreg0; 5183 uint32_t maxfrs; 5184 struct ixgbe_hw *hw; 5185 struct rte_eth_dev_info dev_info; 5186 uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; 5187 int ret; 5188 5189 ret = ixgbe_dev_info_get(dev, &dev_info); 5190 if (ret != 0) 5191 return ret; 5192 5193 /* check that mtu is within the allowed range */ 5194 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 5195 return -EINVAL; 5196 5197 /* If device is started, refuse mtu that requires the support of 5198 * scattered packets when this feature has not been enabled before. 5199 */ 5200 if (dev->data->dev_started && !dev->data->scattered_rx && 5201 frame_size + 2 * RTE_VLAN_HLEN > 5202 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 5203 PMD_INIT_LOG(ERR, "Stop port first."); 5204 return -EINVAL; 5205 } 5206 5207 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5208 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 5209 5210 /* switch to jumbo mode if needed */ 5211 if (mtu > RTE_ETHER_MTU) 5212 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 5213 else 5214 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 5215 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 5216 5217 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 5218 maxfrs &= 0x0000FFFF; 5219 maxfrs |= (frame_size << 16); 5220 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 5221 5222 return 0; 5223 } 5224 5225 /* 5226 * Virtual Function operations 5227 */ 5228 static void 5229 ixgbevf_intr_disable(struct rte_eth_dev *dev) 5230 { 5231 struct ixgbe_interrupt *intr = 5232 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5233 struct ixgbe_hw *hw = 5234 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5235 5236 PMD_INIT_FUNC_TRACE(); 5237 5238 /* Clear interrupt mask to stop from interrupts being generated */ 5239 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 5240 5241 IXGBE_WRITE_FLUSH(hw); 5242 5243 /* Clear mask value. */ 5244 intr->mask = 0; 5245 } 5246 5247 static void 5248 ixgbevf_intr_enable(struct rte_eth_dev *dev) 5249 { 5250 struct ixgbe_interrupt *intr = 5251 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5252 struct ixgbe_hw *hw = 5253 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5254 5255 PMD_INIT_FUNC_TRACE(); 5256 5257 /* VF enable interrupt autoclean */ 5258 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 5259 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 5260 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 5261 5262 IXGBE_WRITE_FLUSH(hw); 5263 5264 /* Save IXGBE_VTEIMS value to mask. 
*/ 5265 intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; 5266 } 5267 5268 static int 5269 ixgbevf_dev_configure(struct rte_eth_dev *dev) 5270 { 5271 struct rte_eth_conf *conf = &dev->data->dev_conf; 5272 struct ixgbe_adapter *adapter = dev->data->dev_private; 5273 5274 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 5275 dev->data->port_id); 5276 5277 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 5278 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 5279 5280 /* 5281 * VF has no ability to enable/disable HW CRC 5282 * Keep the persistent behavior the same as Host PF 5283 */ 5284 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 5285 if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) { 5286 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 5287 conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC; 5288 } 5289 #else 5290 if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) { 5291 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 5292 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; 5293 } 5294 #endif 5295 5296 /* 5297 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 5298 * allocation or vector Rx preconditions we will reset it. 5299 */ 5300 adapter->rx_bulk_alloc_allowed = true; 5301 adapter->rx_vec_allowed = true; 5302 5303 return 0; 5304 } 5305 5306 static int 5307 ixgbevf_dev_start(struct rte_eth_dev *dev) 5308 { 5309 struct ixgbe_hw *hw = 5310 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5311 uint32_t intr_vector = 0; 5312 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5313 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5314 5315 int err, mask = 0; 5316 5317 PMD_INIT_FUNC_TRACE(); 5318 5319 /* Stop the link setup handler before resetting the HW. */ 5320 ixgbe_dev_wait_setup_link_complete(dev, 0); 5321 5322 err = hw->mac.ops.reset_hw(hw); 5323 5324 /** 5325 * In this case, reuses the MAC address assigned by VF 5326 * initialization. 5327 */ 5328 if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) { 5329 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); 5330 return err; 5331 } 5332 5333 hw->mac.get_link_status = true; 5334 5335 /* negotiate mailbox API version to use with the PF. 
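The agreed version also decides which mailbox requests the PF will accept later on, e.g. the IXGBE_VF_SET_MTU request referred to in ixgbevf_dev_set_mtu() below.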
*/ 5336 ixgbevf_negotiate_api(hw); 5337 5338 ixgbevf_dev_tx_init(dev); 5339 5340 /* This can fail when allocating mbufs for descriptor rings */ 5341 err = ixgbevf_dev_rx_init(dev); 5342 if (err) { 5343 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); 5344 ixgbe_dev_clear_queues(dev); 5345 return err; 5346 } 5347 5348 /* Set vfta */ 5349 ixgbevf_set_vfta_all(dev, 1); 5350 5351 /* Set HW strip */ 5352 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | 5353 RTE_ETH_VLAN_EXTEND_MASK; 5354 err = ixgbevf_vlan_offload_config(dev, mask); 5355 if (err) { 5356 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); 5357 ixgbe_dev_clear_queues(dev); 5358 return err; 5359 } 5360 5361 ixgbevf_dev_rxtx_start(dev); 5362 5363 /* check and configure queue intr-vector mapping */ 5364 if (rte_intr_cap_multiple(intr_handle) && 5365 dev->data->dev_conf.intr_conf.rxq) { 5366 /* According to datasheet, only vector 0/1/2 can be used, 5367 * now only one vector is used for Rx queue 5368 */ 5369 intr_vector = 1; 5370 if (rte_intr_efd_enable(intr_handle, intr_vector)) { 5371 ixgbe_dev_clear_queues(dev); 5372 return -1; 5373 } 5374 } 5375 5376 if (rte_intr_dp_is_en(intr_handle)) { 5377 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 5378 dev->data->nb_rx_queues)) { 5379 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 5380 " intr_vec", dev->data->nb_rx_queues); 5381 ixgbe_dev_clear_queues(dev); 5382 return -ENOMEM; 5383 } 5384 } 5385 ixgbevf_configure_msix(dev); 5386 5387 /* When a VF port is bound to VFIO-PCI, only the miscellaneous interrupt 5388 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ). 5389 * If the previous VFIO interrupt mapping set up in eth_ixgbevf_dev_init( ) 5390 * is not cleared, it will fail when the following rte_intr_enable( ) tries 5391 * to map Rx queue interrupts to other VFIO vectors. 5392 * So clear the uio/vfio intr/eventfd first to avoid failure. 5393 */ 5394 rte_intr_disable(intr_handle); 5395 5396 rte_intr_enable(intr_handle); 5397 5398 /* Re-enable interrupt for VF */ 5399 ixgbevf_intr_enable(dev); 5400 5401 /* 5402 * Update link status right before return, because it may 5403 * start link configuration process in a separate thread.
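* That separate thread is presumably what ixgbe_dev_wait_setup_link_complete(), called at the top of ixgbevf_dev_start() and ixgbevf_dev_stop(), waits on before the next reconfiguration.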
5404 */ 5405 ixgbevf_dev_link_update(dev, 0); 5406 5407 hw->adapter_stopped = false; 5408 5409 return 0; 5410 } 5411 5412 static int 5413 ixgbevf_dev_stop(struct rte_eth_dev *dev) 5414 { 5415 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5416 struct ixgbe_adapter *adapter = dev->data->dev_private; 5417 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5418 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5419 5420 if (hw->adapter_stopped) 5421 return 0; 5422 5423 PMD_INIT_FUNC_TRACE(); 5424 5425 ixgbe_dev_wait_setup_link_complete(dev, 0); 5426 5427 ixgbevf_intr_disable(dev); 5428 5429 dev->data->dev_started = 0; 5430 hw->adapter_stopped = 1; 5431 ixgbe_stop_adapter(hw); 5432 5433 /* 5434 * Clear what we set, but we still keep shadow_vfta to 5435 * restore after device starts 5436 */ 5437 ixgbevf_set_vfta_all(dev, 0); 5438 5439 /* Clear stored conf */ 5440 dev->data->scattered_rx = 0; 5441 5442 ixgbe_dev_clear_queues(dev); 5443 5444 /* Clean datapath event and queue/vec mapping */ 5445 rte_intr_efd_disable(intr_handle); 5446 rte_intr_vec_list_free(intr_handle); 5447 5448 adapter->rss_reta_updated = 0; 5449 5450 return 0; 5451 } 5452 5453 static int 5454 ixgbevf_dev_close(struct rte_eth_dev *dev) 5455 { 5456 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5457 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5458 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5459 int ret; 5460 5461 PMD_INIT_FUNC_TRACE(); 5462 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5463 return 0; 5464 5465 ixgbe_reset_hw(hw); 5466 5467 ret = ixgbevf_dev_stop(dev); 5468 5469 ixgbe_dev_free_queues(dev); 5470 5471 /** 5472 * Remove the VF MAC address ro ensure 5473 * that the VF traffic goes to the PF 5474 * after stop, close and detach of the VF 5475 **/ 5476 ixgbevf_remove_mac_addr(dev, 0); 5477 5478 rte_intr_disable(intr_handle); 5479 rte_intr_callback_unregister(intr_handle, 5480 ixgbevf_dev_interrupt_handler, dev); 5481 5482 return ret; 5483 } 5484 5485 /* 5486 * Reset VF device 5487 */ 5488 static int 5489 ixgbevf_dev_reset(struct rte_eth_dev *dev) 5490 { 5491 int ret; 5492 5493 ret = eth_ixgbevf_dev_uninit(dev); 5494 if (ret) 5495 return ret; 5496 5497 ret = eth_ixgbevf_dev_init(dev); 5498 5499 return ret; 5500 } 5501 5502 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 5503 { 5504 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5505 struct ixgbe_vfta *shadow_vfta = 5506 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5507 int i = 0, j = 0, vfta = 0, mask = 1; 5508 5509 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 5510 vfta = shadow_vfta->vfta[i]; 5511 if (vfta) { 5512 mask = 1; 5513 for (j = 0; j < 32; j++) { 5514 if (vfta & mask) 5515 ixgbe_set_vfta(hw, (i<<5)+j, 0, 5516 on, false); 5517 mask <<= 1; 5518 } 5519 } 5520 } 5521 5522 } 5523 5524 static int 5525 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 5526 { 5527 struct ixgbe_hw *hw = 5528 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5529 struct ixgbe_vfta *shadow_vfta = 5530 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5531 uint32_t vid_idx = 0; 5532 uint32_t vid_bit = 0; 5533 int ret = 0; 5534 5535 PMD_INIT_FUNC_TRACE(); 5536 5537 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 5538 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 5539 if (ret) { 5540 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 5541 return ret; 5542 } 5543 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 
5544 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 5545 5546 /* Save what we set and retore it after device reset */ 5547 if (on) 5548 shadow_vfta->vfta[vid_idx] |= vid_bit; 5549 else 5550 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 5551 5552 return 0; 5553 } 5554 5555 static void 5556 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 5557 { 5558 struct ixgbe_hw *hw = 5559 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5560 uint32_t ctrl; 5561 5562 PMD_INIT_FUNC_TRACE(); 5563 5564 if (queue >= hw->mac.max_rx_queues) 5565 return; 5566 5567 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 5568 if (on) 5569 ctrl |= IXGBE_RXDCTL_VME; 5570 else 5571 ctrl &= ~IXGBE_RXDCTL_VME; 5572 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 5573 5574 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 5575 } 5576 5577 static int 5578 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 5579 { 5580 struct ixgbe_rx_queue *rxq; 5581 uint16_t i; 5582 int on = 0; 5583 5584 /* VF function only support hw strip feature, others are not support */ 5585 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 5586 for (i = 0; i < dev->data->nb_rx_queues; i++) { 5587 rxq = dev->data->rx_queues[i]; 5588 on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 5589 ixgbevf_vlan_strip_queue_set(dev, i, on); 5590 } 5591 } 5592 5593 return 0; 5594 } 5595 5596 static int 5597 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 5598 { 5599 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 5600 5601 ixgbevf_vlan_offload_config(dev, mask); 5602 5603 return 0; 5604 } 5605 5606 int 5607 ixgbe_vt_check(struct ixgbe_hw *hw) 5608 { 5609 uint32_t reg_val; 5610 5611 /* if Virtualization Technology is enabled */ 5612 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 5613 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 5614 PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); 5615 return -1; 5616 } 5617 5618 return 0; 5619 } 5620 5621 static uint32_t 5622 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) 5623 { 5624 uint32_t vector = 0; 5625 5626 switch (hw->mac.mc_filter_type) { 5627 case 0: /* use bits [47:36] of the address */ 5628 vector = ((uc_addr->addr_bytes[4] >> 4) | 5629 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 5630 break; 5631 case 1: /* use bits [46:35] of the address */ 5632 vector = ((uc_addr->addr_bytes[4] >> 3) | 5633 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 5634 break; 5635 case 2: /* use bits [45:34] of the address */ 5636 vector = ((uc_addr->addr_bytes[4] >> 2) | 5637 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 5638 break; 5639 case 3: /* use bits [43:32] of the address */ 5640 vector = ((uc_addr->addr_bytes[4]) | 5641 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 5642 break; 5643 default: /* Invalid mc_filter_type */ 5644 break; 5645 } 5646 5647 /* vector can only be 12-bits or boundary will be exceeded */ 5648 vector &= 0xFFF; 5649 return vector; 5650 } 5651 5652 static int 5653 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, 5654 struct rte_ether_addr *mac_addr, uint8_t on) 5655 { 5656 uint32_t vector; 5657 uint32_t uta_idx; 5658 uint32_t reg_val; 5659 uint32_t uta_shift; 5660 uint32_t rc; 5661 const uint32_t ixgbe_uta_idx_mask = 0x7F; 5662 const uint32_t ixgbe_uta_bit_shift = 5; 5663 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 5664 const uint32_t bit1 = 0x1; 5665 5666 struct ixgbe_hw *hw = 5667 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5668 struct ixgbe_uta_info *uta_info = 5669 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5670 5671 /* 
The UTA table only exists on 82599 hardware and newer */ 5672 if (hw->mac.type < ixgbe_mac_82599EB) 5673 return -ENOTSUP; 5674 5675 vector = ixgbe_uta_vector(hw, mac_addr); 5676 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 5677 uta_shift = vector & ixgbe_uta_bit_mask; 5678 5679 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 5680 if (rc == on) 5681 return 0; 5682 5683 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 5684 if (on) { 5685 uta_info->uta_in_use++; 5686 reg_val |= (bit1 << uta_shift); 5687 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 5688 } else { 5689 uta_info->uta_in_use--; 5690 reg_val &= ~(bit1 << uta_shift); 5691 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 5692 } 5693 5694 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 5695 5696 if (uta_info->uta_in_use > 0) 5697 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 5698 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 5699 else 5700 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 5701 5702 return 0; 5703 } 5704 5705 static int 5706 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 5707 { 5708 int i; 5709 struct ixgbe_hw *hw = 5710 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5711 struct ixgbe_uta_info *uta_info = 5712 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5713 5714 /* The UTA table only exists on 82599 hardware and newer */ 5715 if (hw->mac.type < ixgbe_mac_82599EB) 5716 return -ENOTSUP; 5717 5718 if (on) { 5719 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5720 uta_info->uta_shadow[i] = ~0; 5721 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 5722 } 5723 } else { 5724 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5725 uta_info->uta_shadow[i] = 0; 5726 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 5727 } 5728 } 5729 return 0; 5730 5731 } 5732 5733 uint32_t 5734 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 5735 { 5736 uint32_t new_val = orig_val; 5737 5738 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) 5739 new_val |= IXGBE_VMOLR_AUPE; 5740 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC) 5741 new_val |= IXGBE_VMOLR_ROMPE; 5742 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC) 5743 new_val |= IXGBE_VMOLR_ROPE; 5744 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST) 5745 new_val |= IXGBE_VMOLR_BAM; 5746 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST) 5747 new_val |= IXGBE_VMOLR_MPE; 5748 5749 return new_val; 5750 } 5751 5752 static int 5753 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5754 { 5755 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5756 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5757 struct ixgbe_interrupt *intr = 5758 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5759 struct ixgbe_hw *hw = 5760 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5761 uint32_t vec = IXGBE_MISC_VEC_ID; 5762 5763 if (rte_intr_allow_others(intr_handle)) 5764 vec = IXGBE_RX_VEC_START; 5765 intr->mask |= (1 << vec); 5766 RTE_SET_USED(queue_id); 5767 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5768 5769 rte_intr_ack(intr_handle); 5770 5771 return 0; 5772 } 5773 5774 static int 5775 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5776 { 5777 struct ixgbe_interrupt *intr = 5778 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5779 struct ixgbe_hw *hw = 5780 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5781 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5782 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5783 uint32_t vec = 
IXGBE_MISC_VEC_ID; 5784 5785 if (rte_intr_allow_others(intr_handle)) 5786 vec = IXGBE_RX_VEC_START; 5787 intr->mask &= ~(1 << vec); 5788 RTE_SET_USED(queue_id); 5789 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5790 5791 return 0; 5792 } 5793 5794 static int 5795 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5796 { 5797 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5798 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5799 uint32_t mask; 5800 struct ixgbe_hw *hw = 5801 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5802 struct ixgbe_interrupt *intr = 5803 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5804 5805 if (queue_id < 16) { 5806 ixgbe_disable_intr(hw); 5807 intr->mask |= (1 << queue_id); 5808 ixgbe_enable_intr(dev); 5809 } else if (queue_id < 32) { 5810 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5811 mask &= (1 << queue_id); 5812 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5813 } else if (queue_id < 64) { 5814 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5815 mask &= (1 << (queue_id - 32)); 5816 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5817 } 5818 rte_intr_ack(intr_handle); 5819 5820 return 0; 5821 } 5822 5823 static int 5824 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5825 { 5826 uint32_t mask; 5827 struct ixgbe_hw *hw = 5828 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5829 struct ixgbe_interrupt *intr = 5830 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5831 5832 if (queue_id < 16) { 5833 ixgbe_disable_intr(hw); 5834 intr->mask &= ~(1 << queue_id); 5835 ixgbe_enable_intr(dev); 5836 } else if (queue_id < 32) { 5837 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5838 mask &= ~(1 << queue_id); 5839 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5840 } else if (queue_id < 64) { 5841 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5842 mask &= ~(1 << (queue_id - 32)); 5843 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5844 } 5845 5846 return 0; 5847 } 5848 5849 static void 5850 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5851 uint8_t queue, uint8_t msix_vector) 5852 { 5853 uint32_t tmp, idx; 5854 5855 if (direction == -1) { 5856 /* other causes */ 5857 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5858 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 5859 tmp &= ~0xFF; 5860 tmp |= msix_vector; 5861 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 5862 } else { 5863 /* rx or tx cause */ 5864 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5865 idx = ((16 * (queue & 1)) + (8 * direction)); 5866 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 5867 tmp &= ~(0xFF << idx); 5868 tmp |= (msix_vector << idx); 5869 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 5870 } 5871 } 5872 5873 /** 5874 * set the IVAR registers, mapping interrupt causes to vectors 5875 * @param hw 5876 * pointer to ixgbe_hw struct 5877 * @direction 5878 * 0 for Rx, 1 for Tx, -1 for other causes 5879 * @queue 5880 * queue to map the corresponding interrupt to 5881 * @msix_vector 5882 * the vector to map to the corresponding queue 5883 */ 5884 static void 5885 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5886 uint8_t queue, uint8_t msix_vector) 5887 { 5888 uint32_t tmp, idx; 5889 5890 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5891 if (hw->mac.type == ixgbe_mac_82598EB) { 5892 if (direction == -1) 5893 direction = 0; 5894 idx = (((direction * 64) + queue) >> 2) & 0x1F; 5895 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 5896 tmp &= ~(0xFF << (8 * (queue & 0x3))); 5897 tmp |= (msix_vector << (8 * (queue & 0x3))); 5898 
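/*
 * On 82598 each 32-bit IVAR register packs four 8-bit entries, so the
 * register index is ((direction * 64) + queue) / 4 and the byte lane is
 * queue % 4; the read-modify-write here replaces only that one byte.
 */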
IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 5899 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 5900 (hw->mac.type == ixgbe_mac_X540) || 5901 (hw->mac.type == ixgbe_mac_X550) || 5902 (hw->mac.type == ixgbe_mac_X550EM_x)) { 5903 if (direction == -1) { 5904 /* other causes */ 5905 idx = ((queue & 1) * 8); 5906 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5907 tmp &= ~(0xFF << idx); 5908 tmp |= (msix_vector << idx); 5909 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 5910 } else { 5911 /* rx or tx causes */ 5912 idx = ((16 * (queue & 1)) + (8 * direction)); 5913 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 5914 tmp &= ~(0xFF << idx); 5915 tmp |= (msix_vector << idx); 5916 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 5917 } 5918 } 5919 } 5920 5921 static void 5922 ixgbevf_configure_msix(struct rte_eth_dev *dev) 5923 { 5924 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5925 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5926 struct ixgbe_hw *hw = 5927 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5928 uint32_t q_idx; 5929 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 5930 uint32_t base = IXGBE_MISC_VEC_ID; 5931 5932 /* Configure VF other cause ivar */ 5933 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 5934 5935 /* won't configure msix register if no mapping is done 5936 * between intr vector and event fd. 5937 */ 5938 if (!rte_intr_dp_is_en(intr_handle)) 5939 return; 5940 5941 if (rte_intr_allow_others(intr_handle)) { 5942 base = IXGBE_RX_VEC_START; 5943 vector_idx = IXGBE_RX_VEC_START; 5944 } 5945 5946 /* Configure all RX queues of VF */ 5947 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 5948 /* Force all queue use vector 0, 5949 * as IXGBE_VF_MAXMSIVECTOR = 1 5950 */ 5951 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 5952 rte_intr_vec_list_index_set(intr_handle, q_idx, 5953 vector_idx); 5954 if (vector_idx < base + rte_intr_nb_efd_get(intr_handle) 5955 - 1) 5956 vector_idx++; 5957 } 5958 5959 /* As RX queue setting above show, all queues use the vector 0. 5960 * Set only the ITR value of IXGBE_MISC_VEC_ID. 5961 */ 5962 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), 5963 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 5964 | IXGBE_EITR_CNT_WDIS); 5965 } 5966 5967 /** 5968 * Sets up the hardware to properly generate MSI-X interrupts 5969 * @hw 5970 * board private structure 5971 */ 5972 static void 5973 ixgbe_configure_msix(struct rte_eth_dev *dev) 5974 { 5975 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5976 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5977 struct ixgbe_hw *hw = 5978 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5979 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 5980 uint32_t vec = IXGBE_MISC_VEC_ID; 5981 uint32_t mask; 5982 uint32_t gpie; 5983 5984 /* won't configure msix register if no mapping is done 5985 * between intr vector and event fd 5986 * but if misx has been enabled already, need to configure 5987 * auto clean, auto mask and throttling. 
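* (Whether MSI-X was already enabled is detected from the GPIE read just below: the MSIX_MODE and PBA_SUPPORT bits are checked together with rte_intr_dp_is_en().)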
5988 */ 5989 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5990 if (!rte_intr_dp_is_en(intr_handle) && 5991 !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) 5992 return; 5993 5994 if (rte_intr_allow_others(intr_handle)) 5995 vec = base = IXGBE_RX_VEC_START; 5996 5997 /* setup GPIE for MSI-x mode */ 5998 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5999 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 6000 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 6001 /* auto clearing and auto setting corresponding bits in EIMS 6002 * when MSI-X interrupt is triggered 6003 */ 6004 if (hw->mac.type == ixgbe_mac_82598EB) { 6005 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 6006 } else { 6007 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 6008 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 6009 } 6010 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 6011 6012 /* Populate the IVAR table and set the ITR values to the 6013 * corresponding register. 6014 */ 6015 if (rte_intr_dp_is_en(intr_handle)) { 6016 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 6017 queue_id++) { 6018 /* by default, 1:1 mapping */ 6019 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 6020 rte_intr_vec_list_index_set(intr_handle, 6021 queue_id, vec); 6022 if (vec < base + rte_intr_nb_efd_get(intr_handle) 6023 - 1) 6024 vec++; 6025 } 6026 6027 switch (hw->mac.type) { 6028 case ixgbe_mac_82598EB: 6029 ixgbe_set_ivar_map(hw, -1, 6030 IXGBE_IVAR_OTHER_CAUSES_INDEX, 6031 IXGBE_MISC_VEC_ID); 6032 break; 6033 case ixgbe_mac_82599EB: 6034 case ixgbe_mac_X540: 6035 case ixgbe_mac_X550: 6036 case ixgbe_mac_X550EM_x: 6037 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 6038 break; 6039 default: 6040 break; 6041 } 6042 } 6043 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 6044 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6045 | IXGBE_EITR_CNT_WDIS); 6046 6047 /* set up to autoclear timer, and the vectors */ 6048 mask = IXGBE_EIMS_ENABLE_MASK; 6049 mask &= ~(IXGBE_EIMS_OTHER | 6050 IXGBE_EIMS_MAILBOX | 6051 IXGBE_EIMS_LSC); 6052 6053 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 6054 } 6055 6056 int 6057 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 6058 uint16_t queue_idx, uint16_t tx_rate) 6059 { 6060 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6061 uint32_t rf_dec, rf_int; 6062 uint32_t bcnrc_val; 6063 uint16_t link_speed = dev->data->dev_link.link_speed; 6064 6065 if (queue_idx >= hw->mac.max_tx_queues) 6066 return -EINVAL; 6067 6068 if (tx_rate != 0) { 6069 /* Calculate the rate factor values to set */ 6070 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 6071 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 6072 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 6073 6074 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 6075 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 6076 IXGBE_RTTBCNRC_RF_INT_MASK_M); 6077 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 6078 } else { 6079 bcnrc_val = 0; 6080 } 6081 6082 /* 6083 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 6084 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 6085 * set as 0x4. 
6086 */ 6087 if (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE) 6088 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME); 6089 else 6090 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_DEFAULT); 6091 6092 /* Set RTTBCNRC of queue X */ 6093 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 6094 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 6095 IXGBE_WRITE_FLUSH(hw); 6096 6097 return 0; 6098 } 6099 6100 static int 6101 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 6102 __rte_unused uint32_t index, 6103 __rte_unused uint32_t pool) 6104 { 6105 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6106 int diag; 6107 6108 /* 6109 * On a 82599 VF, adding again the same MAC addr is not an idempotent 6110 * operation. Trap this case to avoid exhausting the [very limited] 6111 * set of PF resources used to store VF MAC addresses. 6112 */ 6113 if (memcmp(hw->mac.perm_addr, mac_addr, 6114 sizeof(struct rte_ether_addr)) == 0) 6115 return -1; 6116 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6117 if (diag != 0) 6118 PMD_DRV_LOG(ERR, "Unable to add MAC address " 6119 RTE_ETHER_ADDR_PRT_FMT " - diag=%d", 6120 RTE_ETHER_ADDR_BYTES(mac_addr), diag); 6121 return diag; 6122 } 6123 6124 static void 6125 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 6126 { 6127 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6128 struct rte_ether_addr *perm_addr = 6129 (struct rte_ether_addr *)hw->mac.perm_addr; 6130 struct rte_ether_addr *mac_addr; 6131 uint32_t i; 6132 int diag; 6133 6134 /* 6135 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 6136 * not support the deletion of a given MAC address. 6137 * Instead, it imposes to delete all MAC addresses, then to add again 6138 * all MAC addresses with the exception of the one to be deleted. 6139 */ 6140 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 6141 6142 /* 6143 * Add again all MAC addresses, with the exception of the deleted one 6144 * and of the permanent MAC address. 
6145 */ 6146 for (i = 0, mac_addr = dev->data->mac_addrs; 6147 i < hw->mac.num_rar_entries; i++, mac_addr++) { 6148 /* Skip the deleted MAC address */ 6149 if (i == index) 6150 continue; 6151 /* Skip NULL MAC addresses */ 6152 if (rte_is_zero_ether_addr(mac_addr)) 6153 continue; 6154 /* Skip the permanent MAC address */ 6155 if (memcmp(perm_addr, mac_addr, 6156 sizeof(struct rte_ether_addr)) == 0) 6157 continue; 6158 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6159 if (diag != 0) 6160 PMD_DRV_LOG(ERR, 6161 "Adding again MAC address " 6162 RTE_ETHER_ADDR_PRT_FMT " failed " 6163 "diag=%d", RTE_ETHER_ADDR_BYTES(mac_addr), 6164 diag); 6165 } 6166 } 6167 6168 static int 6169 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 6170 struct rte_ether_addr *addr) 6171 { 6172 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6173 6174 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 6175 6176 return 0; 6177 } 6178 6179 int 6180 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 6181 struct rte_eth_syn_filter *filter, 6182 bool add) 6183 { 6184 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6185 struct ixgbe_filter_info *filter_info = 6186 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6187 uint32_t syn_info; 6188 uint32_t synqf; 6189 6190 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6191 return -EINVAL; 6192 6193 syn_info = filter_info->syn_info; 6194 6195 if (add) { 6196 if (syn_info & IXGBE_SYN_FILTER_ENABLE) 6197 return -EINVAL; 6198 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 6199 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 6200 6201 if (filter->hig_pri) 6202 synqf |= IXGBE_SYN_FILTER_SYNQFP; 6203 else 6204 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 6205 } else { 6206 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6207 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) 6208 return -ENOENT; 6209 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 6210 } 6211 6212 filter_info->syn_info = synqf; 6213 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 6214 IXGBE_WRITE_FLUSH(hw); 6215 return 0; 6216 } 6217 6218 6219 static inline enum ixgbe_5tuple_protocol 6220 convert_protocol_type(uint8_t protocol_value) 6221 { 6222 if (protocol_value == IPPROTO_TCP) 6223 return IXGBE_FILTER_PROTOCOL_TCP; 6224 else if (protocol_value == IPPROTO_UDP) 6225 return IXGBE_FILTER_PROTOCOL_UDP; 6226 else if (protocol_value == IPPROTO_SCTP) 6227 return IXGBE_FILTER_PROTOCOL_SCTP; 6228 else 6229 return IXGBE_FILTER_PROTOCOL_NONE; 6230 } 6231 6232 /* inject a 5-tuple filter to HW */ 6233 static inline void 6234 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, 6235 struct ixgbe_5tuple_filter *filter) 6236 { 6237 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6238 int i; 6239 uint32_t ftqf, sdpqf; 6240 uint32_t l34timir = 0; 6241 uint8_t mask = 0xff; 6242 6243 i = filter->index; 6244 6245 sdpqf = (uint32_t)(filter->filter_info.dst_port << 6246 IXGBE_SDPQF_DSTPORT_SHIFT); 6247 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 6248 6249 ftqf = (uint32_t)(filter->filter_info.proto & 6250 IXGBE_FTQF_PROTOCOL_MASK); 6251 ftqf |= (uint32_t)((filter->filter_info.priority & 6252 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 6253 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
*/ 6254 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 6255 if (filter->filter_info.dst_ip_mask == 0) 6256 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 6257 if (filter->filter_info.src_port_mask == 0) 6258 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 6259 if (filter->filter_info.dst_port_mask == 0) 6260 mask &= IXGBE_FTQF_DEST_PORT_MASK; 6261 if (filter->filter_info.proto_mask == 0) 6262 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 6263 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 6264 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 6265 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 6266 6267 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 6268 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 6269 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 6270 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 6271 6272 l34timir |= IXGBE_L34T_IMIR_RESERVE; 6273 l34timir |= (uint32_t)(filter->queue << 6274 IXGBE_L34T_IMIR_QUEUE_SHIFT); 6275 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 6276 } 6277 6278 /* 6279 * add a 5tuple filter 6280 * 6281 * @param 6282 * dev: Pointer to struct rte_eth_dev. 6283 * index: the index the filter allocates. 6284 * filter: pointer to the filter that will be added. 6285 * rx_queue: the queue id the filter assigned to. 6286 * 6287 * @return 6288 * - On success, zero. 6289 * - On failure, a negative value. 6290 */ 6291 static int 6292 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 6293 struct ixgbe_5tuple_filter *filter) 6294 { 6295 struct ixgbe_filter_info *filter_info = 6296 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6297 int i, idx, shift; 6298 6299 /* 6300 * look for an unused 5tuple filter index, 6301 * and insert the filter to list. 6302 */ 6303 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 6304 idx = i / (sizeof(uint32_t) * NBBY); 6305 shift = i % (sizeof(uint32_t) * NBBY); 6306 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 6307 filter_info->fivetuple_mask[idx] |= 1 << shift; 6308 filter->index = i; 6309 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 6310 filter, 6311 entries); 6312 break; 6313 } 6314 } 6315 if (i >= IXGBE_MAX_FTQF_FILTERS) { 6316 PMD_DRV_LOG(ERR, "5tuple filters are full."); 6317 return -ENOSYS; 6318 } 6319 6320 ixgbe_inject_5tuple_filter(dev, filter); 6321 6322 return 0; 6323 } 6324 6325 /* 6326 * remove a 5tuple filter 6327 * 6328 * @param 6329 * dev: Pointer to struct rte_eth_dev. 6330 * filter: the pointer of the filter will be removed. 
6331 */ 6332 static void 6333 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 6334 struct ixgbe_5tuple_filter *filter) 6335 { 6336 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6337 struct ixgbe_filter_info *filter_info = 6338 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6339 uint16_t index = filter->index; 6340 6341 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 6342 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 6343 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 6344 rte_free(filter); 6345 6346 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 6347 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 6348 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 6349 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 6350 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 6351 } 6352 6353 static int 6354 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 6355 { 6356 struct ixgbe_hw *hw; 6357 uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; 6358 struct rte_eth_dev_data *dev_data = dev->data; 6359 6360 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6361 6362 if (mtu < RTE_ETHER_MIN_MTU || max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) 6363 return -EINVAL; 6364 6365 /* If device is started, refuse mtu that requires the support of 6366 * scattered packets when this feature has not been enabled before. 6367 */ 6368 if (dev_data->dev_started && !dev_data->scattered_rx && 6369 (max_frame + 2 * RTE_VLAN_HLEN > 6370 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 6371 PMD_INIT_LOG(ERR, "Stop port first."); 6372 return -EINVAL; 6373 } 6374 6375 /* 6376 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 6377 * request of the version 2.0 of the mailbox API. 6378 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 6379 * of the mailbox API. 
6380 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers 6381 * prior to 3.11.33 which contains the following change: 6382 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 6383 */ 6384 if (ixgbevf_rlpml_set_vf(hw, max_frame)) 6385 return -EINVAL; 6386 6387 return 0; 6388 } 6389 6390 static inline struct ixgbe_5tuple_filter * 6391 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 6392 struct ixgbe_5tuple_filter_info *key) 6393 { 6394 struct ixgbe_5tuple_filter *it; 6395 6396 TAILQ_FOREACH(it, filter_list, entries) { 6397 if (memcmp(key, &it->filter_info, 6398 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 6399 return it; 6400 } 6401 } 6402 return NULL; 6403 } 6404 6405 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ 6406 static inline int 6407 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 6408 struct ixgbe_5tuple_filter_info *filter_info) 6409 { 6410 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 6411 filter->priority > IXGBE_5TUPLE_MAX_PRI || 6412 filter->priority < IXGBE_5TUPLE_MIN_PRI) 6413 return -EINVAL; 6414 6415 switch (filter->dst_ip_mask) { 6416 case UINT32_MAX: 6417 filter_info->dst_ip_mask = 0; 6418 filter_info->dst_ip = filter->dst_ip; 6419 break; 6420 case 0: 6421 filter_info->dst_ip_mask = 1; 6422 break; 6423 default: 6424 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 6425 return -EINVAL; 6426 } 6427 6428 switch (filter->src_ip_mask) { 6429 case UINT32_MAX: 6430 filter_info->src_ip_mask = 0; 6431 filter_info->src_ip = filter->src_ip; 6432 break; 6433 case 0: 6434 filter_info->src_ip_mask = 1; 6435 break; 6436 default: 6437 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 6438 return -EINVAL; 6439 } 6440 6441 switch (filter->dst_port_mask) { 6442 case UINT16_MAX: 6443 filter_info->dst_port_mask = 0; 6444 filter_info->dst_port = filter->dst_port; 6445 break; 6446 case 0: 6447 filter_info->dst_port_mask = 1; 6448 break; 6449 default: 6450 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 6451 return -EINVAL; 6452 } 6453 6454 switch (filter->src_port_mask) { 6455 case UINT16_MAX: 6456 filter_info->src_port_mask = 0; 6457 filter_info->src_port = filter->src_port; 6458 break; 6459 case 0: 6460 filter_info->src_port_mask = 1; 6461 break; 6462 default: 6463 PMD_DRV_LOG(ERR, "invalid src_port mask."); 6464 return -EINVAL; 6465 } 6466 6467 switch (filter->proto_mask) { 6468 case UINT8_MAX: 6469 filter_info->proto_mask = 0; 6470 filter_info->proto = 6471 convert_protocol_type(filter->proto); 6472 break; 6473 case 0: 6474 filter_info->proto_mask = 1; 6475 break; 6476 default: 6477 PMD_DRV_LOG(ERR, "invalid protocol mask."); 6478 return -EINVAL; 6479 } 6480 6481 filter_info->priority = (uint8_t)filter->priority; 6482 return 0; 6483 } 6484 6485 /* 6486 * add or delete a ntuple filter 6487 * 6488 * @param 6489 * dev: Pointer to struct rte_eth_dev. 6490 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6491 * add: if true, add filter, if false, remove filter 6492 * 6493 * @return 6494 * - On success, zero. 6495 * - On failure, a negative value. 
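* A minimal usage sketch (illustrative, hypothetical values only; applications
* normally reach this path through the rte_flow ops returned by
* ixgbe_dev_flow_ops_get()):
*
*     struct rte_eth_ntuple_filter nf = {
*         .flags = RTE_5TUPLE_FLAGS,
*         .dst_ip = dst_ip,            -- hypothetical IPv4 address value
*         .dst_ip_mask = UINT32_MAX,   -- UINT32_MAX = compare, 0 = ignore
*         .proto = IPPROTO_TCP,
*         .proto_mask = UINT8_MAX,
*         .priority = 1,               -- within IXGBE_5TUPLE_MIN/MAX_PRI
*         .queue = rx_queue_id,        -- hypothetical target Rx queue
*     };
*     ret = ixgbe_add_del_ntuple_filter(dev, &nf, true);
*
* Fields whose *_mask is left 0 are simply not compared.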
6496 */ 6497 int 6498 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 6499 struct rte_eth_ntuple_filter *ntuple_filter, 6500 bool add) 6501 { 6502 struct ixgbe_filter_info *filter_info = 6503 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6504 struct ixgbe_5tuple_filter_info filter_5tuple; 6505 struct ixgbe_5tuple_filter *filter; 6506 int ret; 6507 6508 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 6509 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 6510 return -EINVAL; 6511 } 6512 6513 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 6514 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 6515 if (ret < 0) 6516 return ret; 6517 6518 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 6519 &filter_5tuple); 6520 if (filter != NULL && add) { 6521 PMD_DRV_LOG(ERR, "filter exists."); 6522 return -EEXIST; 6523 } 6524 if (filter == NULL && !add) { 6525 PMD_DRV_LOG(ERR, "filter doesn't exist."); 6526 return -ENOENT; 6527 } 6528 6529 if (add) { 6530 filter = rte_zmalloc("ixgbe_5tuple_filter", 6531 sizeof(struct ixgbe_5tuple_filter), 0); 6532 if (filter == NULL) 6533 return -ENOMEM; 6534 rte_memcpy(&filter->filter_info, 6535 &filter_5tuple, 6536 sizeof(struct ixgbe_5tuple_filter_info)); 6537 filter->queue = ntuple_filter->queue; 6538 ret = ixgbe_add_5tuple_filter(dev, filter); 6539 if (ret < 0) { 6540 rte_free(filter); 6541 return ret; 6542 } 6543 } else 6544 ixgbe_remove_5tuple_filter(dev, filter); 6545 6546 return 0; 6547 } 6548 6549 int 6550 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 6551 struct rte_eth_ethertype_filter *filter, 6552 bool add) 6553 { 6554 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6555 struct ixgbe_filter_info *filter_info = 6556 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6557 uint32_t etqf = 0; 6558 uint32_t etqs = 0; 6559 int ret; 6560 struct ixgbe_ethertype_filter ethertype_filter; 6561 6562 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6563 return -EINVAL; 6564 6565 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 6566 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 6567 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 6568 " ethertype filter.", filter->ether_type); 6569 return -EINVAL; 6570 } 6571 6572 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 6573 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 6574 return -EINVAL; 6575 } 6576 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 6577 PMD_DRV_LOG(ERR, "drop option is unsupported."); 6578 return -EINVAL; 6579 } 6580 6581 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 6582 if (ret >= 0 && add) { 6583 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 6584 filter->ether_type); 6585 return -EEXIST; 6586 } 6587 if (ret < 0 && !add) { 6588 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 6589 filter->ether_type); 6590 return -ENOENT; 6591 } 6592 6593 if (add) { 6594 etqf = IXGBE_ETQF_FILTER_EN; 6595 etqf |= (uint32_t)filter->ether_type; 6596 etqs |= (uint32_t)((filter->queue << 6597 IXGBE_ETQS_RX_QUEUE_SHIFT) & 6598 IXGBE_ETQS_RX_QUEUE); 6599 etqs |= IXGBE_ETQS_QUEUE_EN; 6600 6601 ethertype_filter.ethertype = filter->ether_type; 6602 ethertype_filter.etqf = etqf; 6603 ethertype_filter.etqs = etqs; 6604 ethertype_filter.conf = FALSE; 6605 ret = ixgbe_ethertype_filter_insert(filter_info, 6606 ðertype_filter); 6607 if (ret < 0) { 6608 PMD_DRV_LOG(ERR, "ethertype filters are full."); 6609 return -ENOSPC; 6610 } 6611 } else { 6612 ret = ixgbe_ethertype_filter_remove(filter_info, 
(uint8_t)ret); 6613 if (ret < 0) 6614 return -ENOSYS; 6615 } 6616 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 6617 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 6618 IXGBE_WRITE_FLUSH(hw); 6619 6620 return 0; 6621 } 6622 6623 static int 6624 ixgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev, 6625 const struct rte_flow_ops **ops) 6626 { 6627 *ops = &ixgbe_flow_ops; 6628 return 0; 6629 } 6630 6631 static u8 * 6632 ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw, 6633 u8 **mc_addr_ptr, u32 *vmdq) 6634 { 6635 u8 *mc_addr; 6636 6637 *vmdq = 0; 6638 mc_addr = *mc_addr_ptr; 6639 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 6640 return mc_addr; 6641 } 6642 6643 static int 6644 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 6645 struct rte_ether_addr *mc_addr_set, 6646 uint32_t nb_mc_addr) 6647 { 6648 struct ixgbe_hw *hw; 6649 u8 *mc_addr_list; 6650 6651 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6652 mc_addr_list = (u8 *)mc_addr_set; 6653 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 6654 ixgbe_dev_addr_list_itr, TRUE); 6655 } 6656 6657 static uint64_t 6658 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 6659 { 6660 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6661 uint64_t systime_cycles; 6662 6663 switch (hw->mac.type) { 6664 case ixgbe_mac_X550: 6665 case ixgbe_mac_X550EM_x: 6666 case ixgbe_mac_X550EM_a: 6667 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 6668 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6669 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6670 * NSEC_PER_SEC; 6671 break; 6672 default: 6673 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6674 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6675 << 32; 6676 } 6677 6678 return systime_cycles; 6679 } 6680 6681 static uint64_t 6682 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6683 { 6684 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6685 uint64_t rx_tstamp_cycles; 6686 6687 switch (hw->mac.type) { 6688 case ixgbe_mac_X550: 6689 case ixgbe_mac_X550EM_x: 6690 case ixgbe_mac_X550EM_a: 6691 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6692 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6693 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6694 * NSEC_PER_SEC; 6695 break; 6696 default: 6697 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6698 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6699 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6700 << 32; 6701 } 6702 6703 return rx_tstamp_cycles; 6704 } 6705 6706 static uint64_t 6707 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6708 { 6709 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6710 uint64_t tx_tstamp_cycles; 6711 6712 switch (hw->mac.type) { 6713 case ixgbe_mac_X550: 6714 case ixgbe_mac_X550EM_x: 6715 case ixgbe_mac_X550EM_a: 6716 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6717 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6718 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6719 * NSEC_PER_SEC; 6720 break; 6721 default: 6722 /* TXSTMPL stores ns and TXSTMPH stores seconds. 
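For 82599/X540 the two registers are instead combined below into one 64-bit cycle count, which ixgbe_timesync_read_tx_timestamp() later converts to ns through the tx_tstamp_tc timecounter set up in ixgbe_start_timecounters().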
*/ 6723 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6724 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6725 << 32; 6726 } 6727 6728 return tx_tstamp_cycles; 6729 } 6730 6731 static void 6732 ixgbe_start_timecounters(struct rte_eth_dev *dev) 6733 { 6734 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6735 struct ixgbe_adapter *adapter = dev->data->dev_private; 6736 struct rte_eth_link link; 6737 uint32_t incval = 0; 6738 uint32_t shift = 0; 6739 6740 /* Get current link speed. */ 6741 ixgbe_dev_link_update(dev, 1); 6742 rte_eth_linkstatus_get(dev, &link); 6743 6744 switch (link.link_speed) { 6745 case RTE_ETH_SPEED_NUM_100M: 6746 incval = IXGBE_INCVAL_100; 6747 shift = IXGBE_INCVAL_SHIFT_100; 6748 break; 6749 case RTE_ETH_SPEED_NUM_1G: 6750 incval = IXGBE_INCVAL_1GB; 6751 shift = IXGBE_INCVAL_SHIFT_1GB; 6752 break; 6753 case RTE_ETH_SPEED_NUM_10G: 6754 default: 6755 incval = IXGBE_INCVAL_10GB; 6756 shift = IXGBE_INCVAL_SHIFT_10GB; 6757 break; 6758 } 6759 6760 switch (hw->mac.type) { 6761 case ixgbe_mac_X550: 6762 case ixgbe_mac_X550EM_x: 6763 case ixgbe_mac_X550EM_a: 6764 /* Independent of link speed. */ 6765 incval = 1; 6766 /* Cycles read will be interpreted as ns. */ 6767 shift = 0; 6768 /* Fall-through */ 6769 case ixgbe_mac_X540: 6770 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 6771 break; 6772 case ixgbe_mac_82599EB: 6773 incval >>= IXGBE_INCVAL_SHIFT_82599; 6774 shift -= IXGBE_INCVAL_SHIFT_82599; 6775 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 6776 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 6777 break; 6778 default: 6779 /* Not supported. */ 6780 return; 6781 } 6782 6783 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 6784 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6785 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6786 6787 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6788 adapter->systime_tc.cc_shift = shift; 6789 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 6790 6791 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6792 adapter->rx_tstamp_tc.cc_shift = shift; 6793 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6794 6795 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6796 adapter->tx_tstamp_tc.cc_shift = shift; 6797 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6798 } 6799 6800 static int 6801 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 6802 { 6803 struct ixgbe_adapter *adapter = dev->data->dev_private; 6804 6805 adapter->systime_tc.nsec += delta; 6806 adapter->rx_tstamp_tc.nsec += delta; 6807 adapter->tx_tstamp_tc.nsec += delta; 6808 6809 return 0; 6810 } 6811 6812 static int 6813 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 6814 { 6815 uint64_t ns; 6816 struct ixgbe_adapter *adapter = dev->data->dev_private; 6817 6818 ns = rte_timespec_to_ns(ts); 6819 /* Set the timecounters to a new value. 
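Only the software timecounter bases are updated; the SYSTIM hardware registers are not touched, so subsequent reads are interpreted relative to this new origin.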
*/ 6820 adapter->systime_tc.nsec = ns; 6821 adapter->rx_tstamp_tc.nsec = ns; 6822 adapter->tx_tstamp_tc.nsec = ns; 6823 6824 return 0; 6825 } 6826 6827 static int 6828 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 6829 { 6830 uint64_t ns, systime_cycles; 6831 struct ixgbe_adapter *adapter = dev->data->dev_private; 6832 6833 systime_cycles = ixgbe_read_systime_cyclecounter(dev); 6834 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 6835 *ts = rte_ns_to_timespec(ns); 6836 6837 return 0; 6838 } 6839 6840 static int 6841 ixgbe_timesync_enable(struct rte_eth_dev *dev) 6842 { 6843 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6844 uint32_t tsync_ctl; 6845 uint32_t tsauxc; 6846 6847 /* Stop the timesync system time. */ 6848 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); 6849 /* Reset the timesync system time value. */ 6850 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); 6851 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); 6852 6853 /* Enable system time for platforms where it isn't on by default. */ 6854 tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); 6855 tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; 6856 IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); 6857 6858 ixgbe_start_timecounters(dev); 6859 6860 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6861 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 6862 (RTE_ETHER_TYPE_1588 | 6863 IXGBE_ETQF_FILTER_EN | 6864 IXGBE_ETQF_1588)); 6865 6866 /* Enable timestamping of received PTP packets. */ 6867 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6868 tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; 6869 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6870 6871 /* Enable timestamping of transmitted PTP packets. */ 6872 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6873 tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; 6874 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6875 6876 IXGBE_WRITE_FLUSH(hw); 6877 6878 return 0; 6879 } 6880 6881 static int 6882 ixgbe_timesync_disable(struct rte_eth_dev *dev) 6883 { 6884 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6885 uint32_t tsync_ctl; 6886 6887 /* Disable timestamping of transmitted PTP packets. */ 6888 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6889 tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; 6890 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6891 6892 /* Disable timestamping of received PTP packets. */ 6893 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6894 tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; 6895 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6896 6897 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6898 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 6899 6900 /* Stop incrementing the System Time registers. 
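Writing 0 to TIMINCA freezes SYSTIM until ixgbe_timesync_enable() programs a new increment through ixgbe_start_timecounters().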
*/ 6901 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); 6902 6903 return 0; 6904 } 6905 6906 static int 6907 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 6908 struct timespec *timestamp, 6909 uint32_t flags __rte_unused) 6910 { 6911 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6912 struct ixgbe_adapter *adapter = dev->data->dev_private; 6913 uint32_t tsync_rxctl; 6914 uint64_t rx_tstamp_cycles; 6915 uint64_t ns; 6916 6917 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6918 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) 6919 return -EINVAL; 6920 6921 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); 6922 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 6923 *timestamp = rte_ns_to_timespec(ns); 6924 6925 return 0; 6926 } 6927 6928 static int 6929 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 6930 struct timespec *timestamp) 6931 { 6932 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6933 struct ixgbe_adapter *adapter = dev->data->dev_private; 6934 uint32_t tsync_txctl; 6935 uint64_t tx_tstamp_cycles; 6936 uint64_t ns; 6937 6938 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6939 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) 6940 return -EINVAL; 6941 6942 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); 6943 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 6944 *timestamp = rte_ns_to_timespec(ns); 6945 6946 return 0; 6947 } 6948 6949 static int 6950 ixgbe_get_reg_length(struct rte_eth_dev *dev) 6951 { 6952 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6953 int count = 0; 6954 int g_ind = 0; 6955 const struct reg_info *reg_group; 6956 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 6957 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6958 6959 while ((reg_group = reg_set[g_ind++])) 6960 count += ixgbe_regs_group_count(reg_group); 6961 6962 return count; 6963 } 6964 6965 static int 6966 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 6967 { 6968 int count = 0; 6969 int g_ind = 0; 6970 const struct reg_info *reg_group; 6971 6972 while ((reg_group = ixgbevf_regs[g_ind++])) 6973 count += ixgbe_regs_group_count(reg_group); 6974 6975 return count; 6976 } 6977 6978 static int 6979 ixgbe_get_regs(struct rte_eth_dev *dev, 6980 struct rte_dev_reg_info *regs) 6981 { 6982 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6983 uint32_t *data = regs->data; 6984 int g_ind = 0; 6985 int count = 0; 6986 const struct reg_info *reg_group; 6987 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
6988 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6989 6990 if (data == NULL) { 6991 regs->length = ixgbe_get_reg_length(dev); 6992 regs->width = sizeof(uint32_t); 6993 return 0; 6994 } 6995 6996 /* Support only full register dump */ 6997 if ((regs->length == 0) || 6998 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 6999 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7000 hw->device_id; 7001 while ((reg_group = reg_set[g_ind++])) 7002 count += ixgbe_read_regs_group(dev, &data[count], 7003 reg_group); 7004 return 0; 7005 } 7006 7007 return -ENOTSUP; 7008 } 7009 7010 static int 7011 ixgbevf_get_regs(struct rte_eth_dev *dev, 7012 struct rte_dev_reg_info *regs) 7013 { 7014 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7015 uint32_t *data = regs->data; 7016 int g_ind = 0; 7017 int count = 0; 7018 const struct reg_info *reg_group; 7019 7020 if (data == NULL) { 7021 regs->length = ixgbevf_get_reg_length(dev); 7022 regs->width = sizeof(uint32_t); 7023 return 0; 7024 } 7025 7026 /* Support only full register dump */ 7027 if ((regs->length == 0) || 7028 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 7029 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7030 hw->device_id; 7031 while ((reg_group = ixgbevf_regs[g_ind++])) 7032 count += ixgbe_read_regs_group(dev, &data[count], 7033 reg_group); 7034 return 0; 7035 } 7036 7037 return -ENOTSUP; 7038 } 7039 7040 static int 7041 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 7042 { 7043 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7044 7045 /* Return unit is byte count */ 7046 return hw->eeprom.word_size * 2; 7047 } 7048 7049 static int 7050 ixgbe_get_eeprom(struct rte_eth_dev *dev, 7051 struct rte_dev_eeprom_info *in_eeprom) 7052 { 7053 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7054 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7055 uint16_t *data = in_eeprom->data; 7056 int first, length; 7057 7058 first = in_eeprom->offset >> 1; 7059 length = in_eeprom->length >> 1; 7060 if ((first > hw->eeprom.word_size) || 7061 ((first + length) > hw->eeprom.word_size)) 7062 return -EINVAL; 7063 7064 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7065 7066 return eeprom->ops.read_buffer(hw, first, length, data); 7067 } 7068 7069 static int 7070 ixgbe_set_eeprom(struct rte_eth_dev *dev, 7071 struct rte_dev_eeprom_info *in_eeprom) 7072 { 7073 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7074 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7075 uint16_t *data = in_eeprom->data; 7076 int first, length; 7077 7078 first = in_eeprom->offset >> 1; 7079 length = in_eeprom->length >> 1; 7080 if ((first > hw->eeprom.word_size) || 7081 ((first + length) > hw->eeprom.word_size)) 7082 return -EINVAL; 7083 7084 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7085 7086 return eeprom->ops.write_buffer(hw, first, length, data); 7087 } 7088 7089 static int 7090 ixgbe_get_module_info(struct rte_eth_dev *dev, 7091 struct rte_eth_dev_module_info *modinfo) 7092 { 7093 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7094 uint32_t status; 7095 uint8_t sff8472_rev, addr_mode; 7096 bool page_swap = false; 7097 7098 /* Check whether we support SFF-8472 or not */ 7099 status = hw->phy.ops.read_i2c_eeprom(hw, 7100 IXGBE_SFF_SFF_8472_COMP, 7101 &sff8472_rev); 7102 if (status != 0) 7103 return -EIO; 7104 7105 /* addressing mode is not supported */ 7106 status = hw->phy.ops.read_i2c_eeprom(hw, 7107 
IXGBE_SFF_SFF_8472_SWAP, 7108 &addr_mode); 7109 if (status != 0) 7110 return -EIO; 7111 7112 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 7113 PMD_DRV_LOG(ERR, 7114 "Address change required to access page 0xA2, " 7115 "but not supported. Please report the module " 7116 "type to the driver maintainers."); 7117 page_swap = true; 7118 } 7119 7120 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 7121 /* We have a SFP, but it does not support SFF-8472 */ 7122 modinfo->type = RTE_ETH_MODULE_SFF_8079; 7123 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 7124 } else { 7125 /* We have a SFP which supports a revision of SFF-8472. */ 7126 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7127 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7128 } 7129 7130 return 0; 7131 } 7132 7133 static int 7134 ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 7135 struct rte_dev_eeprom_info *info) 7136 { 7137 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7138 uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; 7139 uint8_t databyte = 0xFF; 7140 uint8_t *data = info->data; 7141 uint32_t i = 0; 7142 7143 for (i = info->offset; i < info->offset + info->length; i++) { 7144 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 7145 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 7146 else 7147 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 7148 7149 if (status != 0) 7150 return -EIO; 7151 7152 data[i - info->offset] = databyte; 7153 } 7154 7155 return 0; 7156 } 7157 7158 uint16_t 7159 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 7160 switch (mac_type) { 7161 case ixgbe_mac_X550: 7162 case ixgbe_mac_X550EM_x: 7163 case ixgbe_mac_X550EM_a: 7164 return RTE_ETH_RSS_RETA_SIZE_512; 7165 case ixgbe_mac_X550_vf: 7166 case ixgbe_mac_X550EM_x_vf: 7167 case ixgbe_mac_X550EM_a_vf: 7168 return RTE_ETH_RSS_RETA_SIZE_64; 7169 case ixgbe_mac_X540_vf: 7170 case ixgbe_mac_82599_vf: 7171 return 0; 7172 default: 7173 return RTE_ETH_RSS_RETA_SIZE_128; 7174 } 7175 } 7176 7177 uint32_t 7178 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 7179 switch (mac_type) { 7180 case ixgbe_mac_X550: 7181 case ixgbe_mac_X550EM_x: 7182 case ixgbe_mac_X550EM_a: 7183 if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128) 7184 return IXGBE_RETA(reta_idx >> 2); 7185 else 7186 return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2); 7187 case ixgbe_mac_X550_vf: 7188 case ixgbe_mac_X550EM_x_vf: 7189 case ixgbe_mac_X550EM_a_vf: 7190 return IXGBE_VFRETA(reta_idx >> 2); 7191 default: 7192 return IXGBE_RETA(reta_idx >> 2); 7193 } 7194 } 7195 7196 uint32_t 7197 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 7198 switch (mac_type) { 7199 case ixgbe_mac_X550_vf: 7200 case ixgbe_mac_X550EM_x_vf: 7201 case ixgbe_mac_X550EM_a_vf: 7202 return IXGBE_VFMRQC; 7203 default: 7204 return IXGBE_MRQC; 7205 } 7206 } 7207 7208 uint32_t 7209 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 7210 switch (mac_type) { 7211 case ixgbe_mac_X550_vf: 7212 case ixgbe_mac_X550EM_x_vf: 7213 case ixgbe_mac_X550EM_a_vf: 7214 return IXGBE_VFRSSRK(i); 7215 default: 7216 return IXGBE_RSSRK(i); 7217 } 7218 } 7219 7220 bool 7221 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 7222 switch (mac_type) { 7223 case ixgbe_mac_82599_vf: 7224 case ixgbe_mac_X540_vf: 7225 return 0; 7226 default: 7227 return 1; 7228 } 7229 } 7230 7231 static int 7232 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 7233 struct rte_eth_dcb_info *dcb_info) 7234 { 7235 struct ixgbe_dcb_config *dcb_config = 7236 
IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 7237 struct ixgbe_dcb_tc_config *tc; 7238 struct rte_eth_dcb_tc_queue_mapping *tc_queue; 7239 uint8_t nb_tcs; 7240 uint8_t i, j; 7241 7242 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) 7243 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 7244 else 7245 dcb_info->nb_tcs = 1; 7246 7247 tc_queue = &dcb_info->tc_queue; 7248 nb_tcs = dcb_info->nb_tcs; 7249 7250 if (dcb_config->vt_mode) { /* vt is enabled*/ 7251 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 7252 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 7253 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) 7254 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 7255 if (RTE_ETH_DEV_SRIOV(dev).active > 0) { 7256 for (j = 0; j < nb_tcs; j++) { 7257 tc_queue->tc_rxq[0][j].base = j; 7258 tc_queue->tc_rxq[0][j].nb_queue = 1; 7259 tc_queue->tc_txq[0][j].base = j; 7260 tc_queue->tc_txq[0][j].nb_queue = 1; 7261 } 7262 } else { 7263 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 7264 for (j = 0; j < nb_tcs; j++) { 7265 tc_queue->tc_rxq[i][j].base = 7266 i * nb_tcs + j; 7267 tc_queue->tc_rxq[i][j].nb_queue = 1; 7268 tc_queue->tc_txq[i][j].base = 7269 i * nb_tcs + j; 7270 tc_queue->tc_txq[i][j].nb_queue = 1; 7271 } 7272 } 7273 } 7274 } else { /* vt is disabled*/ 7275 struct rte_eth_dcb_rx_conf *rx_conf = 7276 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 7277 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) 7278 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 7279 if (dcb_info->nb_tcs == RTE_ETH_4_TCS) { 7280 for (i = 0; i < dcb_info->nb_tcs; i++) { 7281 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 7282 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7283 } 7284 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7285 dcb_info->tc_queue.tc_txq[0][1].base = 64; 7286 dcb_info->tc_queue.tc_txq[0][2].base = 96; 7287 dcb_info->tc_queue.tc_txq[0][3].base = 112; 7288 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 7289 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7290 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7291 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7292 } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) { 7293 for (i = 0; i < dcb_info->nb_tcs; i++) { 7294 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 7295 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7296 } 7297 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7298 dcb_info->tc_queue.tc_txq[0][1].base = 32; 7299 dcb_info->tc_queue.tc_txq[0][2].base = 64; 7300 dcb_info->tc_queue.tc_txq[0][3].base = 80; 7301 dcb_info->tc_queue.tc_txq[0][4].base = 96; 7302 dcb_info->tc_queue.tc_txq[0][5].base = 104; 7303 dcb_info->tc_queue.tc_txq[0][6].base = 112; 7304 dcb_info->tc_queue.tc_txq[0][7].base = 120; 7305 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 7306 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7307 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7308 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7309 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 7310 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 7311 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 7312 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 7313 } 7314 } 7315 for (i = 0; i < dcb_info->nb_tcs; i++) { 7316 tc = &dcb_config->tc_config[i]; 7317 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; 7318 } 7319 return 0; 7320 } 7321 7322 /* Update e-tag ether type */ 7323 static int 7324 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, 7325 uint16_t ether_type) 7326 { 7327 uint32_t etag_etype; 7328 7329 if (hw->mac.type != ixgbe_mac_X550 && 7330 hw->mac.type != 
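/* E-tag (802.1BR) offload is only implemented for the X550 family;
 * the E-tag helpers below repeat this same MAC-type guard.
 */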
ixgbe_mac_X550EM_x && 7331 hw->mac.type != ixgbe_mac_X550EM_a) { 7332 return -ENOTSUP; 7333 } 7334 7335 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7336 etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; 7337 etag_etype |= ether_type; 7338 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7339 IXGBE_WRITE_FLUSH(hw); 7340 7341 return 0; 7342 } 7343 7344 /* Enable e-tag tunnel */ 7345 static int 7346 ixgbe_e_tag_enable(struct ixgbe_hw *hw) 7347 { 7348 uint32_t etag_etype; 7349 7350 if (hw->mac.type != ixgbe_mac_X550 && 7351 hw->mac.type != ixgbe_mac_X550EM_x && 7352 hw->mac.type != ixgbe_mac_X550EM_a) { 7353 return -ENOTSUP; 7354 } 7355 7356 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7357 etag_etype |= IXGBE_ETAG_ETYPE_VALID; 7358 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7359 IXGBE_WRITE_FLUSH(hw); 7360 7361 return 0; 7362 } 7363 7364 static int 7365 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, 7366 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7367 { 7368 int ret = 0; 7369 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7370 uint32_t i, rar_entries; 7371 uint32_t rar_low, rar_high; 7372 7373 if (hw->mac.type != ixgbe_mac_X550 && 7374 hw->mac.type != ixgbe_mac_X550EM_x && 7375 hw->mac.type != ixgbe_mac_X550EM_a) { 7376 return -ENOTSUP; 7377 } 7378 7379 rar_entries = ixgbe_get_num_rx_addrs(hw); 7380 7381 for (i = 1; i < rar_entries; i++) { 7382 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7383 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 7384 if ((rar_high & IXGBE_RAH_AV) && 7385 (rar_high & IXGBE_RAH_ADTYPE) && 7386 ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == 7387 l2_tunnel->tunnel_id)) { 7388 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 7389 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 7390 7391 ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); 7392 7393 return ret; 7394 } 7395 } 7396 7397 return ret; 7398 } 7399 7400 static int 7401 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, 7402 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7403 { 7404 int ret = 0; 7405 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7406 uint32_t i, rar_entries; 7407 uint32_t rar_low, rar_high; 7408 7409 if (hw->mac.type != ixgbe_mac_X550 && 7410 hw->mac.type != ixgbe_mac_X550EM_x && 7411 hw->mac.type != ixgbe_mac_X550EM_a) { 7412 return -ENOTSUP; 7413 } 7414 7415 /* One entry for one tunnel. Try to remove potential existing entry. */ 7416 ixgbe_e_tag_filter_del(dev, l2_tunnel); 7417 7418 rar_entries = ixgbe_get_num_rx_addrs(hw); 7419 7420 for (i = 1; i < rar_entries; i++) { 7421 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7422 if (rar_high & IXGBE_RAH_AV) { 7423 continue; 7424 } else { 7425 ixgbe_set_vmdq(hw, i, l2_tunnel->pool); 7426 rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; 7427 rar_low = l2_tunnel->tunnel_id; 7428 7429 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); 7430 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); 7431 7432 return ret; 7433 } 7434 } 7435 7436 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
7437 " Please remove a rule before adding a new one."); 7438 return -EINVAL; 7439 } 7440 7441 static inline struct ixgbe_l2_tn_filter * 7442 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, 7443 struct ixgbe_l2_tn_key *key) 7444 { 7445 int ret; 7446 7447 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 7448 if (ret < 0) 7449 return NULL; 7450 7451 return l2_tn_info->hash_map[ret]; 7452 } 7453 7454 static inline int 7455 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7456 struct ixgbe_l2_tn_filter *l2_tn_filter) 7457 { 7458 int ret; 7459 7460 ret = rte_hash_add_key(l2_tn_info->hash_handle, 7461 &l2_tn_filter->key); 7462 7463 if (ret < 0) { 7464 PMD_DRV_LOG(ERR, 7465 "Failed to insert L2 tunnel filter" 7466 " to hash table %d!", 7467 ret); 7468 return ret; 7469 } 7470 7471 l2_tn_info->hash_map[ret] = l2_tn_filter; 7472 7473 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7474 7475 return 0; 7476 } 7477 7478 static inline int 7479 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7480 struct ixgbe_l2_tn_key *key) 7481 { 7482 int ret; 7483 struct ixgbe_l2_tn_filter *l2_tn_filter; 7484 7485 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 7486 7487 if (ret < 0) { 7488 PMD_DRV_LOG(ERR, 7489 "No such L2 tunnel filter to delete %d!", 7490 ret); 7491 return ret; 7492 } 7493 7494 l2_tn_filter = l2_tn_info->hash_map[ret]; 7495 l2_tn_info->hash_map[ret] = NULL; 7496 7497 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7498 rte_free(l2_tn_filter); 7499 7500 return 0; 7501 } 7502 7503 /* Add l2 tunnel filter */ 7504 int 7505 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 7506 struct ixgbe_l2_tunnel_conf *l2_tunnel, 7507 bool restore) 7508 { 7509 int ret; 7510 struct ixgbe_l2_tn_info *l2_tn_info = 7511 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7512 struct ixgbe_l2_tn_key key; 7513 struct ixgbe_l2_tn_filter *node; 7514 7515 if (!restore) { 7516 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7517 key.tn_id = l2_tunnel->tunnel_id; 7518 7519 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); 7520 7521 if (node) { 7522 PMD_DRV_LOG(ERR, 7523 "The L2 tunnel filter already exists!"); 7524 return -EINVAL; 7525 } 7526 7527 node = rte_zmalloc("ixgbe_l2_tn", 7528 sizeof(struct ixgbe_l2_tn_filter), 7529 0); 7530 if (!node) 7531 return -ENOMEM; 7532 7533 rte_memcpy(&node->key, 7534 &key, 7535 sizeof(struct ixgbe_l2_tn_key)); 7536 node->pool = l2_tunnel->pool; 7537 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); 7538 if (ret < 0) { 7539 rte_free(node); 7540 return ret; 7541 } 7542 } 7543 7544 switch (l2_tunnel->l2_tunnel_type) { 7545 case RTE_ETH_L2_TUNNEL_TYPE_E_TAG: 7546 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 7547 break; 7548 default: 7549 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7550 ret = -EINVAL; 7551 break; 7552 } 7553 7554 if ((!restore) && (ret < 0)) 7555 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7556 7557 return ret; 7558 } 7559 7560 /* Delete l2 tunnel filter */ 7561 int 7562 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 7563 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7564 { 7565 int ret; 7566 struct ixgbe_l2_tn_info *l2_tn_info = 7567 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7568 struct ixgbe_l2_tn_key key; 7569 7570 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7571 key.tn_id = l2_tunnel->tunnel_id; 7572 ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7573 if (ret < 0) 7574 return ret; 7575 7576 switch (l2_tunnel->l2_tunnel_type) { 7577 case 
RTE_ETH_L2_TUNNEL_TYPE_E_TAG: 7578 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); 7579 break; 7580 default: 7581 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7582 ret = -EINVAL; 7583 break; 7584 } 7585 7586 return ret; 7587 } 7588 7589 static int 7590 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 7591 { 7592 int ret = 0; 7593 uint32_t ctrl; 7594 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7595 7596 if (hw->mac.type != ixgbe_mac_X550 && 7597 hw->mac.type != ixgbe_mac_X550EM_x && 7598 hw->mac.type != ixgbe_mac_X550EM_a) { 7599 return -ENOTSUP; 7600 } 7601 7602 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 7603 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 7604 if (en) 7605 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; 7606 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 7607 7608 return ret; 7609 } 7610 7611 static int 7612 ixgbe_update_vxlan_port(struct ixgbe_hw *hw, 7613 uint16_t port) 7614 { 7615 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); 7616 IXGBE_WRITE_FLUSH(hw); 7617 7618 return 0; 7619 } 7620 7621 /* There's only one register for VxLAN UDP port. 7622 * So, we cannot add several ports. Will update it. 7623 */ 7624 static int 7625 ixgbe_add_vxlan_port(struct ixgbe_hw *hw, 7626 uint16_t port) 7627 { 7628 if (port == 0) { 7629 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 7630 return -EINVAL; 7631 } 7632 7633 return ixgbe_update_vxlan_port(hw, port); 7634 } 7635 7636 /* We cannot delete the VxLAN port. For there's a register for VxLAN 7637 * UDP port, it must have a value. 7638 * So, will reset it to the original value 0. 7639 */ 7640 static int 7641 ixgbe_del_vxlan_port(struct ixgbe_hw *hw, 7642 uint16_t port) 7643 { 7644 uint16_t cur_port; 7645 7646 cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); 7647 7648 if (cur_port != port) { 7649 PMD_DRV_LOG(ERR, "Port %u does not exist.", port); 7650 return -EINVAL; 7651 } 7652 7653 return ixgbe_update_vxlan_port(hw, 0); 7654 } 7655 7656 /* Add UDP tunneling port */ 7657 static int 7658 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 7659 struct rte_eth_udp_tunnel *udp_tunnel) 7660 { 7661 int ret = 0; 7662 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7663 7664 if (hw->mac.type != ixgbe_mac_X550 && 7665 hw->mac.type != ixgbe_mac_X550EM_x && 7666 hw->mac.type != ixgbe_mac_X550EM_a) { 7667 return -ENOTSUP; 7668 } 7669 7670 if (udp_tunnel == NULL) 7671 return -EINVAL; 7672 7673 switch (udp_tunnel->prot_type) { 7674 case RTE_ETH_TUNNEL_TYPE_VXLAN: 7675 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); 7676 break; 7677 7678 case RTE_ETH_TUNNEL_TYPE_GENEVE: 7679 case RTE_ETH_TUNNEL_TYPE_TEREDO: 7680 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7681 ret = -EINVAL; 7682 break; 7683 7684 default: 7685 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7686 ret = -EINVAL; 7687 break; 7688 } 7689 7690 return ret; 7691 } 7692 7693 /* Remove UDP tunneling port */ 7694 static int 7695 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 7696 struct rte_eth_udp_tunnel *udp_tunnel) 7697 { 7698 int ret = 0; 7699 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7700 7701 if (hw->mac.type != ixgbe_mac_X550 && 7702 hw->mac.type != ixgbe_mac_X550EM_x && 7703 hw->mac.type != ixgbe_mac_X550EM_a) { 7704 return -ENOTSUP; 7705 } 7706 7707 if (udp_tunnel == NULL) 7708 return -EINVAL; 7709 7710 switch (udp_tunnel->prot_type) { 7711 case RTE_ETH_TUNNEL_TYPE_VXLAN: 7712 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); 7713 break; 7714 case RTE_ETH_TUNNEL_TYPE_GENEVE: 7715 case 
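/* Only the single VXLAN UDP port register (VXLANCTRL) exists on this
 * hardware, so GENEVE and Teredo tunnel ports are rejected here as
 * well, mirroring the add path above.
 */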
RTE_ETH_TUNNEL_TYPE_TEREDO: 7716 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7717 ret = -EINVAL; 7718 break; 7719 default: 7720 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7721 ret = -EINVAL; 7722 break; 7723 } 7724 7725 return ret; 7726 } 7727 7728 static int 7729 ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) 7730 { 7731 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7732 int ret; 7733 7734 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) { 7735 case IXGBE_SUCCESS: 7736 ret = 0; 7737 break; 7738 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7739 ret = -ENOTSUP; 7740 break; 7741 default: 7742 ret = -EAGAIN; 7743 break; 7744 } 7745 7746 return ret; 7747 } 7748 7749 static int 7750 ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) 7751 { 7752 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7753 int ret; 7754 7755 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) { 7756 case IXGBE_SUCCESS: 7757 ret = 0; 7758 break; 7759 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7760 ret = -ENOTSUP; 7761 break; 7762 default: 7763 ret = -EAGAIN; 7764 break; 7765 } 7766 7767 return ret; 7768 } 7769 7770 static int 7771 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) 7772 { 7773 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7774 int ret; 7775 int mode = IXGBEVF_XCAST_MODE_ALLMULTI; 7776 7777 switch (hw->mac.ops.update_xcast_mode(hw, mode)) { 7778 case IXGBE_SUCCESS: 7779 ret = 0; 7780 break; 7781 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7782 ret = -ENOTSUP; 7783 break; 7784 default: 7785 ret = -EAGAIN; 7786 break; 7787 } 7788 7789 return ret; 7790 } 7791 7792 static int 7793 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) 7794 { 7795 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7796 int ret; 7797 7798 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { 7799 case IXGBE_SUCCESS: 7800 ret = 0; 7801 break; 7802 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7803 ret = -ENOTSUP; 7804 break; 7805 default: 7806 ret = -EAGAIN; 7807 break; 7808 } 7809 7810 return ret; 7811 } 7812 7813 static void ixgbevf_mbx_process(struct rte_eth_dev *dev) 7814 { 7815 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7816 u32 in_msg = 0; 7817 7818 /* peek the message first */ 7819 in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); 7820 7821 /* PF reset VF event */ 7822 if (in_msg == IXGBE_PF_CONTROL_MSG) { 7823 /* dummy mbx read to ack pf */ 7824 if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) 7825 return; 7826 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 7827 NULL); 7828 } 7829 } 7830 7831 static int 7832 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) 7833 { 7834 uint32_t eicr; 7835 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7836 struct ixgbe_interrupt *intr = 7837 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7838 ixgbevf_intr_disable(dev); 7839 7840 /* read-on-clear nic registers here */ 7841 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); 7842 intr->flags = 0; 7843 7844 /* only one misc vector supported - mailbox */ 7845 eicr &= IXGBE_VTEICR_MASK; 7846 if (eicr == IXGBE_MISC_VEC_ID) 7847 intr->flags |= IXGBE_FLAG_MAILBOX; 7848 7849 return 0; 7850 } 7851 7852 static int 7853 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) 7854 { 7855 struct ixgbe_interrupt *intr = 7856 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7857 7858 if (intr->flags & IXGBE_FLAG_MAILBOX) { 7859 ixgbevf_mbx_process(dev); 
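/* The only mailbox event handled here is the PF reset notification,
 * which ixgbevf_mbx_process() forwards to the application as
 * RTE_ETH_EVENT_INTR_RESET; clear the flag before the VF interrupt is
 * re-enabled below.
 */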
7860 intr->flags &= ~IXGBE_FLAG_MAILBOX; 7861 } 7862 7863 ixgbevf_intr_enable(dev); 7864 7865 return 0; 7866 } 7867 7868 static void 7869 ixgbevf_dev_interrupt_handler(void *param) 7870 { 7871 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 7872 7873 ixgbevf_dev_interrupt_get_status(dev); 7874 ixgbevf_dev_interrupt_action(dev); 7875 } 7876 7877 /** 7878 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path 7879 * @hw: pointer to hardware structure 7880 * 7881 * Stops the transmit data path and waits for the HW to internally empty 7882 * the Tx security block 7883 **/ 7884 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) 7885 { 7886 #define IXGBE_MAX_SECTX_POLL 40 7887 7888 int i; 7889 int sectxreg; 7890 7891 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 7892 sectxreg |= IXGBE_SECTXCTRL_TX_DIS; 7893 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 7894 for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { 7895 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); 7896 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) 7897 break; 7898 /* Use interrupt-safe sleep just in case */ 7899 usec_delay(1000); 7900 } 7901 7902 /* For informational purposes only */ 7903 if (i >= IXGBE_MAX_SECTX_POLL) 7904 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " 7905 "path fully disabled. Continuing with init."); 7906 7907 return IXGBE_SUCCESS; 7908 } 7909 7910 /** 7911 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path 7912 * @hw: pointer to hardware structure 7913 * 7914 * Enables the transmit data path. 7915 **/ 7916 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) 7917 { 7918 uint32_t sectxreg; 7919 7920 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 7921 sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; 7922 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 7923 IXGBE_WRITE_FLUSH(hw); 7924 7925 return IXGBE_SUCCESS; 7926 } 7927 7928 /* restore n-tuple filter */ 7929 static inline void 7930 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) 7931 { 7932 struct ixgbe_filter_info *filter_info = 7933 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 7934 struct ixgbe_5tuple_filter *node; 7935 7936 TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { 7937 ixgbe_inject_5tuple_filter(dev, node); 7938 } 7939 } 7940 7941 /* restore ethernet type filter */ 7942 static inline void 7943 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) 7944 { 7945 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7946 struct ixgbe_filter_info *filter_info = 7947 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 7948 int i; 7949 7950 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 7951 if (filter_info->ethertype_mask & (1 << i)) { 7952 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 7953 filter_info->ethertype_filters[i].etqf); 7954 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 7955 filter_info->ethertype_filters[i].etqs); 7956 IXGBE_WRITE_FLUSH(hw); 7957 } 7958 } 7959 } 7960 7961 /* restore SYN filter */ 7962 static inline void 7963 ixgbe_syn_filter_restore(struct rte_eth_dev *dev) 7964 { 7965 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7966 struct ixgbe_filter_info *filter_info = 7967 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 7968 uint32_t synqf; 7969 7970 synqf = filter_info->syn_info; 7971 7972 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 7973 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 7974 IXGBE_WRITE_FLUSH(hw); 7975 } 7976 } 7977 7978 /* restore L2 tunnel filter */ 7979 static inline void 7980 ixgbe_l2_tn_filter_restore(struct rte_eth_dev 
*dev) 7981 { 7982 struct ixgbe_l2_tn_info *l2_tn_info = 7983 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7984 struct ixgbe_l2_tn_filter *node; 7985 struct ixgbe_l2_tunnel_conf l2_tn_conf; 7986 7987 TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { 7988 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; 7989 l2_tn_conf.tunnel_id = node->key.tn_id; 7990 l2_tn_conf.pool = node->pool; 7991 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); 7992 } 7993 } 7994 7995 /* restore rss filter */ 7996 static inline void 7997 ixgbe_rss_filter_restore(struct rte_eth_dev *dev) 7998 { 7999 struct ixgbe_filter_info *filter_info = 8000 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8001 8002 if (filter_info->rss_info.conf.queue_num) 8003 ixgbe_config_rss_filter(dev, 8004 &filter_info->rss_info, TRUE); 8005 } 8006 8007 static int 8008 ixgbe_filter_restore(struct rte_eth_dev *dev) 8009 { 8010 ixgbe_ntuple_filter_restore(dev); 8011 ixgbe_ethertype_filter_restore(dev); 8012 ixgbe_syn_filter_restore(dev); 8013 ixgbe_fdir_filter_restore(dev); 8014 ixgbe_l2_tn_filter_restore(dev); 8015 ixgbe_rss_filter_restore(dev); 8016 8017 return 0; 8018 } 8019 8020 static void 8021 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) 8022 { 8023 struct ixgbe_l2_tn_info *l2_tn_info = 8024 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8025 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8026 8027 if (l2_tn_info->e_tag_en) 8028 (void)ixgbe_e_tag_enable(hw); 8029 8030 if (l2_tn_info->e_tag_fwd_en) 8031 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); 8032 8033 (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); 8034 } 8035 8036 /* remove all the n-tuple filters */ 8037 void 8038 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) 8039 { 8040 struct ixgbe_filter_info *filter_info = 8041 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8042 struct ixgbe_5tuple_filter *p_5tuple; 8043 8044 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) 8045 ixgbe_remove_5tuple_filter(dev, p_5tuple); 8046 } 8047 8048 /* remove all the ether type filters */ 8049 void 8050 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) 8051 { 8052 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8053 struct ixgbe_filter_info *filter_info = 8054 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8055 int i; 8056 8057 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8058 if (filter_info->ethertype_mask & (1 << i) && 8059 !filter_info->ethertype_filters[i].conf) { 8060 (void)ixgbe_ethertype_filter_remove(filter_info, 8061 (uint8_t)i); 8062 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); 8063 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); 8064 IXGBE_WRITE_FLUSH(hw); 8065 } 8066 } 8067 } 8068 8069 /* remove the SYN filter */ 8070 void 8071 ixgbe_clear_syn_filter(struct rte_eth_dev *dev) 8072 { 8073 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8074 struct ixgbe_filter_info *filter_info = 8075 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8076 8077 if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { 8078 filter_info->syn_info = 0; 8079 8080 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); 8081 IXGBE_WRITE_FLUSH(hw); 8082 } 8083 } 8084 8085 /* remove all the L2 tunnel filters */ 8086 int 8087 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) 8088 { 8089 struct ixgbe_l2_tn_info *l2_tn_info = 8090 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8091 struct ixgbe_l2_tn_filter *l2_tn_filter; 8092 struct 
ixgbe_l2_tunnel_conf l2_tn_conf;
8093 int ret = 0;
8094
8095 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
8096 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
8097 l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
8098 l2_tn_conf.pool = l2_tn_filter->pool;
8099 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
8100 if (ret < 0)
8101 return ret;
8102 }
8103
8104 return 0;
8105 }
8106
8107 void
8108 ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev,
8109 struct ixgbe_macsec_setting *macsec_setting)
8110 {
8111 struct ixgbe_macsec_setting *macsec =
8112 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
8113
8114 macsec->offload_en = macsec_setting->offload_en;
8115 macsec->encrypt_en = macsec_setting->encrypt_en;
8116 macsec->replayprotect_en = macsec_setting->replayprotect_en;
8117 }
8118
8119 void
8120 ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev)
8121 {
8122 struct ixgbe_macsec_setting *macsec =
8123 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
8124
8125 macsec->offload_en = 0;
8126 macsec->encrypt_en = 0;
8127 macsec->replayprotect_en = 0;
8128 }
8129
8130 void
8131 ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
8132 struct ixgbe_macsec_setting *macsec_setting)
8133 {
8134 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8135 uint32_t ctrl;
8136 uint8_t en = macsec_setting->encrypt_en;
8137 uint8_t rp = macsec_setting->replayprotect_en;
8138
8139 /**
8140 * Workaround:
8141 * The base code implements ixgbe_disable_sec_rx_path() but no Tx
8142 * equivalent, and the base code must not be modified in DPDK,
8143 * so call the locally implemented
8144 * ixgbe_disable_sec_tx_path_generic() directly for now.
8145 * Hardware support has already been checked by
8146 * ixgbe_disable_sec_rx_path().
8147 */
8148 ixgbe_disable_sec_tx_path_generic(hw);
8149
8150 /* Enable Ethernet CRC (required by MACsec offload) */
8151 ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
8152 ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
8153 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
8154
8155 /* Enable the TX and RX crypto engines */
8156 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8157 ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
8158 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
8159
8160 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
8161 ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
8162 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
8163
8164 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
8165 ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
8166 ctrl |= 0x3;
8167 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
8168
8169 /* Enable SA lookup */
8170 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
8171 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
8172 ctrl |= en ?
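/* encrypt_en selects authenticated encryption for LinkSec Tx; otherwise frames are integrity-protected only */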
IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
8173 IXGBE_LSECTXCTRL_AUTH;
8174 ctrl |= IXGBE_LSECTXCTRL_AISCI;
8175 ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
8176 ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
8177 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
8178
8179 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
8180 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
8181 ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
8182 ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
8183 if (rp)
8184 ctrl |= IXGBE_LSECRXCTRL_RP;
8185 else
8186 ctrl &= ~IXGBE_LSECRXCTRL_RP;
8187 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
8188
8189 /* Start the data paths */
8190 ixgbe_enable_sec_rx_path(hw);
8191 /**
8192 * Workaround:
8193 * The base code implements ixgbe_enable_sec_rx_path() but no Tx
8194 * equivalent, and the base code must not be modified in DPDK,
8195 * so call the locally implemented
8196 * ixgbe_enable_sec_tx_path_generic() directly for now.
8197 */
8198 ixgbe_enable_sec_tx_path_generic(hw);
8199 }
8200
8201 void
8202 ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev)
8203 {
8204 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8205 uint32_t ctrl;
8206
8207 /**
8208 * Workaround:
8209 * The base code implements ixgbe_disable_sec_rx_path() but no Tx
8210 * equivalent, and the base code must not be modified in DPDK,
8211 * so call the locally implemented
8212 * ixgbe_disable_sec_tx_path_generic() directly for now.
8213 * Hardware support has already been checked by
8214 * ixgbe_disable_sec_rx_path().
8215 */
8216 ixgbe_disable_sec_tx_path_generic(hw);
8217
8218 /* Disable the TX and RX crypto engines */
8219 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8220 ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
8221 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
8222
8223 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
8224 ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
8225 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
8226
8227 /* Disable SA lookup */
8228 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
8229 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
8230 ctrl |= IXGBE_LSECTXCTRL_DISABLE;
8231 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
8232
8233 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
8234 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
8235 ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
8236 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
8237
8238 /* Start the data paths */
8239 ixgbe_enable_sec_rx_path(hw);
8240 /**
8241 * Workaround:
8242 * The base code implements ixgbe_enable_sec_rx_path() but no Tx
8243 * equivalent, and the base code must not be modified in DPDK,
8244 * so call the locally implemented
8245 * ixgbe_enable_sec_tx_path_generic() directly for now.
8246 */
8247 ixgbe_enable_sec_tx_path_generic(hw);
8248 }
8249
8250 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
8251 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
8252 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
8253 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
8254 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
8255 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
8256 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
8257 IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
8258
8259 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_init, init, NOTICE);
8260 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_driver, driver, NOTICE);
8261
8262 #ifdef RTE_ETHDEV_DEBUG_RX
8263 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_rx, rx, DEBUG);
8264 #endif
8265 #ifdef RTE_ETHDEV_DEBUG_TX
8266 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_tx, tx, DEBUG);
8267 #endif
8268
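/*
 * Usage sketch for the VF devarg registered above (illustrative only,
 * not taken from this file; the PCI address is an example):
 *
 *   dpdk-testpmd -a 0000:01:10.0,pflink_fullchk=1 -- -i
 *
 * With pflink_fullchk=1 the VF performs the fuller PF link/mailbox
 * check when reporting link status; see the ixgbe guide in the DPDK
 * documentation for details.
 */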