/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_kvargs.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "base/ixgbe_osdep.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
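/*
 * For reference, a worked example of the units above: IXGBE_FC_HI = 0x80
 * is a high-water mark of 128 * 1024 = 131072 bytes and IXGBE_FC_LO = 0x40
 * a low-water mark of 64 * 1024 = 65536 bytes; both are loaded into
 * hw->fc.high_water[]/hw->fc.low_water[] in eth_ixgbe_dev_init() below.
 * IXGBE_FC_PAUSE = 0x680 (1664) is the timer carried in XOFF frames;
 * 802.3x flow control expresses pause time in quanta of 512 bit times.
 */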
/* Default value of Max Rx Queue */
#define IXGBE_MAX_RX_QUEUE_NUM 128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT     0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14
#define IXGBE_MAX_RING_DESC        4096 /* replicate define from rxtx */

/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH 32
#define IXGBE_DEFAULT_RX_PTHRESH     8
#define IXGBE_DEFAULT_RX_HTHRESH     8
#define IXGBE_DEFAULT_RX_WTHRESH     0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK  RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH CHAR_BIT
#define IXGBE_8_BIT_MASK  UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS \
        (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
#define IXGBE_ETAG_ETYPE               0x00005084
#define IXGBE_ETAG_ETYPE_MASK          0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID         0x80000000
#define IXGBE_RAH_ADTYPE               0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK     0x00003fff
#define IXGBE_VMVIR_TAGA_MASK          0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT   0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG            0x00000004
#define IXGBE_VTEICR_MASK              0x07

#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK    0xFFFF0000

#define IXGBEVF_DEVARG_PFLINK_FULLCHK "pflink_fullchk"

static const char * const ixgbevf_valid_arguments[] = {
        IXGBEVF_DEVARG_PFLINK_FULLCHK,
        NULL
};

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static int ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int ixgbe_dev_close(struct rte_eth_dev *dev);
static int ixgbe_dev_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                 int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
                                  struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
                           uint64_t *values, unsigned int n);
static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
                                      struct rte_eth_xstat_name *xstats_names,
                                      unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
                struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
        struct rte_eth_dev *dev,
        const uint64_t *ids,
        struct rte_eth_xstat_name *xstats_names,
        unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
                                size_t fw_size);
static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                                 uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
                               enum rte_vlan_type vlan_type,
                               uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                                           uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                                       int on);
static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
                                                  int mask);
static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                        struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                                     struct rte_eth_rss_reta_entry64 *reta_conf,
                                     uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void *ixgbe_dev_setup_link_thread_handler(void *param);
static int ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev,
                                              uint32_t timeout_ms);

static int ixgbe_add_rar(struct rte_eth_dev *dev,
                         struct rte_ether_addr *mac_addr,
                         uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct rte_ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
                                struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
                                   int wait_to_complete);
static int ixgbevf_dev_stop(struct rte_eth_dev *dev);
static int ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                                 struct rte_eth_stats *stats);
static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                                   uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                         uint16_t queue, int on);
static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                             uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                                 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
                                   struct rte_ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                           uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
                                        struct rte_ether_addr *mac_addr);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
                                   struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
                                       struct ixgbe_5tuple_filter *filter);
static int ixgbe_dev_flow_ops_get(struct rte_eth_dev *dev,
                                  const struct rte_flow_ops **ops);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                      struct rte_ether_addr *mc_addr_set,
                                      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
                          struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
                            struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
                            struct rte_dev_eeprom_info *eeprom);

static int ixgbe_get_module_info(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_module_info *modinfo);
static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
                                   struct rte_dev_eeprom_info *info);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
                            struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp,
                                            uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
                                    struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
                                     const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw);

/*
 * Define VF stats macros for registers that are not "cleared on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)                  \
{                                                       \
        uint32_t latest = IXGBE_READ_REG(hw, reg);      \
        cur += (latest - last) & UINT_MAX;              \
        last = latest;                                  \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
        u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
        u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
        u64 latest = ((new_msb << 32) | new_lsb);                \
        cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
        last = latest;                                           \
}
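/*
 * Example of the 36-bit wraparound handling above (illustrative numbers):
 * with last = 0xFFFFFFFFE and a counter that has since wrapped to
 * latest = 0x3, the delta is
 * (0x1000000000 + 0x3 - 0xFFFFFFFFE) & 0xFFFFFFFFF = 0x5, so five new
 * events are accumulated even though latest < last. The 32-bit variant
 * gets the same effect from unsigned arithmetic modulo 2^32.
 */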
#define IXGBE_SET_HWSTRIP(h, q) do {\
        uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
        uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
        (h)->bitmap[idx] |= 1 << bit;\
} while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
        uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
        uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
        (h)->bitmap[idx] &= ~(1 << bit);\
} while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
        uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
        uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
        (r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
        { .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_TXD_ALIGN,
        .nb_seg_max = IXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure            = ixgbe_dev_configure,
        .dev_start                = ixgbe_dev_start,
        .dev_stop                 = ixgbe_dev_stop,
        .dev_set_link_up          = ixgbe_dev_set_link_up,
        .dev_set_link_down        = ixgbe_dev_set_link_down,
        .dev_close                = ixgbe_dev_close,
        .dev_reset                = ixgbe_dev_reset,
        .promiscuous_enable       = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable      = ixgbe_dev_promiscuous_disable,
        .allmulticast_enable      = ixgbe_dev_allmulticast_enable,
        .allmulticast_disable     = ixgbe_dev_allmulticast_disable,
        .link_update              = ixgbe_dev_link_update,
        .stats_get                = ixgbe_dev_stats_get,
        .xstats_get               = ixgbe_dev_xstats_get,
        .xstats_get_by_id         = ixgbe_dev_xstats_get_by_id,
        .stats_reset              = ixgbe_dev_stats_reset,
        .xstats_reset             = ixgbe_dev_xstats_reset,
        .xstats_get_names         = ixgbe_dev_xstats_get_names,
        .xstats_get_names_by_id   = ixgbe_dev_xstats_get_names_by_id,
        .queue_stats_mapping_set  = ixgbe_dev_queue_stats_mapping_set,
        .fw_version_get           = ixgbe_fw_version_get,
        .dev_infos_get            = ixgbe_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set                  = ixgbe_dev_mtu_set,
        .vlan_filter_set          = ixgbe_vlan_filter_set,
        .vlan_tpid_set            = ixgbe_vlan_tpid_set,
        .vlan_offload_set         = ixgbe_vlan_offload_set,
        .vlan_strip_queue_set     = ixgbe_vlan_strip_queue_set,
        .rx_queue_start           = ixgbe_dev_rx_queue_start,
        .rx_queue_stop            = ixgbe_dev_rx_queue_stop,
        .tx_queue_start           = ixgbe_dev_tx_queue_start,
        .tx_queue_stop            = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup           = ixgbe_dev_rx_queue_setup,
        .rx_queue_intr_enable     = ixgbe_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable    = ixgbe_dev_rx_queue_intr_disable,
        .rx_queue_release         = ixgbe_dev_rx_queue_release,
        .tx_queue_setup           = ixgbe_dev_tx_queue_setup,
        .tx_queue_release         = ixgbe_dev_tx_queue_release,
        .dev_led_on               = ixgbe_dev_led_on,
        .dev_led_off              = ixgbe_dev_led_off,
        .flow_ctrl_get            = ixgbe_flow_ctrl_get,
        .flow_ctrl_set            = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set   = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add             = ixgbe_add_rar,
        .mac_addr_remove          = ixgbe_remove_rar,
        .mac_addr_set             = ixgbe_set_default_mac_addr,
        .uc_hash_table_set        = ixgbe_uc_hash_table_set,
        .uc_all_hash_table_set    = ixgbe_uc_all_hash_table_set,
        .set_queue_rate_limit     = ixgbe_set_queue_rate_limit,
        .reta_update              = ixgbe_dev_rss_reta_update,
        .reta_query               = ixgbe_dev_rss_reta_query,
        .rss_hash_update          = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get        = ixgbe_dev_rss_hash_conf_get,
        .flow_ops_get             = ixgbe_dev_flow_ops_get,
        .set_mc_addr_list         = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get             = ixgbe_rxq_info_get,
        .txq_info_get             = ixgbe_txq_info_get,
        .timesync_enable          = ixgbe_timesync_enable,
        .timesync_disable         = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
        .get_reg                  = ixgbe_get_regs,
        .get_eeprom_length        = ixgbe_get_eeprom_length,
        .get_eeprom               = ixgbe_get_eeprom,
        .set_eeprom               = ixgbe_set_eeprom,
        .get_module_info          = ixgbe_get_module_info,
        .get_module_eeprom        = ixgbe_get_module_eeprom,
        .get_dcb_info             = ixgbe_dev_get_dcb_info,
        .timesync_adjust_time     = ixgbe_timesync_adjust_time,
        .timesync_read_time       = ixgbe_timesync_read_time,
        .timesync_write_time      = ixgbe_timesync_write_time,
        .udp_tunnel_port_add      = ixgbe_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del      = ixgbe_dev_udp_tunnel_port_del,
        .tm_ops_get               = ixgbe_tm_ops_get,
        .tx_done_cleanup          = ixgbe_dev_tx_done_cleanup,
        .get_monitor_addr         = ixgbe_get_monitor_addr,
};

/*
 * dev_ops for virtual function; only the bare necessities for basic VF
 * operation have been implemented.
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_configure            = ixgbevf_dev_configure,
        .dev_start                = ixgbevf_dev_start,
        .dev_stop                 = ixgbevf_dev_stop,
        .link_update              = ixgbevf_dev_link_update,
        .stats_get                = ixgbevf_dev_stats_get,
        .xstats_get               = ixgbevf_dev_xstats_get,
        .stats_reset              = ixgbevf_dev_stats_reset,
        .xstats_reset             = ixgbevf_dev_stats_reset,
        .xstats_get_names         = ixgbevf_dev_xstats_get_names,
        .dev_close                = ixgbevf_dev_close,
        .dev_reset                = ixgbevf_dev_reset,
        .promiscuous_enable       = ixgbevf_dev_promiscuous_enable,
        .promiscuous_disable      = ixgbevf_dev_promiscuous_disable,
        .allmulticast_enable      = ixgbevf_dev_allmulticast_enable,
        .allmulticast_disable     = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get            = ixgbevf_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set                  = ixgbevf_dev_set_mtu,
        .vlan_filter_set          = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set     = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set         = ixgbevf_vlan_offload_set,
        .rx_queue_setup           = ixgbe_dev_rx_queue_setup,
        .rx_queue_release         = ixgbe_dev_rx_queue_release,
        .tx_queue_setup           = ixgbe_dev_tx_queue_setup,
        .tx_queue_release         = ixgbe_dev_tx_queue_release,
        .rx_queue_intr_enable     = ixgbevf_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable    = ixgbevf_dev_rx_queue_intr_disable,
        .mac_addr_add             = ixgbevf_add_mac_addr,
        .mac_addr_remove          = ixgbevf_remove_mac_addr,
        .set_mc_addr_list         = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get             = ixgbe_rxq_info_get,
        .txq_info_get             = ixgbe_txq_info_get,
        .mac_addr_set             = ixgbevf_set_default_mac_addr,
        .get_reg                  = ixgbevf_get_regs,
        .reta_update              = ixgbe_dev_rss_reta_update,
        .reta_query               = ixgbe_dev_rss_reta_query,
        .rss_hash_update          = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get        = ixgbe_dev_rss_hash_conf_get,
        .tx_done_cleanup          = ixgbe_dev_tx_done_cleanup,
        .get_monitor_addr         = ixgbe_get_monitor_addr,
};

/* Store statistics names and their offsets in the stats structure. */
struct rte_ixgbe_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
        {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
        {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
        {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
        {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
        {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
        {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
        {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
        {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
        {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
        {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
        {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
        {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
        {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
        {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
        {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, prc1023)},
        {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, prc1522)},
        {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
        {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
        {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
        {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
        {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
        {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
        {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
        {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
        {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
        {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
        {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
        {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
        {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
        {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
        {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
        {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
        {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, ptc1023)},
        {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, ptc1522)},
        {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
        {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
        {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
        {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

        {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, fdirustat_add)},
        {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, fdirustat_remove)},
        {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, fdirfstat_fadd)},
        {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, fdirfstat_fremove)},
        {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, fdirmatch)},
        {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, fdirmiss)},

        {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
        {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
        {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, fclast)},
        {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
        {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
        {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
        {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
        {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, fcoe_noddp)},
        {"rx_fcoe_no_direct_data_placement_ext_buff",
                offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

        {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
        {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
        {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
        {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
        {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
                           sizeof(rte_ixgbe_stats_strings[0]))
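/*
 * These name/offset tables back rte_eth_xstats_get() and
 * rte_eth_xstats_get_names() for this PMD. A minimal application-side
 * sketch for dumping them (hypothetical port_id, error handling omitted):
 *
 *      int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *      struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *      struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *      rte_eth_xstats_get_names(port_id, names, n);
 *      rte_eth_xstats_get(port_id, vals, n);
 *      for (int i = 0; i < n; i++)
 *              printf("%s: %" PRIu64 "\n",
 *                     names[vals[i].id].name, vals[i].value);
 */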
{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, 670 fclast)}, 671 {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)}, 672 {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)}, 673 {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)}, 674 {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)}, 675 {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, 676 fcoe_noddp)}, 677 {"rx_fcoe_no_direct_data_placement_ext_buff", 678 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)}, 679 680 {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 681 lxontxc)}, 682 {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 683 lxonrxc)}, 684 {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 685 lxofftxc)}, 686 {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 687 lxoffrxc)}, 688 {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)}, 689 }; 690 691 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ 692 sizeof(rte_ixgbe_stats_strings[0])) 693 694 /* MACsec statistics */ 695 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = { 696 {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats, 697 out_pkts_untagged)}, 698 {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats, 699 out_pkts_encrypted)}, 700 {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats, 701 out_pkts_protected)}, 702 {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats, 703 out_octets_encrypted)}, 704 {"out_octets_protected", offsetof(struct ixgbe_macsec_stats, 705 out_octets_protected)}, 706 {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats, 707 in_pkts_untagged)}, 708 {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats, 709 in_pkts_badtag)}, 710 {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats, 711 in_pkts_nosci)}, 712 {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats, 713 in_pkts_unknownsci)}, 714 {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats, 715 in_octets_decrypted)}, 716 {"in_octets_validated", offsetof(struct ixgbe_macsec_stats, 717 in_octets_validated)}, 718 {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats, 719 in_pkts_unchecked)}, 720 {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats, 721 in_pkts_delayed)}, 722 {"in_pkts_late", offsetof(struct ixgbe_macsec_stats, 723 in_pkts_late)}, 724 {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats, 725 in_pkts_ok)}, 726 {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats, 727 in_pkts_invalid)}, 728 {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats, 729 in_pkts_notvalid)}, 730 {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats, 731 in_pkts_unusedsa)}, 732 {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats, 733 in_pkts_notusingsa)}, 734 }; 735 736 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \ 737 sizeof(rte_ixgbe_macsec_strings[0])) 738 739 /* Per-queue statistics */ 740 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { 741 {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, 742 {"dropped", offsetof(struct ixgbe_hw_stats, mpc)}, 743 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)}, 744 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)}, 745 }; 746 747 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ 748 sizeof(rte_ixgbe_rxq_strings[0])) 749 #define IXGBE_NB_RXQ_PRIO_VALUES 8 750 751 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { 
752 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, 753 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)}, 754 {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, 755 pxon2offc)}, 756 }; 757 758 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ 759 sizeof(rte_ixgbe_txq_strings[0])) 760 #define IXGBE_NB_TXQ_PRIO_VALUES 8 761 762 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { 763 {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, 764 }; 765 766 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ 767 sizeof(rte_ixgbevf_stats_strings[0])) 768 769 /* 770 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. 771 */ 772 static inline int 773 ixgbe_is_sfp(struct ixgbe_hw *hw) 774 { 775 switch (hw->phy.type) { 776 case ixgbe_phy_sfp_avago: 777 case ixgbe_phy_sfp_ftl: 778 case ixgbe_phy_sfp_intel: 779 case ixgbe_phy_sfp_unknown: 780 case ixgbe_phy_sfp_passive_tyco: 781 case ixgbe_phy_sfp_passive_unknown: 782 return 1; 783 default: 784 return 0; 785 } 786 } 787 788 static inline int32_t 789 ixgbe_pf_reset_hw(struct ixgbe_hw *hw) 790 { 791 uint32_t ctrl_ext; 792 int32_t status; 793 794 status = ixgbe_reset_hw(hw); 795 796 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 797 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 798 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 799 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 800 IXGBE_WRITE_FLUSH(hw); 801 802 if (status == IXGBE_ERR_SFP_NOT_PRESENT) 803 status = IXGBE_SUCCESS; 804 return status; 805 } 806 807 static inline void 808 ixgbe_enable_intr(struct rte_eth_dev *dev) 809 { 810 struct ixgbe_interrupt *intr = 811 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 812 struct ixgbe_hw *hw = 813 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 814 815 IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); 816 IXGBE_WRITE_FLUSH(hw); 817 } 818 819 /* 820 * This function is based on ixgbe_disable_intr() in base/ixgbe.h. 821 */ 822 static void 823 ixgbe_disable_intr(struct ixgbe_hw *hw) 824 { 825 PMD_INIT_FUNC_TRACE(); 826 827 if (hw->mac.type == ixgbe_mac_82598EB) { 828 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); 829 } else { 830 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); 831 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); 832 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); 833 } 834 IXGBE_WRITE_FLUSH(hw); 835 } 836 837 /* 838 * This function resets queue statistics mapping registers. 839 * From Niantic datasheet, Initialization of Statistics section: 840 * "...if software requires the queue counters, the RQSMR and TQSM registers 841 * must be re-programmed following a device reset. 
/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
        uint32_t i;

        for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
}


static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
        uint8_t n, offset;

        if ((hw->mac.type != ixgbe_mac_82599EB) &&
            (hw->mac.type != ixgbe_mac_X540) &&
            (hw->mac.type != ixgbe_mac_X550) &&
            (hw->mac.type != ixgbe_mac_X550EM_x) &&
            (hw->mac.type != ixgbe_mac_X550EM_a))
                return -ENOSYS;

        PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);

        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] &= ~clearing_mask;
        else
                stat_mappings->rqsmr[n] &= ~clearing_mask;

        q_map = (uint32_t)stat_idx;
        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] |= qsmr_mask;
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;

        PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
        PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

        /* Now write the mapping in the appropriate register */
        if (is_rx) {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        } else {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
        return 0;
}
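/*
 * Worked example of the field math above: each RQSMR/TQSM register packs
 * NB_QMAP_FIELDS_PER_QSM_REG = 4 one-byte mappings, so queue_id 5 lands
 * in register n = 5 / 4 = 1 at offset = 5 % 4 = 1, i.e. its stat_idx
 * occupies bits 15:8 of RQSMR(1) (or TQSM(1) on the TX side).
 */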
static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i;

        /* write whatever was in stat mapping table to the NIC */
        for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
                /* rx */
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

                /* tx */
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
        }
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
        uint8_t i;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

        dcb_config->num_tcs.pg_tcs = dcb_max_tc;
        dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
        for (i = 0; i < dcb_max_tc; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
                                 (uint8_t)(100 / dcb_max_tc + (i & 1));
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                 (uint8_t)(100 / dcb_max_tc + (i & 1));
                tc->pfc = ixgbe_dcb_pfc_disabled;
        }

        /* Initialize default user to priority mapping, UPx->TC0 */
        tc = &dcb_config->tc_config[0];
        tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
        tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
        for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
                dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
                dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
        }
        dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
        dcb_config->pfc_mode_enable = false;
        dcb_config->vt_mode = true;
        dcb_config->round_robin_enable = false;
        /* support all DCB capabilities in 82599 */
        dcb_config->support.capabilities = 0xFF;

        /* We only support 4 TCs for X540 and X550. */
        if (hw->mac.type == ixgbe_mac_X540 ||
            hw->mac.type == ixgbe_mac_X550 ||
            hw->mac.type == ixgbe_mac_X550EM_x ||
            hw->mac.type == ixgbe_mac_X550EM_a) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
}
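/*
 * Worked example of the bwg_percent computation above: with
 * dcb_max_tc = 8, 100 / 8 = 12 and (i & 1) adds 1 for odd TCs, yielding
 * 12 + 13 + 12 + 13 + 12 + 13 + 12 + 13 = 100, so the per-TC shares
 * always sum to exactly 100 percent.
 */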
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
        uint16_t mask;

        /*
         * The PHY lock should not fail at this early stage. If it does, it
         * is due to an improper exit of the application, so force the
         * release of the faulty lock. Release of the common lock is done
         * automatically by the swfw_sync function.
         */
        mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
        }
        ixgbe_release_swfw_semaphore(hw, mask);

        /*
         * These locks are trickier since they are common to all ports; but
         * swfw_sync retries for long enough (1s) to be almost sure that, if
         * a lock cannot be taken, it is due to an improper lock of the
         * semaphore.
         */
        mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");
        }
        ixgbe_release_swfw_semaphore(hw, mask);
}

/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct ixgbe_adapter *ad = eth_dev->data->dev_private;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
                IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        struct ixgbe_bw_conf *bw_conf =
                IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i, ret;

        PMD_INIT_FUNC_TRACE();

        ixgbe_dev_macsec_setting_reset(eth_dev);

        eth_dev->dev_ops = &ixgbe_eth_dev_ops;
        eth_dev->rx_queue_count       = ixgbe_dev_rx_queue_count;
        eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
        eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ixgbe_tx_queue *txq;
                /* TX queue function in primary, set by last queue initialized;
                 * the Tx queue may not have been initialized by the primary
                 * process.
                 */
                if (eth_dev->data->tx_queues) {
                        txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
                        ixgbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default TX function if we get here */
                        PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
                                     "Using default TX function.");
                }

                ixgbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_atomic32_clear(&ad->link_thread_running);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->allow_unsupported_sfp = 1;

        /* Initialize the shared code (base driver) */
#ifdef RTE_LIBRTE_IXGBE_BYPASS
        diag = ixgbe_bypass_init_shared_code(hw);
#else
        diag = ixgbe_init_shared_code(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
                return -EIO;
        }

        if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
                PMD_INIT_LOG(ERR, "\nERROR: "
                             "Firmware recovery mode detected. Limiting functionality.\n"
                             "Refer to the Intel(R) Ethernet Adapters and Devices "
                             "User Guide for details on firmware recovery mode.");
                return -EIO;
        }

        /* pick up the PCI bus settings for reporting later */
        ixgbe_get_bus_info(hw);

        /* Unlock any pending hardware semaphore */
        ixgbe_swfw_lock_reset(hw);

#ifdef RTE_LIB_SECURITY
        /* Initialize security_ctx only for primary process */
        if (ixgbe_ipsec_ctx_create(eth_dev))
                return -ENOMEM;
#endif

        /* Initialize DCB configuration */
        memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
        ixgbe_dcb_init(hw, dcb_config);
        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = ixgbe_fc_none;
        hw->fc.current_mode = ixgbe_fc_none;
        hw->fc.pause_time = IXGBE_FC_PAUSE;
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                hw->fc.low_water[i] = IXGBE_FC_LO;
                hw->fc.high_water[i] = IXGBE_FC_HI;
        }
        hw->fc.send_xon = 1;

        /* Make sure we have a good EEPROM before we read from it */
        diag = ixgbe_validate_eeprom_checksum(hw, &csum);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
                return -EIO;
        }

#ifdef RTE_LIBRTE_IXGBE_BYPASS
        diag = ixgbe_bypass_init_hw(hw);
#else
        diag = ixgbe_init_hw(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

        /*
         * Devices with copper phys will fail to initialise if ixgbe_init_hw()
         * is called too soon after the kernel driver unbinding/binding occurs.
         * The failure occurs in ixgbe_identify_phy_generic() for all devices,
         * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
         * also called. See ixgbe_identify_phy_82599(). The reason for the
         * failure is not known, and it only occurs when virtualisation
         * features are disabled in the BIOS. A delay of 100ms was found to be
         * enough by trial-and-error, and is doubled to be safe.
         */
        if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
                rte_delay_ms(200);
                diag = ixgbe_init_hw(hw);
        }

        if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
                diag = IXGBE_SUCCESS;

        if (diag == IXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                             "LOM. Please be aware there may be issues associated "
                             "with your hardware.");
                PMD_INIT_LOG(ERR, "If you are experiencing problems "
                             "please contact your Intel or hardware representative "
                             "who provided you with this hardware.");
        } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        if (diag) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
                return -EIO;
        }

        /* Reset the hw statistics */
        ixgbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        ixgbe_disable_intr(hw);

        /* reset mappings for queue statistics hw counters */
        ixgbe_reset_qstat_mappings(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store "
                             "MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }
        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                            &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc(
                "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the hw strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        /* initialize PF if max_vfs is not zero */
        ret = ixgbe_pf_host_init(eth_dev);
        if (ret) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                rte_free(eth_dev->data->hash_mac_addrs);
                eth_dev->data->hash_mac_addrs = NULL;
                return ret;
        }

        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        /* let hardware know driver is loaded */
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
        IXGBE_WRITE_FLUSH(hw);

        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int)hw->mac.type, (int)hw->phy.type,
                             (int)hw->phy.sfp_type);
        else
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   ixgbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable support intr */
        ixgbe_enable_intr(eth_dev);

        /* initialize filter info */
        memset(filter_info, 0, sizeof(struct ixgbe_filter_info));

        /* initialize 5tuple filter list */
        TAILQ_INIT(&filter_info->fivetuple_list);

        /* initialize flow director filter list & hash */
        ixgbe_fdir_filter_init(eth_dev);

        /* initialize l2 tunnel filter list & hash */
        ixgbe_l2_tn_filter_init(eth_dev);

        /* initialize flow filter lists */
        ixgbe_filterlist_init();

        /* initialize bandwidth configuration info */
        memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));

        /* initialize Traffic Manager configuration */
        ixgbe_tm_conf_init(eth_dev);

        return 0;
}

static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ixgbe_dev_close(eth_dev);

        return 0;
}

static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        struct ixgbe_5tuple_filter *p_5tuple;

        while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
                TAILQ_REMOVE(&filter_info->fivetuple_list,
                             p_5tuple,
                             entries);
                rte_free(p_5tuple);
        }
        memset(filter_info->fivetuple_mask, 0,
               sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);

        return 0;
}

static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
        struct ixgbe_hw_fdir_info *fdir_info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
        struct ixgbe_fdir_filter *fdir_filter;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_handle)
                rte_hash_free(fdir_info->hash_handle);

        while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
                TAILQ_REMOVE(&fdir_info->fdir_list,
                             fdir_filter,
                             entries);
                rte_free(fdir_filter);
        }

        return 0;
}

static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
        struct ixgbe_l2_tn_filter *l2_tn_filter;

        if (l2_tn_info->hash_map)
                rte_free(l2_tn_info->hash_map);
        if (l2_tn_info->hash_handle)
                rte_hash_free(l2_tn_info->hash_handle);

        while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
                TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
                             l2_tn_filter,
                             entries);
                rte_free(l2_tn_filter);
        }

        return 0;
}

static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
        struct ixgbe_hw_fdir_info *fdir_info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = IXGBE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(union ixgbe_atr_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        TAILQ_INIT(&fdir_info->fdir_list);
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", eth_dev->device->name);
        fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_handle) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ixgbe",
                                          sizeof(struct ixgbe_fdir_filter *) *
                                          IXGBE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                rte_hash_free(fdir_info->hash_handle);
                return -ENOMEM;
        }
        fdir_info->mask_added = FALSE;

        return 0;
}
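/*
 * Note on the structure built above: hash_map is a flat array with one
 * slot per possible flow director filter (IXGBE_MAX_FDIR_FILTER_NUM). The
 * position that rte_hash_add_key() returns for a key on hash_handle is
 * used elsewhere in the driver as the index into this array, so a matched
 * key resolves to its ixgbe_fdir_filter object in O(1).
 */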
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
{
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
        char l2_tn_hash_name[RTE_HASH_NAMESIZE];
        struct rte_hash_parameters l2_tn_hash_params = {
                .name = l2_tn_hash_name,
                .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
                .key_len = sizeof(struct ixgbe_l2_tn_key),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        TAILQ_INIT(&l2_tn_info->l2_tn_list);
        snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
                 "l2_tn_%s", eth_dev->device->name);
        l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
        if (!l2_tn_info->hash_handle) {
                PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
                return -EINVAL;
        }
        l2_tn_info->hash_map = rte_zmalloc("ixgbe",
                                           sizeof(struct ixgbe_l2_tn_filter *) *
                                           IXGBE_MAX_L2_TN_FILTER_NUM,
                                           0);
        if (!l2_tn_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for L2 TN hash map!");
                rte_hash_free(l2_tn_info->hash_handle);
                return -ENOMEM;
        }
        l2_tn_info->e_tag_en = FALSE;
        l2_tn_info->e_tag_fwd_en = FALSE;
        l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;

        return 0;
}

/*
 * Negotiate mailbox API version with the PF.
 * After reset, the API version is always set to the basic one
 * (ixgbe_mbox_api_10). We then try to negotiate, starting with the most
 * recent one. If all negotiation attempts fail, we proceed with the
 * default one (ixgbe_mbox_api_10).
 */
static void
ixgbevf_negotiate_api(struct ixgbe_hw *hw)
{
        int32_t i;

        /* start with highest supported, proceed down */
        static const enum ixgbe_pfvf_api_rev sup_ver[] = {
                ixgbe_mbox_api_13,
                ixgbe_mbox_api_12,
                ixgbe_mbox_api_11,
                ixgbe_mbox_api_10,
        };

        for (i = 0;
             i != RTE_DIM(sup_ver) &&
             ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
             i++)
                ;
}

static void
generate_random_mac_addr(struct rte_ether_addr *mac_addr)
{
        uint64_t random;

        /* Set Organizationally Unique Identifier (OUI) prefix. */
        mac_addr->addr_bytes[0] = 0x00;
        mac_addr->addr_bytes[1] = 0x09;
        mac_addr->addr_bytes[2] = 0xC0;
        /* Force indication of locally assigned MAC address. */
        mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
        /* Generate the last 3 bytes of the MAC address with a random number. */
        random = rte_rand();
        memcpy(&mac_addr->addr_bytes[3], &random, 3);
}
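/*
 * Addresses produced above therefore look like 02:09:C0:xx:xx:xx:
 * 0x00 | RTE_ETHER_LOCAL_ADMIN_ADDR (0x02) sets the locally-administered
 * bit in the first octet, and the trailing xx:xx:xx bytes come from
 * rte_rand().
 */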
*/ 1475 random = rte_rand(); 1476 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1477 } 1478 1479 static int 1480 devarg_handle_int(__rte_unused const char *key, const char *value, 1481 void *extra_args) 1482 { 1483 uint16_t *n = extra_args; 1484 1485 if (value == NULL || extra_args == NULL) 1486 return -EINVAL; 1487 1488 *n = (uint16_t)strtoul(value, NULL, 0); 1489 if (*n == USHRT_MAX && errno == ERANGE) 1490 return -1; 1491 1492 return 0; 1493 } 1494 1495 static void 1496 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, 1497 struct rte_devargs *devargs) 1498 { 1499 struct rte_kvargs *kvlist; 1500 uint16_t pflink_fullchk; 1501 1502 if (devargs == NULL) 1503 return; 1504 1505 kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); 1506 if (kvlist == NULL) 1507 return; 1508 1509 if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && 1510 rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, 1511 devarg_handle_int, &pflink_fullchk) == 0 && 1512 pflink_fullchk == 1) 1513 adapter->pflink_fullchk = 1; 1514 1515 rte_kvargs_free(kvlist); 1516 } 1517 1518 /* 1519 * Virtual Function device init 1520 */ 1521 static int 1522 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1523 { 1524 int diag; 1525 uint32_t tc, tcs; 1526 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1527 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1528 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1529 struct ixgbe_hw *hw = 1530 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1531 struct ixgbe_vfta *shadow_vfta = 1532 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1533 struct ixgbe_hwstrip *hwstrip = 1534 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1535 struct rte_ether_addr *perm_addr = 1536 (struct rte_ether_addr *)hw->mac.perm_addr; 1537 1538 PMD_INIT_FUNC_TRACE(); 1539 1540 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1541 eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status; 1542 eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status; 1543 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1544 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1545 1546 /* for secondary processes, we don't initialise any further as primary 1547 * has already done this work. Only check we don't need a different 1548 * RX function 1549 */ 1550 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1551 struct ixgbe_tx_queue *txq; 1552 /* The Tx queue function in the primary process is set by the last 1553 * queue initialized; that queue may not have been initialized by 1554 * the primary process yet. */ 1555 if (eth_dev->data->tx_queues) { 1556 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1557 ixgbe_set_tx_function(eth_dev, txq); 1558 } else { 1559 /* Use default TX function if we get here */ 1560 PMD_INIT_LOG(NOTICE, 1561 "No TX queues configured yet."
Using default TX function."); 1562 } 1563 1564 ixgbe_set_rx_function(eth_dev); 1565 1566 return 0; 1567 } 1568 1569 rte_atomic32_clear(&ad->link_thread_running); 1570 ixgbevf_parse_devargs(eth_dev->data->dev_private, 1571 pci_dev->device.devargs); 1572 1573 rte_eth_copy_pci_info(eth_dev, pci_dev); 1574 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1575 1576 hw->device_id = pci_dev->id.device_id; 1577 hw->vendor_id = pci_dev->id.vendor_id; 1578 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1579 1580 /* initialize the vfta */ 1581 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1582 1583 /* initialize the hw strip bitmap*/ 1584 memset(hwstrip, 0, sizeof(*hwstrip)); 1585 1586 /* Initialize the shared code (base driver) */ 1587 diag = ixgbe_init_shared_code(hw); 1588 if (diag != IXGBE_SUCCESS) { 1589 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1590 return -EIO; 1591 } 1592 1593 /* init_mailbox_params */ 1594 hw->mbx.ops.init_params(hw); 1595 1596 /* Reset the hw statistics */ 1597 ixgbevf_dev_stats_reset(eth_dev); 1598 1599 /* Disable the interrupts for VF */ 1600 ixgbevf_intr_disable(eth_dev); 1601 1602 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1603 diag = hw->mac.ops.reset_hw(hw); 1604 1605 /* 1606 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1607 * the underlying PF driver has not assigned a MAC address to the VF. 1608 * In this case, assign a random MAC address. 1609 */ 1610 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1611 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1612 /* 1613 * This error code will be propagated to the app by 1614 * rte_eth_dev_reset, so use a public error code rather than 1615 * the internal-only IXGBE_ERR_RESET_FAILED 1616 */ 1617 return -EAGAIN; 1618 } 1619 1620 /* negotiate mailbox API version to use with the PF. */ 1621 ixgbevf_negotiate_api(hw); 1622 1623 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1624 ixgbevf_get_queues(hw, &tcs, &tc); 1625 1626 /* Allocate memory for storing MAC addresses */ 1627 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * 1628 hw->mac.num_rar_entries, 0); 1629 if (eth_dev->data->mac_addrs == NULL) { 1630 PMD_INIT_LOG(ERR, 1631 "Failed to allocate %u bytes needed to store " 1632 "MAC addresses", 1633 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1634 return -ENOMEM; 1635 } 1636 1637 /* Generate a random MAC address, if none was assigned by PF. 
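 * The PF leaves hw->mac.perm_addr all-zero when it has not provisioned an address for this VF; rte_is_zero_ether_addr() below detects that case.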
*/ 1638 if (rte_is_zero_ether_addr(perm_addr)) { 1639 generate_random_mac_addr(perm_addr); 1640 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); 1641 if (diag) { 1642 rte_free(eth_dev->data->mac_addrs); 1643 eth_dev->data->mac_addrs = NULL; 1644 return diag; 1645 } 1646 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 1647 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 1648 RTE_ETHER_ADDR_PRT_FMT, 1649 RTE_ETHER_ADDR_BYTES(perm_addr)); 1650 } 1651 1652 /* Copy the permanent MAC address */ 1653 rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]); 1654 1655 /* reset the hardware with the new settings */ 1656 diag = hw->mac.ops.start_hw(hw); 1657 switch (diag) { 1658 case 0: 1659 break; 1660 1661 default: 1662 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1663 rte_free(eth_dev->data->mac_addrs); 1664 eth_dev->data->mac_addrs = NULL; 1665 return -EIO; 1666 } 1667 1668 rte_intr_callback_register(intr_handle, 1669 ixgbevf_dev_interrupt_handler, eth_dev); 1670 rte_intr_enable(intr_handle); 1671 ixgbevf_intr_enable(eth_dev); 1672 1673 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", 1674 eth_dev->data->port_id, pci_dev->id.vendor_id, 1675 pci_dev->id.device_id, "ixgbe_mac_82599_vf"); 1676 1677 return 0; 1678 } 1679 1680 /* Virtual Function device uninit */ 1681 1682 static int 1683 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) 1684 { 1685 PMD_INIT_FUNC_TRACE(); 1686 1687 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1688 return 0; 1689 1690 ixgbevf_dev_close(eth_dev); 1691 1692 return 0; 1693 } 1694 1695 static int 1696 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1697 struct rte_pci_device *pci_dev) 1698 { 1699 char name[RTE_ETH_NAME_MAX_LEN]; 1700 struct rte_eth_dev *pf_ethdev; 1701 struct rte_eth_devargs eth_da; 1702 int i, retval; 1703 1704 if (pci_dev->device.devargs) { 1705 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, 1706 &eth_da); 1707 if (retval) 1708 return retval; 1709 } else 1710 memset(&eth_da, 0, sizeof(eth_da)); 1711 1712 if (eth_da.nb_representor_ports > 0 && 1713 eth_da.type != RTE_ETH_REPRESENTOR_VF) { 1714 PMD_DRV_LOG(ERR, "unsupported representor type: %s\n", 1715 pci_dev->device.devargs->args); 1716 return -ENOTSUP; 1717 } 1718 1719 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 1720 sizeof(struct ixgbe_adapter), 1721 eth_dev_pci_specific_init, pci_dev, 1722 eth_ixgbe_dev_init, NULL); 1723 1724 if (retval || eth_da.nb_representor_ports < 1) 1725 return retval; 1726 1727 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1728 if (pf_ethdev == NULL) 1729 return -ENODEV; 1730 1731 /* probe VF representor ports */ 1732 for (i = 0; i < eth_da.nb_representor_ports; i++) { 1733 struct ixgbe_vf_info *vfinfo; 1734 struct ixgbe_vf_representor representor; 1735 1736 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( 1737 pf_ethdev->data->dev_private); 1738 if (vfinfo == NULL) { 1739 PMD_DRV_LOG(ERR, 1740 "no virtual functions supported by PF"); 1741 break; 1742 } 1743 1744 representor.vf_id = eth_da.representor_ports[i]; 1745 representor.switch_domain_id = vfinfo->switch_domain_id; 1746 representor.pf_ethdev = pf_ethdev; 1747 1748 /* representor port name: net_<bdf>_representor_<port> */ 1749 snprintf(name, sizeof(name), "net_%s_representor_%d", 1750 pci_dev->device.name, 1751 eth_da.representor_ports[i]); 1752 1753 retval = rte_eth_dev_create(&pci_dev->device, name, 1754 sizeof(struct ixgbe_vf_representor), NULL, NULL, 1755 ixgbe_vf_representor_init, &representor); 1756
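/* Illustrative example of the naming scheme above (hypothetical BDF): a PF probed at 0000:01:00.0 with devargs "representor=[0-1]" yields ethdevs named "net_0000:01:00.0_representor_0" and "net_0000:01:00.0_representor_1". */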
1757 if (retval) 1758 PMD_DRV_LOG(ERR, "failed to create ixgbe vf " 1759 "representor %s.", name); 1760 } 1761 1762 return 0; 1763 } 1764 1765 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) 1766 { 1767 struct rte_eth_dev *ethdev; 1768 1769 ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1770 if (!ethdev) 1771 return 0; 1772 1773 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 1774 return rte_eth_dev_pci_generic_remove(pci_dev, 1775 ixgbe_vf_representor_uninit); 1776 else 1777 return rte_eth_dev_pci_generic_remove(pci_dev, 1778 eth_ixgbe_dev_uninit); 1779 } 1780 1781 static struct rte_pci_driver rte_ixgbe_pmd = { 1782 .id_table = pci_id_ixgbe_map, 1783 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1784 .probe = eth_ixgbe_pci_probe, 1785 .remove = eth_ixgbe_pci_remove, 1786 }; 1787 1788 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1789 struct rte_pci_device *pci_dev) 1790 { 1791 return rte_eth_dev_pci_generic_probe(pci_dev, 1792 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); 1793 } 1794 1795 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) 1796 { 1797 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); 1798 } 1799 1800 /* 1801 * virtual function driver struct 1802 */ 1803 static struct rte_pci_driver rte_ixgbevf_pmd = { 1804 .id_table = pci_id_ixgbevf_map, 1805 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 1806 .probe = eth_ixgbevf_pci_probe, 1807 .remove = eth_ixgbevf_pci_remove, 1808 }; 1809 1810 static int 1811 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1812 { 1813 struct ixgbe_hw *hw = 1814 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1815 struct ixgbe_vfta *shadow_vfta = 1816 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1817 uint32_t vfta; 1818 uint32_t vid_idx; 1819 uint32_t vid_bit; 1820 1821 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1822 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1823 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1824 if (on) 1825 vfta |= vid_bit; 1826 else 1827 vfta &= ~vid_bit; 1828 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1829 1830 /* update local VFTA copy */ 1831 shadow_vfta->vfta[vid_idx] = vfta; 1832 1833 return 0; 1834 } 1835 1836 static void 1837 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1838 { 1839 if (on) 1840 ixgbe_vlan_hw_strip_enable(dev, queue); 1841 else 1842 ixgbe_vlan_hw_strip_disable(dev, queue); 1843 } 1844 1845 static int 1846 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1847 enum rte_vlan_type vlan_type, 1848 uint16_t tpid) 1849 { 1850 struct ixgbe_hw *hw = 1851 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1852 int ret = 0; 1853 uint32_t reg; 1854 uint32_t qinq; 1855 1856 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1857 qinq &= IXGBE_DMATXCTL_GDV; 1858 1859 switch (vlan_type) { 1860 case RTE_ETH_VLAN_TYPE_INNER: 1861 if (qinq) { 1862 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1863 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1864 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1865 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1866 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1867 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1868 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1869 } else { 1870 ret = -ENOTSUP; 1871 PMD_DRV_LOG(ERR, "Inner type is not supported" 1872 " by single VLAN"); 1873 } 1874 break; 1875 case RTE_ETH_VLAN_TYPE_OUTER: 1876 if (qinq) { 1877 /* Only the high 16-bits is valid */ 1878 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 
1879 IXGBE_EXVET_VET_EXT_SHIFT); 1880 } else { 1881 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1882 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1883 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1884 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1885 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1886 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1887 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1888 } 1889 1890 break; 1891 default: 1892 ret = -EINVAL; 1893 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1894 break; 1895 } 1896 1897 return ret; 1898 } 1899 1900 void 1901 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1902 { 1903 struct ixgbe_hw *hw = 1904 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1905 uint32_t vlnctrl; 1906 1907 PMD_INIT_FUNC_TRACE(); 1908 1909 /* Filter Table Disable */ 1910 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1911 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1912 1913 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1914 } 1915 1916 void 1917 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1918 { 1919 struct ixgbe_hw *hw = 1920 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1921 struct ixgbe_vfta *shadow_vfta = 1922 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1923 uint32_t vlnctrl; 1924 uint16_t i; 1925 1926 PMD_INIT_FUNC_TRACE(); 1927 1928 /* Filter Table Enable */ 1929 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1930 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1931 vlnctrl |= IXGBE_VLNCTRL_VFE; 1932 1933 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1934 1935 /* write whatever is in local vfta copy */ 1936 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1937 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 1938 } 1939 1940 static void 1941 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 1942 { 1943 struct ixgbe_hwstrip *hwstrip = 1944 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 1945 struct ixgbe_rx_queue *rxq; 1946 1947 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 1948 return; 1949 1950 if (on) 1951 IXGBE_SET_HWSTRIP(hwstrip, queue); 1952 else 1953 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 1954 1955 if (queue >= dev->data->nb_rx_queues) 1956 return; 1957 1958 rxq = dev->data->rx_queues[queue]; 1959 1960 if (on) { 1961 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 1962 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 1963 } else { 1964 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN; 1965 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 1966 } 1967 } 1968 1969 static void 1970 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 1971 { 1972 struct ixgbe_hw *hw = 1973 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1974 uint32_t ctrl; 1975 1976 PMD_INIT_FUNC_TRACE(); 1977 1978 if (hw->mac.type == ixgbe_mac_82598EB) { 1979 /* No queue-level support */ 1980 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip"); 1981 return; 1982 } 1983 1984 /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */ 1985 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 1986 ctrl &= ~IXGBE_RXDCTL_VME; 1987 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 1988 1989 /* record the setting for HW strip per queue */ 1990 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 1991 } 1992 1993 static void 1994 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 1995 { 1996 struct ixgbe_hw *hw = 1997 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1998 uint32_t ctrl; 1999 2000 PMD_INIT_FUNC_TRACE(); 2001 2002 if (hw->mac.type == ixgbe_mac_82598EB) { 2003 /* No queue-level support */ 2004
PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip"); 2005 return; 2006 } 2007 2008 /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */ 2009 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2010 ctrl |= IXGBE_RXDCTL_VME; 2011 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2012 2013 /* record the setting for HW strip per queue */ 2014 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); 2015 } 2016 2017 static void 2018 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2019 { 2020 struct ixgbe_hw *hw = 2021 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2022 uint32_t ctrl; 2023 2024 PMD_INIT_FUNC_TRACE(); 2025 2026 /* DMATXCTL: Generic Double VLAN Disable */ 2027 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2028 ctrl &= ~IXGBE_DMATXCTL_GDV; 2029 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2030 2031 /* CTRL_EXT: Global Double VLAN Disable */ 2032 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2033 ctrl &= ~IXGBE_EXTENDED_VLAN; 2034 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2035 2036 } 2037 2038 static void 2039 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2040 { 2041 struct ixgbe_hw *hw = 2042 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2043 uint32_t ctrl; 2044 2045 PMD_INIT_FUNC_TRACE(); 2046 2047 /* DMATXCTL: Generic Double VLAN Enable */ 2048 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2049 ctrl |= IXGBE_DMATXCTL_GDV; 2050 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2051 2052 /* CTRL_EXT: Global Double VLAN Enable */ 2053 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2054 ctrl |= IXGBE_EXTENDED_VLAN; 2055 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2056 2057 /* Clear pooling mode of PFVTCTL. It's required by X550. */ 2058 if (hw->mac.type == ixgbe_mac_X550 || 2059 hw->mac.type == ixgbe_mac_X550EM_x || 2060 hw->mac.type == ixgbe_mac_X550EM_a) { 2061 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 2062 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 2063 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 2064 } 2065 2066 /* 2067 * The VET EXT field in the EXVET register is 0x8100 by default, 2068 * so there is no need to change it.
The same applies to the VT field of the DMATXCTL register. */ 2069 2070 } 2071 2072 void 2073 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) 2074 { 2075 struct ixgbe_hw *hw = 2076 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2077 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 2078 uint32_t ctrl; 2079 uint16_t i; 2080 struct ixgbe_rx_queue *rxq; 2081 bool on; 2082 2083 PMD_INIT_FUNC_TRACE(); 2084 2085 if (hw->mac.type == ixgbe_mac_82598EB) { 2086 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { 2087 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2088 ctrl |= IXGBE_VLNCTRL_VME; 2089 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2090 } else { 2091 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2092 ctrl &= ~IXGBE_VLNCTRL_VME; 2093 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2094 } 2095 } else { 2096 /* 2097 * On other 10G NICs, VLAN strip can be set up 2098 * per queue in RXDCTL 2099 */ 2100 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2101 rxq = dev->data->rx_queues[i]; 2102 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 2103 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { 2104 ctrl |= IXGBE_RXDCTL_VME; 2105 on = TRUE; 2106 } else { 2107 ctrl &= ~IXGBE_RXDCTL_VME; 2108 on = FALSE; 2109 } 2110 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 2111 2112 /* record the setting for HW strip per queue */ 2113 ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); 2114 } 2115 } 2116 } 2117 2118 static void 2119 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) 2120 { 2121 uint16_t i; 2122 struct rte_eth_rxmode *rxmode; 2123 struct ixgbe_rx_queue *rxq; 2124 2125 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2126 rxmode = &dev->data->dev_conf.rxmode; 2127 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 2128 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2129 rxq = dev->data->rx_queues[i]; 2130 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 2131 } 2132 else 2133 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2134 rxq = dev->data->rx_queues[i]; 2135 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 2136 } 2137 } 2138 } 2139 2140 static int 2141 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2142 { 2143 struct rte_eth_rxmode *rxmode; 2144 rxmode = &dev->data->dev_conf.rxmode; 2145 2146 if (mask & RTE_ETH_VLAN_STRIP_MASK) 2147 ixgbe_vlan_hw_strip_config(dev); 2148 2149 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2150 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 2151 ixgbe_vlan_hw_filter_enable(dev); 2152 else 2153 ixgbe_vlan_hw_filter_disable(dev); 2154 } 2155 2156 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2157 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2158 ixgbe_vlan_hw_extend_enable(dev); 2159 else 2160 ixgbe_vlan_hw_extend_disable(dev); 2161 } 2162 2163 return 0; 2164 } 2165 2166 static int 2167 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2168 { 2169 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 2170 2171 ixgbe_vlan_offload_config(dev, mask); 2172 2173 return 0; 2174 } 2175 2176 static void 2177 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2178 { 2179 struct ixgbe_hw *hw = 2180 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2181 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 2182 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2183 2184 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 2185 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 2186 } 2187 2188 static int 2189 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 2190 { 2191 struct rte_pci_device *pci_dev
= RTE_ETH_DEV_TO_PCI(dev); 2192 2193 switch (nb_rx_q) { 2194 case 1: 2195 case 2: 2196 RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS; 2197 break; 2198 case 4: 2199 RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS; 2200 break; 2201 default: 2202 return -EINVAL; 2203 } 2204 2205 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 2206 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2207 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 2208 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2209 return 0; 2210 } 2211 2212 static int 2213 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 2214 { 2215 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 2216 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2217 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2218 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2219 2220 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 2221 /* check multi-queue mode */ 2222 switch (dev_conf->rxmode.mq_mode) { 2223 case RTE_ETH_MQ_RX_VMDQ_DCB: 2224 PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); 2225 break; 2226 case RTE_ETH_MQ_RX_VMDQ_DCB_RSS: 2227 /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */ 2228 PMD_INIT_LOG(ERR, "SRIOV active," 2229 " unsupported mq_mode rx %d.", 2230 dev_conf->rxmode.mq_mode); 2231 return -EINVAL; 2232 case RTE_ETH_MQ_RX_RSS: 2233 case RTE_ETH_MQ_RX_VMDQ_RSS: 2234 dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS; 2235 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 2236 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 2237 PMD_INIT_LOG(ERR, "SRIOV is active," 2238 " invalid queue number" 2239 " for VMDQ RSS, allowed" 2240 " values are 1, 2 or 4."); 2241 return -EINVAL; 2242 } 2243 break; 2244 case RTE_ETH_MQ_RX_VMDQ_ONLY: 2245 case RTE_ETH_MQ_RX_NONE: 2246 /* if no mq mode was configured, use the default scheme */ 2247 dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY; 2248 break; 2249 default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */ 2250 /* SRIOV only works in VMDq enable mode */ 2251 PMD_INIT_LOG(ERR, "SRIOV is active," 2252 " wrong mq_mode rx %d.", 2253 dev_conf->rxmode.mq_mode); 2254 return -EINVAL; 2255 } 2256 2257 switch (dev_conf->txmode.mq_mode) { 2258 case RTE_ETH_MQ_TX_VMDQ_DCB: 2259 PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); 2260 dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB; 2261 break; 2262 default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */ 2263 dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY; 2264 break; 2265 } 2266 2267 /* check valid queue number */ 2268 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 2269 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 2270 PMD_INIT_LOG(ERR, "SRIOV is active," 2271 " nb_rx_q=%d nb_tx_q=%d queue number" 2272 " must be less than or equal to %d.", 2273 nb_rx_q, nb_tx_q, 2274 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2275 return -EINVAL; 2276 } 2277 } else { 2278 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) { 2279 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2280 " not supported."); 2281 return -EINVAL; 2282 } 2283 /* check configuration for VMDq+DCB mode */ 2284 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) { 2285 const struct rte_eth_vmdq_dcb_conf *conf; 2286 2287 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2288 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2289 IXGBE_VMDQ_DCB_NB_QUEUES); 2290 return -EINVAL; 2291 } 2292 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2293 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || 2294
conf->nb_queue_pools == RTE_ETH_32_POOLS)) { 2295 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2296 " nb_queue_pools must be %d or %d.", 2297 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); 2298 return -EINVAL; 2299 } 2300 } 2301 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) { 2302 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2303 2304 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2305 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2306 IXGBE_VMDQ_DCB_NB_QUEUES); 2307 return -EINVAL; 2308 } 2309 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2310 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || 2311 conf->nb_queue_pools == RTE_ETH_32_POOLS)) { 2312 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2313 " nb_queue_pools != %d and" 2314 " nb_queue_pools != %d.", 2315 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); 2316 return -EINVAL; 2317 } 2318 } 2319 2320 /* For DCB mode check our configuration before we go further */ 2321 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) { 2322 const struct rte_eth_dcb_rx_conf *conf; 2323 2324 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2325 if (!(conf->nb_tcs == RTE_ETH_4_TCS || 2326 conf->nb_tcs == RTE_ETH_8_TCS)) { 2327 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2328 " and nb_tcs != %d.", 2329 RTE_ETH_4_TCS, RTE_ETH_8_TCS); 2330 return -EINVAL; 2331 } 2332 } 2333 2334 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) { 2335 const struct rte_eth_dcb_tx_conf *conf; 2336 2337 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2338 if (!(conf->nb_tcs == RTE_ETH_4_TCS || 2339 conf->nb_tcs == RTE_ETH_8_TCS)) { 2340 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2341 " and nb_tcs != %d.", 2342 RTE_ETH_4_TCS, RTE_ETH_8_TCS); 2343 return -EINVAL; 2344 } 2345 } 2346 2347 /* 2348 * When DCB/VT is off, maximum number of queues changes, 2349 * except for 82598EB, which remains constant. 2350 */ 2351 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE && 2352 hw->mac.type != ixgbe_mac_82598EB) { 2353 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2354 PMD_INIT_LOG(ERR, 2355 "Neither VT nor DCB are enabled, " 2356 "nb_tx_q > %d.", 2357 IXGBE_NONE_MODE_TX_NB_QUEUES); 2358 return -EINVAL; 2359 } 2360 } 2361 } 2362 return 0; 2363 } 2364 2365 static int 2366 ixgbe_dev_configure(struct rte_eth_dev *dev) 2367 { 2368 struct ixgbe_interrupt *intr = 2369 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2370 struct ixgbe_adapter *adapter = dev->data->dev_private; 2371 int ret; 2372 2373 PMD_INIT_FUNC_TRACE(); 2374 2375 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 2376 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2377 2378 /* multiple queue mode checking */ 2379 ret = ixgbe_check_mq_mode(dev); 2380 if (ret != 0) { 2381 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2382 ret); 2383 return ret; 2384 } 2385 2386 /* set flag to update link status after init */ 2387 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2388 2389 /* 2390 * Initialize to TRUE. If any Rx queue does not meet the bulk 2391 * allocation or vector Rx preconditions, we will reset it.
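 * (Queue setup later clears these flags for any queue that cannot use the bulk-alloc or vector receive path, and the Rx burst function is chosen accordingly when the port starts.)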
2392 */ 2393 adapter->rx_bulk_alloc_allowed = true; 2394 adapter->rx_vec_allowed = true; 2395 2396 return 0; 2397 } 2398 2399 static void 2400 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2401 { 2402 struct ixgbe_hw *hw = 2403 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2404 struct ixgbe_interrupt *intr = 2405 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2406 uint32_t gpie; 2407 2408 /* only set it up on X550EM_X */ 2409 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2410 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2411 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2412 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2413 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2414 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2415 } 2416 } 2417 2418 int 2419 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 2420 uint16_t tx_rate, uint64_t q_msk) 2421 { 2422 struct ixgbe_hw *hw; 2423 struct ixgbe_vf_info *vfinfo; 2424 struct rte_eth_link link; 2425 uint8_t nb_q_per_pool; 2426 uint32_t queue_stride; 2427 uint32_t queue_idx, idx = 0, vf_idx; 2428 uint32_t queue_end; 2429 uint16_t total_rate = 0; 2430 struct rte_pci_device *pci_dev; 2431 int ret; 2432 2433 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2434 ret = rte_eth_link_get_nowait(dev->data->port_id, &link); 2435 if (ret < 0) 2436 return ret; 2437 2438 if (vf >= pci_dev->max_vfs) 2439 return -EINVAL; 2440 2441 if (tx_rate > link.link_speed) 2442 return -EINVAL; 2443 2444 if (q_msk == 0) 2445 return 0; 2446 2447 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2448 vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 2449 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2450 queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2451 queue_idx = vf * queue_stride; 2452 queue_end = queue_idx + nb_q_per_pool - 1; 2453 if (queue_end >= hw->mac.max_tx_queues) 2454 return -EINVAL; 2455 2456 if (vfinfo) { 2457 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { 2458 if (vf_idx == vf) 2459 continue; 2460 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 2461 idx++) 2462 total_rate += vfinfo[vf_idx].tx_rate[idx]; 2463 } 2464 } else { 2465 return -EINVAL; 2466 } 2467 2468 /* Store tx_rate for this vf. */ 2469 for (idx = 0; idx < nb_q_per_pool; idx++) { 2470 if (((uint64_t)0x1 << idx) & q_msk) { 2471 if (vfinfo[vf].tx_rate[idx] != tx_rate) 2472 vfinfo[vf].tx_rate[idx] = tx_rate; 2473 total_rate += tx_rate; 2474 } 2475 } 2476 2477 if (total_rate > dev->data->dev_link.link_speed) { 2478 /* Reset the stored Tx rate of the VF if the total would 2479 * exceed the link speed.
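 * Worked example with illustrative numbers: at link_speed = 10000 Mb/s, if the other VFs already store rates summing to 9000 and this request adds tx_rate = 2000 on one queue, total_rate = 11000 > 10000, so the stored rates are cleared and -EINVAL is returned.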
2480 */ 2481 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 2482 return -EINVAL; 2483 } 2484 2485 /* Set RTTBCNRC of each queue/pool for vf X */ 2486 for (; queue_idx <= queue_end; queue_idx++) { 2487 if (0x1 & q_msk) 2488 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 2489 q_msk = q_msk >> 1; 2490 } 2491 2492 return 0; 2493 } 2494 2495 static int 2496 ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw) 2497 { 2498 struct ixgbe_adapter *adapter = dev->data->dev_private; 2499 int err; 2500 uint32_t mflcn; 2501 2502 ixgbe_setup_fc(hw); 2503 2504 err = ixgbe_fc_enable(hw); 2505 2506 /* Not negotiated is not an error case */ 2507 if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) { 2508 /* 2509 *check if we want to forward MAC frames - driver doesn't 2510 *have native capability to do that, 2511 *so we'll write the registers ourselves 2512 */ 2513 2514 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2515 2516 /* set or clear MFLCN.PMCF bit depending on configuration */ 2517 if (adapter->mac_ctrl_frame_fwd != 0) 2518 mflcn |= IXGBE_MFLCN_PMCF; 2519 else 2520 mflcn &= ~IXGBE_MFLCN_PMCF; 2521 2522 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 2523 IXGBE_WRITE_FLUSH(hw); 2524 2525 return 0; 2526 } 2527 return err; 2528 } 2529 2530 /* 2531 * Configure device link speed and setup link. 2532 * It returns 0 on success. 2533 */ 2534 static int 2535 ixgbe_dev_start(struct rte_eth_dev *dev) 2536 { 2537 struct ixgbe_hw *hw = 2538 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2539 struct ixgbe_vf_info *vfinfo = 2540 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2541 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2542 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2543 uint32_t intr_vector = 0; 2544 int err; 2545 bool link_up = false, negotiate = 0; 2546 uint32_t speed = 0; 2547 uint32_t allowed_speeds = 0; 2548 int mask = 0; 2549 int status; 2550 uint16_t vf, idx; 2551 uint32_t *link_speeds; 2552 struct ixgbe_tm_conf *tm_conf = 2553 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2554 struct ixgbe_macsec_setting *macsec_setting = 2555 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 2556 2557 PMD_INIT_FUNC_TRACE(); 2558 2559 /* Stop the link setup handler before resetting the HW. 
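 * ixgbe_dev_wait_setup_link_complete() waits for any in-flight link-setup thread to finish, so the adapter reset below cannot race with it.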
*/ 2560 ixgbe_dev_wait_setup_link_complete(dev, 0); 2561 2562 /* disable uio/vfio intr/eventfd mapping */ 2563 rte_intr_disable(intr_handle); 2564 2565 /* stop adapter */ 2566 hw->adapter_stopped = 0; 2567 ixgbe_stop_adapter(hw); 2568 2569 /* reinitialize adapter 2570 * this calls reset and start 2571 */ 2572 status = ixgbe_pf_reset_hw(hw); 2573 if (status != 0) 2574 return -1; 2575 hw->mac.ops.start_hw(hw); 2576 hw->mac.get_link_status = true; 2577 2578 /* configure PF module if SRIOV enabled */ 2579 ixgbe_pf_host_configure(dev); 2580 2581 ixgbe_dev_phy_intr_setup(dev); 2582 2583 /* check and configure queue intr-vector mapping */ 2584 if ((rte_intr_cap_multiple(intr_handle) || 2585 !RTE_ETH_DEV_SRIOV(dev).active) && 2586 dev->data->dev_conf.intr_conf.rxq != 0) { 2587 intr_vector = dev->data->nb_rx_queues; 2588 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2589 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2590 IXGBE_MAX_INTR_QUEUE_NUM); 2591 return -ENOTSUP; 2592 } 2593 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2594 return -1; 2595 } 2596 2597 if (rte_intr_dp_is_en(intr_handle)) { 2598 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 2599 dev->data->nb_rx_queues)) { 2600 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2601 " intr_vec", dev->data->nb_rx_queues); 2602 return -ENOMEM; 2603 } 2604 } 2605 2606 /* configure MSI-X for sleep until Rx interrupt */ 2607 ixgbe_configure_msix(dev); 2608 2609 /* initialize transmission unit */ 2610 ixgbe_dev_tx_init(dev); 2611 2612 /* This can fail when allocating mbufs for descriptor rings */ 2613 err = ixgbe_dev_rx_init(dev); 2614 if (err) { 2615 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2616 goto error; 2617 } 2618 2619 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | 2620 RTE_ETH_VLAN_EXTEND_MASK; 2621 err = ixgbe_vlan_offload_config(dev, mask); 2622 if (err) { 2623 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 2624 goto error; 2625 } 2626 2627 if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) { 2628 /* Enable vlan filtering for VMDq */ 2629 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2630 } 2631 2632 /* Configure DCB hw */ 2633 ixgbe_configure_dcb(dev); 2634 2635 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { 2636 err = ixgbe_fdir_configure(dev); 2637 if (err) 2638 goto error; 2639 } 2640 2641 /* Restore vf rate limit */ 2642 if (vfinfo != NULL) { 2643 for (vf = 0; vf < pci_dev->max_vfs; vf++) 2644 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2645 if (vfinfo[vf].tx_rate[idx] != 0) 2646 ixgbe_set_vf_rate_limit( 2647 dev, vf, 2648 vfinfo[vf].tx_rate[idx], 2649 1 << idx); 2650 } 2651 2652 ixgbe_restore_statistics_mapping(dev); 2653 2654 err = ixgbe_flow_ctrl_enable(dev, hw); 2655 if (err < 0) { 2656 PMD_INIT_LOG(ERR, "enable flow ctrl err"); 2657 goto error; 2658 } 2659 2660 err = ixgbe_dev_rxtx_start(dev); 2661 if (err < 0) { 2662 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2663 goto error; 2664 } 2665 2666 /* Skip link setup if loopback mode is enabled.
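 * Tx->Rx loopback is internal to the MAC, so no external link needs to be brought up; ixgbe_check_supported_loopback_mode() below rejects modes this MAC cannot provide.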
*/ 2667 if (dev->data->dev_conf.lpbk_mode != 0) { 2668 err = ixgbe_check_supported_loopback_mode(dev); 2669 if (err < 0) { 2670 PMD_INIT_LOG(ERR, "Unsupported loopback mode"); 2671 goto error; 2672 } else { 2673 goto skip_link_setup; 2674 } 2675 } 2676 2677 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2678 err = hw->mac.ops.setup_sfp(hw); 2679 if (err) 2680 goto error; 2681 } 2682 2683 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2684 /* Turn on the copper */ 2685 ixgbe_set_phy_power(hw, true); 2686 } else { 2687 /* Turn on the laser */ 2688 ixgbe_enable_tx_laser(hw); 2689 } 2690 2691 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2692 if (err) 2693 goto error; 2694 dev->data->dev_link.link_status = link_up; 2695 2696 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2697 if (err) 2698 goto error; 2699 2700 switch (hw->mac.type) { 2701 case ixgbe_mac_X550: 2702 case ixgbe_mac_X550EM_x: 2703 case ixgbe_mac_X550EM_a: 2704 allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G | 2705 RTE_ETH_LINK_SPEED_2_5G | RTE_ETH_LINK_SPEED_5G | 2706 RTE_ETH_LINK_SPEED_10G; 2707 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 2708 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 2709 allowed_speeds = RTE_ETH_LINK_SPEED_10M | 2710 RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G; 2711 break; 2712 default: 2713 allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G | 2714 RTE_ETH_LINK_SPEED_10G; 2715 } 2716 2717 link_speeds = &dev->data->dev_conf.link_speeds; 2718 2719 /* Ignore autoneg flag bit and check the validity of 2720 * link_speed 2721 */ 2722 if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) { 2723 PMD_INIT_LOG(ERR, "Invalid link setting"); 2724 goto error; 2725 } 2726 2727 speed = 0x0; 2728 if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) { 2729 switch (hw->mac.type) { 2730 case ixgbe_mac_82598EB: 2731 speed = IXGBE_LINK_SPEED_82598_AUTONEG; 2732 break; 2733 case ixgbe_mac_82599EB: 2734 case ixgbe_mac_X540: 2735 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2736 break; 2737 case ixgbe_mac_X550: 2738 case ixgbe_mac_X550EM_x: 2739 case ixgbe_mac_X550EM_a: 2740 speed = IXGBE_LINK_SPEED_X550_AUTONEG; 2741 break; 2742 default: 2743 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2744 } 2745 } else { 2746 if (*link_speeds & RTE_ETH_LINK_SPEED_10G) 2747 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2748 if (*link_speeds & RTE_ETH_LINK_SPEED_5G) 2749 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2750 if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G) 2751 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2752 if (*link_speeds & RTE_ETH_LINK_SPEED_1G) 2753 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2754 if (*link_speeds & RTE_ETH_LINK_SPEED_100M) 2755 speed |= IXGBE_LINK_SPEED_100_FULL; 2756 if (*link_speeds & RTE_ETH_LINK_SPEED_10M) 2757 speed |= IXGBE_LINK_SPEED_10_FULL; 2758 } 2759 2760 err = ixgbe_setup_link(hw, speed, link_up); 2761 if (err) 2762 goto error; 2763 2764 skip_link_setup: 2765 2766 if (rte_intr_allow_others(intr_handle)) { 2767 /* check if lsc interrupt is enabled */ 2768 if (dev->data->dev_conf.intr_conf.lsc != 0) 2769 ixgbe_dev_lsc_interrupt_setup(dev, TRUE); 2770 else 2771 ixgbe_dev_lsc_interrupt_setup(dev, FALSE); 2772 ixgbe_dev_macsec_interrupt_setup(dev); 2773 } else { 2774 rte_intr_callback_unregister(intr_handle, 2775 ixgbe_dev_interrupt_handler, dev); 2776 if (dev->data->dev_conf.intr_conf.lsc != 0) 2777 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2778 " no intr multiplex"); 2779 } 2780 2781 /* check if rxq interrupt is enabled */ 2782 if (dev->data->dev_conf.intr_conf.rxq 
!= 0 && 2783 rte_intr_dp_is_en(intr_handle)) 2784 ixgbe_dev_rxq_interrupt_setup(dev); 2785 2786 /* enable uio/vfio intr/eventfd mapping */ 2787 rte_intr_enable(intr_handle); 2788 2789 /* resume enabled intr since hw reset */ 2790 ixgbe_enable_intr(dev); 2791 ixgbe_l2_tunnel_conf(dev); 2792 ixgbe_filter_restore(dev); 2793 2794 if (tm_conf->root && !tm_conf->committed) 2795 PMD_DRV_LOG(WARNING, 2796 "please call hierarchy_commit() " 2797 "before starting the port"); 2798 2799 /* wait for the controller to acquire link */ 2800 err = ixgbe_wait_for_link_up(hw); 2801 if (err) 2802 goto error; 2803 2804 /* 2805 * Update link status right before return, because it may 2806 * start link configuration process in a separate thread. 2807 */ 2808 ixgbe_dev_link_update(dev, 0); 2809 2810 /* setup the macsec setting register */ 2811 if (macsec_setting->offload_en) 2812 ixgbe_dev_macsec_register_enable(dev, macsec_setting); 2813 2814 return 0; 2815 2816 error: 2817 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2818 ixgbe_dev_clear_queues(dev); 2819 return -EIO; 2820 } 2821 2822 /* 2823 * Stop device: disable rx and tx functions to allow for reconfiguring. 2824 */ 2825 static int 2826 ixgbe_dev_stop(struct rte_eth_dev *dev) 2827 { 2828 struct rte_eth_link link; 2829 struct ixgbe_adapter *adapter = dev->data->dev_private; 2830 struct ixgbe_hw *hw = 2831 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2832 struct ixgbe_vf_info *vfinfo = 2833 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2834 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2835 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2836 int vf; 2837 struct ixgbe_tm_conf *tm_conf = 2838 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2839 2840 if (hw->adapter_stopped) 2841 return 0; 2842 2843 PMD_INIT_FUNC_TRACE(); 2844 2845 ixgbe_dev_wait_setup_link_complete(dev, 0); 2846 2847 /* disable interrupts */ 2848 ixgbe_disable_intr(hw); 2849 2850 /* reset the NIC */ 2851 ixgbe_pf_reset_hw(hw); 2852 hw->adapter_stopped = 0; 2853 2854 /* stop adapter */ 2855 ixgbe_stop_adapter(hw); 2856 2857 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) 2858 vfinfo[vf].clear_to_send = false; 2859 2860 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2861 /* Turn off the copper */ 2862 ixgbe_set_phy_power(hw, false); 2863 } else { 2864 /* Turn off the laser */ 2865 ixgbe_disable_tx_laser(hw); 2866 } 2867 2868 ixgbe_dev_clear_queues(dev); 2869 2870 /* Clear stored conf */ 2871 dev->data->scattered_rx = 0; 2872 dev->data->lro = 0; 2873 2874 /* Clear recorded link status */ 2875 memset(&link, 0, sizeof(link)); 2876 rte_eth_linkstatus_set(dev, &link); 2877 2878 if (!rte_intr_allow_others(intr_handle)) 2879 /* resume to the default handler */ 2880 rte_intr_callback_register(intr_handle, 2881 ixgbe_dev_interrupt_handler, 2882 (void *)dev); 2883 2884 /* Clean datapath event and queue/vec mapping */ 2885 rte_intr_efd_disable(intr_handle); 2886 rte_intr_vec_list_free(intr_handle); 2887 2888 /* reset hierarchy commit */ 2889 tm_conf->committed = false; 2890 2891 adapter->rss_reta_updated = 0; 2892 2893 hw->adapter_stopped = true; 2894 dev->data->dev_started = 0; 2895 2896 return 0; 2897 } 2898 2899 /* 2900 * Set device link up: enable tx. 
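 * For copper media this powers the PHY up; for fiber it re-enables the Tx laser (see the media-type branch below).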
2901 */ 2902 static int 2903 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2904 { 2905 struct ixgbe_hw *hw = 2906 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2907 if (hw->mac.type == ixgbe_mac_82599EB) { 2908 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2909 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2910 /* Not supported in bypass mode */ 2911 PMD_INIT_LOG(ERR, "Set link up is not supported " 2912 "by device id 0x%x", hw->device_id); 2913 return -ENOTSUP; 2914 } 2915 #endif 2916 } 2917 2918 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2919 /* Turn on the copper */ 2920 ixgbe_set_phy_power(hw, true); 2921 } else { 2922 /* Turn on the laser */ 2923 ixgbe_enable_tx_laser(hw); 2924 ixgbe_dev_link_update(dev, 0); 2925 } 2926 2927 return 0; 2928 } 2929 2930 /* 2931 * Set device link down: disable tx. 2932 */ 2933 static int 2934 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 2935 { 2936 struct ixgbe_hw *hw = 2937 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2938 if (hw->mac.type == ixgbe_mac_82599EB) { 2939 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2940 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2941 /* Not supported in bypass mode */ 2942 PMD_INIT_LOG(ERR, "Set link down is not supported " 2943 "by device id 0x%x", hw->device_id); 2944 return -ENOTSUP; 2945 } 2946 #endif 2947 } 2948 2949 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2950 /* Turn off the copper */ 2951 ixgbe_set_phy_power(hw, false); 2952 } else { 2953 /* Turn off the laser */ 2954 ixgbe_disable_tx_laser(hw); 2955 ixgbe_dev_link_update(dev, 0); 2956 } 2957 2958 return 0; 2959 } 2960 2961 /* 2962 * Reset and stop device. 2963 */ 2964 static int 2965 ixgbe_dev_close(struct rte_eth_dev *dev) 2966 { 2967 struct ixgbe_hw *hw = 2968 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2969 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2970 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2971 int retries = 0; 2972 int ret; 2973 2974 PMD_INIT_FUNC_TRACE(); 2975 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2976 return 0; 2977 2978 ixgbe_pf_reset_hw(hw); 2979 2980 ret = ixgbe_dev_stop(dev); 2981 2982 ixgbe_dev_free_queues(dev); 2983 2984 ixgbe_disable_pcie_master(hw); 2985 2986 /* reprogram the RAR[0] in case user changed it.
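 * (The reset above cleared the receive address registers; rewriting hw->mac.addr preserves whatever default MAC the application last set, e.g. via rte_eth_dev_default_mac_addr_set().)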
*/ 2987 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 2988 2989 /* Unlock any pending hardware semaphore */ 2990 ixgbe_swfw_lock_reset(hw); 2991 2992 /* disable uio intr before callback unregister */ 2993 rte_intr_disable(intr_handle); 2994 2995 do { 2996 ret = rte_intr_callback_unregister(intr_handle, 2997 ixgbe_dev_interrupt_handler, dev); 2998 if (ret >= 0 || ret == -ENOENT) { 2999 break; 3000 } else if (ret != -EAGAIN) { 3001 PMD_INIT_LOG(ERR, 3002 "intr callback unregister failed: %d", 3003 ret); 3004 } 3005 rte_delay_ms(100); 3006 } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); 3007 3008 /* cancel the delay handler before remove dev */ 3009 rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev); 3010 3011 /* uninitialize PF if max_vfs not zero */ 3012 ixgbe_pf_host_uninit(dev); 3013 3014 /* remove all the fdir filters & hash */ 3015 ixgbe_fdir_filter_uninit(dev); 3016 3017 /* remove all the L2 tunnel filters & hash */ 3018 ixgbe_l2_tn_filter_uninit(dev); 3019 3020 /* Remove all ntuple filters of the device */ 3021 ixgbe_ntuple_filter_uninit(dev); 3022 3023 /* clear all the filters list */ 3024 ixgbe_filterlist_flush(); 3025 3026 /* Remove all Traffic Manager configuration */ 3027 ixgbe_tm_conf_uninit(dev); 3028 3029 #ifdef RTE_LIB_SECURITY 3030 rte_free(dev->security_ctx); 3031 #endif 3032 3033 return ret; 3034 } 3035 3036 /* 3037 * Reset PF device. 3038 */ 3039 static int 3040 ixgbe_dev_reset(struct rte_eth_dev *dev) 3041 { 3042 int ret; 3043 3044 /* When a DPDK PMD PF begin to reset PF port, it should notify all 3045 * its VF to make them align with it. The detailed notification 3046 * mechanism is PMD specific. As to ixgbe PF, it is rather complex. 3047 * To avoid unexpected behavior in VF, currently reset of PF with 3048 * SR-IOV activation is not supported. It might be supported later. 3049 */ 3050 if (dev->data->sriov.active) 3051 return -ENOTSUP; 3052 3053 ret = eth_ixgbe_dev_uninit(dev); 3054 if (ret) 3055 return ret; 3056 3057 ret = eth_ixgbe_dev_init(dev, NULL); 3058 3059 return ret; 3060 } 3061 3062 static void 3063 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 3064 struct ixgbe_hw_stats *hw_stats, 3065 struct ixgbe_macsec_stats *macsec_stats, 3066 uint64_t *total_missed_rx, uint64_t *total_qbrc, 3067 uint64_t *total_qprc, uint64_t *total_qprdc) 3068 { 3069 uint32_t bprc, lxon, lxoff, total; 3070 uint32_t delta_gprc = 0; 3071 unsigned i; 3072 /* Workaround for RX byte count not including CRC bytes when CRC 3073 * strip is enabled. CRC bytes are removed from counters when crc_strip 3074 * is disabled. 
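 * Example: a 64-byte frame is counted as 60 bytes by QBRC when CRC strip is on; with strip off all 64 bytes are counted, so the code below subtracts RTE_ETHER_CRC_LEN (4) per packet when crc_strip == 0.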
3075 */ 3076 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 3077 IXGBE_HLREG0_RXCRCSTRP); 3078 3079 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 3080 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 3081 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 3082 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 3083 3084 for (i = 0; i < 8; i++) { 3085 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 3086 3087 /* global total per queue */ 3088 hw_stats->mpc[i] += mp; 3089 /* Running comprehensive total for stats display */ 3090 *total_missed_rx += hw_stats->mpc[i]; 3091 if (hw->mac.type == ixgbe_mac_82598EB) { 3092 hw_stats->rnbc[i] += 3093 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3094 hw_stats->pxonrxc[i] += 3095 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 3096 hw_stats->pxoffrxc[i] += 3097 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 3098 } else { 3099 hw_stats->pxonrxc[i] += 3100 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 3101 hw_stats->pxoffrxc[i] += 3102 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 3103 hw_stats->pxon2offc[i] += 3104 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 3105 } 3106 hw_stats->pxontxc[i] += 3107 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 3108 hw_stats->pxofftxc[i] += 3109 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 3110 } 3111 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3112 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3113 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3114 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3115 3116 delta_gprc += delta_qprc; 3117 3118 hw_stats->qprc[i] += delta_qprc; 3119 hw_stats->qptc[i] += delta_qptc; 3120 3121 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 3122 hw_stats->qbrc[i] += 3123 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 3124 if (crc_strip == 0) 3125 hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; 3126 3127 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 3128 hw_stats->qbtc[i] += 3129 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 3130 3131 hw_stats->qprdc[i] += delta_qprdc; 3132 *total_qprdc += hw_stats->qprdc[i]; 3133 3134 *total_qprc += hw_stats->qprc[i]; 3135 *total_qbrc += hw_stats->qbrc[i]; 3136 } 3137 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 3138 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 3139 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3140 3141 /* 3142 * An errata states that gprc actually counts good + missed packets: 3143 * Workaround to set gprc to summated queue packet receives 3144 */ 3145 hw_stats->gprc = *total_qprc; 3146 3147 if (hw->mac.type != ixgbe_mac_82598EB) { 3148 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 3149 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 3150 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 3151 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 3152 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 3153 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 3154 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 3155 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 3156 } else { 3157 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3158 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3159 /* 82598 only has a counter in the high register */ 3160 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3161 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3162 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3163 } 3164 uint64_t old_tpr = hw_stats->tpr; 3165 3166 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 
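/* old_tpr, saved above, holds the previous total-packets-received count; the CRC adjustment of hw_stats->tor further down uses (tpr - old_tpr) so that only this interval's packets are corrected. */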
3167 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3168 3169 if (crc_strip == 0) 3170 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; 3171 3172 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 3173 hw_stats->gptc += delta_gptc; 3174 hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; 3175 hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 3176 3177 /* 3178 * Workaround: mprc hardware is incorrectly counting 3179 * broadcasts, so for now we subtract those. 3180 */ 3181 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3182 hw_stats->bprc += bprc; 3183 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3184 if (hw->mac.type == ixgbe_mac_82598EB) 3185 hw_stats->mprc -= bprc; 3186 3187 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3188 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3189 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3190 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3191 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3192 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3193 3194 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3195 hw_stats->lxontxc += lxon; 3196 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3197 hw_stats->lxofftxc += lxoff; 3198 total = lxon + lxoff; 3199 3200 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3201 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3202 hw_stats->gptc -= total; 3203 hw_stats->mptc -= total; 3204 hw_stats->ptc64 -= total; 3205 hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; 3206 3207 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3208 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3209 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3210 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3211 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3212 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3213 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3214 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3215 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3216 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3217 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3218 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3219 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3220 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3221 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3222 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3223 /* Only read FCOE on 82599 */ 3224 if (hw->mac.type != ixgbe_mac_82598EB) { 3225 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3226 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3227 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3228 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3229 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3230 } 3231 3232 /* Flow Director Stats registers */ 3233 if (hw->mac.type != ixgbe_mac_82598EB) { 3234 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 3235 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 3236 hw_stats->fdirustat_add += IXGBE_READ_REG(hw, 3237 IXGBE_FDIRUSTAT) & 0xFFFF; 3238 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, 3239 IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; 3240 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, 3241 IXGBE_FDIRFSTAT) & 0xFFFF; 3242 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, 3243 IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; 3244 } 3245 /* MACsec Stats registers */ 3246 macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); 3247 macsec_stats->out_pkts_encrypted += 
3248 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); 3249 macsec_stats->out_pkts_protected += 3250 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); 3251 macsec_stats->out_octets_encrypted += 3252 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); 3253 macsec_stats->out_octets_protected += 3254 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); 3255 macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); 3256 macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); 3257 macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); 3258 macsec_stats->in_pkts_unknownsci += 3259 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); 3260 macsec_stats->in_octets_decrypted += 3261 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); 3262 macsec_stats->in_octets_validated += 3263 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); 3264 macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); 3265 macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); 3266 macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); 3267 for (i = 0; i < 2; i++) { 3268 macsec_stats->in_pkts_ok += 3269 IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); 3270 macsec_stats->in_pkts_invalid += 3271 IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); 3272 macsec_stats->in_pkts_notvalid += 3273 IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); 3274 } 3275 macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); 3276 macsec_stats->in_pkts_notusingsa += 3277 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); 3278 } 3279 3280 /* 3281 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 3282 */ 3283 static int 3284 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3285 { 3286 struct ixgbe_hw *hw = 3287 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3288 struct ixgbe_hw_stats *hw_stats = 3289 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3290 struct ixgbe_macsec_stats *macsec_stats = 3291 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3292 dev->data->dev_private); 3293 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3294 unsigned i; 3295 3296 total_missed_rx = 0; 3297 total_qbrc = 0; 3298 total_qprc = 0; 3299 total_qprdc = 0; 3300 3301 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3302 &total_qbrc, &total_qprc, &total_qprdc); 3303 3304 if (stats == NULL) 3305 return -EINVAL; 3306 3307 /* Fill out the rte_eth_stats statistics structure */ 3308 stats->ipackets = total_qprc; 3309 stats->ibytes = total_qbrc; 3310 stats->opackets = hw_stats->gptc; 3311 stats->obytes = hw_stats->gotc; 3312 3313 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3314 stats->q_ipackets[i] = hw_stats->qprc[i]; 3315 stats->q_opackets[i] = hw_stats->qptc[i]; 3316 stats->q_ibytes[i] = hw_stats->qbrc[i]; 3317 stats->q_obytes[i] = hw_stats->qbtc[i]; 3318 stats->q_errors[i] = hw_stats->qprdc[i]; 3319 } 3320 3321 /* Rx Errors */ 3322 stats->imissed = total_missed_rx; 3323 stats->ierrors = hw_stats->crcerrs + 3324 hw_stats->mspdc + 3325 hw_stats->rlec + 3326 hw_stats->ruc + 3327 hw_stats->roc + 3328 hw_stats->illerrc + 3329 hw_stats->errbc + 3330 hw_stats->rfc + 3331 hw_stats->fccrc + 3332 hw_stats->fclast; 3333 3334 /* 3335 * 82599 errata, UDP frames with a 0 checksum can be marked as checksum 3336 * errors. 
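 * For that reason the xec (XSUM error) counter is folded into ierrors below only when the MAC is not an 82599.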
3337 */ 3338 if (hw->mac.type != ixgbe_mac_82599EB) 3339 stats->ierrors += hw_stats->xec; 3340 3341 /* Tx Errors */ 3342 stats->oerrors = 0; 3343 return 0; 3344 } 3345 3346 static int 3347 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 3348 { 3349 struct ixgbe_hw_stats *stats = 3350 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3351 3352 /* HW registers are cleared on read */ 3353 ixgbe_dev_stats_get(dev, NULL); 3354 3355 /* Reset software totals */ 3356 memset(stats, 0, sizeof(*stats)); 3357 3358 return 0; 3359 } 3360 3361 /* This function calculates the number of xstats based on the current config */ 3362 static unsigned 3363 ixgbe_xstats_calc_num(void) { 3364 return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + 3365 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 3366 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 3367 } 3368 3369 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3370 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) 3371 { 3372 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 3373 unsigned stat, i, count; 3374 3375 if (xstats_names != NULL) { 3376 count = 0; 3377 3378 /* Note: limit >= cnt_stats checked upstream 3379 * in rte_eth_xstats_names() 3380 */ 3381 3382 /* Extended stats from ixgbe_hw_stats */ 3383 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3384 strlcpy(xstats_names[count].name, 3385 rte_ixgbe_stats_strings[i].name, 3386 sizeof(xstats_names[count].name)); 3387 count++; 3388 } 3389 3390 /* MACsec Stats */ 3391 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3392 strlcpy(xstats_names[count].name, 3393 rte_ixgbe_macsec_strings[i].name, 3394 sizeof(xstats_names[count].name)); 3395 count++; 3396 } 3397 3398 /* RX Priority Stats */ 3399 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3400 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3401 snprintf(xstats_names[count].name, 3402 sizeof(xstats_names[count].name), 3403 "rx_priority%u_%s", i, 3404 rte_ixgbe_rxq_strings[stat].name); 3405 count++; 3406 } 3407 } 3408 3409 /* TX Priority Stats */ 3410 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3411 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3412 snprintf(xstats_names[count].name, 3413 sizeof(xstats_names[count].name), 3414 "tx_priority%u_%s", i, 3415 rte_ixgbe_txq_strings[stat].name); 3416 count++; 3417 } 3418 } 3419 } 3420 return cnt_stats; 3421 } 3422 3423 static int ixgbe_dev_xstats_get_names_by_id( 3424 struct rte_eth_dev *dev, 3425 const uint64_t *ids, 3426 struct rte_eth_xstat_name *xstats_names, 3427 unsigned int limit) 3428 { 3429 if (!ids) { 3430 const unsigned int cnt_stats = ixgbe_xstats_calc_num(); 3431 unsigned int stat, i, count; 3432 3433 if (xstats_names != NULL) { 3434 count = 0; 3435 3436 /* Note: limit >= cnt_stats checked upstream 3437 * in rte_eth_xstats_names() 3438 */ 3439 3440 /* Extended stats from ixgbe_hw_stats */ 3441 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3442 strlcpy(xstats_names[count].name, 3443 rte_ixgbe_stats_strings[i].name, 3444 sizeof(xstats_names[count].name)); 3445 count++; 3446 } 3447 3448 /* MACsec Stats */ 3449 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3450 strlcpy(xstats_names[count].name, 3451 rte_ixgbe_macsec_strings[i].name, 3452 sizeof(xstats_names[count].name)); 3453 count++; 3454 } 3455 3456 /* RX Priority Stats */ 3457 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3458 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3459 snprintf(xstats_names[count].name, 3460 sizeof(xstats_names[count].name), 3461 
"rx_priority%u_%s", i, 3462 rte_ixgbe_rxq_strings[stat].name); 3463 count++; 3464 } 3465 } 3466 3467 /* TX Priority Stats */ 3468 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3469 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3470 snprintf(xstats_names[count].name, 3471 sizeof(xstats_names[count].name), 3472 "tx_priority%u_%s", i, 3473 rte_ixgbe_txq_strings[stat].name); 3474 count++; 3475 } 3476 } 3477 } 3478 return cnt_stats; 3479 } 3480 3481 uint16_t i; 3482 uint16_t size = ixgbe_xstats_calc_num(); 3483 struct rte_eth_xstat_name xstats_names_copy[size]; 3484 3485 ixgbe_dev_xstats_get_names_by_id(dev, NULL, xstats_names_copy, 3486 size); 3487 3488 for (i = 0; i < limit; i++) { 3489 if (ids[i] >= size) { 3490 PMD_INIT_LOG(ERR, "id value isn't valid"); 3491 return -1; 3492 } 3493 strcpy(xstats_names[i].name, 3494 xstats_names_copy[ids[i]].name); 3495 } 3496 return limit; 3497 } 3498 3499 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3500 struct rte_eth_xstat_name *xstats_names, unsigned limit) 3501 { 3502 unsigned i; 3503 3504 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 3505 return -ENOMEM; 3506 3507 if (xstats_names != NULL) 3508 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 3509 strlcpy(xstats_names[i].name, 3510 rte_ixgbevf_stats_strings[i].name, 3511 sizeof(xstats_names[i].name)); 3512 return IXGBEVF_NB_XSTATS; 3513 } 3514 3515 static int 3516 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3517 unsigned n) 3518 { 3519 struct ixgbe_hw *hw = 3520 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3521 struct ixgbe_hw_stats *hw_stats = 3522 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3523 struct ixgbe_macsec_stats *macsec_stats = 3524 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3525 dev->data->dev_private); 3526 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3527 unsigned i, stat, count = 0; 3528 3529 count = ixgbe_xstats_calc_num(); 3530 3531 if (n < count) 3532 return count; 3533 3534 total_missed_rx = 0; 3535 total_qbrc = 0; 3536 total_qprc = 0; 3537 total_qprdc = 0; 3538 3539 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3540 &total_qbrc, &total_qprc, &total_qprdc); 3541 3542 /* If this is a reset xstats is NULL, and we have cleared the 3543 * registers by reading them. 
3544 */ 3545 if (!xstats) 3546 return 0; 3547 3548 /* Extended stats from ixgbe_hw_stats */ 3549 count = 0; 3550 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3551 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3552 rte_ixgbe_stats_strings[i].offset); 3553 xstats[count].id = count; 3554 count++; 3555 } 3556 3557 /* MACsec Stats */ 3558 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3559 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + 3560 rte_ixgbe_macsec_strings[i].offset); 3561 xstats[count].id = count; 3562 count++; 3563 } 3564 3565 /* RX Priority Stats */ 3566 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3567 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3568 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3569 rte_ixgbe_rxq_strings[stat].offset + 3570 (sizeof(uint64_t) * i)); 3571 xstats[count].id = count; 3572 count++; 3573 } 3574 } 3575 3576 /* TX Priority Stats */ 3577 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3578 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3579 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3580 rte_ixgbe_txq_strings[stat].offset + 3581 (sizeof(uint64_t) * i)); 3582 xstats[count].id = count; 3583 count++; 3584 } 3585 } 3586 return count; 3587 } 3588 3589 static int 3590 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 3591 uint64_t *values, unsigned int n) 3592 { 3593 if (!ids) { 3594 struct ixgbe_hw *hw = 3595 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3596 struct ixgbe_hw_stats *hw_stats = 3597 IXGBE_DEV_PRIVATE_TO_STATS( 3598 dev->data->dev_private); 3599 struct ixgbe_macsec_stats *macsec_stats = 3600 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3601 dev->data->dev_private); 3602 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3603 unsigned int i, stat, count = 0; 3604 3605 count = ixgbe_xstats_calc_num(); 3606 3607 if (!ids && n < count) 3608 return count; 3609 3610 total_missed_rx = 0; 3611 total_qbrc = 0; 3612 total_qprc = 0; 3613 total_qprdc = 0; 3614 3615 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, 3616 &total_missed_rx, &total_qbrc, &total_qprc, 3617 &total_qprdc); 3618 3619 /* If this is a reset xstats is NULL, and we have cleared the 3620 * registers by reading them. 
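	 *
	 * Application-side sketch (illustrative; "port_id" and the id
	 * values are assumptions, not part of this file):
	 *
	 *	uint64_t ids[2] = { 0, 1 };
	 *	uint64_t vals[2];
	 *	if (rte_eth_xstats_get_by_id(port_id, ids, vals, 2) > 0)
	 *		printf("xstat[0]=%" PRIu64 "\n", vals[0]);
	 *
	 * Passing ids == NULL, as the driver path below shows, operates on
	 * the whole statistics set instead.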
	 */
	if (!ids && !values)
		return 0;

	/* Extended stats from ixgbe_hw_stats */
	count = 0;
	for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
		values[count] = *(uint64_t *)(((char *)hw_stats) +
				rte_ixgbe_stats_strings[i].offset);
		count++;
	}

	/* MACsec Stats */
	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
		values[count] = *(uint64_t *)(((char *)macsec_stats) +
				rte_ixgbe_macsec_strings[i].offset);
		count++;
	}

	/* RX Priority Stats */
	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
			values[count] =
				*(uint64_t *)(((char *)hw_stats) +
				rte_ixgbe_rxq_strings[stat].offset +
				(sizeof(uint64_t) * i));
			count++;
		}
	}

	/* TX Priority Stats */
	for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
		for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
			values[count] =
				*(uint64_t *)(((char *)hw_stats) +
				rte_ixgbe_txq_strings[stat].offset +
				(sizeof(uint64_t) * i));
			count++;
		}
	}
	return count;
	}

	uint16_t i;
	uint16_t size = ixgbe_xstats_calc_num();
	uint64_t values_copy[size];

	ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);

	for (i = 0; i < n; i++) {
		if (ids[i] >= size) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct ixgbe_hw_stats *stats =
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	struct ixgbe_macsec_stats *macsec_stats =
		IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
				dev->data->dev_private);

	unsigned count = ixgbe_xstats_calc_num();

	/* HW registers are cleared on read */
	ixgbe_dev_xstats_get(dev, NULL, count);

	/* Reset software totals */
	memset(stats, 0, sizeof(*stats));
	memset(macsec_stats, 0, sizeof(*macsec_stats));

	return 0;
}

static void
ixgbevf_update_stats(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Good Rx packets, including VF loopback */
	UPDATE_VF_STAT(IXGBE_VFGPRC,
	    hw_stats->last_vfgprc, hw_stats->vfgprc);

	/* Good Rx octets, including VF loopback */
	UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    hw_stats->last_vfgorc, hw_stats->vfgorc);

	/* Good Tx packets, including VF loopback */
	UPDATE_VF_STAT(IXGBE_VFGPTC,
	    hw_stats->last_vfgptc, hw_stats->vfgptc);

	/* Good Tx octets, including VF loopback */
	UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    hw_stats->last_vfgotc, hw_stats->vfgotc);

	/* Rx Multicast packets */
	UPDATE_VF_STAT(IXGBE_VFMPRC,
	    hw_stats->last_vfmprc, hw_stats->vfmprc);
}

static int
ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       unsigned n)
{
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	unsigned i;

	if (n < IXGBEVF_NB_XSTATS)
		return IXGBEVF_NB_XSTATS;

	ixgbevf_update_stats(dev);

	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i <
IXGBEVF_NB_XSTATS; i++) { 3747 xstats[i].id = i; 3748 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 3749 rte_ixgbevf_stats_strings[i].offset); 3750 } 3751 3752 return IXGBEVF_NB_XSTATS; 3753 } 3754 3755 static int 3756 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3757 { 3758 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3759 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3760 3761 ixgbevf_update_stats(dev); 3762 3763 if (stats == NULL) 3764 return -EINVAL; 3765 3766 stats->ipackets = hw_stats->vfgprc; 3767 stats->ibytes = hw_stats->vfgorc; 3768 stats->opackets = hw_stats->vfgptc; 3769 stats->obytes = hw_stats->vfgotc; 3770 return 0; 3771 } 3772 3773 static int 3774 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 3775 { 3776 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3777 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3778 3779 /* Sync HW register to the last stats */ 3780 ixgbevf_dev_stats_get(dev, NULL); 3781 3782 /* reset HW current stats*/ 3783 hw_stats->vfgprc = 0; 3784 hw_stats->vfgorc = 0; 3785 hw_stats->vfgptc = 0; 3786 hw_stats->vfgotc = 0; 3787 hw_stats->vfmprc = 0; 3788 3789 return 0; 3790 } 3791 3792 static int 3793 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 3794 { 3795 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3796 u16 eeprom_verh, eeprom_verl; 3797 u32 etrack_id; 3798 int ret; 3799 3800 ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); 3801 ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); 3802 3803 etrack_id = (eeprom_verh << 16) | eeprom_verl; 3804 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); 3805 if (ret < 0) 3806 return -EINVAL; 3807 3808 ret += 1; /* add the size of '\0' */ 3809 if (fw_size < (size_t)ret) 3810 return ret; 3811 else 3812 return 0; 3813 } 3814 3815 static int 3816 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3817 { 3818 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3819 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3820 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3821 3822 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3823 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3824 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3825 /* 3826 * When DCB/VT is off, maximum number of queues changes, 3827 * except for 82598EB, which remains constant. 
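	 *
	 * Sketch of the caller's view (illustrative only): applications
	 * read these limits via the generic API, e.g.
	 *
	 *	struct rte_eth_dev_info info;
	 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
	 *		printf("max rxq=%u txq=%u\n",
	 *		       info.max_rx_queues, info.max_tx_queues);
	 *
	 * where "port_id" is assumed to be a valid ixgbe port.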
3828 */ 3829 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE && 3830 hw->mac.type != ixgbe_mac_82598EB) 3831 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3832 } 3833 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3834 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3835 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3836 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3837 dev_info->max_vfs = pci_dev->max_vfs; 3838 if (hw->mac.type == ixgbe_mac_82598EB) 3839 dev_info->max_vmdq_pools = RTE_ETH_16_POOLS; 3840 else 3841 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; 3842 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3843 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3844 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3845 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3846 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3847 dev_info->rx_queue_offload_capa); 3848 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3849 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3850 3851 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3852 .rx_thresh = { 3853 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3854 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3855 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3856 }, 3857 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3858 .rx_drop_en = 0, 3859 .offloads = 0, 3860 }; 3861 3862 dev_info->default_txconf = (struct rte_eth_txconf) { 3863 .tx_thresh = { 3864 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3865 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3866 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3867 }, 3868 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3869 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3870 .offloads = 0, 3871 }; 3872 3873 dev_info->rx_desc_lim = rx_desc_lim; 3874 dev_info->tx_desc_lim = tx_desc_lim; 3875 3876 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3877 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3878 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3879 3880 dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G; 3881 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3882 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 3883 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M | 3884 RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G; 3885 3886 if (hw->mac.type == ixgbe_mac_X540 || 3887 hw->mac.type == ixgbe_mac_X540_vf || 3888 hw->mac.type == ixgbe_mac_X550 || 3889 hw->mac.type == ixgbe_mac_X550_vf) { 3890 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M; 3891 } 3892 if (hw->mac.type == ixgbe_mac_X550) { 3893 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G; 3894 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G; 3895 } 3896 3897 /* Driver-preferred Rx/Tx parameters */ 3898 dev_info->default_rxportconf.burst_size = 32; 3899 dev_info->default_txportconf.burst_size = 32; 3900 dev_info->default_rxportconf.nb_queues = 1; 3901 dev_info->default_txportconf.nb_queues = 1; 3902 dev_info->default_rxportconf.ring_size = 256; 3903 dev_info->default_txportconf.ring_size = 256; 3904 3905 return 0; 3906 } 3907 3908 static const uint32_t * 3909 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 3910 { 3911 static const uint32_t ptypes[] = { 3912 /* For non-vec functions, 3913 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 3914 * for vec functions, 3915 * refers to _recv_raw_pkts_vec(). 
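	 *
	 * Illustrative usage sketch (not part of the driver): an
	 * application can query this list with, e.g.,
	 *
	 *	uint32_t ptypes_buf[16];
	 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
	 *			RTE_PTYPE_ALL_MASK, ptypes_buf, 16);
	 *
	 * where "port_id" is an assumed ixgbe port; n is the number of
	 * supported packet types, or negative on error.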
3916 */ 3917 RTE_PTYPE_L2_ETHER, 3918 RTE_PTYPE_L3_IPV4, 3919 RTE_PTYPE_L3_IPV4_EXT, 3920 RTE_PTYPE_L3_IPV6, 3921 RTE_PTYPE_L3_IPV6_EXT, 3922 RTE_PTYPE_L4_SCTP, 3923 RTE_PTYPE_L4_TCP, 3924 RTE_PTYPE_L4_UDP, 3925 RTE_PTYPE_TUNNEL_IP, 3926 RTE_PTYPE_INNER_L3_IPV6, 3927 RTE_PTYPE_INNER_L3_IPV6_EXT, 3928 RTE_PTYPE_INNER_L4_TCP, 3929 RTE_PTYPE_INNER_L4_UDP, 3930 RTE_PTYPE_UNKNOWN 3931 }; 3932 3933 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 3934 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 3935 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 3936 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) 3937 return ptypes; 3938 3939 #if defined(RTE_ARCH_X86) || defined(__ARM_NEON) 3940 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || 3941 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) 3942 return ptypes; 3943 #endif 3944 return NULL; 3945 } 3946 3947 static int 3948 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 3949 struct rte_eth_dev_info *dev_info) 3950 { 3951 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3952 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3953 3954 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3955 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3956 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 3957 dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ 3958 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3959 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3960 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3961 dev_info->max_vfs = pci_dev->max_vfs; 3962 if (hw->mac.type == ixgbe_mac_82598EB) 3963 dev_info->max_vmdq_pools = RTE_ETH_16_POOLS; 3964 else 3965 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; 3966 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3967 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3968 dev_info->rx_queue_offload_capa); 3969 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3970 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3971 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3972 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3973 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3974 3975 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3976 .rx_thresh = { 3977 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3978 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3979 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3980 }, 3981 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3982 .rx_drop_en = 0, 3983 .offloads = 0, 3984 }; 3985 3986 dev_info->default_txconf = (struct rte_eth_txconf) { 3987 .tx_thresh = { 3988 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3989 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3990 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3991 }, 3992 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3993 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3994 .offloads = 0, 3995 }; 3996 3997 dev_info->rx_desc_lim = rx_desc_lim; 3998 dev_info->tx_desc_lim = tx_desc_lim; 3999 4000 return 0; 4001 } 4002 4003 static int 4004 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 4005 bool *link_up, int wait_to_complete) 4006 { 4007 struct ixgbe_adapter *adapter = container_of(hw, 4008 struct ixgbe_adapter, hw); 4009 struct ixgbe_mbx_info *mbx = &hw->mbx; 4010 struct ixgbe_mac_info *mac = &hw->mac; 4011 uint32_t links_reg, in_msg; 4012 int ret_val = 0; 4013 4014 /* If we were hit with a reset drop the link */ 4015 if (!mbx->ops.check_for_rst(hw, 0) || 
			!mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down, there is no point in checking whether the PF is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599, it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
		int i;

		for (i = 0; i < 5; i++) {
			rte_delay_us(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type == ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* this field is reserved in MACs older than X550 */
		if (hw->mac.type >= ixgbe_mac_X550)
			*speed = IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) {
		if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
			mac->get_link_status = true;
		else
			mac->get_link_status = false;

		goto out;
	}

	/* if the read failed, it could just be a mailbox collision; best to
	 * wait until we are called again rather than report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1, 0))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS; if it is a NACK, we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			mac->get_link_status = false;
		goto out;
	}

	/* the PF is talking; if we timed out in the past, we should reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above, then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}

/*
 * If @timeout_ms is 0, this function does not return until link setup is
 * complete. It returns 1 on completion and 0 on timeout.
 */
static int
ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms)
{
#define WARNING_TIMEOUT    9000 /* 9s in total */
	struct ixgbe_adapter *ad = dev->data->dev_private;
	uint32_t timeout = timeout_ms ?
			   timeout_ms : WARNING_TIMEOUT;

	while (rte_atomic32_read(&ad->link_thread_running)) {
		msec_delay(1);
		timeout--;

		if (timeout_ms) {
			if (!timeout)
				return 0;
		} else if (!timeout) {
			/* do not return until link setup completes */
			timeout = WARNING_TIMEOUT;
			PMD_DRV_LOG(ERR, "IXGBE link thread is taking too long to complete!");
		}
	}

	return 1;
}

static void *
ixgbe_dev_setup_link_thread_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ixgbe_adapter *ad = dev->data->dev_private;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	u32 speed;
	bool autoneg = false;

	pthread_detach(pthread_self());
	speed = hw->phy.autoneg_advertised;
	if (!speed)
		ixgbe_get_link_capabilities(hw, &speed, &autoneg);

	ixgbe_setup_link(hw, speed, true);

	intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
	rte_atomic32_clear(&ad->link_thread_running);
	return NULL;
}

/*
 * In a FreeBSD environment, nic_uio drivers do not support interrupts, so
 * rte_intr_callback_register() will fail to register interrupts and we
 * cannot move the link status from down to up via an interrupt callback.
 * Instead we must wait for the controller to acquire link when ports start.
 * It returns 0 on link up.
 */
static int
ixgbe_wait_for_link_up(struct ixgbe_hw *hw)
{
#ifdef RTE_EXEC_ENV_FREEBSD
	int err, i;
	bool link_up = false;
	uint32_t speed = 0;
	const int nb_iter = 25;

	for (i = 0; i < nb_iter; i++) {
		err = ixgbe_check_link(hw, &speed, &link_up, 0);
		if (err)
			return err;
		if (link_up)
			return 0;
		msec_delay(200);
	}

	return 0;
#else
	RTE_SET_USED(hw);
	return 0;
#endif
}

/* return 0 means link status changed, -1 means not changed */
int
ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete, int vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *ad = dev->data->dev_private;
	struct rte_eth_link link;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	bool link_up;
	int diag;
	int wait = 1;
	u32 esdp_reg;

	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			RTE_ETH_LINK_SPEED_FIXED);

	hw->mac.get_link_status = true;

	if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	/* BSD has no interrupt mechanism, so force NIC status synchronization.
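	 *
	 * For reference, a portable polling sketch an application could use
	 * while waiting for link ("port_id" is an assumption):
	 *
	 *	struct rte_eth_link lnk;
	 *	if (rte_eth_link_get_nowait(port_id, &lnk) == 0 &&
	 *	    lnk.link_status == RTE_ETH_LINK_UP)
	 *		printf("link up at %u Mbps\n", lnk.link_speed);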
*/ 4224 #ifdef RTE_EXEC_ENV_FREEBSD 4225 wait = 1; 4226 #endif 4227 4228 if (vf) 4229 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); 4230 else 4231 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); 4232 4233 if (diag != 0) { 4234 link.link_speed = RTE_ETH_SPEED_NUM_100M; 4235 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 4236 return rte_eth_linkstatus_set(dev, &link); 4237 } 4238 4239 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4240 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 4241 if ((esdp_reg & IXGBE_ESDP_SDP3)) 4242 link_up = 0; 4243 } 4244 4245 if (link_up == 0) { 4246 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4247 ixgbe_dev_wait_setup_link_complete(dev, 0); 4248 if (rte_atomic32_test_and_set(&ad->link_thread_running)) { 4249 /* To avoid race condition between threads, set 4250 * the IXGBE_FLAG_NEED_LINK_CONFIG flag only 4251 * when there is no link thread running. 4252 */ 4253 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 4254 if (rte_ctrl_thread_create(&ad->link_thread_tid, 4255 "ixgbe-link-handler", 4256 NULL, 4257 ixgbe_dev_setup_link_thread_handler, 4258 dev) < 0) { 4259 PMD_DRV_LOG(ERR, 4260 "Create link thread failed!"); 4261 rte_atomic32_clear(&ad->link_thread_running); 4262 } 4263 } else { 4264 PMD_DRV_LOG(ERR, 4265 "Other link thread is running now!"); 4266 } 4267 } 4268 return rte_eth_linkstatus_set(dev, &link); 4269 } 4270 4271 link.link_status = RTE_ETH_LINK_UP; 4272 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 4273 4274 switch (link_speed) { 4275 default: 4276 case IXGBE_LINK_SPEED_UNKNOWN: 4277 link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; 4278 break; 4279 4280 case IXGBE_LINK_SPEED_10_FULL: 4281 link.link_speed = RTE_ETH_SPEED_NUM_10M; 4282 break; 4283 4284 case IXGBE_LINK_SPEED_100_FULL: 4285 link.link_speed = RTE_ETH_SPEED_NUM_100M; 4286 break; 4287 4288 case IXGBE_LINK_SPEED_1GB_FULL: 4289 link.link_speed = RTE_ETH_SPEED_NUM_1G; 4290 break; 4291 4292 case IXGBE_LINK_SPEED_2_5GB_FULL: 4293 link.link_speed = RTE_ETH_SPEED_NUM_2_5G; 4294 break; 4295 4296 case IXGBE_LINK_SPEED_5GB_FULL: 4297 link.link_speed = RTE_ETH_SPEED_NUM_5G; 4298 break; 4299 4300 case IXGBE_LINK_SPEED_10GB_FULL: 4301 link.link_speed = RTE_ETH_SPEED_NUM_10G; 4302 break; 4303 } 4304 4305 return rte_eth_linkstatus_set(dev, &link); 4306 } 4307 4308 static int 4309 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4310 { 4311 return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); 4312 } 4313 4314 static int 4315 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4316 { 4317 return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); 4318 } 4319 4320 static int 4321 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 4322 { 4323 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4324 uint32_t fctrl; 4325 4326 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4327 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4328 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4329 4330 return 0; 4331 } 4332 4333 static int 4334 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 4335 { 4336 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4337 uint32_t fctrl; 4338 4339 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4340 fctrl &= (~IXGBE_FCTRL_UPE); 4341 if (dev->data->all_multicast == 1) 4342 fctrl |= IXGBE_FCTRL_MPE; 4343 else 4344 fctrl &= (~IXGBE_FCTRL_MPE); 4345 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4346 4347 return 0; 4348 } 4349 4350 static int 4351 ixgbe_dev_allmulticast_enable(struct 
rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	return 0;
}

static int
ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	return 0;
}

/**
 * It sets or clears the link status change (LSC) cause in the interrupt
 * mask. It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	ixgbe_dev_link_status_print(dev);
	if (on)
		intr->mask |= IXGBE_EICR_LSC;
	else
		intr->mask &= ~IXGBE_EICR_LSC;

	return 0;
}

/**
 * It enables the Rx/Tx queue interrupt causes in the interrupt mask.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= IXGBE_EICR_RTX_QUEUE;

	return 0;
}

/**
 * It enables the MACsec (LinkSec) interrupt cause in the interrupt mask.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= IXGBE_EICR_LINKSEC;

	return 0;
}

/*
 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
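 *
 * Related usage sketch (illustrative only, not part of this file): the
 * LSC flag set here is what ultimately drives application callbacks
 * registered via the ethdev API, e.g.
 *
 *	static int
 *	lsc_event_cb(uint16_t port, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d (link changed)\n", port, event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id,
 *			RTE_ETH_EVENT_INTR_LSC, lsc_event_cb, NULL);
 *
 * "port_id" is an assumed application variable.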
4460 */ 4461 static int 4462 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 4463 { 4464 uint32_t eicr; 4465 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4466 struct ixgbe_interrupt *intr = 4467 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4468 4469 /* clear all cause mask */ 4470 ixgbe_disable_intr(hw); 4471 4472 /* read-on-clear nic registers here */ 4473 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4474 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 4475 4476 intr->flags = 0; 4477 4478 /* set flag for async link update */ 4479 if (eicr & IXGBE_EICR_LSC) 4480 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4481 4482 if (eicr & IXGBE_EICR_MAILBOX) 4483 intr->flags |= IXGBE_FLAG_MAILBOX; 4484 4485 if (eicr & IXGBE_EICR_LINKSEC) 4486 intr->flags |= IXGBE_FLAG_MACSEC; 4487 4488 if (hw->mac.type == ixgbe_mac_X550EM_x && 4489 hw->phy.type == ixgbe_phy_x550em_ext_t && 4490 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) 4491 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; 4492 4493 return 0; 4494 } 4495 4496 /** 4497 * It gets and then prints the link status. 4498 * 4499 * @param dev 4500 * Pointer to struct rte_eth_dev. 4501 * 4502 * @return 4503 * - On success, zero. 4504 * - On failure, a negative value. 4505 */ 4506 static void 4507 ixgbe_dev_link_status_print(struct rte_eth_dev *dev) 4508 { 4509 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4510 struct rte_eth_link link; 4511 4512 rte_eth_linkstatus_get(dev, &link); 4513 4514 if (link.link_status) { 4515 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 4516 (int)(dev->data->port_id), 4517 (unsigned)link.link_speed, 4518 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? 4519 "full-duplex" : "half-duplex"); 4520 } else { 4521 PMD_INIT_LOG(INFO, " Port %d: Link Down", 4522 (int)(dev->data->port_id)); 4523 } 4524 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 4525 pci_dev->addr.domain, 4526 pci_dev->addr.bus, 4527 pci_dev->addr.devid, 4528 pci_dev->addr.function); 4529 } 4530 4531 /* 4532 * It executes link_update after knowing an interrupt occurred. 4533 * 4534 * @param dev 4535 * Pointer to struct rte_eth_dev. 4536 * 4537 * @return 4538 * - On success, zero. 4539 * - On failure, a negative value. 
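 *
 * Background (sketch, with a hypothetical callback name): the delayed
 * handling armed in this function uses the EAL alarm API, whose general
 * shape is
 *
 *	static void my_alarm_cb(void *arg);
 *	rte_eal_alarm_set(1000000, my_alarm_cb, arg);
 *
 * where the first argument is in microseconds (1000000 = 1 s); here the
 * driver passes ixgbe_dev_interrupt_delayed_handler() and a timeout
 * derived from IXGBE_LINK_UP/DOWN_CHECK_TIMEOUT.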
 */
static int
ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int64_t timeout;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & IXGBE_FLAG_MAILBOX) {
		ixgbe_pf_mbx_process(dev);
		intr->flags &= ~IXGBE_FLAG_MAILBOX;
	}

	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
		ixgbe_handle_lasi(hw);
		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before the update, to predict the
		 * transition below
		 */
		rte_eth_linkstatus_get(dev, &link);

		ixgbe_dev_link_update(dev, 0);

		/* link was down: likely to come up */
		if (!link.link_status)
			/* handle it 1 sec later, waiting for it to stabilize */
			timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
		/* link was up: likely to go down */
		else
			/* handle it 4 sec later, waiting for it to stabilize */
			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;

		ixgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
			PMD_DRV_LOG(ERR, "Error setting alarm");
		else {
			/* remember original mask */
			intr->mask_original = intr->mask;
			/* only disable lsc interrupt */
			intr->mask &= ~IXGBE_EIMS_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ixgbe_enable_intr(dev);

	return 0;
}

/**
 * Interrupt handler which shall be registered as the alarm callback for
 * delayed handling of a specific interrupt, to wait for a stable NIC state.
 * As the interrupt state of ixgbe is not stable right after the link goes
 * down, it needs to wait 4 seconds to get a stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t eicr;

	ixgbe_disable_intr(hw);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (eicr & IXGBE_EICR_MAILBOX)
		ixgbe_pf_mbx_process(dev);

	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
		ixgbe_handle_lasi(hw);
		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		ixgbe_dev_link_update(dev, 0);
		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		ixgbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}

	if (intr->flags & IXGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, NULL);
		intr->flags &= ~IXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask = intr->mask_original;
	intr->mask_original = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	ixgbe_enable_intr(dev);
	rte_intr_ack(intr_handle);
}

/**
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ixgbe_dev_interrupt_get_status(dev);
	ixgbe_dev_interrupt_action(dev);
}

static int
ixgbe_dev_led_on(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
}

static int
ixgbe_dev_led_off(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
}

static int
ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ixgbe_hw *hw;
	uint32_t mflcn_reg;
	uint32_t fccfg_reg;
	int rx_pause;
	int tx_pause;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water[0];
	fc_conf->low_water = hw->fc.low_water[0];
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;

	/*
	 * Return rx_pause status according to the actual setting of
	 * the MFLCN register.
	 */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	if (mflcn_reg & IXGBE_MFLCN_PMCF)
		fc_conf->mac_ctrl_frame_fwd = 1;
	else
		fc_conf->mac_ctrl_frame_fwd = 0;

	if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
		rx_pause = 1;
	else
		rx_pause = 0;

	/*
	 * Return tx_pause status according to the actual setting of
	 * the FCCFG register.
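	 *
	 * Caller-side sketch (illustrative, not part of this file): the
	 * values assembled here are reported to applications through
	 *
	 *	struct rte_eth_fc_conf fc;
	 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0)
	 *		printf("fc mode=%d\n", fc.mode);
	 *
	 * with "port_id" assumed to be a valid ixgbe port.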
4729 */ 4730 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4731 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 4732 tx_pause = 1; 4733 else 4734 tx_pause = 0; 4735 4736 if (rx_pause && tx_pause) 4737 fc_conf->mode = RTE_ETH_FC_FULL; 4738 else if (rx_pause) 4739 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 4740 else if (tx_pause) 4741 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 4742 else 4743 fc_conf->mode = RTE_ETH_FC_NONE; 4744 4745 return 0; 4746 } 4747 4748 static int 4749 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4750 { 4751 struct ixgbe_hw *hw; 4752 struct ixgbe_adapter *adapter = dev->data->dev_private; 4753 int err; 4754 uint32_t rx_buf_size; 4755 uint32_t max_high_water; 4756 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4757 ixgbe_fc_none, 4758 ixgbe_fc_rx_pause, 4759 ixgbe_fc_tx_pause, 4760 ixgbe_fc_full 4761 }; 4762 4763 PMD_INIT_FUNC_TRACE(); 4764 4765 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4766 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 4767 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4768 4769 /* 4770 * At least reserve one Ethernet frame for watermark 4771 * high_water/low_water in kilo bytes for ixgbe 4772 */ 4773 max_high_water = (rx_buf_size - 4774 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4775 if ((fc_conf->high_water > max_high_water) || 4776 (fc_conf->high_water < fc_conf->low_water)) { 4777 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4778 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4779 return -EINVAL; 4780 } 4781 4782 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 4783 hw->fc.pause_time = fc_conf->pause_time; 4784 hw->fc.high_water[0] = fc_conf->high_water; 4785 hw->fc.low_water[0] = fc_conf->low_water; 4786 hw->fc.send_xon = fc_conf->send_xon; 4787 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 4788 adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 4789 4790 err = ixgbe_flow_ctrl_enable(dev, hw); 4791 if (err < 0) { 4792 PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err); 4793 return -EIO; 4794 } 4795 return err; 4796 } 4797 4798 /** 4799 * ixgbe_pfc_enable_generic - Enable flow control 4800 * @hw: pointer to hardware structure 4801 * @tc_num: traffic class number 4802 * Enable flow control according to the current settings. 
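 *
 * For context, a hedged application-side sketch (the field values and
 * "port_id" are illustrative assumptions, not recommendations):
 *
 *	struct rte_eth_pfc_conf pfc = {
 *		.fc = {
 *			.mode = RTE_ETH_FC_FULL,
 *			.high_water = 0x78,
 *			.low_water = 0x3c,
 *			.pause_time = 0xff0,
 *		},
 *		.priority = 0,
 *	};
 *	rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc);
 *
 * That call reaches this helper through ixgbe_priority_flow_ctrl_set().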
 */
static int
ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
{
	int ret_val = 0;
	uint32_t mflcn_reg, fccfg_reg;
	uint32_t reg;
	uint32_t fcrtl, fcrth;
	uint8_t i;
	uint8_t nb_rx_en;

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		/* High/Low water marks cannot be 0 */
		if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
			goto out;
		}

		if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
			goto out;
		}
	}
	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * If more than one Rx priority flow control queue is
		 * enabled, Tx pause cannot be disabled.
		 */
		nb_rx_en = 0;
		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			if (reg & IXGBE_FCRTH_FCEN)
				nb_rx_en++;
		}
		if (nb_rx_en > 1)
			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RPFCE;
		/*
		 * If more than one Rx priority flow control queue is
		 * enabled, Tx pause cannot be disabled.
		 */
		nb_rx_en = 0;
		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			if (reg & IXGBE_FCRTH_FCEN)
				nb_rx_en++;
		}
		if (nb_rx_en > 1)
			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RPFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	default:
		PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON.
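	 *
	 * The watermarks are kept in kilobytes in hw->fc, while FCRTL/FCRTH
	 * are programmed in bytes, so "value << 10" converts KB to bytes
	 * before the XON/enable bits are OR-ed in. Worked example: a high
	 * water mark of 0x80 KB becomes 0x80 << 10 = 0x20000 bytes in FCRTH.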
*/ 4907 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 4908 hw->fc.high_water[tc_num]) { 4909 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 4910 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 4911 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 4912 } else { 4913 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 4914 /* 4915 * In order to prevent Tx hangs when the internal Tx 4916 * switch is enabled we must set the high water mark 4917 * to the maximum FCRTH value. This allows the Tx 4918 * switch to function even under heavy Rx workloads. 4919 */ 4920 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 4921 } 4922 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 4923 4924 /* Configure pause time (2 TCs per register) */ 4925 reg = hw->fc.pause_time * 0x00010001; 4926 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 4927 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 4928 4929 /* Configure flow control refresh threshold value */ 4930 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 4931 4932 out: 4933 return ret_val; 4934 } 4935 4936 static int 4937 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 4938 { 4939 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4940 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 4941 4942 if (hw->mac.type != ixgbe_mac_82598EB) { 4943 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 4944 } 4945 return ret_val; 4946 } 4947 4948 static int 4949 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 4950 { 4951 int err; 4952 uint32_t rx_buf_size; 4953 uint32_t max_high_water; 4954 uint8_t tc_num; 4955 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 4956 struct ixgbe_hw *hw = 4957 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4958 struct ixgbe_dcb_config *dcb_config = 4959 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 4960 4961 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4962 ixgbe_fc_none, 4963 ixgbe_fc_rx_pause, 4964 ixgbe_fc_tx_pause, 4965 ixgbe_fc_full 4966 }; 4967 4968 PMD_INIT_FUNC_TRACE(); 4969 4970 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 4971 tc_num = map[pfc_conf->priority]; 4972 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 4973 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4974 /* 4975 * At least reserve one Ethernet frame for watermark 4976 * high_water/low_water in kilo bytes for ixgbe 4977 */ 4978 max_high_water = (rx_buf_size - 4979 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4980 if ((pfc_conf->fc.high_water > max_high_water) || 4981 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 4982 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4983 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4984 return -EINVAL; 4985 } 4986 4987 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 4988 hw->fc.pause_time = pfc_conf->fc.pause_time; 4989 hw->fc.send_xon = pfc_conf->fc.send_xon; 4990 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 4991 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 4992 4993 err = ixgbe_dcb_pfc_enable(dev, tc_num); 4994 4995 /* Not negotiated is not an error case */ 4996 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 4997 return 0; 4998 4999 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 5000 return -EIO; 5001 } 5002 5003 static int 5004 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 5005 struct rte_eth_rss_reta_entry64 *reta_conf, 5006 uint16_t 
reta_size) 5007 { 5008 uint16_t i, sp_reta_size; 5009 uint8_t j, mask; 5010 uint32_t reta, r; 5011 uint16_t idx, shift; 5012 struct ixgbe_adapter *adapter = dev->data->dev_private; 5013 struct rte_eth_dev_data *dev_data = dev->data; 5014 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5015 uint32_t reta_reg; 5016 5017 PMD_INIT_FUNC_TRACE(); 5018 5019 if (!dev_data->dev_started) { 5020 PMD_DRV_LOG(ERR, 5021 "port %d must be started before rss reta update", 5022 dev_data->port_id); 5023 return -EIO; 5024 } 5025 5026 if (!ixgbe_rss_update_sp(hw->mac.type)) { 5027 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 5028 "NIC."); 5029 return -ENOTSUP; 5030 } 5031 5032 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5033 if (reta_size != sp_reta_size) { 5034 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5035 "(%d) doesn't match the number hardware can supported " 5036 "(%d)", reta_size, sp_reta_size); 5037 return -EINVAL; 5038 } 5039 5040 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5041 idx = i / RTE_ETH_RETA_GROUP_SIZE; 5042 shift = i % RTE_ETH_RETA_GROUP_SIZE; 5043 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5044 IXGBE_4_BIT_MASK); 5045 if (!mask) 5046 continue; 5047 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5048 if (mask == IXGBE_4_BIT_MASK) 5049 r = 0; 5050 else 5051 r = IXGBE_READ_REG(hw, reta_reg); 5052 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5053 if (mask & (0x1 << j)) 5054 reta |= reta_conf[idx].reta[shift + j] << 5055 (CHAR_BIT * j); 5056 else 5057 reta |= r & (IXGBE_8_BIT_MASK << 5058 (CHAR_BIT * j)); 5059 } 5060 IXGBE_WRITE_REG(hw, reta_reg, reta); 5061 } 5062 adapter->rss_reta_updated = 1; 5063 5064 return 0; 5065 } 5066 5067 static int 5068 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 5069 struct rte_eth_rss_reta_entry64 *reta_conf, 5070 uint16_t reta_size) 5071 { 5072 uint16_t i, sp_reta_size; 5073 uint8_t j, mask; 5074 uint32_t reta; 5075 uint16_t idx, shift; 5076 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5077 uint32_t reta_reg; 5078 5079 PMD_INIT_FUNC_TRACE(); 5080 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5081 if (reta_size != sp_reta_size) { 5082 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5083 "(%d) doesn't match the number hardware can supported " 5084 "(%d)", reta_size, sp_reta_size); 5085 return -EINVAL; 5086 } 5087 5088 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5089 idx = i / RTE_ETH_RETA_GROUP_SIZE; 5090 shift = i % RTE_ETH_RETA_GROUP_SIZE; 5091 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5092 IXGBE_4_BIT_MASK); 5093 if (!mask) 5094 continue; 5095 5096 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5097 reta = IXGBE_READ_REG(hw, reta_reg); 5098 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5099 if (mask & (0x1 << j)) 5100 reta_conf[idx].reta[shift + j] = 5101 ((reta >> (CHAR_BIT * j)) & 5102 IXGBE_8_BIT_MASK); 5103 } 5104 } 5105 5106 return 0; 5107 } 5108 5109 static int 5110 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 5111 uint32_t index, uint32_t pool) 5112 { 5113 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5114 uint32_t enable_addr = 1; 5115 5116 return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, 5117 pool, enable_addr); 5118 } 5119 5120 static void 5121 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 5122 { 5123 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5124 5125 ixgbe_clear_rar(hw, index); 5126 } 5127 5128 static int 
5129 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 5130 { 5131 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5132 5133 ixgbe_remove_rar(dev, 0); 5134 ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); 5135 5136 return 0; 5137 } 5138 5139 static bool 5140 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 5141 { 5142 if (strcmp(dev->device->driver->name, drv->driver.name)) 5143 return false; 5144 5145 return true; 5146 } 5147 5148 bool 5149 is_ixgbe_supported(struct rte_eth_dev *dev) 5150 { 5151 return is_device_supported(dev, &rte_ixgbe_pmd); 5152 } 5153 5154 static int 5155 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 5156 { 5157 uint32_t hlreg0; 5158 uint32_t maxfrs; 5159 struct ixgbe_hw *hw; 5160 struct rte_eth_dev_info dev_info; 5161 uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; 5162 int ret; 5163 5164 ret = ixgbe_dev_info_get(dev, &dev_info); 5165 if (ret != 0) 5166 return ret; 5167 5168 /* check that mtu is within the allowed range */ 5169 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 5170 return -EINVAL; 5171 5172 /* If device is started, refuse mtu that requires the support of 5173 * scattered packets when this feature has not been enabled before. 5174 */ 5175 if (dev->data->dev_started && !dev->data->scattered_rx && 5176 frame_size + 2 * IXGBE_VLAN_TAG_SIZE > 5177 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 5178 PMD_INIT_LOG(ERR, "Stop port first."); 5179 return -EINVAL; 5180 } 5181 5182 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5183 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 5184 5185 /* switch to jumbo mode if needed */ 5186 if (mtu > RTE_ETHER_MTU) 5187 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 5188 else 5189 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 5190 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 5191 5192 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 5193 maxfrs &= 0x0000FFFF; 5194 maxfrs |= (frame_size << 16); 5195 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 5196 5197 return 0; 5198 } 5199 5200 /* 5201 * Virtual Function operations 5202 */ 5203 static void 5204 ixgbevf_intr_disable(struct rte_eth_dev *dev) 5205 { 5206 struct ixgbe_interrupt *intr = 5207 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5208 struct ixgbe_hw *hw = 5209 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5210 5211 PMD_INIT_FUNC_TRACE(); 5212 5213 /* Clear interrupt mask to stop from interrupts being generated */ 5214 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 5215 5216 IXGBE_WRITE_FLUSH(hw); 5217 5218 /* Clear mask value. */ 5219 intr->mask = 0; 5220 } 5221 5222 static void 5223 ixgbevf_intr_enable(struct rte_eth_dev *dev) 5224 { 5225 struct ixgbe_interrupt *intr = 5226 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5227 struct ixgbe_hw *hw = 5228 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5229 5230 PMD_INIT_FUNC_TRACE(); 5231 5232 /* VF enable interrupt autoclean */ 5233 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 5234 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 5235 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 5236 5237 IXGBE_WRITE_FLUSH(hw); 5238 5239 /* Save IXGBE_VTEIMS value to mask. 
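	 *
	 * The cached mask records what was written to VTEIMS so later
	 * interrupt paths can consult it. From the application side, the
	 * per-queue Rx interrupts that depend on this setup are driven via
	 * (sketch; "port_id" and "qid" are assumptions):
	 *
	 *	rte_eth_dev_rx_intr_enable(port_id, qid);
	 *	(wait on the interrupt event fd, e.g. with epoll)
	 *	rte_eth_dev_rx_intr_disable(port_id, qid);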
*/ 5240 intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; 5241 } 5242 5243 static int 5244 ixgbevf_dev_configure(struct rte_eth_dev *dev) 5245 { 5246 struct rte_eth_conf *conf = &dev->data->dev_conf; 5247 struct ixgbe_adapter *adapter = dev->data->dev_private; 5248 5249 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 5250 dev->data->port_id); 5251 5252 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 5253 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 5254 5255 /* 5256 * VF has no ability to enable/disable HW CRC 5257 * Keep the persistent behavior the same as Host PF 5258 */ 5259 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 5260 if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) { 5261 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 5262 conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC; 5263 } 5264 #else 5265 if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) { 5266 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 5267 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; 5268 } 5269 #endif 5270 5271 /* 5272 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 5273 * allocation or vector Rx preconditions we will reset it. 5274 */ 5275 adapter->rx_bulk_alloc_allowed = true; 5276 adapter->rx_vec_allowed = true; 5277 5278 return 0; 5279 } 5280 5281 static int 5282 ixgbevf_dev_start(struct rte_eth_dev *dev) 5283 { 5284 struct ixgbe_hw *hw = 5285 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5286 uint32_t intr_vector = 0; 5287 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5288 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5289 5290 int err, mask = 0; 5291 5292 PMD_INIT_FUNC_TRACE(); 5293 5294 /* Stop the link setup handler before resetting the HW. */ 5295 ixgbe_dev_wait_setup_link_complete(dev, 0); 5296 5297 err = hw->mac.ops.reset_hw(hw); 5298 5299 /** 5300 * In this case, reuses the MAC address assigned by VF 5301 * initialization. 5302 */ 5303 if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) { 5304 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); 5305 return err; 5306 } 5307 5308 hw->mac.get_link_status = true; 5309 5310 /* negotiate mailbox API version to use with the PF. 
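 The negotiation is highest-first: the VF proposes an API revision and
 * falls back until the PF accepts one. A hedged sketch of the idea
 * (the authoritative revision list lives in ixgbevf_negotiate_api()
 * and base/ixgbe_mbx.h, so treat this as illustrative only):
 *
 *   static const enum ixgbe_pfvf_api_rev candidates[] = {
 *           ixgbe_mbox_api_13, ixgbe_mbox_api_12,
 *           ixgbe_mbox_api_11, ixgbe_mbox_api_10,
 *   };
 *   for (i = 0; i < RTE_DIM(candidates); i++)
 *           if (ixgbevf_negotiate_api_version(hw, candidates[i]) == 0)
 *                   break;
 *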
*/ 5311 ixgbevf_negotiate_api(hw); 5312 5313 ixgbevf_dev_tx_init(dev); 5314 5315 /* This can fail when allocating mbufs for descriptor rings */ 5316 err = ixgbevf_dev_rx_init(dev); 5317 if (err) { 5318 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); 5319 ixgbe_dev_clear_queues(dev); 5320 return err; 5321 } 5322 5323 /* Set vfta */ 5324 ixgbevf_set_vfta_all(dev, 1); 5325 5326 /* Set HW strip */ 5327 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | 5328 RTE_ETH_VLAN_EXTEND_MASK; 5329 err = ixgbevf_vlan_offload_config(dev, mask); 5330 if (err) { 5331 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); 5332 ixgbe_dev_clear_queues(dev); 5333 return err; 5334 } 5335 5336 ixgbevf_dev_rxtx_start(dev); 5337 5338 /* check and configure queue intr-vector mapping */ 5339 if (rte_intr_cap_multiple(intr_handle) && 5340 dev->data->dev_conf.intr_conf.rxq) { 5341 /* According to the datasheet, only vectors 0/1/2 can be used; 5342 * for now only one vector is used for the Rx queues 5343 */ 5344 intr_vector = 1; 5345 if (rte_intr_efd_enable(intr_handle, intr_vector)) { 5346 ixgbe_dev_clear_queues(dev); 5347 return -1; 5348 } 5349 } 5350 5351 if (rte_intr_dp_is_en(intr_handle)) { 5352 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 5353 dev->data->nb_rx_queues)) { 5354 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 5355 " intr_vec", dev->data->nb_rx_queues); 5356 ixgbe_dev_clear_queues(dev); 5357 return -ENOMEM; 5358 } 5359 } 5360 ixgbevf_configure_msix(dev); 5361 5362 /* When a VF port is bound to VFIO-PCI, only the miscellaneous interrupt 5363 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init(). 5364 * If the previous VFIO interrupt mapping set in eth_ixgbevf_dev_init() 5365 * is not cleared, it will fail when the following rte_intr_enable() tries 5366 * to map Rx queue interrupts to other VFIO vectors. 5367 * So clear uio/vfio intr/eventfd first to avoid failure. 5368 */ 5369 rte_intr_disable(intr_handle); 5370 5371 rte_intr_enable(intr_handle); 5372 5373 /* Re-enable interrupt for VF */ 5374 ixgbevf_intr_enable(dev); 5375 5376 /* 5377 * Update link status right before return, because it may 5378 * start link configuration process in a separate thread.
5379 */ 5380 ixgbevf_dev_link_update(dev, 0); 5381 5382 hw->adapter_stopped = false; 5383 5384 return 0; 5385 } 5386 5387 static int 5388 ixgbevf_dev_stop(struct rte_eth_dev *dev) 5389 { 5390 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5391 struct ixgbe_adapter *adapter = dev->data->dev_private; 5392 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5393 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5394 5395 if (hw->adapter_stopped) 5396 return 0; 5397 5398 PMD_INIT_FUNC_TRACE(); 5399 5400 ixgbe_dev_wait_setup_link_complete(dev, 0); 5401 5402 ixgbevf_intr_disable(dev); 5403 5404 dev->data->dev_started = 0; 5405 hw->adapter_stopped = 1; 5406 ixgbe_stop_adapter(hw); 5407 5408 /* 5409 * Clear what we set, but we still keep shadow_vfta to 5410 * restore after the device starts 5411 */ 5412 ixgbevf_set_vfta_all(dev, 0); 5413 5414 /* Clear stored conf */ 5415 dev->data->scattered_rx = 0; 5416 5417 ixgbe_dev_clear_queues(dev); 5418 5419 /* Clean datapath event and queue/vec mapping */ 5420 rte_intr_efd_disable(intr_handle); 5421 rte_intr_vec_list_free(intr_handle); 5422 5423 adapter->rss_reta_updated = 0; 5424 5425 return 0; 5426 } 5427 5428 static int 5429 ixgbevf_dev_close(struct rte_eth_dev *dev) 5430 { 5431 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5432 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5433 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5434 int ret; 5435 5436 PMD_INIT_FUNC_TRACE(); 5437 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5438 return 0; 5439 5440 ixgbe_reset_hw(hw); 5441 5442 ret = ixgbevf_dev_stop(dev); 5443 5444 ixgbe_dev_free_queues(dev); 5445 5446 /** 5447 * Remove the VF MAC address to ensure 5448 * that the VF traffic goes to the PF 5449 * after stop, close and detach of the VF 5450 **/ 5451 ixgbevf_remove_mac_addr(dev, 0); 5452 5453 rte_intr_disable(intr_handle); 5454 rte_intr_callback_unregister(intr_handle, 5455 ixgbevf_dev_interrupt_handler, dev); 5456 5457 return ret; 5458 } 5459 5460 /* 5461 * Reset VF device 5462 */ 5463 static int 5464 ixgbevf_dev_reset(struct rte_eth_dev *dev) 5465 { 5466 int ret; 5467 5468 ret = eth_ixgbevf_dev_uninit(dev); 5469 if (ret) 5470 return ret; 5471 5472 ret = eth_ixgbevf_dev_init(dev); 5473 5474 return ret; 5475 } 5476 5477 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 5478 { 5479 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5480 struct ixgbe_vfta *shadow_vfta = 5481 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5482 int i = 0, j = 0, vfta = 0, mask = 1; 5483 5484 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 5485 vfta = shadow_vfta->vfta[i]; 5486 if (vfta) { 5487 mask = 1; 5488 for (j = 0; j < 32; j++) { 5489 if (vfta & mask) 5490 ixgbe_set_vfta(hw, (i<<5)+j, 0, 5491 on, false); 5492 mask <<= 1; 5493 } 5494 } 5495 } 5496 5497 } 5498 5499 static int 5500 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 5501 { 5502 struct ixgbe_hw *hw = 5503 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5504 struct ixgbe_vfta *shadow_vfta = 5505 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5506 uint32_t vid_idx = 0; 5507 uint32_t vid_bit = 0; 5508 int ret = 0; 5509 5510 PMD_INIT_FUNC_TRACE(); 5511 5512 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 5513 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 5514 if (ret) { 5515 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 5516 return ret; 5517 } 5518 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
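/* The shadow VFTA mirrors the hardware table: 4096 VLAN IDs packed as
 * 128 32-bit words, so bits [11:5] of the VLAN ID select the word and
 * bits [4:0] select the bit within it. As a worked example, vlan_id
 * 1234 lands in word 38 (1234 >> 5) at bit 18 (1234 & 0x1F). */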
5519 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 5520 5521 /* Save what we set and restore it after device reset */ 5522 if (on) 5523 shadow_vfta->vfta[vid_idx] |= vid_bit; 5524 else 5525 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 5526 5527 return 0; 5528 } 5529 5530 static void 5531 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 5532 { 5533 struct ixgbe_hw *hw = 5534 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5535 uint32_t ctrl; 5536 5537 PMD_INIT_FUNC_TRACE(); 5538 5539 if (queue >= hw->mac.max_rx_queues) 5540 return; 5541 5542 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 5543 if (on) 5544 ctrl |= IXGBE_RXDCTL_VME; 5545 else 5546 ctrl &= ~IXGBE_RXDCTL_VME; 5547 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 5548 5549 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 5550 } 5551 5552 static int 5553 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 5554 { 5555 struct ixgbe_rx_queue *rxq; 5556 uint16_t i; 5557 int on = 0; 5558 5559 /* The VF only supports the HW VLAN strip feature; other VLAN offloads are not supported */ 5560 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 5561 for (i = 0; i < dev->data->nb_rx_queues; i++) { 5562 rxq = dev->data->rx_queues[i]; 5563 on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 5564 ixgbevf_vlan_strip_queue_set(dev, i, on); 5565 } 5566 } 5567 5568 return 0; 5569 } 5570 5571 static int 5572 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 5573 { 5574 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 5575 5576 ixgbevf_vlan_offload_config(dev, mask); 5577 5578 return 0; 5579 } 5580 5581 int 5582 ixgbe_vt_check(struct ixgbe_hw *hw) 5583 { 5584 uint32_t reg_val; 5585 5586 /* if Virtualization Technology is enabled */ 5587 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 5588 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 5589 PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); 5590 return -1; 5591 } 5592 5593 return 0; 5594 } 5595 5596 static uint32_t 5597 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) 5598 { 5599 uint32_t vector = 0; 5600 5601 switch (hw->mac.mc_filter_type) { 5602 case 0: /* use bits [47:36] of the address */ 5603 vector = ((uc_addr->addr_bytes[4] >> 4) | 5604 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 5605 break; 5606 case 1: /* use bits [46:35] of the address */ 5607 vector = ((uc_addr->addr_bytes[4] >> 3) | 5608 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 5609 break; 5610 case 2: /* use bits [45:34] of the address */ 5611 vector = ((uc_addr->addr_bytes[4] >> 2) | 5612 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 5613 break; 5614 case 3: /* use bits [43:32] of the address */ 5615 vector = ((uc_addr->addr_bytes[4]) | 5616 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 5617 break; 5618 default: /* Invalid mc_filter_type */ 5619 break; 5620 } 5621 5622 /* the vector can only be 12 bits wide or the table boundary will be exceeded */ 5623 vector &= 0xFFF; 5624 return vector; 5625 } 5626 5627 static int 5628 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, 5629 struct rte_ether_addr *mac_addr, uint8_t on) 5630 { 5631 uint32_t vector; 5632 uint32_t uta_idx; 5633 uint32_t reg_val; 5634 uint32_t uta_shift; 5635 uint32_t rc; 5636 const uint32_t ixgbe_uta_idx_mask = 0x7F; 5637 const uint32_t ixgbe_uta_bit_shift = 5; 5638 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 5639 const uint32_t bit1 = 0x1; 5640 5641 struct ixgbe_hw *hw = 5642 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5643 struct ixgbe_uta_info *uta_info = 5644 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5645 5646 /*
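 Note how the 12-bit hash vector returned by ixgbe_uta_vector() is
 * split below: the upper seven bits select one of the 128 UTA
 * registers (uta_idx = vector >> 5) and the lower five bits select a
 * bit within it (uta_shift = vector & 0x1F). As a worked example,
 * vector 0x5A3 maps to UTA(45), bit 3.
 *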
The UTA table only exists on 82599 hardware and newer */ 5647 if (hw->mac.type < ixgbe_mac_82599EB) 5648 return -ENOTSUP; 5649 5650 vector = ixgbe_uta_vector(hw, mac_addr); 5651 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 5652 uta_shift = vector & ixgbe_uta_bit_mask; 5653 5654 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 5655 if (rc == on) 5656 return 0; 5657 5658 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 5659 if (on) { 5660 uta_info->uta_in_use++; 5661 reg_val |= (bit1 << uta_shift); 5662 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 5663 } else { 5664 uta_info->uta_in_use--; 5665 reg_val &= ~(bit1 << uta_shift); 5666 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 5667 } 5668 5669 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 5670 5671 if (uta_info->uta_in_use > 0) 5672 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 5673 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 5674 else 5675 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 5676 5677 return 0; 5678 } 5679 5680 static int 5681 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 5682 { 5683 int i; 5684 struct ixgbe_hw *hw = 5685 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5686 struct ixgbe_uta_info *uta_info = 5687 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5688 5689 /* The UTA table only exists on 82599 hardware and newer */ 5690 if (hw->mac.type < ixgbe_mac_82599EB) 5691 return -ENOTSUP; 5692 5693 if (on) { 5694 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5695 uta_info->uta_shadow[i] = ~0; 5696 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 5697 } 5698 } else { 5699 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5700 uta_info->uta_shadow[i] = 0; 5701 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 5702 } 5703 } 5704 return 0; 5705 5706 } 5707 5708 uint32_t 5709 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 5710 { 5711 uint32_t new_val = orig_val; 5712 5713 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) 5714 new_val |= IXGBE_VMOLR_AUPE; 5715 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC) 5716 new_val |= IXGBE_VMOLR_ROMPE; 5717 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC) 5718 new_val |= IXGBE_VMOLR_ROPE; 5719 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST) 5720 new_val |= IXGBE_VMOLR_BAM; 5721 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST) 5722 new_val |= IXGBE_VMOLR_MPE; 5723 5724 return new_val; 5725 } 5726 5727 static int 5728 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5729 { 5730 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5731 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5732 struct ixgbe_interrupt *intr = 5733 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5734 struct ixgbe_hw *hw = 5735 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5736 uint32_t vec = IXGBE_MISC_VEC_ID; 5737 5738 if (rte_intr_allow_others(intr_handle)) 5739 vec = IXGBE_RX_VEC_START; 5740 intr->mask |= (1 << vec); 5741 RTE_SET_USED(queue_id); 5742 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5743 5744 rte_intr_ack(intr_handle); 5745 5746 return 0; 5747 } 5748 5749 static int 5750 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5751 { 5752 struct ixgbe_interrupt *intr = 5753 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5754 struct ixgbe_hw *hw = 5755 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5756 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5757 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5758 uint32_t vec = 
IXGBE_MISC_VEC_ID; 5759 5760 if (rte_intr_allow_others(intr_handle)) 5761 vec = IXGBE_RX_VEC_START; 5762 intr->mask &= ~(1 << vec); 5763 RTE_SET_USED(queue_id); 5764 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5765 5766 return 0; 5767 } 5768 5769 static int 5770 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5771 { 5772 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5773 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5774 uint32_t mask; 5775 struct ixgbe_hw *hw = 5776 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5777 struct ixgbe_interrupt *intr = 5778 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5779 5780 if (queue_id < 16) { 5781 ixgbe_disable_intr(hw); 5782 intr->mask |= (1 << queue_id); 5783 ixgbe_enable_intr(dev); 5784 } else if (queue_id < 32) { 5785 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5786 mask &= (1 << queue_id); 5787 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5788 } else if (queue_id < 64) { 5789 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5790 mask &= (1 << (queue_id - 32)); 5791 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5792 } 5793 rte_intr_ack(intr_handle); 5794 5795 return 0; 5796 } 5797 5798 static int 5799 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5800 { 5801 uint32_t mask; 5802 struct ixgbe_hw *hw = 5803 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5804 struct ixgbe_interrupt *intr = 5805 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5806 5807 if (queue_id < 16) { 5808 ixgbe_disable_intr(hw); 5809 intr->mask &= ~(1 << queue_id); 5810 ixgbe_enable_intr(dev); 5811 } else if (queue_id < 32) { 5812 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5813 mask &= ~(1 << queue_id); 5814 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5815 } else if (queue_id < 64) { 5816 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5817 mask &= ~(1 << (queue_id - 32)); 5818 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5819 } 5820 5821 return 0; 5822 } 5823 5824 static void 5825 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5826 uint8_t queue, uint8_t msix_vector) 5827 { 5828 uint32_t tmp, idx; 5829 5830 if (direction == -1) { 5831 /* other causes */ 5832 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5833 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 5834 tmp &= ~0xFF; 5835 tmp |= msix_vector; 5836 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 5837 } else { 5838 /* rx or tx cause */ 5839 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5840 idx = ((16 * (queue & 1)) + (8 * direction)); 5841 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 5842 tmp &= ~(0xFF << idx); 5843 tmp |= (msix_vector << idx); 5844 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 5845 } 5846 } 5847 5848 /** 5849 * Set the IVAR registers, mapping interrupt causes to vectors 5850 * @param hw 5851 * pointer to ixgbe_hw struct 5852 * @param direction 5853 * 0 for Rx, 1 for Tx, -1 for other causes 5854 * @param queue 5855 * queue to map the corresponding interrupt to 5856 * @param msix_vector 5857 * the vector to map to the corresponding queue 5858 */ 5859 static void 5860 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5861 uint8_t queue, uint8_t msix_vector) 5862 { 5863 uint32_t tmp, idx; 5864 5865 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5866 if (hw->mac.type == ixgbe_mac_82598EB) { 5867 if (direction == -1) 5868 direction = 0; 5869 idx = (((direction * 64) + queue) >> 2) & 0x1F; 5870 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 5871 tmp &= ~(0xFF << (8 * (queue & 0x3))); 5872 tmp |= (msix_vector << (8 * (queue & 0x3)));
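/* 82598 packs four 8-bit IVAR entries per 32-bit register: the
 * register index is ((direction * 64) + queue) >> 2 and the byte lane
 * is (queue & 0x3), so e.g. Rx queue 5 (direction 0) occupies byte 1
 * of IVAR(1). The write below commits the updated byte. */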
5873 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 5874 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 5875 (hw->mac.type == ixgbe_mac_X540) || 5876 (hw->mac.type == ixgbe_mac_X550) || 5877 (hw->mac.type == ixgbe_mac_X550EM_x)) { 5878 if (direction == -1) { 5879 /* other causes */ 5880 idx = ((queue & 1) * 8); 5881 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5882 tmp &= ~(0xFF << idx); 5883 tmp |= (msix_vector << idx); 5884 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 5885 } else { 5886 /* rx or tx causes */ 5887 idx = ((16 * (queue & 1)) + (8 * direction)); 5888 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 5889 tmp &= ~(0xFF << idx); 5890 tmp |= (msix_vector << idx); 5891 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 5892 } 5893 } 5894 } 5895 5896 static void 5897 ixgbevf_configure_msix(struct rte_eth_dev *dev) 5898 { 5899 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5900 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5901 struct ixgbe_hw *hw = 5902 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5903 uint32_t q_idx; 5904 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 5905 uint32_t base = IXGBE_MISC_VEC_ID; 5906 5907 /* Configure VF other cause ivar */ 5908 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 5909 5910 /* Won't configure the MSI-X register if no mapping is done 5911 * between intr vector and event fd. 5912 */ 5913 if (!rte_intr_dp_is_en(intr_handle)) 5914 return; 5915 5916 if (rte_intr_allow_others(intr_handle)) { 5917 base = IXGBE_RX_VEC_START; 5918 vector_idx = IXGBE_RX_VEC_START; 5919 } 5920 5921 /* Configure all RX queues of VF */ 5922 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 5923 /* Force all queues to use vector 0, 5924 * as IXGBE_VF_MAXMSIVECTOR = 1 5925 */ 5926 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 5927 rte_intr_vec_list_index_set(intr_handle, q_idx, 5928 vector_idx); 5929 if (vector_idx < base + rte_intr_nb_efd_get(intr_handle) 5930 - 1) 5931 vector_idx++; 5932 } 5933 5934 /* As the Rx queue setup above shows, all queues use vector 0. 5935 * Set only the ITR value of IXGBE_MISC_VEC_ID. 5936 */ 5937 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), 5938 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 5939 | IXGBE_EITR_CNT_WDIS); 5940 } 5941 5942 /** 5943 * Sets up the hardware to properly generate MSI-X interrupts 5944 * @param dev 5945 * board private structure 5946 */ 5947 static void 5948 ixgbe_configure_msix(struct rte_eth_dev *dev) 5949 { 5950 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5951 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5952 struct ixgbe_hw *hw = 5953 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5954 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 5955 uint32_t vec = IXGBE_MISC_VEC_ID; 5956 uint32_t mask; 5957 uint32_t gpie; 5958 5959 /* Won't configure the MSI-X register if no mapping is done 5960 * between intr vector and event fd, 5961 * but if MSI-X has been enabled already, need to configure 5962 * auto clean, auto mask and throttling.
5963 */ 5964 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5965 if (!rte_intr_dp_is_en(intr_handle) && 5966 !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) 5967 return; 5968 5969 if (rte_intr_allow_others(intr_handle)) 5970 vec = base = IXGBE_RX_VEC_START; 5971 5972 /* setup GPIE for MSI-x mode */ 5973 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5974 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 5975 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 5976 /* auto clearing and auto setting corresponding bits in EIMS 5977 * when MSI-X interrupt is triggered 5978 */ 5979 if (hw->mac.type == ixgbe_mac_82598EB) { 5980 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 5981 } else { 5982 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 5983 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 5984 } 5985 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 5986 5987 /* Populate the IVAR table and set the ITR values to the 5988 * corresponding register. 5989 */ 5990 if (rte_intr_dp_is_en(intr_handle)) { 5991 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 5992 queue_id++) { 5993 /* by default, 1:1 mapping */ 5994 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 5995 rte_intr_vec_list_index_set(intr_handle, 5996 queue_id, vec); 5997 if (vec < base + rte_intr_nb_efd_get(intr_handle) 5998 - 1) 5999 vec++; 6000 } 6001 6002 switch (hw->mac.type) { 6003 case ixgbe_mac_82598EB: 6004 ixgbe_set_ivar_map(hw, -1, 6005 IXGBE_IVAR_OTHER_CAUSES_INDEX, 6006 IXGBE_MISC_VEC_ID); 6007 break; 6008 case ixgbe_mac_82599EB: 6009 case ixgbe_mac_X540: 6010 case ixgbe_mac_X550: 6011 case ixgbe_mac_X550EM_x: 6012 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 6013 break; 6014 default: 6015 break; 6016 } 6017 } 6018 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 6019 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6020 | IXGBE_EITR_CNT_WDIS); 6021 6022 /* set up to autoclear timer, and the vectors */ 6023 mask = IXGBE_EIMS_ENABLE_MASK; 6024 mask &= ~(IXGBE_EIMS_OTHER | 6025 IXGBE_EIMS_MAILBOX | 6026 IXGBE_EIMS_LSC); 6027 6028 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 6029 } 6030 6031 int 6032 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 6033 uint16_t queue_idx, uint16_t tx_rate) 6034 { 6035 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6036 uint32_t rf_dec, rf_int; 6037 uint32_t bcnrc_val; 6038 uint16_t link_speed = dev->data->dev_link.link_speed; 6039 6040 if (queue_idx >= hw->mac.max_tx_queues) 6041 return -EINVAL; 6042 6043 if (tx_rate != 0) { 6044 /* Calculate the rate factor values to set */ 6045 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 6046 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 6047 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 6048 6049 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 6050 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 6051 IXGBE_RTTBCNRC_RF_INT_MASK_M); 6052 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 6053 } else { 6054 bcnrc_val = 0; 6055 } 6056 6057 /* 6058 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 6059 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 6060 * set as 0x4. 
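 As a worked example of the rate-factor math above (assuming
 * IXGBE_RTTBCNRC_RF_INT_SHIFT is 14, as defined in base/ixgbe_type.h):
 * limiting a 10G link to tx_rate = 3000 Mbps gives rf_int = 3 and
 * rf_dec = (1000 << 14) / 3000 = 5461, so the hardware divides the
 * link rate by 3 + 5461/16384, roughly 3.333, which yields the
 * requested 3000 Mbps.
 *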
6061 */ 6062 if (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE) 6063 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME); 6064 else 6065 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_DEFAULT); 6066 6067 /* Set RTTBCNRC of queue X */ 6068 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 6069 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 6070 IXGBE_WRITE_FLUSH(hw); 6071 6072 return 0; 6073 } 6074 6075 static int 6076 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 6077 __rte_unused uint32_t index, 6078 __rte_unused uint32_t pool) 6079 { 6080 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6081 int diag; 6082 6083 /* 6084 * On an 82599 VF, adding the same MAC address again is not an idempotent 6085 * operation. Trap this case to avoid exhausting the [very limited] 6086 * set of PF resources used to store VF MAC addresses. 6087 */ 6088 if (memcmp(hw->mac.perm_addr, mac_addr, 6089 sizeof(struct rte_ether_addr)) == 0) 6090 return -1; 6091 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6092 if (diag != 0) 6093 PMD_DRV_LOG(ERR, "Unable to add MAC address " 6094 RTE_ETHER_ADDR_PRT_FMT " - diag=%d", 6095 RTE_ETHER_ADDR_BYTES(mac_addr), diag); 6096 return diag; 6097 } 6098 6099 static void 6100 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 6101 { 6102 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6103 struct rte_ether_addr *perm_addr = 6104 (struct rte_ether_addr *)hw->mac.perm_addr; 6105 struct rte_ether_addr *mac_addr; 6106 uint32_t i; 6107 int diag; 6108 6109 /* 6110 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 6111 * not support the deletion of a given MAC address. 6112 * Instead, it requires deleting all MAC addresses and then adding 6113 * them all back, with the exception of the one to be deleted. 6114 */ 6115 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 6116 6117 /* 6118 * Add back all MAC addresses, with the exception of the deleted one 6119 * and of the permanent MAC address.
6120 */ 6121 for (i = 0, mac_addr = dev->data->mac_addrs; 6122 i < hw->mac.num_rar_entries; i++, mac_addr++) { 6123 /* Skip the deleted MAC address */ 6124 if (i == index) 6125 continue; 6126 /* Skip NULL MAC addresses */ 6127 if (rte_is_zero_ether_addr(mac_addr)) 6128 continue; 6129 /* Skip the permanent MAC address */ 6130 if (memcmp(perm_addr, mac_addr, 6131 sizeof(struct rte_ether_addr)) == 0) 6132 continue; 6133 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6134 if (diag != 0) 6135 PMD_DRV_LOG(ERR, 6136 "Failed to re-add MAC address " 6137 RTE_ETHER_ADDR_PRT_FMT ": " 6138 "diag=%d", RTE_ETHER_ADDR_BYTES(mac_addr), 6139 diag); 6140 } 6141 } 6142 6143 static int 6144 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 6145 struct rte_ether_addr *addr) 6146 { 6147 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6148 6149 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 6150 6151 return 0; 6152 } 6153 6154 int 6155 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 6156 struct rte_eth_syn_filter *filter, 6157 bool add) 6158 { 6159 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6160 struct ixgbe_filter_info *filter_info = 6161 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6162 uint32_t syn_info; 6163 uint32_t synqf; 6164 6165 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6166 return -EINVAL; 6167 6168 syn_info = filter_info->syn_info; 6169 6170 if (add) { 6171 if (syn_info & IXGBE_SYN_FILTER_ENABLE) 6172 return -EINVAL; 6173 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 6174 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 6175 6176 if (filter->hig_pri) 6177 synqf |= IXGBE_SYN_FILTER_SYNQFP; 6178 else 6179 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 6180 } else { 6181 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6182 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) 6183 return -ENOENT; 6184 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 6185 } 6186 6187 filter_info->syn_info = synqf; 6188 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 6189 IXGBE_WRITE_FLUSH(hw); 6190 return 0; 6191 } 6192 6193 6194 static inline enum ixgbe_5tuple_protocol 6195 convert_protocol_type(uint8_t protocol_value) 6196 { 6197 if (protocol_value == IPPROTO_TCP) 6198 return IXGBE_FILTER_PROTOCOL_TCP; 6199 else if (protocol_value == IPPROTO_UDP) 6200 return IXGBE_FILTER_PROTOCOL_UDP; 6201 else if (protocol_value == IPPROTO_SCTP) 6202 return IXGBE_FILTER_PROTOCOL_SCTP; 6203 else 6204 return IXGBE_FILTER_PROTOCOL_NONE; 6205 } 6206 6207 /* inject a 5-tuple filter into HW */ 6208 static inline void 6209 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, 6210 struct ixgbe_5tuple_filter *filter) 6211 { 6212 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6213 int i; 6214 uint32_t ftqf, sdpqf; 6215 uint32_t l34timir = 0; 6216 uint8_t mask = 0xff; 6217 6218 i = filter->index; 6219 6220 sdpqf = (uint32_t)(filter->filter_info.dst_port << 6221 IXGBE_SDPQF_DSTPORT_SHIFT); 6222 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 6223 6224 ftqf = (uint32_t)(filter->filter_info.proto & 6225 IXGBE_FTQF_PROTOCOL_MASK); 6226 ftqf |= (uint32_t)((filter->filter_info.priority & 6227 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 6228 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare.
*/ 6229 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 6230 if (filter->filter_info.dst_ip_mask == 0) 6231 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 6232 if (filter->filter_info.src_port_mask == 0) 6233 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 6234 if (filter->filter_info.dst_port_mask == 0) 6235 mask &= IXGBE_FTQF_DEST_PORT_MASK; 6236 if (filter->filter_info.proto_mask == 0) 6237 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 6238 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 6239 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 6240 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 6241 6242 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 6243 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 6244 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 6245 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 6246 6247 l34timir |= IXGBE_L34T_IMIR_RESERVE; 6248 l34timir |= (uint32_t)(filter->queue << 6249 IXGBE_L34T_IMIR_QUEUE_SHIFT); 6250 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 6251 } 6252 6253 /* 6254 * add a 5tuple filter 6255 * 6256 * @param 6257 * dev: Pointer to struct rte_eth_dev. 6258 * index: the index that will be allocated for the filter. 6259 * filter: pointer to the filter that will be added. 6260 * rx_queue: the queue id the filter is assigned to. 6261 * 6262 * @return 6263 * - On success, zero. 6264 * - On failure, a negative value. 6265 */ 6266 static int 6267 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 6268 struct ixgbe_5tuple_filter *filter) 6269 { 6270 struct ixgbe_filter_info *filter_info = 6271 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6272 int i, idx, shift; 6273 6274 /* 6275 * look for an unused 5tuple filter index, 6276 * and insert the filter into the list. 6277 */ 6278 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 6279 idx = i / (sizeof(uint32_t) * NBBY); 6280 shift = i % (sizeof(uint32_t) * NBBY); 6281 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 6282 filter_info->fivetuple_mask[idx] |= 1 << shift; 6283 filter->index = i; 6284 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 6285 filter, 6286 entries); 6287 break; 6288 } 6289 } 6290 if (i >= IXGBE_MAX_FTQF_FILTERS) { 6291 PMD_DRV_LOG(ERR, "5tuple filters are full."); 6292 return -ENOSYS; 6293 } 6294 6295 ixgbe_inject_5tuple_filter(dev, filter); 6296 6297 return 0; 6298 } 6299 6300 /* 6301 * remove a 5tuple filter 6302 * 6303 * @param 6304 * dev: Pointer to struct rte_eth_dev. 6305 * filter: pointer to the filter that will be removed.
6306 */ 6307 static void 6308 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 6309 struct ixgbe_5tuple_filter *filter) 6310 { 6311 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6312 struct ixgbe_filter_info *filter_info = 6313 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6314 uint16_t index = filter->index; 6315 6316 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 6317 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 6318 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 6319 rte_free(filter); 6320 6321 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 6322 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 6323 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 6324 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 6325 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 6326 } 6327 6328 static int 6329 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 6330 { 6331 struct ixgbe_hw *hw; 6332 uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; 6333 struct rte_eth_dev_data *dev_data = dev->data; 6334 6335 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6336 6337 if (mtu < RTE_ETHER_MIN_MTU || max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) 6338 return -EINVAL; 6339 6340 /* If device is started, refuse mtu that requires the support of 6341 * scattered packets when this feature has not been enabled before. 6342 */ 6343 if (dev_data->dev_started && !dev_data->scattered_rx && 6344 (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > 6345 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 6346 PMD_INIT_LOG(ERR, "Stop port first."); 6347 return -EINVAL; 6348 } 6349 6350 /* 6351 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 6352 * request of the version 2.0 of the mailbox API. 6353 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 6354 * of the mailbox API. 
6355 * This IXGBE_VF_SET_LPE request won't work with ixgbe PF drivers 6356 * prior to 3.11.33, the version that added the following change: 6357 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 6358 */ 6359 if (ixgbevf_rlpml_set_vf(hw, max_frame)) 6360 return -EINVAL; 6361 6362 return 0; 6363 } 6364 6365 static inline struct ixgbe_5tuple_filter * 6366 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 6367 struct ixgbe_5tuple_filter_info *key) 6368 { 6369 struct ixgbe_5tuple_filter *it; 6370 6371 TAILQ_FOREACH(it, filter_list, entries) { 6372 if (memcmp(key, &it->filter_info, 6373 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 6374 return it; 6375 } 6376 } 6377 return NULL; 6378 } 6379 6380 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */ 6381 static inline int 6382 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 6383 struct ixgbe_5tuple_filter_info *filter_info) 6384 { 6385 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 6386 filter->priority > IXGBE_5TUPLE_MAX_PRI || 6387 filter->priority < IXGBE_5TUPLE_MIN_PRI) 6388 return -EINVAL; 6389 6390 switch (filter->dst_ip_mask) { 6391 case UINT32_MAX: 6392 filter_info->dst_ip_mask = 0; 6393 filter_info->dst_ip = filter->dst_ip; 6394 break; 6395 case 0: 6396 filter_info->dst_ip_mask = 1; 6397 break; 6398 default: 6399 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 6400 return -EINVAL; 6401 } 6402 6403 switch (filter->src_ip_mask) { 6404 case UINT32_MAX: 6405 filter_info->src_ip_mask = 0; 6406 filter_info->src_ip = filter->src_ip; 6407 break; 6408 case 0: 6409 filter_info->src_ip_mask = 1; 6410 break; 6411 default: 6412 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 6413 return -EINVAL; 6414 } 6415 6416 switch (filter->dst_port_mask) { 6417 case UINT16_MAX: 6418 filter_info->dst_port_mask = 0; 6419 filter_info->dst_port = filter->dst_port; 6420 break; 6421 case 0: 6422 filter_info->dst_port_mask = 1; 6423 break; 6424 default: 6425 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 6426 return -EINVAL; 6427 } 6428 6429 switch (filter->src_port_mask) { 6430 case UINT16_MAX: 6431 filter_info->src_port_mask = 0; 6432 filter_info->src_port = filter->src_port; 6433 break; 6434 case 0: 6435 filter_info->src_port_mask = 1; 6436 break; 6437 default: 6438 PMD_DRV_LOG(ERR, "invalid src_port mask."); 6439 return -EINVAL; 6440 } 6441 6442 switch (filter->proto_mask) { 6443 case UINT8_MAX: 6444 filter_info->proto_mask = 0; 6445 filter_info->proto = 6446 convert_protocol_type(filter->proto); 6447 break; 6448 case 0: 6449 filter_info->proto_mask = 1; 6450 break; 6451 default: 6452 PMD_DRV_LOG(ERR, "invalid protocol mask."); 6453 return -EINVAL; 6454 } 6455 6456 filter_info->priority = (uint8_t)filter->priority; 6457 return 0; 6458 } 6459 6460 /* 6461 * add or delete an ntuple filter 6462 * 6463 * @param 6464 * dev: Pointer to struct rte_eth_dev. 6465 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6466 * add: if true, add the filter; if false, remove it 6467 * 6468 * @return 6469 * - On success, zero. 6470 * - On failure, a negative value.
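 *
 * A minimal usage sketch (the field values are hypothetical and shown
 * only to illustrate the mask convention above: a field is compared
 * only when its mask is all-ones, and wildcarded when the mask is 0):
 *
 *   struct rte_eth_ntuple_filter f = {
 *           .flags = RTE_5TUPLE_FLAGS,
 *           .dst_ip = dst_ip_be,
 *           .dst_ip_mask = UINT32_MAX,
 *           .src_ip_mask = 0,
 *           .dst_port = dst_port_be,
 *           .dst_port_mask = UINT16_MAX,
 *           .src_port_mask = 0,
 *           .proto = IPPROTO_TCP,
 *           .proto_mask = UINT8_MAX,
 *           .priority = 1,
 *           .queue = 4,
 *   };
 *   ret = ixgbe_add_del_ntuple_filter(dev, &f, true);
 *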
6471 */ 6472 int 6473 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 6474 struct rte_eth_ntuple_filter *ntuple_filter, 6475 bool add) 6476 { 6477 struct ixgbe_filter_info *filter_info = 6478 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6479 struct ixgbe_5tuple_filter_info filter_5tuple; 6480 struct ixgbe_5tuple_filter *filter; 6481 int ret; 6482 6483 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 6484 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 6485 return -EINVAL; 6486 } 6487 6488 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 6489 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 6490 if (ret < 0) 6491 return ret; 6492 6493 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 6494 &filter_5tuple); 6495 if (filter != NULL && add) { 6496 PMD_DRV_LOG(ERR, "filter exists."); 6497 return -EEXIST; 6498 } 6499 if (filter == NULL && !add) { 6500 PMD_DRV_LOG(ERR, "filter doesn't exist."); 6501 return -ENOENT; 6502 } 6503 6504 if (add) { 6505 filter = rte_zmalloc("ixgbe_5tuple_filter", 6506 sizeof(struct ixgbe_5tuple_filter), 0); 6507 if (filter == NULL) 6508 return -ENOMEM; 6509 rte_memcpy(&filter->filter_info, 6510 &filter_5tuple, 6511 sizeof(struct ixgbe_5tuple_filter_info)); 6512 filter->queue = ntuple_filter->queue; 6513 ret = ixgbe_add_5tuple_filter(dev, filter); 6514 if (ret < 0) { 6515 rte_free(filter); 6516 return ret; 6517 } 6518 } else 6519 ixgbe_remove_5tuple_filter(dev, filter); 6520 6521 return 0; 6522 } 6523 6524 int 6525 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 6526 struct rte_eth_ethertype_filter *filter, 6527 bool add) 6528 { 6529 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6530 struct ixgbe_filter_info *filter_info = 6531 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6532 uint32_t etqf = 0; 6533 uint32_t etqs = 0; 6534 int ret; 6535 struct ixgbe_ethertype_filter ethertype_filter; 6536 6537 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6538 return -EINVAL; 6539 6540 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 6541 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 6542 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 6543 " ethertype filter.", filter->ether_type); 6544 return -EINVAL; 6545 } 6546 6547 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 6548 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 6549 return -EINVAL; 6550 } 6551 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 6552 PMD_DRV_LOG(ERR, "drop option is unsupported."); 6553 return -EINVAL; 6554 } 6555 6556 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 6557 if (ret >= 0 && add) { 6558 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 6559 filter->ether_type); 6560 return -EEXIST; 6561 } 6562 if (ret < 0 && !add) { 6563 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 6564 filter->ether_type); 6565 return -ENOENT; 6566 } 6567 6568 if (add) { 6569 etqf = IXGBE_ETQF_FILTER_EN; 6570 etqf |= (uint32_t)filter->ether_type; 6571 etqs |= (uint32_t)((filter->queue << 6572 IXGBE_ETQS_RX_QUEUE_SHIFT) & 6573 IXGBE_ETQS_RX_QUEUE); 6574 etqs |= IXGBE_ETQS_QUEUE_EN; 6575 6576 ethertype_filter.ethertype = filter->ether_type; 6577 ethertype_filter.etqf = etqf; 6578 ethertype_filter.etqs = etqs; 6579 ethertype_filter.conf = FALSE; 6580 ret = ixgbe_ethertype_filter_insert(filter_info, 6581 &ethertype_filter); 6582 if (ret < 0) { 6583 PMD_DRV_LOG(ERR, "ethertype filters are full."); 6584 return -ENOSPC; 6585 } 6586 } else { 6587 ret = ixgbe_ethertype_filter_remove(filter_info,
(uint8_t)ret); 6588 if (ret < 0) 6589 return -ENOSYS; 6590 } 6591 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 6592 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 6593 IXGBE_WRITE_FLUSH(hw); 6594 6595 return 0; 6596 } 6597 6598 static int 6599 ixgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev, 6600 const struct rte_flow_ops **ops) 6601 { 6602 *ops = &ixgbe_flow_ops; 6603 return 0; 6604 } 6605 6606 static u8 * 6607 ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw, 6608 u8 **mc_addr_ptr, u32 *vmdq) 6609 { 6610 u8 *mc_addr; 6611 6612 *vmdq = 0; 6613 mc_addr = *mc_addr_ptr; 6614 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 6615 return mc_addr; 6616 } 6617 6618 static int 6619 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 6620 struct rte_ether_addr *mc_addr_set, 6621 uint32_t nb_mc_addr) 6622 { 6623 struct ixgbe_hw *hw; 6624 u8 *mc_addr_list; 6625 6626 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6627 mc_addr_list = (u8 *)mc_addr_set; 6628 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 6629 ixgbe_dev_addr_list_itr, TRUE); 6630 } 6631 6632 static uint64_t 6633 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 6634 { 6635 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6636 uint64_t systime_cycles; 6637 6638 switch (hw->mac.type) { 6639 case ixgbe_mac_X550: 6640 case ixgbe_mac_X550EM_x: 6641 case ixgbe_mac_X550EM_a: 6642 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 6643 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6644 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6645 * NSEC_PER_SEC; 6646 break; 6647 default: 6648 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6649 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6650 << 32; 6651 } 6652 6653 return systime_cycles; 6654 } 6655 6656 static uint64_t 6657 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6658 { 6659 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6660 uint64_t rx_tstamp_cycles; 6661 6662 switch (hw->mac.type) { 6663 case ixgbe_mac_X550: 6664 case ixgbe_mac_X550EM_x: 6665 case ixgbe_mac_X550EM_a: 6666 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6667 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6668 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6669 * NSEC_PER_SEC; 6670 break; 6671 default: 6672 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6673 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6674 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6675 << 32; 6676 } 6677 6678 return rx_tstamp_cycles; 6679 } 6680 6681 static uint64_t 6682 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6683 { 6684 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6685 uint64_t tx_tstamp_cycles; 6686 6687 switch (hw->mac.type) { 6688 case ixgbe_mac_X550: 6689 case ixgbe_mac_X550EM_x: 6690 case ixgbe_mac_X550EM_a: 6691 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6692 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6693 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6694 * NSEC_PER_SEC; 6695 break; 6696 default: 6697 /* TXSTMPL stores ns and TXSTMPH stores seconds. 
*/ 6698 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6699 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6700 << 32; 6701 } 6702 6703 return tx_tstamp_cycles; 6704 } 6705 6706 static void 6707 ixgbe_start_timecounters(struct rte_eth_dev *dev) 6708 { 6709 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6710 struct ixgbe_adapter *adapter = dev->data->dev_private; 6711 struct rte_eth_link link; 6712 uint32_t incval = 0; 6713 uint32_t shift = 0; 6714 6715 /* Get current link speed. */ 6716 ixgbe_dev_link_update(dev, 1); 6717 rte_eth_linkstatus_get(dev, &link); 6718 6719 switch (link.link_speed) { 6720 case RTE_ETH_SPEED_NUM_100M: 6721 incval = IXGBE_INCVAL_100; 6722 shift = IXGBE_INCVAL_SHIFT_100; 6723 break; 6724 case RTE_ETH_SPEED_NUM_1G: 6725 incval = IXGBE_INCVAL_1GB; 6726 shift = IXGBE_INCVAL_SHIFT_1GB; 6727 break; 6728 case RTE_ETH_SPEED_NUM_10G: 6729 default: 6730 incval = IXGBE_INCVAL_10GB; 6731 shift = IXGBE_INCVAL_SHIFT_10GB; 6732 break; 6733 } 6734 6735 switch (hw->mac.type) { 6736 case ixgbe_mac_X550: 6737 case ixgbe_mac_X550EM_x: 6738 case ixgbe_mac_X550EM_a: 6739 /* Independent of link speed. */ 6740 incval = 1; 6741 /* Cycles read will be interpreted as ns. */ 6742 shift = 0; 6743 /* Fall-through */ 6744 case ixgbe_mac_X540: 6745 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 6746 break; 6747 case ixgbe_mac_82599EB: 6748 incval >>= IXGBE_INCVAL_SHIFT_82599; 6749 shift -= IXGBE_INCVAL_SHIFT_82599; 6750 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 6751 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 6752 break; 6753 default: 6754 /* Not supported. */ 6755 return; 6756 } 6757 6758 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 6759 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6760 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6761 6762 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6763 adapter->systime_tc.cc_shift = shift; 6764 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 6765 6766 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6767 adapter->rx_tstamp_tc.cc_shift = shift; 6768 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6769 6770 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6771 adapter->tx_tstamp_tc.cc_shift = shift; 6772 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6773 } 6774 6775 static int 6776 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 6777 { 6778 struct ixgbe_adapter *adapter = dev->data->dev_private; 6779 6780 adapter->systime_tc.nsec += delta; 6781 adapter->rx_tstamp_tc.nsec += delta; 6782 adapter->tx_tstamp_tc.nsec += delta; 6783 6784 return 0; 6785 } 6786 6787 static int 6788 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 6789 { 6790 uint64_t ns; 6791 struct ixgbe_adapter *adapter = dev->data->dev_private; 6792 6793 ns = rte_timespec_to_ns(ts); 6794 /* Set the timecounters to a new value. 
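 The hardware SYSTIM registers keep running; only the software
 * timecounter offsets are rebased here. Applications reach this path
 * through the ethdev timesync API, e.g.:
 *
 *   struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
 *   rte_eth_timesync_write_time(port_id, &ts);
 *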
*/ 6795 adapter->systime_tc.nsec = ns; 6796 adapter->rx_tstamp_tc.nsec = ns; 6797 adapter->tx_tstamp_tc.nsec = ns; 6798 6799 return 0; 6800 } 6801 6802 static int 6803 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 6804 { 6805 uint64_t ns, systime_cycles; 6806 struct ixgbe_adapter *adapter = dev->data->dev_private; 6807 6808 systime_cycles = ixgbe_read_systime_cyclecounter(dev); 6809 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 6810 *ts = rte_ns_to_timespec(ns); 6811 6812 return 0; 6813 } 6814 6815 static int 6816 ixgbe_timesync_enable(struct rte_eth_dev *dev) 6817 { 6818 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6819 uint32_t tsync_ctl; 6820 uint32_t tsauxc; 6821 6822 /* Stop the timesync system time. */ 6823 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); 6824 /* Reset the timesync system time value. */ 6825 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); 6826 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); 6827 6828 /* Enable system time for platforms where it isn't on by default. */ 6829 tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); 6830 tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; 6831 IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); 6832 6833 ixgbe_start_timecounters(dev); 6834 6835 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6836 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 6837 (RTE_ETHER_TYPE_1588 | 6838 IXGBE_ETQF_FILTER_EN | 6839 IXGBE_ETQF_1588)); 6840 6841 /* Enable timestamping of received PTP packets. */ 6842 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6843 tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; 6844 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6845 6846 /* Enable timestamping of transmitted PTP packets. */ 6847 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6848 tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; 6849 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6850 6851 IXGBE_WRITE_FLUSH(hw); 6852 6853 return 0; 6854 } 6855 6856 static int 6857 ixgbe_timesync_disable(struct rte_eth_dev *dev) 6858 { 6859 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6860 uint32_t tsync_ctl; 6861 6862 /* Disable timestamping of transmitted PTP packets. */ 6863 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6864 tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; 6865 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6866 6867 /* Disable timestamping of received PTP packets. */ 6868 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6869 tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; 6870 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6871 6872 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6873 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 6874 6875 /* Stop incrementing the System Time registers.
*/ 6876 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); 6877 6878 return 0; 6879 } 6880 6881 static int 6882 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 6883 struct timespec *timestamp, 6884 uint32_t flags __rte_unused) 6885 { 6886 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6887 struct ixgbe_adapter *adapter = dev->data->dev_private; 6888 uint32_t tsync_rxctl; 6889 uint64_t rx_tstamp_cycles; 6890 uint64_t ns; 6891 6892 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6893 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) 6894 return -EINVAL; 6895 6896 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); 6897 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 6898 *timestamp = rte_ns_to_timespec(ns); 6899 6900 return 0; 6901 } 6902 6903 static int 6904 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 6905 struct timespec *timestamp) 6906 { 6907 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6908 struct ixgbe_adapter *adapter = dev->data->dev_private; 6909 uint32_t tsync_txctl; 6910 uint64_t tx_tstamp_cycles; 6911 uint64_t ns; 6912 6913 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6914 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) 6915 return -EINVAL; 6916 6917 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); 6918 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 6919 *timestamp = rte_ns_to_timespec(ns); 6920 6921 return 0; 6922 } 6923 6924 static int 6925 ixgbe_get_reg_length(struct rte_eth_dev *dev) 6926 { 6927 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6928 int count = 0; 6929 int g_ind = 0; 6930 const struct reg_info *reg_group; 6931 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 6932 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6933 6934 while ((reg_group = reg_set[g_ind++])) 6935 count += ixgbe_regs_group_count(reg_group); 6936 6937 return count; 6938 } 6939 6940 static int 6941 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 6942 { 6943 int count = 0; 6944 int g_ind = 0; 6945 const struct reg_info *reg_group; 6946 6947 while ((reg_group = ixgbevf_regs[g_ind++])) 6948 count += ixgbe_regs_group_count(reg_group); 6949 6950 return count; 6951 } 6952 6953 static int 6954 ixgbe_get_regs(struct rte_eth_dev *dev, 6955 struct rte_dev_reg_info *regs) 6956 { 6957 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6958 uint32_t *data = regs->data; 6959 int g_ind = 0; 6960 int count = 0; 6961 const struct reg_info *reg_group; 6962 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
6963 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6964 6965 if (data == NULL) { 6966 regs->length = ixgbe_get_reg_length(dev); 6967 regs->width = sizeof(uint32_t); 6968 return 0; 6969 } 6970 6971 /* Support only full register dump */ 6972 if ((regs->length == 0) || 6973 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 6974 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 6975 hw->device_id; 6976 while ((reg_group = reg_set[g_ind++])) 6977 count += ixgbe_read_regs_group(dev, &data[count], 6978 reg_group); 6979 return 0; 6980 } 6981 6982 return -ENOTSUP; 6983 } 6984 6985 static int 6986 ixgbevf_get_regs(struct rte_eth_dev *dev, 6987 struct rte_dev_reg_info *regs) 6988 { 6989 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6990 uint32_t *data = regs->data; 6991 int g_ind = 0; 6992 int count = 0; 6993 const struct reg_info *reg_group; 6994 6995 if (data == NULL) { 6996 regs->length = ixgbevf_get_reg_length(dev); 6997 regs->width = sizeof(uint32_t); 6998 return 0; 6999 } 7000 7001 /* Support only full register dump */ 7002 if ((regs->length == 0) || 7003 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 7004 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7005 hw->device_id; 7006 while ((reg_group = ixgbevf_regs[g_ind++])) 7007 count += ixgbe_read_regs_group(dev, &data[count], 7008 reg_group); 7009 return 0; 7010 } 7011 7012 return -ENOTSUP; 7013 } 7014 7015 static int 7016 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 7017 { 7018 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7019 7020 /* Return unit is byte count */ 7021 return hw->eeprom.word_size * 2; 7022 } 7023 7024 static int 7025 ixgbe_get_eeprom(struct rte_eth_dev *dev, 7026 struct rte_dev_eeprom_info *in_eeprom) 7027 { 7028 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7029 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7030 uint16_t *data = in_eeprom->data; 7031 int first, length; 7032 7033 first = in_eeprom->offset >> 1; 7034 length = in_eeprom->length >> 1; 7035 if ((first > hw->eeprom.word_size) || 7036 ((first + length) > hw->eeprom.word_size)) 7037 return -EINVAL; 7038 7039 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7040 7041 return eeprom->ops.read_buffer(hw, first, length, data); 7042 } 7043 7044 static int 7045 ixgbe_set_eeprom(struct rte_eth_dev *dev, 7046 struct rte_dev_eeprom_info *in_eeprom) 7047 { 7048 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7049 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7050 uint16_t *data = in_eeprom->data; 7051 int first, length; 7052 7053 first = in_eeprom->offset >> 1; 7054 length = in_eeprom->length >> 1; 7055 if ((first > hw->eeprom.word_size) || 7056 ((first + length) > hw->eeprom.word_size)) 7057 return -EINVAL; 7058 7059 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7060 7061 return eeprom->ops.write_buffer(hw, first, length, data); 7062 } 7063 7064 static int 7065 ixgbe_get_module_info(struct rte_eth_dev *dev, 7066 struct rte_eth_dev_module_info *modinfo) 7067 { 7068 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7069 uint32_t status; 7070 uint8_t sff8472_rev, addr_mode; 7071 bool page_swap = false; 7072 7073 /* Check whether we support SFF-8472 or not */ 7074 status = hw->phy.ops.read_i2c_eeprom(hw, 7075 IXGBE_SFF_SFF_8472_COMP, 7076 &sff8472_rev); 7077 if (status != 0) 7078 return -EIO; 7079 7080 /* addressing mode is not supported */ 7081 status = hw->phy.ops.read_i2c_eeprom(hw, 7082 
IXGBE_SFF_SFF_8472_SWAP, 7083 &addr_mode); 7084 if (status != 0) 7085 return -EIO; 7086 7087 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 7088 PMD_DRV_LOG(ERR, 7089 "Address change required to access page 0xA2, " 7090 "but not supported. Please report the module " 7091 "type to the driver maintainers."); 7092 page_swap = true; 7093 } 7094 7095 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 7096 /* We have a SFP, but it does not support SFF-8472 */ 7097 modinfo->type = RTE_ETH_MODULE_SFF_8079; 7098 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 7099 } else { 7100 /* We have a SFP which supports a revision of SFF-8472. */ 7101 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7102 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7103 } 7104 7105 return 0; 7106 } 7107 7108 static int 7109 ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 7110 struct rte_dev_eeprom_info *info) 7111 { 7112 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7113 uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; 7114 uint8_t databyte = 0xFF; 7115 uint8_t *data = info->data; 7116 uint32_t i = 0; 7117 7118 for (i = info->offset; i < info->offset + info->length; i++) { 7119 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 7120 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 7121 else 7122 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 7123 7124 if (status != 0) 7125 return -EIO; 7126 7127 data[i - info->offset] = databyte; 7128 } 7129 7130 return 0; 7131 } 7132 7133 uint16_t 7134 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 7135 switch (mac_type) { 7136 case ixgbe_mac_X550: 7137 case ixgbe_mac_X550EM_x: 7138 case ixgbe_mac_X550EM_a: 7139 return RTE_ETH_RSS_RETA_SIZE_512; 7140 case ixgbe_mac_X550_vf: 7141 case ixgbe_mac_X550EM_x_vf: 7142 case ixgbe_mac_X550EM_a_vf: 7143 return RTE_ETH_RSS_RETA_SIZE_64; 7144 case ixgbe_mac_X540_vf: 7145 case ixgbe_mac_82599_vf: 7146 return 0; 7147 default: 7148 return RTE_ETH_RSS_RETA_SIZE_128; 7149 } 7150 } 7151 7152 uint32_t 7153 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 7154 switch (mac_type) { 7155 case ixgbe_mac_X550: 7156 case ixgbe_mac_X550EM_x: 7157 case ixgbe_mac_X550EM_a: 7158 if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128) 7159 return IXGBE_RETA(reta_idx >> 2); 7160 else 7161 return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2); 7162 case ixgbe_mac_X550_vf: 7163 case ixgbe_mac_X550EM_x_vf: 7164 case ixgbe_mac_X550EM_a_vf: 7165 return IXGBE_VFRETA(reta_idx >> 2); 7166 default: 7167 return IXGBE_RETA(reta_idx >> 2); 7168 } 7169 } 7170 7171 uint32_t 7172 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 7173 switch (mac_type) { 7174 case ixgbe_mac_X550_vf: 7175 case ixgbe_mac_X550EM_x_vf: 7176 case ixgbe_mac_X550EM_a_vf: 7177 return IXGBE_VFMRQC; 7178 default: 7179 return IXGBE_MRQC; 7180 } 7181 } 7182 7183 uint32_t 7184 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 7185 switch (mac_type) { 7186 case ixgbe_mac_X550_vf: 7187 case ixgbe_mac_X550EM_x_vf: 7188 case ixgbe_mac_X550EM_a_vf: 7189 return IXGBE_VFRSSRK(i); 7190 default: 7191 return IXGBE_RSSRK(i); 7192 } 7193 } 7194 7195 bool 7196 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 7197 switch (mac_type) { 7198 case ixgbe_mac_82599_vf: 7199 case ixgbe_mac_X540_vf: 7200 return 0; 7201 default: 7202 return 1; 7203 } 7204 } 7205 7206 static int 7207 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 7208 struct rte_eth_dcb_info *dcb_info) 7209 { 7210 struct ixgbe_dcb_config *dcb_config = 7211 

static int
ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
		       struct rte_eth_dcb_info *dcb_info)
{
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
	struct ixgbe_dcb_tc_config *tc;
	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
	uint8_t nb_tcs;
	uint8_t i, j;

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
	else
		dcb_info->nb_tcs = 1;

	tc_queue = &dcb_info->tc_queue;
	nb_tcs = dcb_info->nb_tcs;

	if (dcb_config->vt_mode) { /* vt is enabled */
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
			&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
			for (j = 0; j < nb_tcs; j++) {
				tc_queue->tc_rxq[0][j].base = j;
				tc_queue->tc_rxq[0][j].nb_queue = 1;
				tc_queue->tc_txq[0][j].base = j;
				tc_queue->tc_txq[0][j].nb_queue = 1;
			}
		} else {
			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
				for (j = 0; j < nb_tcs; j++) {
					tc_queue->tc_rxq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_rxq[i][j].nb_queue = 1;
					tc_queue->tc_txq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_txq[i][j].nb_queue = 1;
				}
			}
		}
	} else { /* vt is disabled */
		struct rte_eth_dcb_rx_conf *rx_conf =
			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 64;
			dcb_info->tc_queue.tc_txq[0][2].base = 96;
			dcb_info->tc_queue.tc_txq[0][3].base = 112;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 32;
			dcb_info->tc_queue.tc_txq[0][2].base = 64;
			dcb_info->tc_queue.tc_txq[0][3].base = 80;
			dcb_info->tc_queue.tc_txq[0][4].base = 96;
			dcb_info->tc_queue.tc_txq[0][5].base = 104;
			dcb_info->tc_queue.tc_txq[0][6].base = 112;
			dcb_info->tc_queue.tc_txq[0][7].base = 120;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
		}
	}
	for (i = 0; i < dcb_info->nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
	}
	return 0;
}
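
/*
 * Illustrative sketch (not part of the driver): an application reaches the
 * callback above through the generic ethdev call. "port_id" is assumed to
 * be an ixgbe port configured in one of the DCB mq_mode flavours.
 *
 *	struct rte_eth_dcb_info dcb_info;
 *	uint8_t tc;
 *
 *	if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0) {
 *		for (tc = 0; tc < dcb_info.nb_tcs; tc++)
 *			printf("TC%u: rxq base %u, %u queue(s)\n", tc,
 *			       dcb_info.tc_queue.tc_rxq[0][tc].base,
 *			       dcb_info.tc_queue.tc_rxq[0][tc].nb_queue);
 *	}
 */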

/* Update e-tag ether type */
static int
ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
			    uint16_t ether_type)
{
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
	etag_etype |= ether_type;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/* Enable e-tag tunnel */
static int
ixgbe_e_tag_enable(struct ixgbe_hw *hw)
{
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype |= IXGBE_ETAG_ETYPE_VALID;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

static int
ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
		       struct ixgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	rar_entries = ixgbe_get_num_rx_addrs(hw);

	for (i = 1; i < rar_entries; i++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
		rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i));
		if ((rar_high & IXGBE_RAH_AV) &&
		    (rar_high & IXGBE_RAH_ADTYPE) &&
		    ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
		     l2_tunnel->tunnel_id)) {
			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);

			ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);

			return ret;
		}
	}

	return ret;
}

static int
ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
		       struct ixgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	/* One entry for one tunnel. Try to remove a potential existing entry. */
	ixgbe_e_tag_filter_del(dev, l2_tunnel);

	rar_entries = ixgbe_get_num_rx_addrs(hw);

	for (i = 1; i < rar_entries; i++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
		if (rar_high & IXGBE_RAH_AV) {
			continue;
		} else {
			ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
			rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
			rar_low = l2_tunnel->tunnel_id;

			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);

			return ret;
		}
	}

	PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
		     " Please remove a rule before adding a new one.");
	return -EINVAL;
}
7412 " Please remove a rule before adding a new one."); 7413 return -EINVAL; 7414 } 7415 7416 static inline struct ixgbe_l2_tn_filter * 7417 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, 7418 struct ixgbe_l2_tn_key *key) 7419 { 7420 int ret; 7421 7422 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 7423 if (ret < 0) 7424 return NULL; 7425 7426 return l2_tn_info->hash_map[ret]; 7427 } 7428 7429 static inline int 7430 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7431 struct ixgbe_l2_tn_filter *l2_tn_filter) 7432 { 7433 int ret; 7434 7435 ret = rte_hash_add_key(l2_tn_info->hash_handle, 7436 &l2_tn_filter->key); 7437 7438 if (ret < 0) { 7439 PMD_DRV_LOG(ERR, 7440 "Failed to insert L2 tunnel filter" 7441 " to hash table %d!", 7442 ret); 7443 return ret; 7444 } 7445 7446 l2_tn_info->hash_map[ret] = l2_tn_filter; 7447 7448 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7449 7450 return 0; 7451 } 7452 7453 static inline int 7454 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7455 struct ixgbe_l2_tn_key *key) 7456 { 7457 int ret; 7458 struct ixgbe_l2_tn_filter *l2_tn_filter; 7459 7460 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 7461 7462 if (ret < 0) { 7463 PMD_DRV_LOG(ERR, 7464 "No such L2 tunnel filter to delete %d!", 7465 ret); 7466 return ret; 7467 } 7468 7469 l2_tn_filter = l2_tn_info->hash_map[ret]; 7470 l2_tn_info->hash_map[ret] = NULL; 7471 7472 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7473 rte_free(l2_tn_filter); 7474 7475 return 0; 7476 } 7477 7478 /* Add l2 tunnel filter */ 7479 int 7480 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 7481 struct ixgbe_l2_tunnel_conf *l2_tunnel, 7482 bool restore) 7483 { 7484 int ret; 7485 struct ixgbe_l2_tn_info *l2_tn_info = 7486 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7487 struct ixgbe_l2_tn_key key; 7488 struct ixgbe_l2_tn_filter *node; 7489 7490 if (!restore) { 7491 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7492 key.tn_id = l2_tunnel->tunnel_id; 7493 7494 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); 7495 7496 if (node) { 7497 PMD_DRV_LOG(ERR, 7498 "The L2 tunnel filter already exists!"); 7499 return -EINVAL; 7500 } 7501 7502 node = rte_zmalloc("ixgbe_l2_tn", 7503 sizeof(struct ixgbe_l2_tn_filter), 7504 0); 7505 if (!node) 7506 return -ENOMEM; 7507 7508 rte_memcpy(&node->key, 7509 &key, 7510 sizeof(struct ixgbe_l2_tn_key)); 7511 node->pool = l2_tunnel->pool; 7512 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); 7513 if (ret < 0) { 7514 rte_free(node); 7515 return ret; 7516 } 7517 } 7518 7519 switch (l2_tunnel->l2_tunnel_type) { 7520 case RTE_ETH_L2_TUNNEL_TYPE_E_TAG: 7521 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 7522 break; 7523 default: 7524 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7525 ret = -EINVAL; 7526 break; 7527 } 7528 7529 if ((!restore) && (ret < 0)) 7530 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7531 7532 return ret; 7533 } 7534 7535 /* Delete l2 tunnel filter */ 7536 int 7537 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 7538 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7539 { 7540 int ret; 7541 struct ixgbe_l2_tn_info *l2_tn_info = 7542 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7543 struct ixgbe_l2_tn_key key; 7544 7545 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7546 key.tn_id = l2_tunnel->tunnel_id; 7547 ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7548 if (ret < 0) 7549 return ret; 7550 7551 switch (l2_tunnel->l2_tunnel_type) { 7552 case 

/* Delete l2 tunnel filter */
int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct ixgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret;
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_key key;

	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
	key.tn_id = l2_tunnel->tunnel_id;
	ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
	if (ret < 0)
		return ret;

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
{
	int ret = 0;
	uint32_t ctrl;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
	if (en)
		ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);

	return ret;
}

static int
ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
			uint16_t port)
{
	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/* There is only one register for the VxLAN UDP port, so several ports
 * cannot coexist; adding a port simply overwrites the current value.
 */
static int
ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
		     uint16_t port)
{
	if (port == 0) {
		PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
		return -EINVAL;
	}

	return ixgbe_update_vxlan_port(hw, port);
}

/* The VxLAN UDP port cannot really be deleted: its register always holds
 * some value. Deleting just resets it to the original value 0.
 */
static int
ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
		     uint16_t port)
{
	uint16_t cur_port;

	cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);

	if (cur_port != port) {
		PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
		return -EINVAL;
	}

	return ixgbe_update_vxlan_port(hw, 0);
}
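
/*
 * Illustrative sketch (not part of the driver): an application replaces or
 * clears the VxLAN UDP port through the generic ethdev tunnel API, which
 * dispatches to the two callbacks that follow. "port_id" is an assumption
 * of the sketch; 4789 is the IANA-assigned VxLAN port.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *	...
 *	rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
 */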

/* Add UDP tunneling port */
static int
ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			      struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
		break;

	case RTE_ETH_TUNNEL_TYPE_GENEVE:
	case RTE_ETH_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
		ret = -EINVAL;
		break;

	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Remove UDP tunneling port */
static int
ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			      struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
		break;
	case RTE_ETH_TUNNEL_TYPE_GENEVE:
	case RTE_ETH_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
		ret = -EINVAL;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) {
	case IXGBE_SUCCESS:
		ret = 0;
		break;
	case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

static int
ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) {
	case IXGBE_SUCCESS:
		ret = 0;
		break;
	case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

static int
ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;
	int mode = IXGBEVF_XCAST_MODE_ALLMULTI;

	switch (hw->mac.ops.update_xcast_mode(hw, mode)) {
	case IXGBE_SUCCESS:
		ret = 0;
		break;
	case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

static int
ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) {
	case IXGBE_SUCCESS:
		ret = 0;
		break;
	case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}
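
/*
 * Illustrative sketch (not part of the driver): the four VF receive-mode
 * callbacks above are reached through the generic ethdev calls. Whether a
 * mode is granted depends on the PF: the mailbox request can be rejected,
 * which surfaces here as -ENOTSUP (feature not supported) or -EAGAIN.
 * "port_id" is an assumption of the sketch.
 *
 *	if (rte_eth_promiscuous_enable(port_id) != 0)
 *		(void)rte_eth_allmulticast_enable(port_id);
 */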

static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 in_msg = 0;

	/* peek the message first */
	in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);

	/* PF reset VF event */
	if (in_msg == IXGBE_PF_CONTROL_MSG) {
		/* dummy mbx read to ack pf */
		if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
			return;
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					     NULL);
	}
}

static int
ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	ixgbevf_intr_disable(dev);

	/* read-on-clear nic registers here */
	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
	intr->flags = 0;

	/* only one misc vector supported - mailbox */
	eicr &= IXGBE_VTEICR_MASK;
	if (eicr == IXGBE_MISC_VEC_ID)
		intr->flags |= IXGBE_FLAG_MAILBOX;

	return 0;
}

static int
ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (intr->flags & IXGBE_FLAG_MAILBOX) {
		ixgbevf_mbx_process(dev);
		intr->flags &= ~IXGBE_FLAG_MAILBOX;
	}

	ixgbevf_intr_enable(dev);

	return 0;
}

static void
ixgbevf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ixgbevf_dev_interrupt_get_status(dev);
	ixgbevf_dev_interrupt_action(dev);
}
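
/*
 * Illustrative sketch (not part of the driver): how an application can be
 * notified of the PF-reset event that ixgbevf_mbx_process() raises above.
 * The callback name and its (unused) arguments are assumptions of the
 * sketch.
 *
 *	static int
 *	reset_cb(uint16_t port_id, enum rte_eth_event_type type,
 *		 void *param, void *ret_param)
 *	{
 *		RTE_SET_USED(param);
 *		RTE_SET_USED(ret_param);
 *		printf("Port %u: event %d, PF requested a VF reset\n",
 *		       port_id, type);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *				      reset_cb, NULL);
 */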

/**
 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
 * @hw: pointer to hardware structure
 *
 * Stops the transmit data path and waits for the HW to internally empty
 * the Tx security block
 **/
int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECTX_POLL 40

	int i;
	int sectxreg;

	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
			break;
		/* Use interrupt-safe sleep just in case */
		usec_delay(1000);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECTX_POLL)
		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
			    "path fully disabled. Continuing with init.");

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
 * @hw: pointer to hardware structure
 *
 * Enables the transmit data path.
 **/
int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
{
	uint32_t sectxreg;

	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/* restore n-tuple filter */
static inline void
ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter *node;

	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
		ixgbe_inject_5tuple_filter(dev, node);
	}
}

/* restore ethernet type filter */
static inline void
ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
					filter_info->ethertype_filters[i].etqf);
			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
					filter_info->ethertype_filters[i].etqs);
			IXGBE_WRITE_FLUSH(hw);
		}
	}
}

/* restore SYN filter */
static inline void
ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t synqf;

	synqf = filter_info->syn_info;

	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
		IXGBE_WRITE_FLUSH(hw);
	}
}

/* restore L2 tunnel filter */
static inline void
ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_filter *node;
	struct ixgbe_l2_tunnel_conf l2_tn_conf;

	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
		l2_tn_conf.tunnel_id = node->key.tn_id;
		l2_tn_conf.pool = node->pool;
		(void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
	}
}

/* restore rss filter */
static inline void
ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->rss_info.conf.queue_num)
		ixgbe_config_rss_filter(dev,
					&filter_info->rss_info, TRUE);
}

static int
ixgbe_filter_restore(struct rte_eth_dev *dev)
{
	ixgbe_ntuple_filter_restore(dev);
	ixgbe_ethertype_filter_restore(dev);
	ixgbe_syn_filter_restore(dev);
	ixgbe_fdir_filter_restore(dev);
	ixgbe_l2_tn_filter_restore(dev);
	ixgbe_rss_filter_restore(dev);

	return 0;
}

static void
ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (l2_tn_info->e_tag_en)
		(void)ixgbe_e_tag_enable(hw);

	if (l2_tn_info->e_tag_fwd_en)
		(void)ixgbe_e_tag_forwarding_en_dis(dev, 1);

	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
}

/* remove all the n-tuple filters */
void
ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		ixgbe_remove_5tuple_filter(dev, p_5tuple);
}

/* remove all the ether type filters */
void
ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i) &&
		    !filter_info->ethertype_filters[i].conf) {
			(void)ixgbe_ethertype_filter_remove(filter_info,
							    (uint8_t)i);
			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
			IXGBE_WRITE_FLUSH(hw);
		}
	}
}

/* remove the SYN filter */
void
ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
		filter_info->syn_info = 0;

		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
		IXGBE_WRITE_FLUSH(hw);
	}
}

/* remove all the L2 tunnel filters */
int
ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;
	struct ixgbe_l2_tunnel_conf l2_tn_conf;
	int ret = 0;

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
		l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
		l2_tn_conf.pool = l2_tn_filter->pool;
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
		if (ret < 0)
			return ret;
	}

	return 0;
}

void
ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev,
			      struct ixgbe_macsec_setting *macsec_setting)
{
	struct ixgbe_macsec_setting *macsec =
		IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);

	macsec->offload_en = macsec_setting->offload_en;
	macsec->encrypt_en = macsec_setting->encrypt_en;
	macsec->replayprotect_en = macsec_setting->replayprotect_en;
}

void
ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev)
{
	struct ixgbe_macsec_setting *macsec =
		IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);

	macsec->offload_en = 0;
	macsec->encrypt_en = 0;
	macsec->replayprotect_en = 0;
}
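
/*
 * Illustrative sketch (not part of the driver): the save/reset helpers
 * above keep the application's MACsec choices in the device private data
 * so they can be re-applied across a port restart. A MACsec offload setup
 * path might fill the setting as below before programming the registers
 * with ixgbe_dev_macsec_register_enable(); the flag values are assumptions
 * of the sketch.
 *
 *	struct ixgbe_macsec_setting macsec = {
 *		.offload_en = 1,
 *		.encrypt_en = 1,
 *		.replayprotect_en = 0,
 *	};
 *
 *	ixgbe_dev_macsec_setting_save(dev, &macsec);
 *	ixgbe_dev_macsec_register_enable(dev, &macsec);
 */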

void
ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
				 struct ixgbe_macsec_setting *macsec_setting)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;
	uint8_t en = macsec_setting->encrypt_en;
	uint8_t rp = macsec_setting->replayprotect_en;

	/**
	 * Workaround:
	 * The base code implements no Tx equivalent of
	 * ixgbe_disable_sec_rx_path(), and the base code must not be
	 * modified in DPDK, so call the hand-written variant directly
	 * for now. The hardware support has already been checked by
	 * ixgbe_disable_sec_rx_path().
	 */
	ixgbe_disable_sec_tx_path_generic(hw);

	/* Enable Ethernet CRC (required by MACsec offload) */
	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);

	/* Enable the TX and RX crypto engines */
	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
	ctrl |= 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);

	/* Enable SA lookup */
	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
		IXGBE_LSECTXCTRL_AUTH;
	ctrl |= IXGBE_LSECTXCTRL_AISCI;
	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
	if (rp)
		ctrl |= IXGBE_LSECRXCTRL_RP;
	else
		ctrl &= ~IXGBE_LSECRXCTRL_RP;
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

	/* Start the data paths */
	ixgbe_enable_sec_rx_path(hw);
	/**
	 * Workaround:
	 * The base code implements no Tx equivalent of
	 * ixgbe_enable_sec_rx_path(), and the base code must not be
	 * modified in DPDK, so call the hand-written variant directly
	 * for now.
	 */
	ixgbe_enable_sec_tx_path_generic(hw);
}

void
ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	/**
	 * Workaround:
	 * The base code implements no Tx equivalent of
	 * ixgbe_disable_sec_rx_path(), and the base code must not be
	 * modified in DPDK, so call the hand-written variant directly
	 * for now. The hardware support has already been checked by
	 * ixgbe_disable_sec_rx_path().
	 */
	ixgbe_disable_sec_tx_path_generic(hw);

	/* Disable the TX and RX crypto engines */
	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

	/* Disable SA lookup */
	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

	/* Start the data paths */
	ixgbe_enable_sec_rx_path(hw);
	/**
	 * Workaround:
	 * The base code implements no Tx equivalent of
	 * ixgbe_enable_sec_rx_path(), and the base code must not be
	 * modified in DPDK, so call the hand-written variant directly
	 * for now.
	 */
	ixgbe_enable_sec_tx_path_generic(hw);
}

RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
			      IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");

RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_driver, driver, NOTICE);

#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_tx, tx, DEBUG);
#endif
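
/*
 * Illustrative usage note (not part of the driver): the VF devarg declared
 * above is passed per device on the EAL command line; the PCI address is
 * an assumption of the sketch. With pflink_fullchk=1 the VF performs the
 * full check of the physical link when reporting link status.
 *
 *	dpdk-testpmd -a 0000:03:10.0,pflink_fullchk=1 -- -i
 */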