1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <sys/queue.h> 6 #include <stdio.h> 7 #include <errno.h> 8 #include <stdint.h> 9 #include <string.h> 10 #include <unistd.h> 11 #include <stdarg.h> 12 #include <inttypes.h> 13 #include <rte_string_fns.h> 14 #include <rte_byteorder.h> 15 #include <rte_common.h> 16 #include <rte_cycles.h> 17 18 #include <rte_interrupts.h> 19 #include <rte_log.h> 20 #include <rte_debug.h> 21 #include <rte_pci.h> 22 #include <rte_bus_pci.h> 23 #include <rte_branch_prediction.h> 24 #include <rte_memory.h> 25 #include <rte_kvargs.h> 26 #include <rte_eal.h> 27 #include <rte_alarm.h> 28 #include <rte_ether.h> 29 #include <ethdev_driver.h> 30 #include <ethdev_pci.h> 31 #include <rte_malloc.h> 32 #include <rte_random.h> 33 #include <rte_dev.h> 34 #include <rte_hash_crc.h> 35 #ifdef RTE_LIB_SECURITY 36 #include <rte_security_driver.h> 37 #endif 38 39 #include "ixgbe_logs.h" 40 #include "base/ixgbe_api.h" 41 #include "base/ixgbe_vf.h" 42 #include "base/ixgbe_common.h" 43 #include "ixgbe_ethdev.h" 44 #include "ixgbe_bypass.h" 45 #include "ixgbe_rxtx.h" 46 #include "base/ixgbe_type.h" 47 #include "base/ixgbe_phy.h" 48 #include "base/ixgbe_osdep.h" 49 #include "ixgbe_regs.h" 50 51 /* 52 * High threshold controlling when to start sending XOFF frames. Must be at 53 * least 8 bytes less than receive packet buffer size. This value is in units 54 * of 1024 bytes. 55 */ 56 #define IXGBE_FC_HI 0x80 57 58 /* 59 * Low threshold controlling when to start sending XON frames. This value is 60 * in units of 1024 bytes. 61 */ 62 #define IXGBE_FC_LO 0x40 63 64 /* Timer value included in XOFF frames. */ 65 #define IXGBE_FC_PAUSE 0x680 66 67 /*Default value of Max Rx Queue*/ 68 #define IXGBE_MAX_RX_QUEUE_NUM 128 69 70 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ 71 #define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ 72 #define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */ 73 74 #define IXGBE_MMW_SIZE_DEFAULT 0x4 75 #define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14 76 #define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */ 77 78 /* 79 * Default values for RX/TX configuration 80 */ 81 #define IXGBE_DEFAULT_RX_FREE_THRESH 32 82 #define IXGBE_DEFAULT_RX_PTHRESH 8 83 #define IXGBE_DEFAULT_RX_HTHRESH 8 84 #define IXGBE_DEFAULT_RX_WTHRESH 0 85 86 #define IXGBE_DEFAULT_TX_FREE_THRESH 32 87 #define IXGBE_DEFAULT_TX_PTHRESH 32 88 #define IXGBE_DEFAULT_TX_HTHRESH 0 89 #define IXGBE_DEFAULT_TX_WTHRESH 0 90 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32 91 92 /* Bit shift and mask */ 93 #define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2) 94 #define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) 95 #define IXGBE_8_BIT_WIDTH CHAR_BIT 96 #define IXGBE_8_BIT_MASK UINT8_MAX 97 98 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */ 99 100 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) 101 102 /* Additional timesync values. 
*/ 103 #define NSEC_PER_SEC 1000000000L 104 #define IXGBE_INCVAL_10GB 0x66666666 105 #define IXGBE_INCVAL_1GB 0x40000000 106 #define IXGBE_INCVAL_100 0x50000000 107 #define IXGBE_INCVAL_SHIFT_10GB 28 108 #define IXGBE_INCVAL_SHIFT_1GB 24 109 #define IXGBE_INCVAL_SHIFT_100 21 110 #define IXGBE_INCVAL_SHIFT_82599 7 111 #define IXGBE_INCPER_SHIFT_82599 24 112 113 #define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL 114 115 #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 116 #define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 117 #define IXGBE_ETAG_ETYPE 0x00005084 118 #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff 119 #define IXGBE_ETAG_ETYPE_VALID 0x80000000 120 #define IXGBE_RAH_ADTYPE 0x40000000 121 #define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff 122 #define IXGBE_VMVIR_TAGA_MASK 0x18000000 123 #define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 124 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ 125 #define IXGBE_QDE_STRIP_TAG 0x00000004 126 #define IXGBE_VTEICR_MASK 0x07 127 128 #define IXGBE_EXVET_VET_EXT_SHIFT 16 129 #define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 130 131 #define IXGBEVF_DEVARG_PFLINK_FULLCHK "pflink_fullchk" 132 133 static const char * const ixgbevf_valid_arguments[] = { 134 IXGBEVF_DEVARG_PFLINK_FULLCHK, 135 NULL 136 }; 137 138 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); 139 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); 140 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); 141 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); 142 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev); 143 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev); 144 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev); 145 static int ixgbe_dev_configure(struct rte_eth_dev *dev); 146 static int ixgbe_dev_start(struct rte_eth_dev *dev); 147 static int ixgbe_dev_stop(struct rte_eth_dev *dev); 148 static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); 149 static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); 150 static int ixgbe_dev_close(struct rte_eth_dev *dev); 151 static int ixgbe_dev_reset(struct rte_eth_dev *dev); 152 static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); 153 static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); 154 static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); 155 static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); 156 static int ixgbe_dev_link_update(struct rte_eth_dev *dev, 157 int wait_to_complete); 158 static int ixgbe_dev_stats_get(struct rte_eth_dev *dev, 159 struct rte_eth_stats *stats); 160 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, 161 struct rte_eth_xstat *xstats, unsigned n); 162 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, 163 struct rte_eth_xstat *xstats, unsigned n); 164 static int 165 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 166 uint64_t *values, unsigned int n); 167 static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev); 168 static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); 169 static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev, 170 struct rte_eth_xstat_name *xstats_names, 171 unsigned int size); 172 static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev, 173 struct rte_eth_xstat_name *xstats_names, unsigned limit); 174 static int ixgbe_dev_xstats_get_names_by_id( 175 struct rte_eth_dev *dev, 176 const uint64_t *ids, 177 struct rte_eth_xstat_name *xstats_names, 178 
unsigned int limit); 179 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 180 uint16_t queue_id, 181 uint8_t stat_idx, 182 uint8_t is_rx); 183 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 184 size_t fw_size); 185 static int ixgbe_dev_info_get(struct rte_eth_dev *dev, 186 struct rte_eth_dev_info *dev_info); 187 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); 188 static int ixgbevf_dev_info_get(struct rte_eth_dev *dev, 189 struct rte_eth_dev_info *dev_info); 190 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 191 192 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, 193 uint16_t vlan_id, int on); 194 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 195 enum rte_vlan_type vlan_type, 196 uint16_t tpid_id); 197 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, 198 uint16_t queue, bool on); 199 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, 200 int on); 201 static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, 202 int mask); 203 static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask); 204 static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); 205 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); 206 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); 207 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); 208 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev); 209 210 static int ixgbe_dev_led_on(struct rte_eth_dev *dev); 211 static int ixgbe_dev_led_off(struct rte_eth_dev *dev); 212 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, 213 struct rte_eth_fc_conf *fc_conf); 214 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, 215 struct rte_eth_fc_conf *fc_conf); 216 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 217 struct rte_eth_pfc_conf *pfc_conf); 218 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 219 struct rte_eth_rss_reta_entry64 *reta_conf, 220 uint16_t reta_size); 221 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 222 struct rte_eth_rss_reta_entry64 *reta_conf, 223 uint16_t reta_size); 224 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); 225 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); 226 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); 227 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); 228 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); 229 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); 230 static void ixgbe_dev_interrupt_handler(void *param); 231 static void ixgbe_dev_interrupt_delayed_handler(void *param); 232 static void *ixgbe_dev_setup_link_thread_handler(void *param); 233 static int ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, 234 uint32_t timeout_ms); 235 236 static int ixgbe_add_rar(struct rte_eth_dev *dev, 237 struct rte_ether_addr *mac_addr, 238 uint32_t index, uint32_t pool); 239 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); 240 static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, 241 struct rte_ether_addr *mac_addr); 242 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); 243 static bool is_device_supported(struct rte_eth_dev *dev, 244 struct rte_pci_driver *drv); 245 246 /* For Virtual Function 
support */ 247 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); 248 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); 249 static int ixgbevf_dev_configure(struct rte_eth_dev *dev); 250 static int ixgbevf_dev_start(struct rte_eth_dev *dev); 251 static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, 252 int wait_to_complete); 253 static int ixgbevf_dev_stop(struct rte_eth_dev *dev); 254 static int ixgbevf_dev_close(struct rte_eth_dev *dev); 255 static int ixgbevf_dev_reset(struct rte_eth_dev *dev); 256 static void ixgbevf_intr_disable(struct rte_eth_dev *dev); 257 static void ixgbevf_intr_enable(struct rte_eth_dev *dev); 258 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, 259 struct rte_eth_stats *stats); 260 static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); 261 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, 262 uint16_t vlan_id, int on); 263 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, 264 uint16_t queue, int on); 265 static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); 266 static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); 267 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); 268 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 269 uint16_t queue_id); 270 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 271 uint16_t queue_id); 272 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 273 uint8_t queue, uint8_t msix_vector); 274 static void ixgbevf_configure_msix(struct rte_eth_dev *dev); 275 static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev); 276 static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev); 277 static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); 278 static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); 279 280 /* For Eth VMDQ APIs support */ 281 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct 282 rte_ether_addr * mac_addr, uint8_t on); 283 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); 284 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 285 uint16_t queue_id); 286 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 287 uint16_t queue_id); 288 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 289 uint8_t queue, uint8_t msix_vector); 290 static void ixgbe_configure_msix(struct rte_eth_dev *dev); 291 292 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, 293 struct rte_ether_addr *mac_addr, 294 uint32_t index, uint32_t pool); 295 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); 296 static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 297 struct rte_ether_addr *mac_addr); 298 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 299 struct ixgbe_5tuple_filter *filter); 300 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 301 struct ixgbe_5tuple_filter *filter); 302 static int ixgbe_dev_flow_ops_get(struct rte_eth_dev *dev, 303 const struct rte_flow_ops **ops); 304 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); 305 306 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 307 struct rte_ether_addr *mc_addr_set, 308 uint32_t nb_mc_addr); 309 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 310 struct rte_eth_dcb_info *dcb_info); 311 312 static int ixgbe_get_reg_length(struct rte_eth_dev *dev); 313 static int 
ixgbe_get_regs(struct rte_eth_dev *dev, 314 struct rte_dev_reg_info *regs); 315 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev); 316 static int ixgbe_get_eeprom(struct rte_eth_dev *dev, 317 struct rte_dev_eeprom_info *eeprom); 318 static int ixgbe_set_eeprom(struct rte_eth_dev *dev, 319 struct rte_dev_eeprom_info *eeprom); 320 321 static int ixgbe_get_module_info(struct rte_eth_dev *dev, 322 struct rte_eth_dev_module_info *modinfo); 323 static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 324 struct rte_dev_eeprom_info *info); 325 326 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); 327 static int ixgbevf_get_regs(struct rte_eth_dev *dev, 328 struct rte_dev_reg_info *regs); 329 330 static int ixgbe_timesync_enable(struct rte_eth_dev *dev); 331 static int ixgbe_timesync_disable(struct rte_eth_dev *dev); 332 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 333 struct timespec *timestamp, 334 uint32_t flags); 335 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 336 struct timespec *timestamp); 337 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 338 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, 339 struct timespec *timestamp); 340 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, 341 const struct timespec *timestamp); 342 static void ixgbevf_dev_interrupt_handler(void *param); 343 344 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 345 struct rte_eth_udp_tunnel *udp_tunnel); 346 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 347 struct rte_eth_udp_tunnel *udp_tunnel); 348 static int ixgbe_filter_restore(struct rte_eth_dev *dev); 349 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev); 350 static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw); 351 352 /* 353 * Define VF Stats MACRO for Non "cleared on read" register 354 */ 355 #define UPDATE_VF_STAT(reg, last, cur) \ 356 { \ 357 uint32_t latest = IXGBE_READ_REG(hw, reg); \ 358 cur += (latest - last) & UINT_MAX; \ 359 last = latest; \ 360 } 361 362 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \ 363 { \ 364 u64 new_lsb = IXGBE_READ_REG(hw, lsb); \ 365 u64 new_msb = IXGBE_READ_REG(hw, msb); \ 366 u64 latest = ((new_msb << 32) | new_lsb); \ 367 cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \ 368 last = latest; \ 369 } 370 371 #define IXGBE_SET_HWSTRIP(h, q) do {\ 372 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 373 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 374 (h)->bitmap[idx] |= 1 << bit;\ 375 } while (0) 376 377 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\ 378 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 379 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 380 (h)->bitmap[idx] &= ~(1 << bit);\ 381 } while (0) 382 383 #define IXGBE_GET_HWSTRIP(h, q, r) do {\ 384 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 385 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 386 (r) = (h)->bitmap[idx] >> bit & 1;\ 387 } while (0) 388 389 /* 390 * The set of PCI devices this driver supports 391 */ 392 static const struct rte_pci_id pci_id_ixgbe_map[] = { 393 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) }, 394 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) }, 395 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) }, 396 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) }, 397 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) }, 398 { 
RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) }, 399 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) }, 400 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) }, 401 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) }, 402 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) }, 403 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) }, 404 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) }, 405 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) }, 406 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) }, 407 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) }, 408 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) }, 409 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) }, 410 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) }, 411 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) }, 412 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) }, 413 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) }, 414 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) }, 415 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) }, 416 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) }, 417 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) }, 418 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) }, 419 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) }, 420 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) }, 421 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) }, 422 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) }, 423 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) }, 424 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) }, 425 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) }, 426 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) }, 427 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) }, 428 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) }, 429 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) }, 430 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) }, 431 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) }, 432 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) }, 433 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) }, 434 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) }, 435 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) }, 436 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) }, 437 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) }, 438 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) }, 439 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) }, 440 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) }, 441 #ifdef RTE_LIBRTE_IXGBE_BYPASS 442 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) }, 443 #endif 444 { .vendor_id = 0, /* sentinel */ }, 445 }; 446 447 /* 448 * The set of PCI devices this driver supports (for 82599 VF) 449 */ 450 static const struct rte_pci_id pci_id_ixgbevf_map[] = { 451 { 
RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) }, 452 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) }, 453 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) }, 454 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) }, 455 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) }, 456 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) }, 457 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) }, 458 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) }, 459 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) }, 460 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) }, 461 { .vendor_id = 0, /* sentinel */ }, 462 }; 463 464 static const struct rte_eth_desc_lim rx_desc_lim = { 465 .nb_max = IXGBE_MAX_RING_DESC, 466 .nb_min = IXGBE_MIN_RING_DESC, 467 .nb_align = IXGBE_RXD_ALIGN, 468 }; 469 470 static const struct rte_eth_desc_lim tx_desc_lim = { 471 .nb_max = IXGBE_MAX_RING_DESC, 472 .nb_min = IXGBE_MIN_RING_DESC, 473 .nb_align = IXGBE_TXD_ALIGN, 474 .nb_seg_max = IXGBE_TX_MAX_SEG, 475 .nb_mtu_seg_max = IXGBE_TX_MAX_SEG, 476 }; 477 478 static const struct eth_dev_ops ixgbe_eth_dev_ops = { 479 .dev_configure = ixgbe_dev_configure, 480 .dev_start = ixgbe_dev_start, 481 .dev_stop = ixgbe_dev_stop, 482 .dev_set_link_up = ixgbe_dev_set_link_up, 483 .dev_set_link_down = ixgbe_dev_set_link_down, 484 .dev_close = ixgbe_dev_close, 485 .dev_reset = ixgbe_dev_reset, 486 .promiscuous_enable = ixgbe_dev_promiscuous_enable, 487 .promiscuous_disable = ixgbe_dev_promiscuous_disable, 488 .allmulticast_enable = ixgbe_dev_allmulticast_enable, 489 .allmulticast_disable = ixgbe_dev_allmulticast_disable, 490 .link_update = ixgbe_dev_link_update, 491 .stats_get = ixgbe_dev_stats_get, 492 .xstats_get = ixgbe_dev_xstats_get, 493 .xstats_get_by_id = ixgbe_dev_xstats_get_by_id, 494 .stats_reset = ixgbe_dev_stats_reset, 495 .xstats_reset = ixgbe_dev_xstats_reset, 496 .xstats_get_names = ixgbe_dev_xstats_get_names, 497 .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id, 498 .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, 499 .fw_version_get = ixgbe_fw_version_get, 500 .dev_infos_get = ixgbe_dev_info_get, 501 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 502 .mtu_set = ixgbe_dev_mtu_set, 503 .vlan_filter_set = ixgbe_vlan_filter_set, 504 .vlan_tpid_set = ixgbe_vlan_tpid_set, 505 .vlan_offload_set = ixgbe_vlan_offload_set, 506 .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set, 507 .rx_queue_start = ixgbe_dev_rx_queue_start, 508 .rx_queue_stop = ixgbe_dev_rx_queue_stop, 509 .tx_queue_start = ixgbe_dev_tx_queue_start, 510 .tx_queue_stop = ixgbe_dev_tx_queue_stop, 511 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 512 .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable, 513 .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable, 514 .rx_queue_release = ixgbe_dev_rx_queue_release, 515 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 516 .tx_queue_release = ixgbe_dev_tx_queue_release, 517 .dev_led_on = ixgbe_dev_led_on, 518 .dev_led_off = ixgbe_dev_led_off, 519 .flow_ctrl_get = ixgbe_flow_ctrl_get, 520 .flow_ctrl_set = ixgbe_flow_ctrl_set, 521 .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set, 522 .mac_addr_add = ixgbe_add_rar, 523 .mac_addr_remove = ixgbe_remove_rar, 524 .mac_addr_set = ixgbe_set_default_mac_addr, 525 .uc_hash_table_set = ixgbe_uc_hash_table_set, 526 .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, 527 .set_queue_rate_limit = 
ixgbe_set_queue_rate_limit, 528 .reta_update = ixgbe_dev_rss_reta_update, 529 .reta_query = ixgbe_dev_rss_reta_query, 530 .rss_hash_update = ixgbe_dev_rss_hash_update, 531 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 532 .flow_ops_get = ixgbe_dev_flow_ops_get, 533 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 534 .rxq_info_get = ixgbe_rxq_info_get, 535 .txq_info_get = ixgbe_txq_info_get, 536 .timesync_enable = ixgbe_timesync_enable, 537 .timesync_disable = ixgbe_timesync_disable, 538 .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, 539 .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, 540 .get_reg = ixgbe_get_regs, 541 .get_eeprom_length = ixgbe_get_eeprom_length, 542 .get_eeprom = ixgbe_get_eeprom, 543 .set_eeprom = ixgbe_set_eeprom, 544 .get_module_info = ixgbe_get_module_info, 545 .get_module_eeprom = ixgbe_get_module_eeprom, 546 .get_dcb_info = ixgbe_dev_get_dcb_info, 547 .timesync_adjust_time = ixgbe_timesync_adjust_time, 548 .timesync_read_time = ixgbe_timesync_read_time, 549 .timesync_write_time = ixgbe_timesync_write_time, 550 .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, 551 .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, 552 .tm_ops_get = ixgbe_tm_ops_get, 553 .tx_done_cleanup = ixgbe_dev_tx_done_cleanup, 554 .get_monitor_addr = ixgbe_get_monitor_addr, 555 }; 556 557 /* 558 * dev_ops for virtual function, bare necessities for basic vf 559 * operation have been implemented 560 */ 561 static const struct eth_dev_ops ixgbevf_eth_dev_ops = { 562 .dev_configure = ixgbevf_dev_configure, 563 .dev_start = ixgbevf_dev_start, 564 .dev_stop = ixgbevf_dev_stop, 565 .link_update = ixgbevf_dev_link_update, 566 .stats_get = ixgbevf_dev_stats_get, 567 .xstats_get = ixgbevf_dev_xstats_get, 568 .stats_reset = ixgbevf_dev_stats_reset, 569 .xstats_reset = ixgbevf_dev_stats_reset, 570 .xstats_get_names = ixgbevf_dev_xstats_get_names, 571 .dev_close = ixgbevf_dev_close, 572 .dev_reset = ixgbevf_dev_reset, 573 .promiscuous_enable = ixgbevf_dev_promiscuous_enable, 574 .promiscuous_disable = ixgbevf_dev_promiscuous_disable, 575 .allmulticast_enable = ixgbevf_dev_allmulticast_enable, 576 .allmulticast_disable = ixgbevf_dev_allmulticast_disable, 577 .dev_infos_get = ixgbevf_dev_info_get, 578 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 579 .mtu_set = ixgbevf_dev_set_mtu, 580 .vlan_filter_set = ixgbevf_vlan_filter_set, 581 .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, 582 .vlan_offload_set = ixgbevf_vlan_offload_set, 583 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 584 .rx_queue_release = ixgbe_dev_rx_queue_release, 585 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 586 .tx_queue_release = ixgbe_dev_tx_queue_release, 587 .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable, 588 .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable, 589 .mac_addr_add = ixgbevf_add_mac_addr, 590 .mac_addr_remove = ixgbevf_remove_mac_addr, 591 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 592 .rxq_info_get = ixgbe_rxq_info_get, 593 .txq_info_get = ixgbe_txq_info_get, 594 .mac_addr_set = ixgbevf_set_default_mac_addr, 595 .get_reg = ixgbevf_get_regs, 596 .reta_update = ixgbe_dev_rss_reta_update, 597 .reta_query = ixgbe_dev_rss_reta_query, 598 .rss_hash_update = ixgbe_dev_rss_hash_update, 599 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 600 .tx_done_cleanup = ixgbe_dev_tx_done_cleanup, 601 .get_monitor_addr = ixgbe_get_monitor_addr, 602 }; 603 604 /* store statistics names and its offset in stats structure */ 605 struct rte_ixgbe_xstats_name_off 
{ 606 char name[RTE_ETH_XSTATS_NAME_SIZE]; 607 unsigned offset; 608 }; 609 610 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = { 611 {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)}, 612 {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)}, 613 {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)}, 614 {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)}, 615 {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)}, 616 {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)}, 617 {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)}, 618 {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)}, 619 {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)}, 620 {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)}, 621 {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)}, 622 {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)}, 623 {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)}, 624 {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)}, 625 {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 626 prc1023)}, 627 {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 628 prc1522)}, 629 {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)}, 630 {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)}, 631 {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)}, 632 {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)}, 633 {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)}, 634 {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)}, 635 {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)}, 636 {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)}, 637 {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)}, 638 {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)}, 639 {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)}, 640 {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)}, 641 {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)}, 642 {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)}, 643 {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)}, 644 {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)}, 645 {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 646 ptc1023)}, 647 {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 648 ptc1522)}, 649 {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)}, 650 {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)}, 651 {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)}, 652 {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)}, 653 654 {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, 655 fdirustat_add)}, 656 {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, 657 fdirustat_remove)}, 658 {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, 659 fdirfstat_fadd)}, 660 {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, 661 fdirfstat_fremove)}, 662 {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, 663 fdirmatch)}, 664 {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, 665 fdirmiss)}, 666 667 {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)}, 668 {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)}, 669 
{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, 670 fclast)}, 671 {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)}, 672 {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)}, 673 {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)}, 674 {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)}, 675 {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, 676 fcoe_noddp)}, 677 {"rx_fcoe_no_direct_data_placement_ext_buff", 678 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)}, 679 680 {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 681 lxontxc)}, 682 {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 683 lxonrxc)}, 684 {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 685 lxofftxc)}, 686 {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 687 lxoffrxc)}, 688 {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)}, 689 }; 690 691 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ 692 sizeof(rte_ixgbe_stats_strings[0])) 693 694 /* MACsec statistics */ 695 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = { 696 {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats, 697 out_pkts_untagged)}, 698 {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats, 699 out_pkts_encrypted)}, 700 {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats, 701 out_pkts_protected)}, 702 {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats, 703 out_octets_encrypted)}, 704 {"out_octets_protected", offsetof(struct ixgbe_macsec_stats, 705 out_octets_protected)}, 706 {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats, 707 in_pkts_untagged)}, 708 {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats, 709 in_pkts_badtag)}, 710 {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats, 711 in_pkts_nosci)}, 712 {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats, 713 in_pkts_unknownsci)}, 714 {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats, 715 in_octets_decrypted)}, 716 {"in_octets_validated", offsetof(struct ixgbe_macsec_stats, 717 in_octets_validated)}, 718 {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats, 719 in_pkts_unchecked)}, 720 {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats, 721 in_pkts_delayed)}, 722 {"in_pkts_late", offsetof(struct ixgbe_macsec_stats, 723 in_pkts_late)}, 724 {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats, 725 in_pkts_ok)}, 726 {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats, 727 in_pkts_invalid)}, 728 {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats, 729 in_pkts_notvalid)}, 730 {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats, 731 in_pkts_unusedsa)}, 732 {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats, 733 in_pkts_notusingsa)}, 734 }; 735 736 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \ 737 sizeof(rte_ixgbe_macsec_strings[0])) 738 739 /* Per-queue statistics */ 740 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { 741 {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, 742 {"dropped", offsetof(struct ixgbe_hw_stats, mpc)}, 743 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)}, 744 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)}, 745 }; 746 747 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ 748 sizeof(rte_ixgbe_rxq_strings[0])) 749 #define IXGBE_NB_RXQ_PRIO_VALUES 8 750 751 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { 
752 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, 753 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)}, 754 {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, 755 pxon2offc)}, 756 }; 757 758 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ 759 sizeof(rte_ixgbe_txq_strings[0])) 760 #define IXGBE_NB_TXQ_PRIO_VALUES 8 761 762 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { 763 {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, 764 }; 765 766 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ 767 sizeof(rte_ixgbevf_stats_strings[0])) 768 769 /* 770 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. 771 */ 772 static inline int 773 ixgbe_is_sfp(struct ixgbe_hw *hw) 774 { 775 switch (hw->phy.type) { 776 case ixgbe_phy_sfp_avago: 777 case ixgbe_phy_sfp_ftl: 778 case ixgbe_phy_sfp_intel: 779 case ixgbe_phy_sfp_unknown: 780 case ixgbe_phy_sfp_passive_tyco: 781 case ixgbe_phy_sfp_passive_unknown: 782 return 1; 783 default: 784 return 0; 785 } 786 } 787 788 static inline int32_t 789 ixgbe_pf_reset_hw(struct ixgbe_hw *hw) 790 { 791 uint32_t ctrl_ext; 792 int32_t status; 793 794 status = ixgbe_reset_hw(hw); 795 796 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 797 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 798 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 799 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 800 IXGBE_WRITE_FLUSH(hw); 801 802 if (status == IXGBE_ERR_SFP_NOT_PRESENT) 803 status = IXGBE_SUCCESS; 804 return status; 805 } 806 807 static inline void 808 ixgbe_enable_intr(struct rte_eth_dev *dev) 809 { 810 struct ixgbe_interrupt *intr = 811 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 812 struct ixgbe_hw *hw = 813 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 814 815 IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); 816 IXGBE_WRITE_FLUSH(hw); 817 } 818 819 /* 820 * This function is based on ixgbe_disable_intr() in base/ixgbe.h. 821 */ 822 static void 823 ixgbe_disable_intr(struct ixgbe_hw *hw) 824 { 825 PMD_INIT_FUNC_TRACE(); 826 827 if (hw->mac.type == ixgbe_mac_82598EB) { 828 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); 829 } else { 830 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); 831 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); 832 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); 833 } 834 IXGBE_WRITE_FLUSH(hw); 835 } 836 837 /* 838 * This function resets queue statistics mapping registers. 839 * From Niantic datasheet, Initialization of Statistics section: 840 * "...if software requires the queue counters, the RQSMR and TQSM registers 841 * must be re-programmed following a device reset. 
842 */ 843 static void 844 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) 845 { 846 uint32_t i; 847 848 for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { 849 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); 850 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); 851 } 852 } 853 854 855 static int 856 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 857 uint16_t queue_id, 858 uint8_t stat_idx, 859 uint8_t is_rx) 860 { 861 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8 862 #define NB_QMAP_FIELDS_PER_QSM_REG 4 863 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f 864 865 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 866 struct ixgbe_stat_mapping_registers *stat_mappings = 867 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private); 868 uint32_t qsmr_mask = 0; 869 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK; 870 uint32_t q_map; 871 uint8_t n, offset; 872 873 if ((hw->mac.type != ixgbe_mac_82599EB) && 874 (hw->mac.type != ixgbe_mac_X540) && 875 (hw->mac.type != ixgbe_mac_X550) && 876 (hw->mac.type != ixgbe_mac_X550EM_x) && 877 (hw->mac.type != ixgbe_mac_X550EM_a)) 878 return -ENOSYS; 879 880 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", 881 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 882 queue_id, stat_idx); 883 884 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG); 885 if (n >= IXGBE_NB_STAT_MAPPING_REGS) { 886 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded"); 887 return -EIO; 888 } 889 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG); 890 891 /* Now clear any previous stat_idx set */ 892 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 893 if (!is_rx) 894 stat_mappings->tqsm[n] &= ~clearing_mask; 895 else 896 stat_mappings->rqsmr[n] &= ~clearing_mask; 897 898 q_map = (uint32_t)stat_idx; 899 q_map &= QMAP_FIELD_RESERVED_BITS_MASK; 900 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 901 if (!is_rx) 902 stat_mappings->tqsm[n] |= qsmr_mask; 903 else 904 stat_mappings->rqsmr[n] |= qsmr_mask; 905 906 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d", 907 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 908 queue_id, stat_idx); 909 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n, 910 is_rx ? 
stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); 911 912 /* Now write the mapping in the appropriate register */ 913 if (is_rx) { 914 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", 915 stat_mappings->rqsmr[n], n); 916 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); 917 } else { 918 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", 919 stat_mappings->tqsm[n], n); 920 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); 921 } 922 return 0; 923 } 924 925 static void 926 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) 927 { 928 struct ixgbe_stat_mapping_registers *stat_mappings = 929 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); 930 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 931 int i; 932 933 /* write whatever was in stat mapping table to the NIC */ 934 for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) { 935 /* rx */ 936 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]); 937 938 /* tx */ 939 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]); 940 } 941 } 942 943 static void 944 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) 945 { 946 uint8_t i; 947 struct ixgbe_dcb_tc_config *tc; 948 uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS; 949 950 dcb_config->num_tcs.pg_tcs = dcb_max_tc; 951 dcb_config->num_tcs.pfc_tcs = dcb_max_tc; 952 for (i = 0; i < dcb_max_tc; i++) { 953 tc = &dcb_config->tc_config[i]; 954 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i; 955 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 956 (uint8_t)(100/dcb_max_tc + (i & 1)); 957 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i; 958 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 959 (uint8_t)(100/dcb_max_tc + (i & 1)); 960 tc->pfc = ixgbe_dcb_pfc_disabled; 961 } 962 963 /* Initialize default user to priority mapping, UPx->TC0 */ 964 tc = &dcb_config->tc_config[0]; 965 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; 966 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; 967 for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) { 968 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; 969 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; 970 } 971 dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal; 972 dcb_config->pfc_mode_enable = false; 973 dcb_config->vt_mode = true; 974 dcb_config->round_robin_enable = false; 975 /* support all DCB capabilities in 82599 */ 976 dcb_config->support.capabilities = 0xFF; 977 978 /*we only support 4 Tcs for X540, X550 */ 979 if (hw->mac.type == ixgbe_mac_X540 || 980 hw->mac.type == ixgbe_mac_X550 || 981 hw->mac.type == ixgbe_mac_X550EM_x || 982 hw->mac.type == ixgbe_mac_X550EM_a) { 983 dcb_config->num_tcs.pg_tcs = 4; 984 dcb_config->num_tcs.pfc_tcs = 4; 985 } 986 } 987 988 /* 989 * Ensure that all locks are released before first NVM or PHY access 990 */ 991 static void 992 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) 993 { 994 uint16_t mask; 995 996 /* 997 * Phy lock should not fail in this early stage. If this is the case, 998 * it is due to an improper exit of the application. 999 * So force the release of the faulty lock. Release of common lock 1000 * is done automatically by swfw_sync function. 
1001 */ 1002 mask = IXGBE_GSSR_PHY0_SM << hw->bus.func; 1003 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1004 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func); 1005 } 1006 ixgbe_release_swfw_semaphore(hw, mask); 1007 1008 /* 1009 * These ones are more tricky since they are common to all ports; but 1010 * swfw_sync retries last long enough (1s) to be almost sure that if 1011 * lock can not be taken it is due to an improper lock of the 1012 * semaphore. 1013 */ 1014 mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; 1015 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1016 PMD_DRV_LOG(DEBUG, "SWFW common locks released"); 1017 } 1018 ixgbe_release_swfw_semaphore(hw, mask); 1019 } 1020 1021 /* 1022 * This function is based on code in ixgbe_attach() in base/ixgbe.c. 1023 * It returns 0 on success. 1024 */ 1025 static int 1026 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) 1027 { 1028 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1029 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1030 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1031 struct ixgbe_hw *hw = 1032 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1033 struct ixgbe_vfta *shadow_vfta = 1034 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1035 struct ixgbe_hwstrip *hwstrip = 1036 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1037 struct ixgbe_dcb_config *dcb_config = 1038 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); 1039 struct ixgbe_filter_info *filter_info = 1040 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 1041 struct ixgbe_bw_conf *bw_conf = 1042 IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private); 1043 uint32_t ctrl_ext; 1044 uint16_t csum; 1045 int diag, i, ret; 1046 1047 PMD_INIT_FUNC_TRACE(); 1048 1049 ixgbe_dev_macsec_setting_reset(eth_dev); 1050 1051 eth_dev->dev_ops = &ixgbe_eth_dev_ops; 1052 eth_dev->rx_queue_count = ixgbe_dev_rx_queue_count; 1053 eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status; 1054 eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status; 1055 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1056 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1057 eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts; 1058 1059 /* 1060 * For secondary processes, we don't initialise any further as primary 1061 * has already done this work. Only check we don't need a different 1062 * RX and TX function. 1063 */ 1064 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1065 struct ixgbe_tx_queue *txq; 1066 /* TX queue function in primary, set by last queue initialized 1067 * Tx queue may not initialized by primary process 1068 */ 1069 if (eth_dev->data->tx_queues) { 1070 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; 1071 ixgbe_set_tx_function(eth_dev, txq); 1072 } else { 1073 /* Use default TX function if we get here */ 1074 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. 
" 1075 "Using default TX function."); 1076 } 1077 1078 ixgbe_set_rx_function(eth_dev); 1079 1080 return 0; 1081 } 1082 1083 rte_atomic32_clear(&ad->link_thread_running); 1084 rte_eth_copy_pci_info(eth_dev, pci_dev); 1085 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1086 1087 /* Vendor and Device ID need to be set before init of shared code */ 1088 hw->device_id = pci_dev->id.device_id; 1089 hw->vendor_id = pci_dev->id.vendor_id; 1090 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1091 hw->allow_unsupported_sfp = 1; 1092 1093 /* Initialize the shared code (base driver) */ 1094 #ifdef RTE_LIBRTE_IXGBE_BYPASS 1095 diag = ixgbe_bypass_init_shared_code(hw); 1096 #else 1097 diag = ixgbe_init_shared_code(hw); 1098 #endif /* RTE_LIBRTE_IXGBE_BYPASS */ 1099 1100 if (diag != IXGBE_SUCCESS) { 1101 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); 1102 return -EIO; 1103 } 1104 1105 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { 1106 PMD_INIT_LOG(ERR, "\nERROR: " 1107 "Firmware recovery mode detected. Limiting functionality.\n" 1108 "Refer to the Intel(R) Ethernet Adapters and Devices " 1109 "User Guide for details on firmware recovery mode."); 1110 return -EIO; 1111 } 1112 1113 /* pick up the PCI bus settings for reporting later */ 1114 ixgbe_get_bus_info(hw); 1115 1116 /* Unlock any pending hardware semaphore */ 1117 ixgbe_swfw_lock_reset(hw); 1118 1119 #ifdef RTE_LIB_SECURITY 1120 /* Initialize security_ctx only for primary process*/ 1121 if (ixgbe_ipsec_ctx_create(eth_dev)) 1122 return -ENOMEM; 1123 #endif 1124 1125 /* Initialize DCB configuration*/ 1126 memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); 1127 ixgbe_dcb_init(hw, dcb_config); 1128 /* Get Hardware Flow Control setting */ 1129 hw->fc.requested_mode = ixgbe_fc_none; 1130 hw->fc.current_mode = ixgbe_fc_none; 1131 hw->fc.pause_time = IXGBE_FC_PAUSE; 1132 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 1133 hw->fc.low_water[i] = IXGBE_FC_LO; 1134 hw->fc.high_water[i] = IXGBE_FC_HI; 1135 } 1136 hw->fc.send_xon = 1; 1137 1138 /* Make sure we have a good EEPROM before we read from it */ 1139 diag = ixgbe_validate_eeprom_checksum(hw, &csum); 1140 if (diag != IXGBE_SUCCESS) { 1141 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); 1142 return -EIO; 1143 } 1144 1145 #ifdef RTE_LIBRTE_IXGBE_BYPASS 1146 diag = ixgbe_bypass_init_hw(hw); 1147 #else 1148 diag = ixgbe_init_hw(hw); 1149 #endif /* RTE_LIBRTE_IXGBE_BYPASS */ 1150 1151 /* 1152 * Devices with copper phys will fail to initialise if ixgbe_init_hw() 1153 * is called too soon after the kernel driver unbinding/binding occurs. 1154 * The failure occurs in ixgbe_identify_phy_generic() for all devices, 1155 * but for non-copper devies, ixgbe_identify_sfp_module_generic() is 1156 * also called. See ixgbe_identify_phy_82599(). The reason for the 1157 * failure is not known, and only occuts when virtualisation features 1158 * are disabled in the bios. A delay of 100ms was found to be enough by 1159 * trial-and-error, and is doubled to be safe. 1160 */ 1161 if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { 1162 rte_delay_ms(200); 1163 diag = ixgbe_init_hw(hw); 1164 } 1165 1166 if (diag == IXGBE_ERR_SFP_NOT_PRESENT) 1167 diag = IXGBE_SUCCESS; 1168 1169 if (diag == IXGBE_ERR_EEPROM_VERSION) { 1170 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" 1171 "LOM. 
Please be aware there may be issues associated " 1172 "with your hardware."); 1173 PMD_INIT_LOG(ERR, "If you are experiencing problems " 1174 "please contact your Intel or hardware representative " 1175 "who provided you with this hardware."); 1176 } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) 1177 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); 1178 if (diag) { 1179 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); 1180 return -EIO; 1181 } 1182 1183 /* Reset the hw statistics */ 1184 ixgbe_dev_stats_reset(eth_dev); 1185 1186 /* disable interrupt */ 1187 ixgbe_disable_intr(hw); 1188 1189 /* reset mappings for queue statistics hw counters*/ 1190 ixgbe_reset_qstat_mappings(hw); 1191 1192 /* Allocate memory for storing MAC addresses */ 1193 eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN * 1194 hw->mac.num_rar_entries, 0); 1195 if (eth_dev->data->mac_addrs == NULL) { 1196 PMD_INIT_LOG(ERR, 1197 "Failed to allocate %u bytes needed to store " 1198 "MAC addresses", 1199 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1200 return -ENOMEM; 1201 } 1202 /* Copy the permanent MAC address */ 1203 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, 1204 ð_dev->data->mac_addrs[0]); 1205 1206 /* Allocate memory for storing hash filter MAC addresses */ 1207 eth_dev->data->hash_mac_addrs = rte_zmalloc( 1208 "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0); 1209 if (eth_dev->data->hash_mac_addrs == NULL) { 1210 PMD_INIT_LOG(ERR, 1211 "Failed to allocate %d bytes needed to store MAC addresses", 1212 RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); 1213 rte_free(eth_dev->data->mac_addrs); 1214 eth_dev->data->mac_addrs = NULL; 1215 return -ENOMEM; 1216 } 1217 1218 /* initialize the vfta */ 1219 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1220 1221 /* initialize the hw strip bitmap*/ 1222 memset(hwstrip, 0, sizeof(*hwstrip)); 1223 1224 /* initialize PF if max_vfs not zero */ 1225 ret = ixgbe_pf_host_init(eth_dev); 1226 if (ret) { 1227 rte_free(eth_dev->data->mac_addrs); 1228 eth_dev->data->mac_addrs = NULL; 1229 rte_free(eth_dev->data->hash_mac_addrs); 1230 eth_dev->data->hash_mac_addrs = NULL; 1231 return ret; 1232 } 1233 1234 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 1235 /* let hardware know driver is loaded */ 1236 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 1237 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 1238 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 1239 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 1240 IXGBE_WRITE_FLUSH(hw); 1241 1242 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 1243 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d", 1244 (int) hw->mac.type, (int) hw->phy.type, 1245 (int) hw->phy.sfp_type); 1246 else 1247 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d", 1248 (int) hw->mac.type, (int) hw->phy.type); 1249 1250 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", 1251 eth_dev->data->port_id, pci_dev->id.vendor_id, 1252 pci_dev->id.device_id); 1253 1254 rte_intr_callback_register(intr_handle, 1255 ixgbe_dev_interrupt_handler, eth_dev); 1256 1257 /* enable uio/vfio intr/eventfd mapping */ 1258 rte_intr_enable(intr_handle); 1259 1260 /* enable support intr */ 1261 ixgbe_enable_intr(eth_dev); 1262 1263 /* initialize filter info */ 1264 memset(filter_info, 0, 1265 sizeof(struct ixgbe_filter_info)); 1266 1267 /* initialize 5tuple filter list */ 1268 TAILQ_INIT(&filter_info->fivetuple_list); 1269 1270 /* initialize flow director filter list & hash */ 1271 ixgbe_fdir_filter_init(eth_dev); 1272 1273 /* initialize l2 tunnel 
filter list & hash */ 1274 ixgbe_l2_tn_filter_init(eth_dev); 1275 1276 /* initialize flow filter lists */ 1277 ixgbe_filterlist_init(); 1278 1279 /* initialize bandwidth configuration info */ 1280 memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf)); 1281 1282 /* initialize Traffic Manager configuration */ 1283 ixgbe_tm_conf_init(eth_dev); 1284 1285 return 0; 1286 } 1287 1288 static int 1289 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) 1290 { 1291 PMD_INIT_FUNC_TRACE(); 1292 1293 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1294 return 0; 1295 1296 ixgbe_dev_close(eth_dev); 1297 1298 return 0; 1299 } 1300 1301 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) 1302 { 1303 struct ixgbe_filter_info *filter_info = 1304 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 1305 struct ixgbe_5tuple_filter *p_5tuple; 1306 1307 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { 1308 TAILQ_REMOVE(&filter_info->fivetuple_list, 1309 p_5tuple, 1310 entries); 1311 rte_free(p_5tuple); 1312 } 1313 memset(filter_info->fivetuple_mask, 0, 1314 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); 1315 1316 return 0; 1317 } 1318 1319 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev) 1320 { 1321 struct ixgbe_hw_fdir_info *fdir_info = 1322 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); 1323 struct ixgbe_fdir_filter *fdir_filter; 1324 1325 if (fdir_info->hash_map) 1326 rte_free(fdir_info->hash_map); 1327 if (fdir_info->hash_handle) 1328 rte_hash_free(fdir_info->hash_handle); 1329 1330 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { 1331 TAILQ_REMOVE(&fdir_info->fdir_list, 1332 fdir_filter, 1333 entries); 1334 rte_free(fdir_filter); 1335 } 1336 1337 return 0; 1338 } 1339 1340 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) 1341 { 1342 struct ixgbe_l2_tn_info *l2_tn_info = 1343 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1344 struct ixgbe_l2_tn_filter *l2_tn_filter; 1345 1346 if (l2_tn_info->hash_map) 1347 rte_free(l2_tn_info->hash_map); 1348 if (l2_tn_info->hash_handle) 1349 rte_hash_free(l2_tn_info->hash_handle); 1350 1351 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 1352 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, 1353 l2_tn_filter, 1354 entries); 1355 rte_free(l2_tn_filter); 1356 } 1357 1358 return 0; 1359 } 1360 1361 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) 1362 { 1363 struct ixgbe_hw_fdir_info *fdir_info = 1364 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); 1365 char fdir_hash_name[RTE_HASH_NAMESIZE]; 1366 struct rte_hash_parameters fdir_hash_params = { 1367 .name = fdir_hash_name, 1368 .entries = IXGBE_MAX_FDIR_FILTER_NUM, 1369 .key_len = sizeof(union ixgbe_atr_input), 1370 .hash_func = rte_hash_crc, 1371 .hash_func_init_val = 0, 1372 .socket_id = rte_socket_id(), 1373 }; 1374 1375 TAILQ_INIT(&fdir_info->fdir_list); 1376 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, 1377 "fdir_%s", eth_dev->device->name); 1378 fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); 1379 if (!fdir_info->hash_handle) { 1380 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); 1381 return -EINVAL; 1382 } 1383 fdir_info->hash_map = rte_zmalloc("ixgbe", 1384 sizeof(struct ixgbe_fdir_filter *) * 1385 IXGBE_MAX_FDIR_FILTER_NUM, 1386 0); 1387 if (!fdir_info->hash_map) { 1388 PMD_INIT_LOG(ERR, 1389 "Failed to allocate memory for fdir hash map!"); 1390 rte_hash_free(fdir_info->hash_handle); 1391 return -ENOMEM; 1392 } 1393 fdir_info->mask_added = FALSE; 1394 1395 return 
0; 1396 } 1397 1398 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) 1399 { 1400 struct ixgbe_l2_tn_info *l2_tn_info = 1401 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1402 char l2_tn_hash_name[RTE_HASH_NAMESIZE]; 1403 struct rte_hash_parameters l2_tn_hash_params = { 1404 .name = l2_tn_hash_name, 1405 .entries = IXGBE_MAX_L2_TN_FILTER_NUM, 1406 .key_len = sizeof(struct ixgbe_l2_tn_key), 1407 .hash_func = rte_hash_crc, 1408 .hash_func_init_val = 0, 1409 .socket_id = rte_socket_id(), 1410 }; 1411 1412 TAILQ_INIT(&l2_tn_info->l2_tn_list); 1413 snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, 1414 "l2_tn_%s", eth_dev->device->name); 1415 l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); 1416 if (!l2_tn_info->hash_handle) { 1417 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); 1418 return -EINVAL; 1419 } 1420 l2_tn_info->hash_map = rte_zmalloc("ixgbe", 1421 sizeof(struct ixgbe_l2_tn_filter *) * 1422 IXGBE_MAX_L2_TN_FILTER_NUM, 1423 0); 1424 if (!l2_tn_info->hash_map) { 1425 PMD_INIT_LOG(ERR, 1426 "Failed to allocate memory for L2 TN hash map!"); 1427 rte_hash_free(l2_tn_info->hash_handle); 1428 return -ENOMEM; 1429 } 1430 l2_tn_info->e_tag_en = FALSE; 1431 l2_tn_info->e_tag_fwd_en = FALSE; 1432 l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; 1433 1434 return 0; 1435 } 1436 /* 1437 * Negotiate mailbox API version with the PF. 1438 * After reset API version is always set to the basic one (ixgbe_mbox_api_10). 1439 * Then we try to negotiate starting with the most recent one. 1440 * If all negotiation attempts fail, then we will proceed with 1441 * the default one (ixgbe_mbox_api_10). 1442 */ 1443 static void 1444 ixgbevf_negotiate_api(struct ixgbe_hw *hw) 1445 { 1446 int32_t i; 1447 1448 /* start with highest supported, proceed down */ 1449 static const enum ixgbe_pfvf_api_rev sup_ver[] = { 1450 ixgbe_mbox_api_13, 1451 ixgbe_mbox_api_12, 1452 ixgbe_mbox_api_11, 1453 ixgbe_mbox_api_10, 1454 }; 1455 1456 for (i = 0; 1457 i != RTE_DIM(sup_ver) && 1458 ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; 1459 i++) 1460 ; 1461 } 1462 1463 static void 1464 generate_random_mac_addr(struct rte_ether_addr *mac_addr) 1465 { 1466 uint64_t random; 1467 1468 /* Set Organizationally Unique Identifier (OUI) prefix. */ 1469 mac_addr->addr_bytes[0] = 0x00; 1470 mac_addr->addr_bytes[1] = 0x09; 1471 mac_addr->addr_bytes[2] = 0xC0; 1472 /* Force indication of locally assigned MAC address. */ 1473 mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; 1474 /* Generate the last 3 bytes of the MAC address with a random number. 
*/ 1475 random = rte_rand(); 1476 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1477 } 1478 1479 static int 1480 devarg_handle_int(__rte_unused const char *key, const char *value, 1481 void *extra_args) 1482 { 1483 uint16_t *n = extra_args; 1484 1485 if (value == NULL || extra_args == NULL) 1486 return -EINVAL; 1487 1488 *n = (uint16_t)strtoul(value, NULL, 0); 1489 if (*n == USHRT_MAX && errno == ERANGE) 1490 return -1; 1491 1492 return 0; 1493 } 1494 1495 static void 1496 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, 1497 struct rte_devargs *devargs) 1498 { 1499 struct rte_kvargs *kvlist; 1500 uint16_t pflink_fullchk; 1501 1502 if (devargs == NULL) 1503 return; 1504 1505 kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); 1506 if (kvlist == NULL) 1507 return; 1508 1509 if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && 1510 rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, 1511 devarg_handle_int, &pflink_fullchk) == 0 && 1512 pflink_fullchk == 1) 1513 adapter->pflink_fullchk = 1; 1514 1515 rte_kvargs_free(kvlist); 1516 } 1517 1518 /* 1519 * Virtual Function device init 1520 */ 1521 static int 1522 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1523 { 1524 int diag; 1525 uint32_t tc, tcs; 1526 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1527 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1528 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1529 struct ixgbe_hw *hw = 1530 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1531 struct ixgbe_vfta *shadow_vfta = 1532 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1533 struct ixgbe_hwstrip *hwstrip = 1534 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1535 struct rte_ether_addr *perm_addr = 1536 (struct rte_ether_addr *)hw->mac.perm_addr; 1537 1538 PMD_INIT_FUNC_TRACE(); 1539 1540 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1541 eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status; 1542 eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status; 1543 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1544 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1545 1546 /* for secondary processes, we don't initialise any further as primary 1547 * has already done this work. Only check we don't need a different 1548 * RX function 1549 */ 1550 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1551 struct ixgbe_tx_queue *txq; 1552 /* TX queue function in primary, set by last queue initialized 1553 * Tx queue may not initialized by primary process 1554 */ 1555 if (eth_dev->data->tx_queues) { 1556 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1557 ixgbe_set_tx_function(eth_dev, txq); 1558 } else { 1559 /* Use default TX function if we get here */ 1560 PMD_INIT_LOG(NOTICE, 1561 "No TX queues configured yet. 
Using default TX function."); 1562 } 1563 1564 ixgbe_set_rx_function(eth_dev); 1565 1566 return 0; 1567 } 1568 1569 rte_atomic32_clear(&ad->link_thread_running); 1570 ixgbevf_parse_devargs(eth_dev->data->dev_private, 1571 pci_dev->device.devargs); 1572 1573 rte_eth_copy_pci_info(eth_dev, pci_dev); 1574 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1575 1576 hw->device_id = pci_dev->id.device_id; 1577 hw->vendor_id = pci_dev->id.vendor_id; 1578 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1579 1580 /* initialize the vfta */ 1581 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1582 1583 /* initialize the hw strip bitmap*/ 1584 memset(hwstrip, 0, sizeof(*hwstrip)); 1585 1586 /* Initialize the shared code (base driver) */ 1587 diag = ixgbe_init_shared_code(hw); 1588 if (diag != IXGBE_SUCCESS) { 1589 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1590 return -EIO; 1591 } 1592 1593 /* init_mailbox_params */ 1594 hw->mbx.ops.init_params(hw); 1595 1596 /* Reset the hw statistics */ 1597 ixgbevf_dev_stats_reset(eth_dev); 1598 1599 /* Disable the interrupts for VF */ 1600 ixgbevf_intr_disable(eth_dev); 1601 1602 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1603 diag = hw->mac.ops.reset_hw(hw); 1604 1605 /* 1606 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1607 * the underlying PF driver has not assigned a MAC address to the VF. 1608 * In this case, assign a random MAC address. 1609 */ 1610 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1611 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1612 /* 1613 * This error code will be propagated to the app by 1614 * rte_eth_dev_reset, so use a public error code rather than 1615 * the internal-only IXGBE_ERR_RESET_FAILED 1616 */ 1617 return -EAGAIN; 1618 } 1619 1620 /* negotiate mailbox API version to use with the PF. */ 1621 ixgbevf_negotiate_api(hw); 1622 1623 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1624 ixgbevf_get_queues(hw, &tcs, &tc); 1625 1626 /* Allocate memory for storing MAC addresses */ 1627 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * 1628 hw->mac.num_rar_entries, 0); 1629 if (eth_dev->data->mac_addrs == NULL) { 1630 PMD_INIT_LOG(ERR, 1631 "Failed to allocate %u bytes needed to store " 1632 "MAC addresses", 1633 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1634 return -ENOMEM; 1635 } 1636 1637 /* Generate a random MAC address, if none was assigned by PF. 
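 * The generated address keeps the locally administered bit set (see generate_random_mac_addr() above) and is programmed into the VF RAR via ixgbe_set_rar_vf() below.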
*/ 1638 if (rte_is_zero_ether_addr(perm_addr)) { 1639 generate_random_mac_addr(perm_addr); 1640 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); 1641 if (diag) { 1642 rte_free(eth_dev->data->mac_addrs); 1643 eth_dev->data->mac_addrs = NULL; 1644 return diag; 1645 } 1646 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 1647 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 1648 RTE_ETHER_ADDR_PRT_FMT, 1649 RTE_ETHER_ADDR_BYTES(perm_addr)); 1650 } 1651 1652 /* Copy the permanent MAC address */ 1653 rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]); 1654 1655 /* reset the hardware with the new settings */ 1656 diag = hw->mac.ops.start_hw(hw); 1657 switch (diag) { 1658 case 0: 1659 break; 1660 1661 default: 1662 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1663 rte_free(eth_dev->data->mac_addrs); 1664 eth_dev->data->mac_addrs = NULL; 1665 return -EIO; 1666 } 1667 1668 rte_intr_callback_register(intr_handle, 1669 ixgbevf_dev_interrupt_handler, eth_dev); 1670 rte_intr_enable(intr_handle); 1671 ixgbevf_intr_enable(eth_dev); 1672 1673 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", 1674 eth_dev->data->port_id, pci_dev->id.vendor_id, 1675 pci_dev->id.device_id, "ixgbe_mac_82599_vf"); 1676 1677 return 0; 1678 } 1679 1680 /* Virtual Function device uninit */ 1681 1682 static int 1683 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) 1684 { 1685 PMD_INIT_FUNC_TRACE(); 1686 1687 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1688 return 0; 1689 1690 ixgbevf_dev_close(eth_dev); 1691 1692 return 0; 1693 } 1694 1695 static int 1696 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1697 struct rte_pci_device *pci_dev) 1698 { 1699 char name[RTE_ETH_NAME_MAX_LEN]; 1700 struct rte_eth_dev *pf_ethdev; 1701 struct rte_eth_devargs eth_da; 1702 int i, retval; 1703 1704 if (pci_dev->device.devargs) { 1705 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, 1706 &eth_da); 1707 if (retval) 1708 return retval; 1709 } else 1710 memset(&eth_da, 0, sizeof(eth_da)); 1711 1712 if (eth_da.nb_representor_ports > 0 && 1713 eth_da.type != RTE_ETH_REPRESENTOR_VF) { 1714 PMD_DRV_LOG(ERR, "unsupported representor type: %s\n", 1715 pci_dev->device.devargs->args); 1716 return -ENOTSUP; 1717 } 1718 1719 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 1720 sizeof(struct ixgbe_adapter), 1721 eth_dev_pci_specific_init, pci_dev, 1722 eth_ixgbe_dev_init, NULL); 1723 1724 if (retval || eth_da.nb_representor_ports < 1) 1725 return retval; 1726 1727 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1728 if (pf_ethdev == NULL) 1729 return -ENODEV; 1730 1731 /* probe VF representor ports */ 1732 for (i = 0; i < eth_da.nb_representor_ports; i++) { 1733 struct ixgbe_vf_info *vfinfo; 1734 struct ixgbe_vf_representor representor; 1735 1736 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( 1737 pf_ethdev->data->dev_private); 1738 if (vfinfo == NULL) { 1739 PMD_DRV_LOG(ERR, 1740 "no virtual functions supported by PF"); 1741 break; 1742 } 1743 1744 representor.vf_id = eth_da.representor_ports[i]; 1745 representor.switch_domain_id = vfinfo->switch_domain_id; 1746 representor.pf_ethdev = pf_ethdev; 1747 1748 /* representor port net_bdf_port */ 1749 snprintf(name, sizeof(name), "net_%s_representor_%d", 1750 pci_dev->device.name, 1751 eth_da.representor_ports[i]); 1752 1753 retval = rte_eth_dev_create(&pci_dev->device, name, 1754 sizeof(struct ixgbe_vf_representor), NULL, NULL, 1755 ixgbe_vf_representor_init, &representor); 1756
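/* rte_eth_dev_create() invokes ixgbe_vf_representor_init() on the new port; if it fails, only this representor is skipped and the PF port created above stays usable. */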
1757 if (retval) 1758 PMD_DRV_LOG(ERR, "failed to create ixgbe vf " 1759 "representor %s.", name); 1760 } 1761 1762 return 0; 1763 } 1764 1765 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) 1766 { 1767 struct rte_eth_dev *ethdev; 1768 1769 ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1770 if (!ethdev) 1771 return 0; 1772 1773 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 1774 return rte_eth_dev_pci_generic_remove(pci_dev, 1775 ixgbe_vf_representor_uninit); 1776 else 1777 return rte_eth_dev_pci_generic_remove(pci_dev, 1778 eth_ixgbe_dev_uninit); 1779 } 1780 1781 static struct rte_pci_driver rte_ixgbe_pmd = { 1782 .id_table = pci_id_ixgbe_map, 1783 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1784 .probe = eth_ixgbe_pci_probe, 1785 .remove = eth_ixgbe_pci_remove, 1786 }; 1787 1788 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1789 struct rte_pci_device *pci_dev) 1790 { 1791 return rte_eth_dev_pci_generic_probe(pci_dev, 1792 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); 1793 } 1794 1795 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) 1796 { 1797 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); 1798 } 1799 1800 /* 1801 * virtual function driver struct 1802 */ 1803 static struct rte_pci_driver rte_ixgbevf_pmd = { 1804 .id_table = pci_id_ixgbevf_map, 1805 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 1806 .probe = eth_ixgbevf_pci_probe, 1807 .remove = eth_ixgbevf_pci_remove, 1808 }; 1809 1810 static int 1811 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1812 { 1813 struct ixgbe_hw *hw = 1814 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1815 struct ixgbe_vfta *shadow_vfta = 1816 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1817 uint32_t vfta; 1818 uint32_t vid_idx; 1819 uint32_t vid_bit; 1820 1821 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1822 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1823 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1824 if (on) 1825 vfta |= vid_bit; 1826 else 1827 vfta &= ~vid_bit; 1828 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1829 1830 /* update local VFTA copy */ 1831 shadow_vfta->vfta[vid_idx] = vfta; 1832 1833 return 0; 1834 } 1835 1836 static void 1837 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1838 { 1839 if (on) 1840 ixgbe_vlan_hw_strip_enable(dev, queue); 1841 else 1842 ixgbe_vlan_hw_strip_disable(dev, queue); 1843 } 1844 1845 static int 1846 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1847 enum rte_vlan_type vlan_type, 1848 uint16_t tpid) 1849 { 1850 struct ixgbe_hw *hw = 1851 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1852 int ret = 0; 1853 uint32_t reg; 1854 uint32_t qinq; 1855 1856 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1857 qinq &= IXGBE_DMATXCTL_GDV; 1858 1859 switch (vlan_type) { 1860 case ETH_VLAN_TYPE_INNER: 1861 if (qinq) { 1862 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1863 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1864 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1865 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1866 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1867 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1868 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1869 } else { 1870 ret = -ENOTSUP; 1871 PMD_DRV_LOG(ERR, "Inner type is not supported" 1872 " by single VLAN"); 1873 } 1874 break; 1875 case ETH_VLAN_TYPE_OUTER: 1876 if (qinq) { 1877 /* Only the high 16-bits is valid */ 1878 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 1879 
IXGBE_EXVET_VET_EXT_SHIFT); 1880 } else { 1881 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1882 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1883 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1884 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1885 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1886 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1887 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1888 } 1889 1890 break; 1891 default: 1892 ret = -EINVAL; 1893 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1894 break; 1895 } 1896 1897 return ret; 1898 } 1899 1900 void 1901 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1902 { 1903 struct ixgbe_hw *hw = 1904 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1905 uint32_t vlnctrl; 1906 1907 PMD_INIT_FUNC_TRACE(); 1908 1909 /* Filter Table Disable */ 1910 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1911 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1912 1913 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1914 } 1915 1916 void 1917 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1918 { 1919 struct ixgbe_hw *hw = 1920 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1921 struct ixgbe_vfta *shadow_vfta = 1922 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1923 uint32_t vlnctrl; 1924 uint16_t i; 1925 1926 PMD_INIT_FUNC_TRACE(); 1927 1928 /* Filter Table Enable */ 1929 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1930 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1931 vlnctrl |= IXGBE_VLNCTRL_VFE; 1932 1933 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1934 1935 /* write whatever is in local vfta copy */ 1936 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1937 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 1938 } 1939 1940 static void 1941 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 1942 { 1943 struct ixgbe_hwstrip *hwstrip = 1944 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 1945 struct ixgbe_rx_queue *rxq; 1946 1947 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 1948 return; 1949 1950 if (on) 1951 IXGBE_SET_HWSTRIP(hwstrip, queue); 1952 else 1953 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 1954 1955 if (queue >= dev->data->nb_rx_queues) 1956 return; 1957 1958 rxq = dev->data->rx_queues[queue]; 1959 1960 if (on) { 1961 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; 1962 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 1963 } else { 1964 rxq->vlan_flags = PKT_RX_VLAN; 1965 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 1966 } 1967 } 1968 1969 static void 1970 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 1971 { 1972 struct ixgbe_hw *hw = 1973 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1974 uint32_t ctrl; 1975 1976 PMD_INIT_FUNC_TRACE(); 1977 1978 if (hw->mac.type == ixgbe_mac_82598EB) { 1979 /* No queue level support */ 1980 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 1981 return; 1982 } 1983 1984 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1985 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 1986 ctrl &= ~IXGBE_RXDCTL_VME; 1987 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 1988 1989 /* record those setting for HW strip per queue */ 1990 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 1991 } 1992 1993 static void 1994 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 1995 { 1996 struct ixgbe_hw *hw = 1997 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1998 uint32_t ctrl; 1999 2000 PMD_INIT_FUNC_TRACE(); 2001 2002 if (hw->mac.type == ixgbe_mac_82598EB) { 2003 /* No queue level supported */ 2004 PMD_INIT_LOG(NOTICE, "82598EB not support 
queue level hw strip"); 2005 return; 2006 } 2007 2008 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 2009 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2010 ctrl |= IXGBE_RXDCTL_VME; 2011 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2012 2013 /* record those setting for HW strip per queue */ 2014 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); 2015 } 2016 2017 static void 2018 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2019 { 2020 struct ixgbe_hw *hw = 2021 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2022 uint32_t ctrl; 2023 2024 PMD_INIT_FUNC_TRACE(); 2025 2026 /* DMATXCTRL: Generic Double VLAN Disable */ 2027 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2028 ctrl &= ~IXGBE_DMATXCTL_GDV; 2029 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2030 2031 /* CTRL_EXT: Global Double VLAN Disable */ 2032 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2033 ctrl &= ~IXGBE_EXTENDED_VLAN; 2034 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2035 2036 } 2037 2038 static void 2039 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2040 { 2041 struct ixgbe_hw *hw = 2042 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2043 uint32_t ctrl; 2044 2045 PMD_INIT_FUNC_TRACE(); 2046 2047 /* DMATXCTRL: Generic Double VLAN Enable */ 2048 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2049 ctrl |= IXGBE_DMATXCTL_GDV; 2050 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2051 2052 /* CTRL_EXT: Global Double VLAN Enable */ 2053 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2054 ctrl |= IXGBE_EXTENDED_VLAN; 2055 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2056 2057 /* Clear pooling mode of PFVTCTL. It's required by X550. */ 2058 if (hw->mac.type == ixgbe_mac_X550 || 2059 hw->mac.type == ixgbe_mac_X550EM_x || 2060 hw->mac.type == ixgbe_mac_X550EM_a) { 2061 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 2062 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 2063 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 2064 } 2065 2066 /* 2067 * VET EXT field in the EXVET register = 0x8100 by default 2068 * So no need to change.
Same to VT field of DMATXCTL register 2069 */ 2070 } 2071 2072 void 2073 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) 2074 { 2075 struct ixgbe_hw *hw = 2076 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2077 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 2078 uint32_t ctrl; 2079 uint16_t i; 2080 struct ixgbe_rx_queue *rxq; 2081 bool on; 2082 2083 PMD_INIT_FUNC_TRACE(); 2084 2085 if (hw->mac.type == ixgbe_mac_82598EB) { 2086 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 2087 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2088 ctrl |= IXGBE_VLNCTRL_VME; 2089 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2090 } else { 2091 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2092 ctrl &= ~IXGBE_VLNCTRL_VME; 2093 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2094 } 2095 } else { 2096 /* 2097 * Other 10G NIC, the VLAN strip can be setup 2098 * per queue in RXDCTL 2099 */ 2100 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2101 rxq = dev->data->rx_queues[i]; 2102 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 2103 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 2104 ctrl |= IXGBE_RXDCTL_VME; 2105 on = TRUE; 2106 } else { 2107 ctrl &= ~IXGBE_RXDCTL_VME; 2108 on = FALSE; 2109 } 2110 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 2111 2112 /* record those setting for HW strip per queue */ 2113 ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); 2114 } 2115 } 2116 } 2117 2118 static void 2119 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) 2120 { 2121 uint16_t i; 2122 struct rte_eth_rxmode *rxmode; 2123 struct ixgbe_rx_queue *rxq; 2124 2125 if (mask & ETH_VLAN_STRIP_MASK) { 2126 rxmode = &dev->data->dev_conf.rxmode; 2127 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2128 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2129 rxq = dev->data->rx_queues[i]; 2130 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2131 } 2132 else 2133 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2134 rxq = dev->data->rx_queues[i]; 2135 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2136 } 2137 } 2138 } 2139 2140 static int 2141 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2142 { 2143 struct rte_eth_rxmode *rxmode; 2144 rxmode = &dev->data->dev_conf.rxmode; 2145 2146 if (mask & ETH_VLAN_STRIP_MASK) { 2147 ixgbe_vlan_hw_strip_config(dev); 2148 } 2149 2150 if (mask & ETH_VLAN_FILTER_MASK) { 2151 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 2152 ixgbe_vlan_hw_filter_enable(dev); 2153 else 2154 ixgbe_vlan_hw_filter_disable(dev); 2155 } 2156 2157 if (mask & ETH_VLAN_EXTEND_MASK) { 2158 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2159 ixgbe_vlan_hw_extend_enable(dev); 2160 else 2161 ixgbe_vlan_hw_extend_disable(dev); 2162 } 2163 2164 return 0; 2165 } 2166 2167 static int 2168 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2169 { 2170 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 2171 2172 ixgbe_vlan_offload_config(dev, mask); 2173 2174 return 0; 2175 } 2176 2177 static void 2178 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2179 { 2180 struct ixgbe_hw *hw = 2181 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2182 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 2183 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2184 2185 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 2186 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 2187 } 2188 2189 static int 2190 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 2191 { 2192 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2193 
2194 switch (nb_rx_q) { 2195 case 1: 2196 case 2: 2197 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; 2198 break; 2199 case 4: 2200 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; 2201 break; 2202 default: 2203 return -EINVAL; 2204 } 2205 2206 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 2207 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2208 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 2209 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2210 return 0; 2211 } 2212 2213 static int 2214 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 2215 { 2216 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 2217 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2218 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2219 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2220 2221 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 2222 /* check multi-queue mode */ 2223 switch (dev_conf->rxmode.mq_mode) { 2224 case ETH_MQ_RX_VMDQ_DCB: 2225 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); 2226 break; 2227 case ETH_MQ_RX_VMDQ_DCB_RSS: 2228 /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */ 2229 PMD_INIT_LOG(ERR, "SRIOV active," 2230 " unsupported mq_mode rx %d.", 2231 dev_conf->rxmode.mq_mode); 2232 return -EINVAL; 2233 case ETH_MQ_RX_RSS: 2234 case ETH_MQ_RX_VMDQ_RSS: 2235 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; 2236 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 2237 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 2238 PMD_INIT_LOG(ERR, "SRIOV is active," 2239 " invalid queue number" 2240 " for VMDQ RSS, allowed" 2241 " values are 1, 2 or 4."); 2242 return -EINVAL; 2243 } 2244 break; 2245 case ETH_MQ_RX_VMDQ_ONLY: 2246 case ETH_MQ_RX_NONE: 2247 /* if no mq mode is configured, use the default scheme */ 2248 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; 2249 break; 2250 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ 2251 /* SRIOV only works in VMDq enable mode */ 2252 PMD_INIT_LOG(ERR, "SRIOV is active," 2253 " wrong mq_mode rx %d.", 2254 dev_conf->rxmode.mq_mode); 2255 return -EINVAL; 2256 } 2257 2258 switch (dev_conf->txmode.mq_mode) { 2259 case ETH_MQ_TX_VMDQ_DCB: 2260 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); 2261 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 2262 break; 2263 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ 2264 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; 2265 break; 2266 } 2267 2268 /* check valid queue number */ 2269 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 2270 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 2271 PMD_INIT_LOG(ERR, "SRIOV is active," 2272 " nb_rx_q=%d nb_tx_q=%d queue number" 2273 " must be less than or equal to %d.", 2274 nb_rx_q, nb_tx_q, 2275 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2276 return -EINVAL; 2277 } 2278 } else { 2279 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { 2280 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2281 " not supported."); 2282 return -EINVAL; 2283 } 2284 /* check configuration for vmdq+dcb mode */ 2285 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { 2286 const struct rte_eth_vmdq_dcb_conf *conf; 2287 2288 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2289 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2290 IXGBE_VMDQ_DCB_NB_QUEUES); 2291 return -EINVAL; 2292 } 2293 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2294 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2295 conf->nb_queue_pools == ETH_32_POOLS)) { 2296 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2297 " nb_queue_pools must be %d or %d.",
2298 ETH_16_POOLS, ETH_32_POOLS); 2299 return -EINVAL; 2300 } 2301 } 2302 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { 2303 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2304 2305 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2306 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2307 IXGBE_VMDQ_DCB_NB_QUEUES); 2308 return -EINVAL; 2309 } 2310 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2311 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2312 conf->nb_queue_pools == ETH_32_POOLS)) { 2313 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2314 " nb_queue_pools != %d and" 2315 " nb_queue_pools != %d.", 2316 ETH_16_POOLS, ETH_32_POOLS); 2317 return -EINVAL; 2318 } 2319 } 2320 2321 /* For DCB mode check our configuration before we go further */ 2322 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { 2323 const struct rte_eth_dcb_rx_conf *conf; 2324 2325 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2326 if (!(conf->nb_tcs == ETH_4_TCS || 2327 conf->nb_tcs == ETH_8_TCS)) { 2328 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2329 " and nb_tcs != %d.", 2330 ETH_4_TCS, ETH_8_TCS); 2331 return -EINVAL; 2332 } 2333 } 2334 2335 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { 2336 const struct rte_eth_dcb_tx_conf *conf; 2337 2338 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2339 if (!(conf->nb_tcs == ETH_4_TCS || 2340 conf->nb_tcs == ETH_8_TCS)) { 2341 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2342 " and nb_tcs != %d.", 2343 ETH_4_TCS, ETH_8_TCS); 2344 return -EINVAL; 2345 } 2346 } 2347 2348 /* 2349 * When DCB/VT is off, maximum number of queues changes, 2350 * except for 82598EB, which remains constant. 2351 */ 2352 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 2353 hw->mac.type != ixgbe_mac_82598EB) { 2354 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2355 PMD_INIT_LOG(ERR, 2356 "Neither VT nor DCB are enabled, " 2357 "nb_tx_q > %d.", 2358 IXGBE_NONE_MODE_TX_NB_QUEUES); 2359 return -EINVAL; 2360 } 2361 } 2362 } 2363 return 0; 2364 } 2365 2366 static int 2367 ixgbe_dev_configure(struct rte_eth_dev *dev) 2368 { 2369 struct ixgbe_interrupt *intr = 2370 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2371 struct ixgbe_adapter *adapter = dev->data->dev_private; 2372 int ret; 2373 2374 PMD_INIT_FUNC_TRACE(); 2375 2376 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 2377 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2378 2379 /* multiple queue mode checking */ 2380 ret = ixgbe_check_mq_mode(dev); 2381 if (ret != 0) { 2382 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2383 ret); 2384 return ret; 2385 } 2386 2387 /* set flag to update link status after init */ 2388 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2389 2390 /* 2391 * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk 2392 * allocation or vector Rx preconditions we will reset it.
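 * The Rx queue setup and Rx function selection code may clear these flags later if a queue's configuration rules out the optimized receive paths.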
2393 */ 2394 adapter->rx_bulk_alloc_allowed = true; 2395 adapter->rx_vec_allowed = true; 2396 2397 return 0; 2398 } 2399 2400 static void 2401 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2402 { 2403 struct ixgbe_hw *hw = 2404 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2405 struct ixgbe_interrupt *intr = 2406 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2407 uint32_t gpie; 2408 2409 /* only set up it on X550EM_X */ 2410 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2411 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2412 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2413 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2414 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2415 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2416 } 2417 } 2418 2419 int 2420 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 2421 uint16_t tx_rate, uint64_t q_msk) 2422 { 2423 struct ixgbe_hw *hw; 2424 struct ixgbe_vf_info *vfinfo; 2425 struct rte_eth_link link; 2426 uint8_t nb_q_per_pool; 2427 uint32_t queue_stride; 2428 uint32_t queue_idx, idx = 0, vf_idx; 2429 uint32_t queue_end; 2430 uint16_t total_rate = 0; 2431 struct rte_pci_device *pci_dev; 2432 int ret; 2433 2434 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2435 ret = rte_eth_link_get_nowait(dev->data->port_id, &link); 2436 if (ret < 0) 2437 return ret; 2438 2439 if (vf >= pci_dev->max_vfs) 2440 return -EINVAL; 2441 2442 if (tx_rate > link.link_speed) 2443 return -EINVAL; 2444 2445 if (q_msk == 0) 2446 return 0; 2447 2448 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2449 vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 2450 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2451 queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2452 queue_idx = vf * queue_stride; 2453 queue_end = queue_idx + nb_q_per_pool - 1; 2454 if (queue_end >= hw->mac.max_tx_queues) 2455 return -EINVAL; 2456 2457 if (vfinfo) { 2458 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { 2459 if (vf_idx == vf) 2460 continue; 2461 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 2462 idx++) 2463 total_rate += vfinfo[vf_idx].tx_rate[idx]; 2464 } 2465 } else { 2466 return -EINVAL; 2467 } 2468 2469 /* Store tx_rate for this vf. */ 2470 for (idx = 0; idx < nb_q_per_pool; idx++) { 2471 if (((uint64_t)0x1 << idx) & q_msk) { 2472 if (vfinfo[vf].tx_rate[idx] != tx_rate) 2473 vfinfo[vf].tx_rate[idx] = tx_rate; 2474 total_rate += tx_rate; 2475 } 2476 } 2477 2478 if (total_rate > dev->data->dev_link.link_speed) { 2479 /* Reset stored TX rate of the VF if it causes exceed 2480 * link speed. 
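 * (i.e. the aggregate of the rates stored for all VFs, including the rate just requested for this VF's queues, would exceed the current link speed)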
2481 */ 2482 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 2483 return -EINVAL; 2484 } 2485 2486 /* Set RTTBCNRC of each queue/pool for vf X */ 2487 for (; queue_idx <= queue_end; queue_idx++) { 2488 if (0x1 & q_msk) 2489 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 2490 q_msk = q_msk >> 1; 2491 } 2492 2493 return 0; 2494 } 2495 2496 static int 2497 ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw) 2498 { 2499 struct ixgbe_adapter *adapter = dev->data->dev_private; 2500 int err; 2501 uint32_t mflcn; 2502 2503 ixgbe_setup_fc(hw); 2504 2505 err = ixgbe_fc_enable(hw); 2506 2507 /* Not negotiated is not an error case */ 2508 if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) { 2509 /* 2510 *check if we want to forward MAC frames - driver doesn't 2511 *have native capability to do that, 2512 *so we'll write the registers ourselves 2513 */ 2514 2515 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2516 2517 /* set or clear MFLCN.PMCF bit depending on configuration */ 2518 if (adapter->mac_ctrl_frame_fwd != 0) 2519 mflcn |= IXGBE_MFLCN_PMCF; 2520 else 2521 mflcn &= ~IXGBE_MFLCN_PMCF; 2522 2523 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 2524 IXGBE_WRITE_FLUSH(hw); 2525 2526 return 0; 2527 } 2528 return err; 2529 } 2530 2531 /* 2532 * Configure device link speed and setup link. 2533 * It returns 0 on success. 2534 */ 2535 static int 2536 ixgbe_dev_start(struct rte_eth_dev *dev) 2537 { 2538 struct ixgbe_hw *hw = 2539 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2540 struct ixgbe_vf_info *vfinfo = 2541 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2542 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2543 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2544 uint32_t intr_vector = 0; 2545 int err; 2546 bool link_up = false, negotiate = 0; 2547 uint32_t speed = 0; 2548 uint32_t allowed_speeds = 0; 2549 int mask = 0; 2550 int status; 2551 uint16_t vf, idx; 2552 uint32_t *link_speeds; 2553 struct ixgbe_tm_conf *tm_conf = 2554 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2555 struct ixgbe_macsec_setting *macsec_setting = 2556 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 2557 2558 PMD_INIT_FUNC_TRACE(); 2559 2560 /* Stop the link setup handler before resetting the HW. 
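 * This waits for any in-progress link setup thread to finish so it cannot race with the adapter stop/reset sequence below.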
*/ 2561 ixgbe_dev_wait_setup_link_complete(dev, 0); 2562 2563 /* disable uio/vfio intr/eventfd mapping */ 2564 rte_intr_disable(intr_handle); 2565 2566 /* stop adapter */ 2567 hw->adapter_stopped = 0; 2568 ixgbe_stop_adapter(hw); 2569 2570 /* reinitialize adapter 2571 * this calls reset and start 2572 */ 2573 status = ixgbe_pf_reset_hw(hw); 2574 if (status != 0) 2575 return -1; 2576 hw->mac.ops.start_hw(hw); 2577 hw->mac.get_link_status = true; 2578 2579 /* configure PF module if SRIOV enabled */ 2580 ixgbe_pf_host_configure(dev); 2581 2582 ixgbe_dev_phy_intr_setup(dev); 2583 2584 /* check and configure queue intr-vector mapping */ 2585 if ((rte_intr_cap_multiple(intr_handle) || 2586 !RTE_ETH_DEV_SRIOV(dev).active) && 2587 dev->data->dev_conf.intr_conf.rxq != 0) { 2588 intr_vector = dev->data->nb_rx_queues; 2589 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2590 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2591 IXGBE_MAX_INTR_QUEUE_NUM); 2592 return -ENOTSUP; 2593 } 2594 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2595 return -1; 2596 } 2597 2598 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 2599 intr_handle->intr_vec = 2600 rte_zmalloc("intr_vec", 2601 dev->data->nb_rx_queues * sizeof(int), 0); 2602 if (intr_handle->intr_vec == NULL) { 2603 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2604 " intr_vec", dev->data->nb_rx_queues); 2605 return -ENOMEM; 2606 } 2607 } 2608 2609 /* configure msix for sleep until rx interrupt */ 2610 ixgbe_configure_msix(dev); 2611 2612 /* initialize transmission unit */ 2613 ixgbe_dev_tx_init(dev); 2614 2615 /* This can fail when allocating mbufs for descriptor rings */ 2616 err = ixgbe_dev_rx_init(dev); 2617 if (err) { 2618 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2619 goto error; 2620 } 2621 2622 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 2623 ETH_VLAN_EXTEND_MASK; 2624 err = ixgbe_vlan_offload_config(dev, mask); 2625 if (err) { 2626 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 2627 goto error; 2628 } 2629 2630 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { 2631 /* Enable vlan filtering for VMDq */ 2632 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2633 } 2634 2635 /* Configure DCB hw */ 2636 ixgbe_configure_dcb(dev); 2637 2638 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { 2639 err = ixgbe_fdir_configure(dev); 2640 if (err) 2641 goto error; 2642 } 2643 2644 /* Restore vf rate limit */ 2645 if (vfinfo != NULL) { 2646 for (vf = 0; vf < pci_dev->max_vfs; vf++) 2647 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2648 if (vfinfo[vf].tx_rate[idx] != 0) 2649 ixgbe_set_vf_rate_limit( 2650 dev, vf, 2651 vfinfo[vf].tx_rate[idx], 2652 1 << idx); 2653 } 2654 2655 ixgbe_restore_statistics_mapping(dev); 2656 2657 err = ixgbe_flow_ctrl_enable(dev, hw); 2658 if (err < 0) { 2659 PMD_INIT_LOG(ERR, "enable flow ctrl err"); 2660 goto error; 2661 } 2662 2663 err = ixgbe_dev_rxtx_start(dev); 2664 if (err < 0) { 2665 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2666 goto error; 2667 } 2668 2669 /* Skip link setup if loopback mode is enabled.
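 * Note: only the Tx->Rx loopback mode is expected to pass ixgbe_check_supported_loopback_mode(); any other value makes the start fail below.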
*/ 2670 if (dev->data->dev_conf.lpbk_mode != 0) { 2671 err = ixgbe_check_supported_loopback_mode(dev); 2672 if (err < 0) { 2673 PMD_INIT_LOG(ERR, "Unsupported loopback mode"); 2674 goto error; 2675 } else { 2676 goto skip_link_setup; 2677 } 2678 } 2679 2680 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2681 err = hw->mac.ops.setup_sfp(hw); 2682 if (err) 2683 goto error; 2684 } 2685 2686 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2687 /* Turn on the copper */ 2688 ixgbe_set_phy_power(hw, true); 2689 } else { 2690 /* Turn on the laser */ 2691 ixgbe_enable_tx_laser(hw); 2692 } 2693 2694 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2695 if (err) 2696 goto error; 2697 dev->data->dev_link.link_status = link_up; 2698 2699 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2700 if (err) 2701 goto error; 2702 2703 switch (hw->mac.type) { 2704 case ixgbe_mac_X550: 2705 case ixgbe_mac_X550EM_x: 2706 case ixgbe_mac_X550EM_a: 2707 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2708 ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G | 2709 ETH_LINK_SPEED_10G; 2710 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 2711 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 2712 allowed_speeds = ETH_LINK_SPEED_10M | 2713 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; 2714 break; 2715 default: 2716 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2717 ETH_LINK_SPEED_10G; 2718 } 2719 2720 link_speeds = &dev->data->dev_conf.link_speeds; 2721 2722 /* Ignore autoneg flag bit and check the validity of 2723 * link_speed 2724 */ 2725 if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) { 2726 PMD_INIT_LOG(ERR, "Invalid link setting"); 2727 goto error; 2728 } 2729 2730 speed = 0x0; 2731 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { 2732 switch (hw->mac.type) { 2733 case ixgbe_mac_82598EB: 2734 speed = IXGBE_LINK_SPEED_82598_AUTONEG; 2735 break; 2736 case ixgbe_mac_82599EB: 2737 case ixgbe_mac_X540: 2738 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2739 break; 2740 case ixgbe_mac_X550: 2741 case ixgbe_mac_X550EM_x: 2742 case ixgbe_mac_X550EM_a: 2743 speed = IXGBE_LINK_SPEED_X550_AUTONEG; 2744 break; 2745 default: 2746 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2747 } 2748 } else { 2749 if (*link_speeds & ETH_LINK_SPEED_10G) 2750 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2751 if (*link_speeds & ETH_LINK_SPEED_5G) 2752 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2753 if (*link_speeds & ETH_LINK_SPEED_2_5G) 2754 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2755 if (*link_speeds & ETH_LINK_SPEED_1G) 2756 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2757 if (*link_speeds & ETH_LINK_SPEED_100M) 2758 speed |= IXGBE_LINK_SPEED_100_FULL; 2759 if (*link_speeds & ETH_LINK_SPEED_10M) 2760 speed |= IXGBE_LINK_SPEED_10_FULL; 2761 } 2762 2763 err = ixgbe_setup_link(hw, speed, link_up); 2764 if (err) 2765 goto error; 2766 2767 skip_link_setup: 2768 2769 if (rte_intr_allow_others(intr_handle)) { 2770 /* check if lsc interrupt is enabled */ 2771 if (dev->data->dev_conf.intr_conf.lsc != 0) 2772 ixgbe_dev_lsc_interrupt_setup(dev, TRUE); 2773 else 2774 ixgbe_dev_lsc_interrupt_setup(dev, FALSE); 2775 ixgbe_dev_macsec_interrupt_setup(dev); 2776 } else { 2777 rte_intr_callback_unregister(intr_handle, 2778 ixgbe_dev_interrupt_handler, dev); 2779 if (dev->data->dev_conf.intr_conf.lsc != 0) 2780 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2781 " no intr multiplex"); 2782 } 2783 2784 /* check if rxq interrupt is enabled */ 2785 if (dev->data->dev_conf.intr_conf.rxq != 0 && 2786 rte_intr_dp_is_en(intr_handle)) 2787 
ixgbe_dev_rxq_interrupt_setup(dev); 2788 2789 /* enable uio/vfio intr/eventfd mapping */ 2790 rte_intr_enable(intr_handle); 2791 2792 /* resume enabled intr since hw reset */ 2793 ixgbe_enable_intr(dev); 2794 ixgbe_l2_tunnel_conf(dev); 2795 ixgbe_filter_restore(dev); 2796 2797 if (tm_conf->root && !tm_conf->committed) 2798 PMD_DRV_LOG(WARNING, 2799 "please call hierarchy_commit() " 2800 "before starting the port"); 2801 2802 /* wait for the controller to acquire link */ 2803 err = ixgbe_wait_for_link_up(hw); 2804 if (err) 2805 goto error; 2806 2807 /* 2808 * Update link status right before return, because it may 2809 * start link configuration process in a separate thread. 2810 */ 2811 ixgbe_dev_link_update(dev, 0); 2812 2813 /* setup the macsec setting register */ 2814 if (macsec_setting->offload_en) 2815 ixgbe_dev_macsec_register_enable(dev, macsec_setting); 2816 2817 return 0; 2818 2819 error: 2820 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2821 ixgbe_dev_clear_queues(dev); 2822 return -EIO; 2823 } 2824 2825 /* 2826 * Stop device: disable rx and tx functions to allow for reconfiguring. 2827 */ 2828 static int 2829 ixgbe_dev_stop(struct rte_eth_dev *dev) 2830 { 2831 struct rte_eth_link link; 2832 struct ixgbe_adapter *adapter = dev->data->dev_private; 2833 struct ixgbe_hw *hw = 2834 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2835 struct ixgbe_vf_info *vfinfo = 2836 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2837 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2838 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2839 int vf; 2840 struct ixgbe_tm_conf *tm_conf = 2841 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2842 2843 if (hw->adapter_stopped) 2844 return 0; 2845 2846 PMD_INIT_FUNC_TRACE(); 2847 2848 ixgbe_dev_wait_setup_link_complete(dev, 0); 2849 2850 /* disable interrupts */ 2851 ixgbe_disable_intr(hw); 2852 2853 /* reset the NIC */ 2854 ixgbe_pf_reset_hw(hw); 2855 hw->adapter_stopped = 0; 2856 2857 /* stop adapter */ 2858 ixgbe_stop_adapter(hw); 2859 2860 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) 2861 vfinfo[vf].clear_to_send = false; 2862 2863 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2864 /* Turn off the copper */ 2865 ixgbe_set_phy_power(hw, false); 2866 } else { 2867 /* Turn off the laser */ 2868 ixgbe_disable_tx_laser(hw); 2869 } 2870 2871 ixgbe_dev_clear_queues(dev); 2872 2873 /* Clear stored conf */ 2874 dev->data->scattered_rx = 0; 2875 dev->data->lro = 0; 2876 2877 /* Clear recorded link status */ 2878 memset(&link, 0, sizeof(link)); 2879 rte_eth_linkstatus_set(dev, &link); 2880 2881 if (!rte_intr_allow_others(intr_handle)) 2882 /* resume to the default handler */ 2883 rte_intr_callback_register(intr_handle, 2884 ixgbe_dev_interrupt_handler, 2885 (void *)dev); 2886 2887 /* Clean datapath event and queue/vec mapping */ 2888 rte_intr_efd_disable(intr_handle); 2889 if (intr_handle->intr_vec != NULL) { 2890 rte_free(intr_handle->intr_vec); 2891 intr_handle->intr_vec = NULL; 2892 } 2893 2894 /* reset hierarchy commit */ 2895 tm_conf->committed = false; 2896 2897 adapter->rss_reta_updated = 0; 2898 2899 hw->adapter_stopped = true; 2900 dev->data->dev_started = 0; 2901 2902 return 0; 2903 } 2904 2905 /* 2906 * Set device link up: enable tx. 
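 * On copper ports this powers the PHY back up; on fiber ports it re-enables the Tx laser (see the media type check below).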
2907 */ 2908 static int 2909 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2910 { 2911 struct ixgbe_hw *hw = 2912 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2913 if (hw->mac.type == ixgbe_mac_82599EB) { 2914 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2915 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2916 /* Not supported in bypass mode */ 2917 PMD_INIT_LOG(ERR, "Set link up is not supported " 2918 "by device id 0x%x", hw->device_id); 2919 return -ENOTSUP; 2920 } 2921 #endif 2922 } 2923 2924 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2925 /* Turn on the copper */ 2926 ixgbe_set_phy_power(hw, true); 2927 } else { 2928 /* Turn on the laser */ 2929 ixgbe_enable_tx_laser(hw); 2930 ixgbe_dev_link_update(dev, 0); 2931 } 2932 2933 return 0; 2934 } 2935 2936 /* 2937 * Set device link down: disable tx. 2938 */ 2939 static int 2940 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 2941 { 2942 struct ixgbe_hw *hw = 2943 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2944 if (hw->mac.type == ixgbe_mac_82599EB) { 2945 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2946 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2947 /* Not supported in bypass mode */ 2948 PMD_INIT_LOG(ERR, "Set link down is not supported " 2949 "by device id 0x%x", hw->device_id); 2950 return -ENOTSUP; 2951 } 2952 #endif 2953 } 2954 2955 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2956 /* Turn off the copper */ 2957 ixgbe_set_phy_power(hw, false); 2958 } else { 2959 /* Turn off the laser */ 2960 ixgbe_disable_tx_laser(hw); 2961 ixgbe_dev_link_update(dev, 0); 2962 } 2963 2964 return 0; 2965 } 2966 2967 /* 2968 * Reset and stop device. 2969 */ 2970 static int 2971 ixgbe_dev_close(struct rte_eth_dev *dev) 2972 { 2973 struct ixgbe_hw *hw = 2974 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2975 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2976 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2977 int retries = 0; 2978 int ret; 2979 2980 PMD_INIT_FUNC_TRACE(); 2981 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2982 return 0; 2983 2984 ixgbe_pf_reset_hw(hw); 2985 2986 ret = ixgbe_dev_stop(dev); 2987 2988 ixgbe_dev_free_queues(dev); 2989 2990 ixgbe_disable_pcie_master(hw); 2991 2992 /* reprogram the RAR[0] in case user changed it.
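 * The PF reset above cleared the receive address registers, so the current MAC in hw->mac.addr is written back to RAR[0] before the port is released.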
*/ 2993 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 2994 2995 /* Unlock any pending hardware semaphore */ 2996 ixgbe_swfw_lock_reset(hw); 2997 2998 /* disable uio intr before callback unregister */ 2999 rte_intr_disable(intr_handle); 3000 3001 do { 3002 ret = rte_intr_callback_unregister(intr_handle, 3003 ixgbe_dev_interrupt_handler, dev); 3004 if (ret >= 0 || ret == -ENOENT) { 3005 break; 3006 } else if (ret != -EAGAIN) { 3007 PMD_INIT_LOG(ERR, 3008 "intr callback unregister failed: %d", 3009 ret); 3010 } 3011 rte_delay_ms(100); 3012 } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); 3013 3014 /* cancel the delay handler before remove dev */ 3015 rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev); 3016 3017 /* uninitialize PF if max_vfs not zero */ 3018 ixgbe_pf_host_uninit(dev); 3019 3020 /* remove all the fdir filters & hash */ 3021 ixgbe_fdir_filter_uninit(dev); 3022 3023 /* remove all the L2 tunnel filters & hash */ 3024 ixgbe_l2_tn_filter_uninit(dev); 3025 3026 /* Remove all ntuple filters of the device */ 3027 ixgbe_ntuple_filter_uninit(dev); 3028 3029 /* clear all the filters list */ 3030 ixgbe_filterlist_flush(); 3031 3032 /* Remove all Traffic Manager configuration */ 3033 ixgbe_tm_conf_uninit(dev); 3034 3035 #ifdef RTE_LIB_SECURITY 3036 rte_free(dev->security_ctx); 3037 #endif 3038 3039 return ret; 3040 } 3041 3042 /* 3043 * Reset PF device. 3044 */ 3045 static int 3046 ixgbe_dev_reset(struct rte_eth_dev *dev) 3047 { 3048 int ret; 3049 3050 /* When a DPDK PMD PF begin to reset PF port, it should notify all 3051 * its VF to make them align with it. The detailed notification 3052 * mechanism is PMD specific. As to ixgbe PF, it is rather complex. 3053 * To avoid unexpected behavior in VF, currently reset of PF with 3054 * SR-IOV activation is not supported. It might be supported later. 3055 */ 3056 if (dev->data->sriov.active) 3057 return -ENOTSUP; 3058 3059 ret = eth_ixgbe_dev_uninit(dev); 3060 if (ret) 3061 return ret; 3062 3063 ret = eth_ixgbe_dev_init(dev, NULL); 3064 3065 return ret; 3066 } 3067 3068 static void 3069 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 3070 struct ixgbe_hw_stats *hw_stats, 3071 struct ixgbe_macsec_stats *macsec_stats, 3072 uint64_t *total_missed_rx, uint64_t *total_qbrc, 3073 uint64_t *total_qprc, uint64_t *total_qprdc) 3074 { 3075 uint32_t bprc, lxon, lxoff, total; 3076 uint32_t delta_gprc = 0; 3077 unsigned i; 3078 /* Workaround for RX byte count not including CRC bytes when CRC 3079 * strip is enabled. CRC bytes are removed from counters when crc_strip 3080 * is disabled. 
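 * In other words, when HLREG0.RXCRCSTRP is clear (crc_strip == 0 below) the byte counters are reduced by RTE_ETHER_CRC_LEN per received packet so software totals stay comparable.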
3081 */ 3082 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 3083 IXGBE_HLREG0_RXCRCSTRP); 3084 3085 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 3086 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 3087 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 3088 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 3089 3090 for (i = 0; i < 8; i++) { 3091 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 3092 3093 /* global total per queue */ 3094 hw_stats->mpc[i] += mp; 3095 /* Running comprehensive total for stats display */ 3096 *total_missed_rx += hw_stats->mpc[i]; 3097 if (hw->mac.type == ixgbe_mac_82598EB) { 3098 hw_stats->rnbc[i] += 3099 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3100 hw_stats->pxonrxc[i] += 3101 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 3102 hw_stats->pxoffrxc[i] += 3103 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 3104 } else { 3105 hw_stats->pxonrxc[i] += 3106 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 3107 hw_stats->pxoffrxc[i] += 3108 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 3109 hw_stats->pxon2offc[i] += 3110 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 3111 } 3112 hw_stats->pxontxc[i] += 3113 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 3114 hw_stats->pxofftxc[i] += 3115 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 3116 } 3117 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3118 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3119 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3120 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3121 3122 delta_gprc += delta_qprc; 3123 3124 hw_stats->qprc[i] += delta_qprc; 3125 hw_stats->qptc[i] += delta_qptc; 3126 3127 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 3128 hw_stats->qbrc[i] += 3129 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 3130 if (crc_strip == 0) 3131 hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; 3132 3133 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 3134 hw_stats->qbtc[i] += 3135 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 3136 3137 hw_stats->qprdc[i] += delta_qprdc; 3138 *total_qprdc += hw_stats->qprdc[i]; 3139 3140 *total_qprc += hw_stats->qprc[i]; 3141 *total_qbrc += hw_stats->qbrc[i]; 3142 } 3143 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 3144 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 3145 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3146 3147 /* 3148 * An errata states that gprc actually counts good + missed packets: 3149 * Workaround to set gprc to summated queue packet receives 3150 */ 3151 hw_stats->gprc = *total_qprc; 3152 3153 if (hw->mac.type != ixgbe_mac_82598EB) { 3154 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 3155 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 3156 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 3157 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 3158 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 3159 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 3160 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 3161 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 3162 } else { 3163 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3164 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3165 /* 82598 only has a counter in the high register */ 3166 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3167 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3168 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3169 } 3170 uint64_t old_tpr = hw_stats->tpr; 3171 3172 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 
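/* TPR/TPT count total packets received/transmitted, including errors; old_tpr is kept above so TOR can be adjusted for CRC bytes a few lines below. */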
3173 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3174 3175 if (crc_strip == 0) 3176 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; 3177 3178 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 3179 hw_stats->gptc += delta_gptc; 3180 hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; 3181 hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 3182 3183 /* 3184 * Workaround: mprc hardware is incorrectly counting 3185 * broadcasts, so for now we subtract those. 3186 */ 3187 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3188 hw_stats->bprc += bprc; 3189 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3190 if (hw->mac.type == ixgbe_mac_82598EB) 3191 hw_stats->mprc -= bprc; 3192 3193 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3194 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3195 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3196 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3197 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3198 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3199 3200 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3201 hw_stats->lxontxc += lxon; 3202 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3203 hw_stats->lxofftxc += lxoff; 3204 total = lxon + lxoff; 3205 3206 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3207 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3208 hw_stats->gptc -= total; 3209 hw_stats->mptc -= total; 3210 hw_stats->ptc64 -= total; 3211 hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; 3212 3213 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3214 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3215 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3216 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3217 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3218 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3219 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3220 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3221 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3222 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3223 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3224 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3225 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3226 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3227 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3228 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3229 /* Only read FCOE on 82599 */ 3230 if (hw->mac.type != ixgbe_mac_82598EB) { 3231 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3232 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3233 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3234 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3235 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3236 } 3237 3238 /* Flow Director Stats registers */ 3239 if (hw->mac.type != ixgbe_mac_82598EB) { 3240 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 3241 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 3242 hw_stats->fdirustat_add += IXGBE_READ_REG(hw, 3243 IXGBE_FDIRUSTAT) & 0xFFFF; 3244 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, 3245 IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; 3246 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, 3247 IXGBE_FDIRFSTAT) & 0xFFFF; 3248 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, 3249 IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; 3250 } 3251 /* MACsec Stats registers */ 3252 macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); 3253 macsec_stats->out_pkts_encrypted += 
3254 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); 3255 macsec_stats->out_pkts_protected += 3256 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); 3257 macsec_stats->out_octets_encrypted += 3258 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); 3259 macsec_stats->out_octets_protected += 3260 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); 3261 macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); 3262 macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); 3263 macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); 3264 macsec_stats->in_pkts_unknownsci += 3265 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); 3266 macsec_stats->in_octets_decrypted += 3267 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); 3268 macsec_stats->in_octets_validated += 3269 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); 3270 macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); 3271 macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); 3272 macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); 3273 for (i = 0; i < 2; i++) { 3274 macsec_stats->in_pkts_ok += 3275 IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); 3276 macsec_stats->in_pkts_invalid += 3277 IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); 3278 macsec_stats->in_pkts_notvalid += 3279 IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); 3280 } 3281 macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); 3282 macsec_stats->in_pkts_notusingsa += 3283 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); 3284 } 3285 3286 /* 3287 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 3288 */ 3289 static int 3290 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3291 { 3292 struct ixgbe_hw *hw = 3293 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3294 struct ixgbe_hw_stats *hw_stats = 3295 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3296 struct ixgbe_macsec_stats *macsec_stats = 3297 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3298 dev->data->dev_private); 3299 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3300 unsigned i; 3301 3302 total_missed_rx = 0; 3303 total_qbrc = 0; 3304 total_qprc = 0; 3305 total_qprdc = 0; 3306 3307 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3308 &total_qbrc, &total_qprc, &total_qprdc); 3309 3310 if (stats == NULL) 3311 return -EINVAL; 3312 3313 /* Fill out the rte_eth_stats statistics structure */ 3314 stats->ipackets = total_qprc; 3315 stats->ibytes = total_qbrc; 3316 stats->opackets = hw_stats->gptc; 3317 stats->obytes = hw_stats->gotc; 3318 3319 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3320 stats->q_ipackets[i] = hw_stats->qprc[i]; 3321 stats->q_opackets[i] = hw_stats->qptc[i]; 3322 stats->q_ibytes[i] = hw_stats->qbrc[i]; 3323 stats->q_obytes[i] = hw_stats->qbtc[i]; 3324 stats->q_errors[i] = hw_stats->qprdc[i]; 3325 } 3326 3327 /* Rx Errors */ 3328 stats->imissed = total_missed_rx; 3329 stats->ierrors = hw_stats->crcerrs + 3330 hw_stats->mspdc + 3331 hw_stats->rlec + 3332 hw_stats->ruc + 3333 hw_stats->roc + 3334 hw_stats->illerrc + 3335 hw_stats->errbc + 3336 hw_stats->rfc + 3337 hw_stats->fccrc + 3338 hw_stats->fclast; 3339 3340 /* 3341 * 82599 errata, UDP frames with a 0 checksum can be marked as checksum 3342 * errors. 
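 * For that reason the XEC (checksum error) counter is folded into ierrors below only for MAC types other than 82599.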
3343 */ 3344 if (hw->mac.type != ixgbe_mac_82599EB) 3345 stats->ierrors += hw_stats->xec; 3346 3347 /* Tx Errors */ 3348 stats->oerrors = 0; 3349 return 0; 3350 } 3351 3352 static int 3353 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 3354 { 3355 struct ixgbe_hw_stats *stats = 3356 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3357 3358 /* HW registers are cleared on read */ 3359 ixgbe_dev_stats_get(dev, NULL); 3360 3361 /* Reset software totals */ 3362 memset(stats, 0, sizeof(*stats)); 3363 3364 return 0; 3365 } 3366 3367 /* This function calculates the number of xstats based on the current config */ 3368 static unsigned 3369 ixgbe_xstats_calc_num(void) { 3370 return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + 3371 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 3372 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 3373 } 3374 3375 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3376 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) 3377 { 3378 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 3379 unsigned stat, i, count; 3380 3381 if (xstats_names != NULL) { 3382 count = 0; 3383 3384 /* Note: limit >= cnt_stats checked upstream 3385 * in rte_eth_xstats_names() 3386 */ 3387 3388 /* Extended stats from ixgbe_hw_stats */ 3389 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3390 strlcpy(xstats_names[count].name, 3391 rte_ixgbe_stats_strings[i].name, 3392 sizeof(xstats_names[count].name)); 3393 count++; 3394 } 3395 3396 /* MACsec Stats */ 3397 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3398 strlcpy(xstats_names[count].name, 3399 rte_ixgbe_macsec_strings[i].name, 3400 sizeof(xstats_names[count].name)); 3401 count++; 3402 } 3403 3404 /* RX Priority Stats */ 3405 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3406 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3407 snprintf(xstats_names[count].name, 3408 sizeof(xstats_names[count].name), 3409 "rx_priority%u_%s", i, 3410 rte_ixgbe_rxq_strings[stat].name); 3411 count++; 3412 } 3413 } 3414 3415 /* TX Priority Stats */ 3416 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3417 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3418 snprintf(xstats_names[count].name, 3419 sizeof(xstats_names[count].name), 3420 "tx_priority%u_%s", i, 3421 rte_ixgbe_txq_strings[stat].name); 3422 count++; 3423 } 3424 } 3425 } 3426 return cnt_stats; 3427 } 3428 3429 static int ixgbe_dev_xstats_get_names_by_id( 3430 struct rte_eth_dev *dev, 3431 const uint64_t *ids, 3432 struct rte_eth_xstat_name *xstats_names, 3433 unsigned int limit) 3434 { 3435 if (!ids) { 3436 const unsigned int cnt_stats = ixgbe_xstats_calc_num(); 3437 unsigned int stat, i, count; 3438 3439 if (xstats_names != NULL) { 3440 count = 0; 3441 3442 /* Note: limit >= cnt_stats checked upstream 3443 * in rte_eth_xstats_names() 3444 */ 3445 3446 /* Extended stats from ixgbe_hw_stats */ 3447 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3448 strlcpy(xstats_names[count].name, 3449 rte_ixgbe_stats_strings[i].name, 3450 sizeof(xstats_names[count].name)); 3451 count++; 3452 } 3453 3454 /* MACsec Stats */ 3455 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3456 strlcpy(xstats_names[count].name, 3457 rte_ixgbe_macsec_strings[i].name, 3458 sizeof(xstats_names[count].name)); 3459 count++; 3460 } 3461 3462 /* RX Priority Stats */ 3463 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3464 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3465 snprintf(xstats_names[count].name, 3466 sizeof(xstats_names[count].name), 3467 
"rx_priority%u_%s", i, 3468 rte_ixgbe_rxq_strings[stat].name); 3469 count++; 3470 } 3471 } 3472 3473 /* TX Priority Stats */ 3474 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3475 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3476 snprintf(xstats_names[count].name, 3477 sizeof(xstats_names[count].name), 3478 "tx_priority%u_%s", i, 3479 rte_ixgbe_txq_strings[stat].name); 3480 count++; 3481 } 3482 } 3483 } 3484 return cnt_stats; 3485 } 3486 3487 uint16_t i; 3488 uint16_t size = ixgbe_xstats_calc_num(); 3489 struct rte_eth_xstat_name xstats_names_copy[size]; 3490 3491 ixgbe_dev_xstats_get_names_by_id(dev, NULL, xstats_names_copy, 3492 size); 3493 3494 for (i = 0; i < limit; i++) { 3495 if (ids[i] >= size) { 3496 PMD_INIT_LOG(ERR, "id value isn't valid"); 3497 return -1; 3498 } 3499 strcpy(xstats_names[i].name, 3500 xstats_names_copy[ids[i]].name); 3501 } 3502 return limit; 3503 } 3504 3505 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3506 struct rte_eth_xstat_name *xstats_names, unsigned limit) 3507 { 3508 unsigned i; 3509 3510 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 3511 return -ENOMEM; 3512 3513 if (xstats_names != NULL) 3514 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 3515 strlcpy(xstats_names[i].name, 3516 rte_ixgbevf_stats_strings[i].name, 3517 sizeof(xstats_names[i].name)); 3518 return IXGBEVF_NB_XSTATS; 3519 } 3520 3521 static int 3522 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3523 unsigned n) 3524 { 3525 struct ixgbe_hw *hw = 3526 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3527 struct ixgbe_hw_stats *hw_stats = 3528 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3529 struct ixgbe_macsec_stats *macsec_stats = 3530 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3531 dev->data->dev_private); 3532 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3533 unsigned i, stat, count = 0; 3534 3535 count = ixgbe_xstats_calc_num(); 3536 3537 if (n < count) 3538 return count; 3539 3540 total_missed_rx = 0; 3541 total_qbrc = 0; 3542 total_qprc = 0; 3543 total_qprdc = 0; 3544 3545 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3546 &total_qbrc, &total_qprc, &total_qprdc); 3547 3548 /* If this is a reset xstats is NULL, and we have cleared the 3549 * registers by reading them. 
3550 */ 3551 if (!xstats) 3552 return 0; 3553 3554 /* Extended stats from ixgbe_hw_stats */ 3555 count = 0; 3556 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3557 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3558 rte_ixgbe_stats_strings[i].offset); 3559 xstats[count].id = count; 3560 count++; 3561 } 3562 3563 /* MACsec Stats */ 3564 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3565 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + 3566 rte_ixgbe_macsec_strings[i].offset); 3567 xstats[count].id = count; 3568 count++; 3569 } 3570 3571 /* RX Priority Stats */ 3572 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3573 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3574 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3575 rte_ixgbe_rxq_strings[stat].offset + 3576 (sizeof(uint64_t) * i)); 3577 xstats[count].id = count; 3578 count++; 3579 } 3580 } 3581 3582 /* TX Priority Stats */ 3583 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3584 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3585 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3586 rte_ixgbe_txq_strings[stat].offset + 3587 (sizeof(uint64_t) * i)); 3588 xstats[count].id = count; 3589 count++; 3590 } 3591 } 3592 return count; 3593 } 3594 3595 static int 3596 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 3597 uint64_t *values, unsigned int n) 3598 { 3599 if (!ids) { 3600 struct ixgbe_hw *hw = 3601 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3602 struct ixgbe_hw_stats *hw_stats = 3603 IXGBE_DEV_PRIVATE_TO_STATS( 3604 dev->data->dev_private); 3605 struct ixgbe_macsec_stats *macsec_stats = 3606 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3607 dev->data->dev_private); 3608 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3609 unsigned int i, stat, count = 0; 3610 3611 count = ixgbe_xstats_calc_num(); 3612 3613 if (!ids && n < count) 3614 return count; 3615 3616 total_missed_rx = 0; 3617 total_qbrc = 0; 3618 total_qprc = 0; 3619 total_qprdc = 0; 3620 3621 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, 3622 &total_missed_rx, &total_qbrc, &total_qprc, 3623 &total_qprdc); 3624 3625 /* If this is a reset xstats is NULL, and we have cleared the 3626 * registers by reading them. 
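 * When both ids and values are NULL the call degenerates to a read-and-clear of the counters; otherwise the full table is dumped below in the same order as ixgbe_dev_xstats_get_names(): hw stats, MACsec stats, per-priority Rx stats, per-priority Tx stats.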
3627 */ 3628 if (!ids && !values) 3629 return 0; 3630 3631 /* Extended stats from ixgbe_hw_stats */ 3632 count = 0; 3633 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3634 values[count] = *(uint64_t *)(((char *)hw_stats) + 3635 rte_ixgbe_stats_strings[i].offset); 3636 count++; 3637 } 3638 3639 /* MACsec Stats */ 3640 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3641 values[count] = *(uint64_t *)(((char *)macsec_stats) + 3642 rte_ixgbe_macsec_strings[i].offset); 3643 count++; 3644 } 3645 3646 /* RX Priority Stats */ 3647 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3648 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3649 values[count] = 3650 *(uint64_t *)(((char *)hw_stats) + 3651 rte_ixgbe_rxq_strings[stat].offset + 3652 (sizeof(uint64_t) * i)); 3653 count++; 3654 } 3655 } 3656 3657 /* TX Priority Stats */ 3658 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3659 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3660 values[count] = 3661 *(uint64_t *)(((char *)hw_stats) + 3662 rte_ixgbe_txq_strings[stat].offset + 3663 (sizeof(uint64_t) * i)); 3664 count++; 3665 } 3666 } 3667 return count; 3668 } 3669 3670 uint16_t i; 3671 uint16_t size = ixgbe_xstats_calc_num(); 3672 uint64_t values_copy[size]; 3673 3674 ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); 3675 3676 for (i = 0; i < n; i++) { 3677 if (ids[i] >= size) { 3678 PMD_INIT_LOG(ERR, "id value isn't valid"); 3679 return -1; 3680 } 3681 values[i] = values_copy[ids[i]]; 3682 } 3683 return n; 3684 } 3685 3686 static int 3687 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) 3688 { 3689 struct ixgbe_hw_stats *stats = 3690 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3691 struct ixgbe_macsec_stats *macsec_stats = 3692 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3693 dev->data->dev_private); 3694 3695 unsigned count = ixgbe_xstats_calc_num(); 3696 3697 /* HW registers are cleared on read */ 3698 ixgbe_dev_xstats_get(dev, NULL, count); 3699 3700 /* Reset software totals */ 3701 memset(stats, 0, sizeof(*stats)); 3702 memset(macsec_stats, 0, sizeof(*macsec_stats)); 3703 3704 return 0; 3705 } 3706 3707 static void 3708 ixgbevf_update_stats(struct rte_eth_dev *dev) 3709 { 3710 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3711 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3712 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3713 3714 /* Good Rx packet, include VF loopback */ 3715 UPDATE_VF_STAT(IXGBE_VFGPRC, 3716 hw_stats->last_vfgprc, hw_stats->vfgprc); 3717 3718 /* Good Rx octets, include VF loopback */ 3719 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 3720 hw_stats->last_vfgorc, hw_stats->vfgorc); 3721 3722 /* Good Tx packet, include VF loopback */ 3723 UPDATE_VF_STAT(IXGBE_VFGPTC, 3724 hw_stats->last_vfgptc, hw_stats->vfgptc); 3725 3726 /* Good Tx octets, include VF loopback */ 3727 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 3728 hw_stats->last_vfgotc, hw_stats->vfgotc); 3729 3730 /* Rx Multicst Packet */ 3731 UPDATE_VF_STAT(IXGBE_VFMPRC, 3732 hw_stats->last_vfmprc, hw_stats->vfmprc); 3733 } 3734 3735 static int 3736 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3737 unsigned n) 3738 { 3739 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3740 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3741 unsigned i; 3742 3743 if (n < IXGBEVF_NB_XSTATS) 3744 return IXGBEVF_NB_XSTATS; 3745 3746 ixgbevf_update_stats(dev); 3747 3748 if (!xstats) 3749 return 0; 3750 3751 /* Extended stats */ 3752 for (i = 0; i < 
IXGBEVF_NB_XSTATS; i++) { 3753 xstats[i].id = i; 3754 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 3755 rte_ixgbevf_stats_strings[i].offset); 3756 } 3757 3758 return IXGBEVF_NB_XSTATS; 3759 } 3760 3761 static int 3762 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3763 { 3764 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3765 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3766 3767 ixgbevf_update_stats(dev); 3768 3769 if (stats == NULL) 3770 return -EINVAL; 3771 3772 stats->ipackets = hw_stats->vfgprc; 3773 stats->ibytes = hw_stats->vfgorc; 3774 stats->opackets = hw_stats->vfgptc; 3775 stats->obytes = hw_stats->vfgotc; 3776 return 0; 3777 } 3778 3779 static int 3780 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 3781 { 3782 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3783 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3784 3785 /* Sync HW register to the last stats */ 3786 ixgbevf_dev_stats_get(dev, NULL); 3787 3788 /* reset HW current stats*/ 3789 hw_stats->vfgprc = 0; 3790 hw_stats->vfgorc = 0; 3791 hw_stats->vfgptc = 0; 3792 hw_stats->vfgotc = 0; 3793 hw_stats->vfmprc = 0; 3794 3795 return 0; 3796 } 3797 3798 static int 3799 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 3800 { 3801 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3802 u16 eeprom_verh, eeprom_verl; 3803 u32 etrack_id; 3804 int ret; 3805 3806 ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); 3807 ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); 3808 3809 etrack_id = (eeprom_verh << 16) | eeprom_verl; 3810 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); 3811 if (ret < 0) 3812 return -EINVAL; 3813 3814 ret += 1; /* add the size of '\0' */ 3815 if (fw_size < (size_t)ret) 3816 return ret; 3817 else 3818 return 0; 3819 } 3820 3821 static int 3822 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3823 { 3824 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3825 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3826 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3827 3828 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3829 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3830 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3831 /* 3832 * When DCB/VT is off, maximum number of queues changes, 3833 * except for 82598EB, which remains constant. 
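 * In that case max_tx_queues is limited to IXGBE_NONE_MODE_TX_NB_QUEUES below when multi-queue Tx is not requested.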
3834 */ 3835 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 3836 hw->mac.type != ixgbe_mac_82598EB) 3837 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3838 } 3839 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3840 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3841 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3842 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3843 dev_info->max_vfs = pci_dev->max_vfs; 3844 if (hw->mac.type == ixgbe_mac_82598EB) 3845 dev_info->max_vmdq_pools = ETH_16_POOLS; 3846 else 3847 dev_info->max_vmdq_pools = ETH_64_POOLS; 3848 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3849 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3850 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3851 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3852 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3853 dev_info->rx_queue_offload_capa); 3854 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3855 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3856 3857 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3858 .rx_thresh = { 3859 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3860 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3861 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3862 }, 3863 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3864 .rx_drop_en = 0, 3865 .offloads = 0, 3866 }; 3867 3868 dev_info->default_txconf = (struct rte_eth_txconf) { 3869 .tx_thresh = { 3870 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3871 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3872 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3873 }, 3874 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3875 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3876 .offloads = 0, 3877 }; 3878 3879 dev_info->rx_desc_lim = rx_desc_lim; 3880 dev_info->tx_desc_lim = tx_desc_lim; 3881 3882 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3883 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3884 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3885 3886 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; 3887 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3888 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 3889 dev_info->speed_capa = ETH_LINK_SPEED_10M | 3890 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; 3891 3892 if (hw->mac.type == ixgbe_mac_X540 || 3893 hw->mac.type == ixgbe_mac_X540_vf || 3894 hw->mac.type == ixgbe_mac_X550 || 3895 hw->mac.type == ixgbe_mac_X550_vf) { 3896 dev_info->speed_capa |= ETH_LINK_SPEED_100M; 3897 } 3898 if (hw->mac.type == ixgbe_mac_X550) { 3899 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; 3900 dev_info->speed_capa |= ETH_LINK_SPEED_5G; 3901 } 3902 3903 /* Driver-preferred Rx/Tx parameters */ 3904 dev_info->default_rxportconf.burst_size = 32; 3905 dev_info->default_txportconf.burst_size = 32; 3906 dev_info->default_rxportconf.nb_queues = 1; 3907 dev_info->default_txportconf.nb_queues = 1; 3908 dev_info->default_rxportconf.ring_size = 256; 3909 dev_info->default_txportconf.ring_size = 256; 3910 3911 return 0; 3912 } 3913 3914 static const uint32_t * 3915 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 3916 { 3917 static const uint32_t ptypes[] = { 3918 /* For non-vec functions, 3919 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 3920 * for vec functions, 3921 * refers to _recv_raw_pkts_vec(). 
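 * The table below is only returned when one of the known ixgbe Rx burst functions is in use; otherwise NULL is returned.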
3922 */ 3923 RTE_PTYPE_L2_ETHER, 3924 RTE_PTYPE_L3_IPV4, 3925 RTE_PTYPE_L3_IPV4_EXT, 3926 RTE_PTYPE_L3_IPV6, 3927 RTE_PTYPE_L3_IPV6_EXT, 3928 RTE_PTYPE_L4_SCTP, 3929 RTE_PTYPE_L4_TCP, 3930 RTE_PTYPE_L4_UDP, 3931 RTE_PTYPE_TUNNEL_IP, 3932 RTE_PTYPE_INNER_L3_IPV6, 3933 RTE_PTYPE_INNER_L3_IPV6_EXT, 3934 RTE_PTYPE_INNER_L4_TCP, 3935 RTE_PTYPE_INNER_L4_UDP, 3936 RTE_PTYPE_UNKNOWN 3937 }; 3938 3939 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 3940 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 3941 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 3942 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) 3943 return ptypes; 3944 3945 #if defined(RTE_ARCH_X86) || defined(__ARM_NEON) 3946 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || 3947 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) 3948 return ptypes; 3949 #endif 3950 return NULL; 3951 } 3952 3953 static int 3954 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 3955 struct rte_eth_dev_info *dev_info) 3956 { 3957 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3958 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3959 3960 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3961 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3962 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 3963 dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ 3964 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3965 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3966 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3967 dev_info->max_vfs = pci_dev->max_vfs; 3968 if (hw->mac.type == ixgbe_mac_82598EB) 3969 dev_info->max_vmdq_pools = ETH_16_POOLS; 3970 else 3971 dev_info->max_vmdq_pools = ETH_64_POOLS; 3972 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3973 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3974 dev_info->rx_queue_offload_capa); 3975 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3976 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3977 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3978 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3979 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3980 3981 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3982 .rx_thresh = { 3983 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3984 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3985 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3986 }, 3987 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3988 .rx_drop_en = 0, 3989 .offloads = 0, 3990 }; 3991 3992 dev_info->default_txconf = (struct rte_eth_txconf) { 3993 .tx_thresh = { 3994 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3995 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3996 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3997 }, 3998 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3999 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 4000 .offloads = 0, 4001 }; 4002 4003 dev_info->rx_desc_lim = rx_desc_lim; 4004 dev_info->tx_desc_lim = tx_desc_lim; 4005 4006 return 0; 4007 } 4008 4009 static int 4010 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 4011 bool *link_up, int wait_to_complete) 4012 { 4013 struct ixgbe_adapter *adapter = container_of(hw, 4014 struct ixgbe_adapter, hw); 4015 struct ixgbe_mbx_info *mbx = &hw->mbx; 4016 struct ixgbe_mac_info *mac = &hw->mac; 4017 uint32_t links_reg, in_msg; 4018 int ret_val = 0; 4019 4020 /* If we were hit with a reset drop the link */ 4021 if (!mbx->ops.check_for_rst(hw, 0) || 
!mbx->timeout) 4022 mac->get_link_status = true; 4023 4024 if (!mac->get_link_status) 4025 goto out; 4026 4027 /* if link status is down no point in checking to see if pf is up */ 4028 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4029 if (!(links_reg & IXGBE_LINKS_UP)) 4030 goto out; 4031 4032 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs 4033 * before the link status is correct 4034 */ 4035 if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { 4036 int i; 4037 4038 for (i = 0; i < 5; i++) { 4039 rte_delay_us(100); 4040 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4041 4042 if (!(links_reg & IXGBE_LINKS_UP)) 4043 goto out; 4044 } 4045 } 4046 4047 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 4048 case IXGBE_LINKS_SPEED_10G_82599: 4049 *speed = IXGBE_LINK_SPEED_10GB_FULL; 4050 if (hw->mac.type >= ixgbe_mac_X550) { 4051 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4052 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 4053 } 4054 break; 4055 case IXGBE_LINKS_SPEED_1G_82599: 4056 *speed = IXGBE_LINK_SPEED_1GB_FULL; 4057 break; 4058 case IXGBE_LINKS_SPEED_100_82599: 4059 *speed = IXGBE_LINK_SPEED_100_FULL; 4060 if (hw->mac.type == ixgbe_mac_X550) { 4061 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4062 *speed = IXGBE_LINK_SPEED_5GB_FULL; 4063 } 4064 break; 4065 case IXGBE_LINKS_SPEED_10_X550EM_A: 4066 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4067 /* Since Reserved in older MAC's */ 4068 if (hw->mac.type >= ixgbe_mac_X550) 4069 *speed = IXGBE_LINK_SPEED_10_FULL; 4070 break; 4071 default: 4072 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4073 } 4074 4075 if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) { 4076 if (*speed == IXGBE_LINK_SPEED_UNKNOWN) 4077 mac->get_link_status = true; 4078 else 4079 mac->get_link_status = false; 4080 4081 goto out; 4082 } 4083 4084 /* if the read failed it could just be a mailbox collision, best wait 4085 * until we are called again and don't report an error 4086 */ 4087 if (mbx->ops.read(hw, &in_msg, 1, 0)) 4088 goto out; 4089 4090 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { 4091 /* msg is not CTS and is NACK we must have lost CTS status */ 4092 if (in_msg & IXGBE_VT_MSGTYPE_NACK) 4093 mac->get_link_status = false; 4094 goto out; 4095 } 4096 4097 /* the pf is talking, if we timed out in the past we reinit */ 4098 if (!mbx->timeout) { 4099 ret_val = -1; 4100 goto out; 4101 } 4102 4103 /* if we passed all the tests above then the link is up and we no 4104 * longer need to check for link 4105 */ 4106 mac->get_link_status = false; 4107 4108 out: 4109 *link_up = !mac->get_link_status; 4110 return ret_val; 4111 } 4112 4113 /* 4114 * If @timeout_ms was 0, it means that it will not return until link complete. 4115 * It returns 1 on complete, return 0 on timeout. 4116 */ 4117 static int 4118 ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms) 4119 { 4120 #define WARNING_TIMEOUT 9000 /* 9s in total */ 4121 struct ixgbe_adapter *ad = dev->data->dev_private; 4122 uint32_t timeout = timeout_ms ? 
timeout_ms : WARNING_TIMEOUT; 4123 4124 while (rte_atomic32_read(&ad->link_thread_running)) { 4125 msec_delay(1); 4126 timeout--; 4127 4128 if (timeout_ms) { 4129 if (!timeout) 4130 return 0; 4131 } else if (!timeout) { 4132 /* It will not return until link complete */ 4133 timeout = WARNING_TIMEOUT; 4134 PMD_DRV_LOG(ERR, "IXGBE link thread not complete too long time!"); 4135 } 4136 } 4137 4138 return 1; 4139 } 4140 4141 static void * 4142 ixgbe_dev_setup_link_thread_handler(void *param) 4143 { 4144 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4145 struct ixgbe_adapter *ad = dev->data->dev_private; 4146 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4147 struct ixgbe_interrupt *intr = 4148 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4149 u32 speed; 4150 bool autoneg = false; 4151 4152 pthread_detach(pthread_self()); 4153 speed = hw->phy.autoneg_advertised; 4154 if (!speed) 4155 ixgbe_get_link_capabilities(hw, &speed, &autoneg); 4156 4157 ixgbe_setup_link(hw, speed, true); 4158 4159 intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 4160 rte_atomic32_clear(&ad->link_thread_running); 4161 return NULL; 4162 } 4163 4164 /* 4165 * In freebsd environment, nic_uio drivers do not support interrupts, 4166 * rte_intr_callback_register() will fail to register interrupts. 4167 * We can not make link status to change from down to up by interrupt 4168 * callback. So we need to wait for the controller to acquire link 4169 * when ports start. 4170 * It returns 0 on link up. 4171 */ 4172 static int 4173 ixgbe_wait_for_link_up(struct ixgbe_hw *hw) 4174 { 4175 #ifdef RTE_EXEC_ENV_FREEBSD 4176 int err, i; 4177 bool link_up = false; 4178 uint32_t speed = 0; 4179 const int nb_iter = 25; 4180 4181 for (i = 0; i < nb_iter; i++) { 4182 err = ixgbe_check_link(hw, &speed, &link_up, 0); 4183 if (err) 4184 return err; 4185 if (link_up) 4186 return 0; 4187 msec_delay(200); 4188 } 4189 4190 return 0; 4191 #else 4192 RTE_SET_USED(hw); 4193 return 0; 4194 #endif 4195 } 4196 4197 /* return 0 means link status changed, -1 means not changed */ 4198 int 4199 ixgbe_dev_link_update_share(struct rte_eth_dev *dev, 4200 int wait_to_complete, int vf) 4201 { 4202 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4203 struct ixgbe_adapter *ad = dev->data->dev_private; 4204 struct rte_eth_link link; 4205 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4206 struct ixgbe_interrupt *intr = 4207 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4208 bool link_up; 4209 int diag; 4210 int wait = 1; 4211 u32 esdp_reg; 4212 4213 memset(&link, 0, sizeof(link)); 4214 link.link_status = ETH_LINK_DOWN; 4215 link.link_speed = ETH_SPEED_NUM_NONE; 4216 link.link_duplex = ETH_LINK_HALF_DUPLEX; 4217 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 4218 ETH_LINK_SPEED_FIXED); 4219 4220 hw->mac.get_link_status = true; 4221 4222 if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) 4223 return rte_eth_linkstatus_set(dev, &link); 4224 4225 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 4226 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 4227 wait = 0; 4228 4229 /* BSD has no interrupt mechanism, so force NIC status synchronization. 
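 * (wait is forced to 1 below so that the link check does not return the instantaneous state but waits for it to resolve)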
*/ 4230 #ifdef RTE_EXEC_ENV_FREEBSD 4231 wait = 1; 4232 #endif 4233 4234 if (vf) 4235 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); 4236 else 4237 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); 4238 4239 if (diag != 0) { 4240 link.link_speed = ETH_SPEED_NUM_100M; 4241 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4242 return rte_eth_linkstatus_set(dev, &link); 4243 } 4244 4245 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4246 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 4247 if ((esdp_reg & IXGBE_ESDP_SDP3)) 4248 link_up = 0; 4249 } 4250 4251 if (link_up == 0) { 4252 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4253 ixgbe_dev_wait_setup_link_complete(dev, 0); 4254 if (rte_atomic32_test_and_set(&ad->link_thread_running)) { 4255 /* To avoid race condition between threads, set 4256 * the IXGBE_FLAG_NEED_LINK_CONFIG flag only 4257 * when there is no link thread running. 4258 */ 4259 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 4260 if (rte_ctrl_thread_create(&ad->link_thread_tid, 4261 "ixgbe-link-handler", 4262 NULL, 4263 ixgbe_dev_setup_link_thread_handler, 4264 dev) < 0) { 4265 PMD_DRV_LOG(ERR, 4266 "Create link thread failed!"); 4267 rte_atomic32_clear(&ad->link_thread_running); 4268 } 4269 } else { 4270 PMD_DRV_LOG(ERR, 4271 "Other link thread is running now!"); 4272 } 4273 } 4274 return rte_eth_linkstatus_set(dev, &link); 4275 } 4276 4277 link.link_status = ETH_LINK_UP; 4278 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4279 4280 switch (link_speed) { 4281 default: 4282 case IXGBE_LINK_SPEED_UNKNOWN: 4283 link.link_speed = ETH_SPEED_NUM_UNKNOWN; 4284 break; 4285 4286 case IXGBE_LINK_SPEED_10_FULL: 4287 link.link_speed = ETH_SPEED_NUM_10M; 4288 break; 4289 4290 case IXGBE_LINK_SPEED_100_FULL: 4291 link.link_speed = ETH_SPEED_NUM_100M; 4292 break; 4293 4294 case IXGBE_LINK_SPEED_1GB_FULL: 4295 link.link_speed = ETH_SPEED_NUM_1G; 4296 break; 4297 4298 case IXGBE_LINK_SPEED_2_5GB_FULL: 4299 link.link_speed = ETH_SPEED_NUM_2_5G; 4300 break; 4301 4302 case IXGBE_LINK_SPEED_5GB_FULL: 4303 link.link_speed = ETH_SPEED_NUM_5G; 4304 break; 4305 4306 case IXGBE_LINK_SPEED_10GB_FULL: 4307 link.link_speed = ETH_SPEED_NUM_10G; 4308 break; 4309 } 4310 4311 return rte_eth_linkstatus_set(dev, &link); 4312 } 4313 4314 static int 4315 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4316 { 4317 return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); 4318 } 4319 4320 static int 4321 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4322 { 4323 return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); 4324 } 4325 4326 static int 4327 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 4328 { 4329 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4330 uint32_t fctrl; 4331 4332 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4333 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4334 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4335 4336 return 0; 4337 } 4338 4339 static int 4340 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 4341 { 4342 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4343 uint32_t fctrl; 4344 4345 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4346 fctrl &= (~IXGBE_FCTRL_UPE); 4347 if (dev->data->all_multicast == 1) 4348 fctrl |= IXGBE_FCTRL_MPE; 4349 else 4350 fctrl &= (~IXGBE_FCTRL_MPE); 4351 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4352 4353 return 0; 4354 } 4355 4356 static int 4357 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 4358 { 4359 struct ixgbe_hw *hw = 
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4360 uint32_t fctrl; 4361 4362 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4363 fctrl |= IXGBE_FCTRL_MPE; 4364 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4365 4366 return 0; 4367 } 4368 4369 static int 4370 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 4371 { 4372 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4373 uint32_t fctrl; 4374 4375 if (dev->data->promiscuous == 1) 4376 return 0; /* must remain in all_multicast mode */ 4377 4378 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4379 fctrl &= (~IXGBE_FCTRL_MPE); 4380 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4381 4382 return 0; 4383 } 4384 4385 /** 4386 * It clears the interrupt causes and enables the interrupt. 4387 * It will be called once only during nic initialized. 4388 * 4389 * @param dev 4390 * Pointer to struct rte_eth_dev. 4391 * @param on 4392 * Enable or Disable. 4393 * 4394 * @return 4395 * - On success, zero. 4396 * - On failure, a negative value. 4397 */ 4398 static int 4399 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 4400 { 4401 struct ixgbe_interrupt *intr = 4402 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4403 4404 ixgbe_dev_link_status_print(dev); 4405 if (on) 4406 intr->mask |= IXGBE_EICR_LSC; 4407 else 4408 intr->mask &= ~IXGBE_EICR_LSC; 4409 4410 return 0; 4411 } 4412 4413 /** 4414 * It clears the interrupt causes and enables the interrupt. 4415 * It will be called once only during nic initialized. 4416 * 4417 * @param dev 4418 * Pointer to struct rte_eth_dev. 4419 * 4420 * @return 4421 * - On success, zero. 4422 * - On failure, a negative value. 4423 */ 4424 static int 4425 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) 4426 { 4427 struct ixgbe_interrupt *intr = 4428 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4429 4430 intr->mask |= IXGBE_EICR_RTX_QUEUE; 4431 4432 return 0; 4433 } 4434 4435 /** 4436 * It clears the interrupt causes and enables the interrupt. 4437 * It will be called once only during nic initialized. 4438 * 4439 * @param dev 4440 * Pointer to struct rte_eth_dev. 4441 * 4442 * @return 4443 * - On success, zero. 4444 * - On failure, a negative value. 4445 */ 4446 static int 4447 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) 4448 { 4449 struct ixgbe_interrupt *intr = 4450 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4451 4452 intr->mask |= IXGBE_EICR_LINKSEC; 4453 4454 return 0; 4455 } 4456 4457 /* 4458 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. 4459 * 4460 * @param dev 4461 * Pointer to struct rte_eth_dev. 4462 * 4463 * @return 4464 * - On success, zero. 4465 * - On failure, a negative value. 
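 *
 * Note that EICR is read-on-clear: the pending causes are consumed here and latched into intr->flags for ixgbe_dev_interrupt_action().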
4466 */ 4467 static int 4468 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 4469 { 4470 uint32_t eicr; 4471 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4472 struct ixgbe_interrupt *intr = 4473 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4474 4475 /* clear all cause mask */ 4476 ixgbe_disable_intr(hw); 4477 4478 /* read-on-clear nic registers here */ 4479 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4480 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 4481 4482 intr->flags = 0; 4483 4484 /* set flag for async link update */ 4485 if (eicr & IXGBE_EICR_LSC) 4486 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4487 4488 if (eicr & IXGBE_EICR_MAILBOX) 4489 intr->flags |= IXGBE_FLAG_MAILBOX; 4490 4491 if (eicr & IXGBE_EICR_LINKSEC) 4492 intr->flags |= IXGBE_FLAG_MACSEC; 4493 4494 if (hw->mac.type == ixgbe_mac_X550EM_x && 4495 hw->phy.type == ixgbe_phy_x550em_ext_t && 4496 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) 4497 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; 4498 4499 return 0; 4500 } 4501 4502 /** 4503 * It gets and then prints the link status. 4504 * 4505 * @param dev 4506 * Pointer to struct rte_eth_dev. 4507 * 4508 * @return 4509 * void 4510 * 4511 */ 4512 static void 4513 ixgbe_dev_link_status_print(struct rte_eth_dev *dev) 4514 { 4515 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4516 struct rte_eth_link link; 4517 4518 rte_eth_linkstatus_get(dev, &link); 4519 4520 if (link.link_status) { 4521 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 4522 (int)(dev->data->port_id), 4523 (unsigned)link.link_speed, 4524 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 4525 "full-duplex" : "half-duplex"); 4526 } else { 4527 PMD_INIT_LOG(INFO, " Port %d: Link Down", 4528 (int)(dev->data->port_id)); 4529 } 4530 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 4531 pci_dev->addr.domain, 4532 pci_dev->addr.bus, 4533 pci_dev->addr.devid, 4534 pci_dev->addr.function); 4535 } 4536 4537 /* 4538 * It executes link_update after an interrupt has occurred. 4539 * 4540 * @param dev 4541 * Pointer to struct rte_eth_dev. 4542 * 4543 * @return 4544 * - On success, zero. 4545 * - On failure, a negative value.
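 *
 * For link state changes the heavy lifting is deferred: an EAL alarm is armed so that ixgbe_dev_interrupt_delayed_handler() runs once the link has had time to stabilise, with the LSC interrupt masked in the meantime.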
4546 */ 4547 static int 4548 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) 4549 { 4550 struct ixgbe_interrupt *intr = 4551 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4552 int64_t timeout; 4553 struct ixgbe_hw *hw = 4554 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4555 4556 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); 4557 4558 if (intr->flags & IXGBE_FLAG_MAILBOX) { 4559 ixgbe_pf_mbx_process(dev); 4560 intr->flags &= ~IXGBE_FLAG_MAILBOX; 4561 } 4562 4563 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4564 ixgbe_handle_lasi(hw); 4565 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4566 } 4567 4568 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4569 struct rte_eth_link link; 4570 4571 /* get the link status before the update, to predict the transition below */ 4572 rte_eth_linkstatus_get(dev, &link); 4573 4574 ixgbe_dev_link_update(dev, 0); 4575 4576 /* link is likely coming up */ 4577 if (!link.link_status) 4578 /* handle it 1 sec later, wait for it to become stable */ 4579 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; 4580 /* link is likely going down */ 4581 else 4582 /* handle it 4 sec later, wait for it to become stable */ 4583 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; 4584 4585 ixgbe_dev_link_status_print(dev); 4586 if (rte_eal_alarm_set(timeout * 1000, 4587 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) 4588 PMD_DRV_LOG(ERR, "Error setting alarm"); 4589 else { 4590 /* remember original mask */ 4591 intr->mask_original = intr->mask; 4592 /* only disable lsc interrupt */ 4593 intr->mask &= ~IXGBE_EIMS_LSC; 4594 } 4595 } 4596 4597 PMD_DRV_LOG(DEBUG, "enable intr immediately"); 4598 ixgbe_enable_intr(dev); 4599 4600 return 0; 4601 } 4602 4603 /** 4604 * Interrupt handler to be registered as an alarm callback for delayed handling 4605 * of a specific interrupt, waiting for the NIC state to become stable. As the 4606 * NIC interrupt state is not stable for ixgbe right after the link goes down, 4607 * it needs to wait 4 seconds to get the stable status. 4608 * 4609 * @param handle 4610 * Pointer to interrupt handle. 4611 * @param param 4612 * The address of the parameter (struct rte_eth_dev *) registered before.
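 *
 * It restores the interrupt mask saved by ixgbe_dev_interrupt_action(), re-enables the interrupt and acks it once the deferred work is done.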
4613 * 4614 * @return 4615 * void 4616 */ 4617 static void 4618 ixgbe_dev_interrupt_delayed_handler(void *param) 4619 { 4620 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4621 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4622 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4623 struct ixgbe_interrupt *intr = 4624 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4625 struct ixgbe_hw *hw = 4626 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4627 uint32_t eicr; 4628 4629 ixgbe_disable_intr(hw); 4630 4631 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4632 if (eicr & IXGBE_EICR_MAILBOX) 4633 ixgbe_pf_mbx_process(dev); 4634 4635 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4636 ixgbe_handle_lasi(hw); 4637 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4638 } 4639 4640 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4641 ixgbe_dev_link_update(dev, 0); 4642 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 4643 ixgbe_dev_link_status_print(dev); 4644 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 4645 } 4646 4647 if (intr->flags & IXGBE_FLAG_MACSEC) { 4648 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, NULL); 4649 intr->flags &= ~IXGBE_FLAG_MACSEC; 4650 } 4651 4652 /* restore original mask */ 4653 intr->mask = intr->mask_original; 4654 intr->mask_original = 0; 4655 4656 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 4657 ixgbe_enable_intr(dev); 4658 rte_intr_ack(intr_handle); 4659 } 4660 4661 /** 4662 * Interrupt handler triggered by NIC for handling 4663 * specific interrupt. 4664 * 4665 * @param handle 4666 * Pointer to interrupt handle. 4667 * @param param 4668 * The address of parameter (struct rte_eth_dev *) regsitered before. 4669 * 4670 * @return 4671 * void 4672 */ 4673 static void 4674 ixgbe_dev_interrupt_handler(void *param) 4675 { 4676 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4677 4678 ixgbe_dev_interrupt_get_status(dev); 4679 ixgbe_dev_interrupt_action(dev); 4680 } 4681 4682 static int 4683 ixgbe_dev_led_on(struct rte_eth_dev *dev) 4684 { 4685 struct ixgbe_hw *hw; 4686 4687 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4688 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4689 } 4690 4691 static int 4692 ixgbe_dev_led_off(struct rte_eth_dev *dev) 4693 { 4694 struct ixgbe_hw *hw; 4695 4696 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4697 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4698 } 4699 4700 static int 4701 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4702 { 4703 struct ixgbe_hw *hw; 4704 uint32_t mflcn_reg; 4705 uint32_t fccfg_reg; 4706 int rx_pause; 4707 int tx_pause; 4708 4709 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4710 4711 fc_conf->pause_time = hw->fc.pause_time; 4712 fc_conf->high_water = hw->fc.high_water[0]; 4713 fc_conf->low_water = hw->fc.low_water[0]; 4714 fc_conf->send_xon = hw->fc.send_xon; 4715 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 4716 4717 /* 4718 * Return rx_pause status according to actual setting of 4719 * MFLCN register. 4720 */ 4721 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4722 if (mflcn_reg & IXGBE_MFLCN_PMCF) 4723 fc_conf->mac_ctrl_frame_fwd = 1; 4724 else 4725 fc_conf->mac_ctrl_frame_fwd = 0; 4726 4727 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) 4728 rx_pause = 1; 4729 else 4730 rx_pause = 0; 4731 4732 /* 4733 * Return tx_pause status according to actual setting of 4734 * FCCFG register. 
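 * Both the 802.3x and the priority-based Tx pause enable bits are treated as tx_pause being on.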
4735 */ 4736 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4737 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 4738 tx_pause = 1; 4739 else 4740 tx_pause = 0; 4741 4742 if (rx_pause && tx_pause) 4743 fc_conf->mode = RTE_FC_FULL; 4744 else if (rx_pause) 4745 fc_conf->mode = RTE_FC_RX_PAUSE; 4746 else if (tx_pause) 4747 fc_conf->mode = RTE_FC_TX_PAUSE; 4748 else 4749 fc_conf->mode = RTE_FC_NONE; 4750 4751 return 0; 4752 } 4753 4754 static int 4755 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4756 { 4757 struct ixgbe_hw *hw; 4758 struct ixgbe_adapter *adapter = dev->data->dev_private; 4759 int err; 4760 uint32_t rx_buf_size; 4761 uint32_t max_high_water; 4762 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4763 ixgbe_fc_none, 4764 ixgbe_fc_rx_pause, 4765 ixgbe_fc_tx_pause, 4766 ixgbe_fc_full 4767 }; 4768 4769 PMD_INIT_FUNC_TRACE(); 4770 4771 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4772 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 4773 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4774 4775 /* 4776 * Reserve at least one Ethernet frame for the high_water/low_water 4777 * watermarks, which are given in kilobytes on ixgbe. 4778 */ 4779 max_high_water = (rx_buf_size - 4780 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4781 if ((fc_conf->high_water > max_high_water) || 4782 (fc_conf->high_water < fc_conf->low_water)) { 4783 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4784 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water); 4785 return -EINVAL; 4786 } 4787 4788 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 4789 hw->fc.pause_time = fc_conf->pause_time; 4790 hw->fc.high_water[0] = fc_conf->high_water; 4791 hw->fc.low_water[0] = fc_conf->low_water; 4792 hw->fc.send_xon = fc_conf->send_xon; 4793 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 4794 adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 4795 4796 err = ixgbe_flow_ctrl_enable(dev, hw); 4797 if (err < 0) { 4798 PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err); 4799 return -EIO; 4800 } 4801 return err; 4802 } 4803 4804 /** 4805 * ixgbe_dcb_pfc_enable_generic - Enable flow control 4806 * @hw: pointer to hardware structure 4807 * @tc_num: traffic class number 4808 * Enable flow control according to the current settings.
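 *
 * It validates the per-TC watermarks, negotiates the fc mode, programs MFLCN/FCCFG accordingly and finally writes the FCRTL/FCRTH thresholds, the pause time and the refresh interval.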
4809 */ 4810 static int 4811 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) 4812 { 4813 int ret_val = 0; 4814 uint32_t mflcn_reg, fccfg_reg; 4815 uint32_t reg; 4816 uint32_t fcrtl, fcrth; 4817 uint8_t i; 4818 uint8_t nb_rx_en; 4819 4820 /* Validate the water mark configuration */ 4821 if (!hw->fc.pause_time) { 4822 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4823 goto out; 4824 } 4825 4826 /* Low water mark of zero causes XOFF floods */ 4827 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 4828 /* High/Low water can not be 0 */ 4829 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { 4830 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4831 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4832 goto out; 4833 } 4834 4835 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { 4836 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4837 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4838 goto out; 4839 } 4840 } 4841 /* Negotiate the fc mode to use */ 4842 ixgbe_fc_autoneg(hw); 4843 4844 /* Disable any previous flow control settings */ 4845 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4846 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); 4847 4848 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4849 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 4850 4851 switch (hw->fc.current_mode) { 4852 case ixgbe_fc_none: 4853 /* 4854 * If the count of enabled RX Priority Flow control >1, 4855 * and the TX pause can not be disabled 4856 */ 4857 nb_rx_en = 0; 4858 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4859 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4860 if (reg & IXGBE_FCRTH_FCEN) 4861 nb_rx_en++; 4862 } 4863 if (nb_rx_en > 1) 4864 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4865 break; 4866 case ixgbe_fc_rx_pause: 4867 /* 4868 * Rx Flow control is enabled and Tx Flow control is 4869 * disabled by software override. Since there really 4870 * isn't a way to advertise that we are capable of RX 4871 * Pause ONLY, we will advertise that we support both 4872 * symmetric and asymmetric Rx PAUSE. Later, we will 4873 * disable the adapter's ability to send PAUSE frames. 4874 */ 4875 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4876 /* 4877 * If the count of enabled RX Priority Flow control >1, 4878 * and the TX pause can not be disabled 4879 */ 4880 nb_rx_en = 0; 4881 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4882 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4883 if (reg & IXGBE_FCRTH_FCEN) 4884 nb_rx_en++; 4885 } 4886 if (nb_rx_en > 1) 4887 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4888 break; 4889 case ixgbe_fc_tx_pause: 4890 /* 4891 * Tx Flow control is enabled, and Rx Flow control is 4892 * disabled by software override. 4893 */ 4894 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4895 break; 4896 case ixgbe_fc_full: 4897 /* Flow control (both Rx and Tx) is enabled by SW override. */ 4898 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4899 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4900 break; 4901 default: 4902 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); 4903 ret_val = IXGBE_ERR_CONFIG; 4904 goto out; 4905 } 4906 4907 /* Set 802.3x based flow control settings. */ 4908 mflcn_reg |= IXGBE_MFLCN_DPF; 4909 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 4910 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 4911 4912 /* Set up and enable Rx high/low water mark thresholds, enable XON. 
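 * The watermarks in hw->fc are in KB units; shifting left by 10 converts them to bytes before the XONE/FCEN enable bits are OR'ed in (e.g. a high_water of 0x80 becomes 0x20000, i.e. 128 KB).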
*/ 4913 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 4914 hw->fc.high_water[tc_num]) { 4915 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 4916 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 4917 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 4918 } else { 4919 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 4920 /* 4921 * In order to prevent Tx hangs when the internal Tx 4922 * switch is enabled we must set the high water mark 4923 * to the maximum FCRTH value. This allows the Tx 4924 * switch to function even under heavy Rx workloads. 4925 */ 4926 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 4927 } 4928 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 4929 4930 /* Configure pause time (2 TCs per register) */ 4931 reg = hw->fc.pause_time * 0x00010001; 4932 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 4933 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 4934 4935 /* Configure flow control refresh threshold value */ 4936 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 4937 4938 out: 4939 return ret_val; 4940 } 4941 4942 static int 4943 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 4944 { 4945 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4946 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 4947 4948 if (hw->mac.type != ixgbe_mac_82598EB) { 4949 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 4950 } 4951 return ret_val; 4952 } 4953 4954 static int 4955 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 4956 { 4957 int err; 4958 uint32_t rx_buf_size; 4959 uint32_t max_high_water; 4960 uint8_t tc_num; 4961 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 4962 struct ixgbe_hw *hw = 4963 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4964 struct ixgbe_dcb_config *dcb_config = 4965 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 4966 4967 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4968 ixgbe_fc_none, 4969 ixgbe_fc_rx_pause, 4970 ixgbe_fc_tx_pause, 4971 ixgbe_fc_full 4972 }; 4973 4974 PMD_INIT_FUNC_TRACE(); 4975 4976 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 4977 tc_num = map[pfc_conf->priority]; 4978 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 4979 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4980 /* 4981 * At least reserve one Ethernet frame for watermark 4982 * high_water/low_water in kilo bytes for ixgbe 4983 */ 4984 max_high_water = (rx_buf_size - 4985 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4986 if ((pfc_conf->fc.high_water > max_high_water) || 4987 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 4988 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4989 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4990 return -EINVAL; 4991 } 4992 4993 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 4994 hw->fc.pause_time = pfc_conf->fc.pause_time; 4995 hw->fc.send_xon = pfc_conf->fc.send_xon; 4996 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 4997 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 4998 4999 err = ixgbe_dcb_pfc_enable(dev, tc_num); 5000 5001 /* Not negotiated is not an error case */ 5002 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 5003 return 0; 5004 5005 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 5006 return -EIO; 5007 } 5008 5009 static int 5010 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 5011 struct rte_eth_rss_reta_entry64 *reta_conf, 5012 uint16_t 
reta_size) 5013 { 5014 uint16_t i, sp_reta_size; 5015 uint8_t j, mask; 5016 uint32_t reta, r; 5017 uint16_t idx, shift; 5018 struct ixgbe_adapter *adapter = dev->data->dev_private; 5019 struct rte_eth_dev_data *dev_data = dev->data; 5020 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5021 uint32_t reta_reg; 5022 5023 PMD_INIT_FUNC_TRACE(); 5024 5025 if (!dev_data->dev_started) { 5026 PMD_DRV_LOG(ERR, 5027 "port %d must be started before rss reta update", 5028 dev_data->port_id); 5029 return -EIO; 5030 } 5031 5032 if (!ixgbe_rss_update_sp(hw->mac.type)) { 5033 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 5034 "NIC."); 5035 return -ENOTSUP; 5036 } 5037 5038 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5039 if (reta_size != sp_reta_size) { 5040 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5041 "(%d) doesn't match the number hardware can supported " 5042 "(%d)", reta_size, sp_reta_size); 5043 return -EINVAL; 5044 } 5045 5046 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5047 idx = i / RTE_RETA_GROUP_SIZE; 5048 shift = i % RTE_RETA_GROUP_SIZE; 5049 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5050 IXGBE_4_BIT_MASK); 5051 if (!mask) 5052 continue; 5053 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5054 if (mask == IXGBE_4_BIT_MASK) 5055 r = 0; 5056 else 5057 r = IXGBE_READ_REG(hw, reta_reg); 5058 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5059 if (mask & (0x1 << j)) 5060 reta |= reta_conf[idx].reta[shift + j] << 5061 (CHAR_BIT * j); 5062 else 5063 reta |= r & (IXGBE_8_BIT_MASK << 5064 (CHAR_BIT * j)); 5065 } 5066 IXGBE_WRITE_REG(hw, reta_reg, reta); 5067 } 5068 adapter->rss_reta_updated = 1; 5069 5070 return 0; 5071 } 5072 5073 static int 5074 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 5075 struct rte_eth_rss_reta_entry64 *reta_conf, 5076 uint16_t reta_size) 5077 { 5078 uint16_t i, sp_reta_size; 5079 uint8_t j, mask; 5080 uint32_t reta; 5081 uint16_t idx, shift; 5082 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5083 uint32_t reta_reg; 5084 5085 PMD_INIT_FUNC_TRACE(); 5086 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5087 if (reta_size != sp_reta_size) { 5088 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5089 "(%d) doesn't match the number hardware can supported " 5090 "(%d)", reta_size, sp_reta_size); 5091 return -EINVAL; 5092 } 5093 5094 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5095 idx = i / RTE_RETA_GROUP_SIZE; 5096 shift = i % RTE_RETA_GROUP_SIZE; 5097 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5098 IXGBE_4_BIT_MASK); 5099 if (!mask) 5100 continue; 5101 5102 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5103 reta = IXGBE_READ_REG(hw, reta_reg); 5104 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5105 if (mask & (0x1 << j)) 5106 reta_conf[idx].reta[shift + j] = 5107 ((reta >> (CHAR_BIT * j)) & 5108 IXGBE_8_BIT_MASK); 5109 } 5110 } 5111 5112 return 0; 5113 } 5114 5115 static int 5116 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 5117 uint32_t index, uint32_t pool) 5118 { 5119 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5120 uint32_t enable_addr = 1; 5121 5122 return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, 5123 pool, enable_addr); 5124 } 5125 5126 static void 5127 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 5128 { 5129 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5130 5131 ixgbe_clear_rar(hw, index); 5132 } 5133 5134 static int 5135 
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 5136 { 5137 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5138 5139 ixgbe_remove_rar(dev, 0); 5140 ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); 5141 5142 return 0; 5143 } 5144 5145 static bool 5146 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 5147 { 5148 if (strcmp(dev->device->driver->name, drv->driver.name)) 5149 return false; 5150 5151 return true; 5152 } 5153 5154 bool 5155 is_ixgbe_supported(struct rte_eth_dev *dev) 5156 { 5157 return is_device_supported(dev, &rte_ixgbe_pmd); 5158 } 5159 5160 static int 5161 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 5162 { 5163 uint32_t hlreg0; 5164 uint32_t maxfrs; 5165 struct ixgbe_hw *hw; 5166 struct rte_eth_dev_info dev_info; 5167 uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; 5168 struct rte_eth_dev_data *dev_data = dev->data; 5169 int ret; 5170 5171 ret = ixgbe_dev_info_get(dev, &dev_info); 5172 if (ret != 0) 5173 return ret; 5174 5175 /* check that mtu is within the allowed range */ 5176 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 5177 return -EINVAL; 5178 5179 /* If device is started, refuse mtu that requires the support of 5180 * scattered packets when this feature has not been enabled before. 5181 */ 5182 if (dev_data->dev_started && !dev_data->scattered_rx && 5183 (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > 5184 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 5185 PMD_INIT_LOG(ERR, "Stop port first."); 5186 return -EINVAL; 5187 } 5188 5189 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5190 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 5191 5192 /* switch to jumbo mode if needed */ 5193 if (frame_size > IXGBE_ETH_MAX_LEN) { 5194 dev->data->dev_conf.rxmode.offloads |= 5195 DEV_RX_OFFLOAD_JUMBO_FRAME; 5196 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 5197 } else { 5198 dev->data->dev_conf.rxmode.offloads &= 5199 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 5200 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 5201 } 5202 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 5203 5204 /* update max frame size */ 5205 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 5206 5207 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 5208 maxfrs &= 0x0000FFFF; 5209 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); 5210 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 5211 5212 return 0; 5213 } 5214 5215 /* 5216 * Virtual Function operations 5217 */ 5218 static void 5219 ixgbevf_intr_disable(struct rte_eth_dev *dev) 5220 { 5221 struct ixgbe_interrupt *intr = 5222 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5223 struct ixgbe_hw *hw = 5224 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5225 5226 PMD_INIT_FUNC_TRACE(); 5227 5228 /* Clear interrupt mask to stop from interrupts being generated */ 5229 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 5230 5231 IXGBE_WRITE_FLUSH(hw); 5232 5233 /* Clear mask value. 
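 * intr->mask mirrors the VTEIMS enable bits; ixgbevf_intr_enable() will repopulate it with IXGBE_VF_IRQ_ENABLE_MASK.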
*/ 5234 intr->mask = 0; 5235 } 5236 5237 static void 5238 ixgbevf_intr_enable(struct rte_eth_dev *dev) 5239 { 5240 struct ixgbe_interrupt *intr = 5241 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5242 struct ixgbe_hw *hw = 5243 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5244 5245 PMD_INIT_FUNC_TRACE(); 5246 5247 /* VF enable interrupt autoclean */ 5248 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 5249 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 5250 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 5251 5252 IXGBE_WRITE_FLUSH(hw); 5253 5254 /* Save IXGBE_VTEIMS value to mask. */ 5255 intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; 5256 } 5257 5258 static int 5259 ixgbevf_dev_configure(struct rte_eth_dev *dev) 5260 { 5261 struct rte_eth_conf *conf = &dev->data->dev_conf; 5262 struct ixgbe_adapter *adapter = dev->data->dev_private; 5263 5264 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 5265 dev->data->port_id); 5266 5267 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 5268 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 5269 5270 /* 5271 * VF has no ability to enable/disable HW CRC 5272 * Keep the persistent behavior the same as Host PF 5273 */ 5274 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 5275 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { 5276 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 5277 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; 5278 } 5279 #else 5280 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { 5281 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 5282 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; 5283 } 5284 #endif 5285 5286 /* 5287 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 5288 * allocation or vector Rx preconditions we will reset it. 5289 */ 5290 adapter->rx_bulk_alloc_allowed = true; 5291 adapter->rx_vec_allowed = true; 5292 5293 return 0; 5294 } 5295 5296 static int 5297 ixgbevf_dev_start(struct rte_eth_dev *dev) 5298 { 5299 struct ixgbe_hw *hw = 5300 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5301 uint32_t intr_vector = 0; 5302 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5303 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5304 5305 int err, mask = 0; 5306 5307 PMD_INIT_FUNC_TRACE(); 5308 5309 /* Stop the link setup handler before resetting the HW. */ 5310 ixgbe_dev_wait_setup_link_complete(dev, 0); 5311 5312 err = hw->mac.ops.reset_hw(hw); 5313 5314 /** 5315 * In this case, reuses the MAC address assigned by VF 5316 * initialization. 5317 */ 5318 if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) { 5319 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); 5320 return err; 5321 } 5322 5323 hw->mac.get_link_status = true; 5324 5325 /* negotiate mailbox API version to use with the PF. 
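 *
 * Illustrative sketch only (assumption: the real helper is defined
 * earlier in this file and its exact version list may differ): the VF
 * typically walks the mailbox API revisions from newest to oldest until
 * the PF accepts one, e.g.:
 *
 *   static const enum ixgbe_pfvf_api_rev sup_ver[] = {
 *       ixgbe_mbox_api_13, ixgbe_mbox_api_12,
 *       ixgbe_mbox_api_11, ixgbe_mbox_api_10,
 *   };
 *   for (i = 0; i < RTE_DIM(sup_ver); i++)
 *       if (ixgbevf_negotiate_api_version(hw, sup_ver[i]) == 0)
 *           break;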
*/ 5326 ixgbevf_negotiate_api(hw); 5327 5328 ixgbevf_dev_tx_init(dev); 5329 5330 /* This can fail when allocating mbufs for descriptor rings */ 5331 err = ixgbevf_dev_rx_init(dev); 5332 if (err) { 5333 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); 5334 ixgbe_dev_clear_queues(dev); 5335 return err; 5336 } 5337 5338 /* Set vfta */ 5339 ixgbevf_set_vfta_all(dev, 1); 5340 5341 /* Set HW strip */ 5342 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 5343 ETH_VLAN_EXTEND_MASK; 5344 err = ixgbevf_vlan_offload_config(dev, mask); 5345 if (err) { 5346 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); 5347 ixgbe_dev_clear_queues(dev); 5348 return err; 5349 } 5350 5351 ixgbevf_dev_rxtx_start(dev); 5352 5353 /* check and configure queue intr-vector mapping */ 5354 if (rte_intr_cap_multiple(intr_handle) && 5355 dev->data->dev_conf.intr_conf.rxq) { 5356 /* According to the datasheet, only vectors 0/1/2 can be used; 5357 * currently only one vector is used for the Rx queues 5358 */ 5359 intr_vector = 1; 5360 if (rte_intr_efd_enable(intr_handle, intr_vector)) { 5361 ixgbe_dev_clear_queues(dev); 5362 return -1; 5363 } 5364 } 5365 5366 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 5367 intr_handle->intr_vec = 5368 rte_zmalloc("intr_vec", 5369 dev->data->nb_rx_queues * sizeof(int), 0); 5370 if (intr_handle->intr_vec == NULL) { 5371 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 5372 " intr_vec", dev->data->nb_rx_queues); 5373 ixgbe_dev_clear_queues(dev); 5374 return -ENOMEM; 5375 } 5376 } 5377 ixgbevf_configure_msix(dev); 5378 5379 /* When a VF port is bound to VFIO-PCI, only the miscellaneous interrupt 5380 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ). 5381 * If the previous VFIO interrupt mapping set in eth_ixgbevf_dev_init( ) 5382 * is not cleared, the following rte_intr_enable( ) will fail when it 5383 * tries to map the Rx queue interrupt to other VFIO vectors. 5384 * So clear the uio/vfio intr/eventfd first to avoid failure. 5385 */ 5386 rte_intr_disable(intr_handle); 5387 5388 rte_intr_enable(intr_handle); 5389 5390 /* Re-enable interrupt for VF */ 5391 ixgbevf_intr_enable(dev); 5392 5393 /* 5394 * Update link status right before return, because it may 5395 * start link configuration process in a separate thread.
5396 */ 5397 ixgbevf_dev_link_update(dev, 0); 5398 5399 hw->adapter_stopped = false; 5400 5401 return 0; 5402 } 5403 5404 static int 5405 ixgbevf_dev_stop(struct rte_eth_dev *dev) 5406 { 5407 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5408 struct ixgbe_adapter *adapter = dev->data->dev_private; 5409 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5410 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5411 5412 if (hw->adapter_stopped) 5413 return 0; 5414 5415 PMD_INIT_FUNC_TRACE(); 5416 5417 ixgbe_dev_wait_setup_link_complete(dev, 0); 5418 5419 ixgbevf_intr_disable(dev); 5420 5421 dev->data->dev_started = 0; 5422 hw->adapter_stopped = 1; 5423 ixgbe_stop_adapter(hw); 5424 5425 /* 5426 * Clear what we set, but we still keep shadow_vfta to 5427 * restore it after the device starts 5428 */ 5429 ixgbevf_set_vfta_all(dev, 0); 5430 5431 /* Clear stored conf */ 5432 dev->data->scattered_rx = 0; 5433 5434 ixgbe_dev_clear_queues(dev); 5435 5436 /* Clean datapath event and queue/vec mapping */ 5437 rte_intr_efd_disable(intr_handle); 5438 if (intr_handle->intr_vec != NULL) { 5439 rte_free(intr_handle->intr_vec); 5440 intr_handle->intr_vec = NULL; 5441 } 5442 5443 adapter->rss_reta_updated = 0; 5444 5445 return 0; 5446 } 5447 5448 static int 5449 ixgbevf_dev_close(struct rte_eth_dev *dev) 5450 { 5451 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5452 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5453 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5454 int ret; 5455 5456 PMD_INIT_FUNC_TRACE(); 5457 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5458 return 0; 5459 5460 ixgbe_reset_hw(hw); 5461 5462 ret = ixgbevf_dev_stop(dev); 5463 5464 ixgbe_dev_free_queues(dev); 5465 5466 /** 5467 * Remove the VF MAC address to ensure 5468 * that the VF traffic goes to the PF 5469 * after stop, close and detach of the VF 5470 **/ 5471 ixgbevf_remove_mac_addr(dev, 0); 5472 5473 rte_intr_disable(intr_handle); 5474 rte_intr_callback_unregister(intr_handle, 5475 ixgbevf_dev_interrupt_handler, dev); 5476 5477 return ret; 5478 } 5479 5480 /* 5481 * Reset VF device 5482 */ 5483 static int 5484 ixgbevf_dev_reset(struct rte_eth_dev *dev) 5485 { 5486 int ret; 5487 5488 ret = eth_ixgbevf_dev_uninit(dev); 5489 if (ret) 5490 return ret; 5491 5492 ret = eth_ixgbevf_dev_init(dev); 5493 5494 return ret; 5495 } 5496 5497 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 5498 { 5499 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5500 struct ixgbe_vfta *shadow_vfta = 5501 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5502 int i = 0, j = 0, vfta = 0, mask = 1; 5503 5504 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 5505 vfta = shadow_vfta->vfta[i]; 5506 if (vfta) { 5507 mask = 1; 5508 for (j = 0; j < 32; j++) { 5509 if (vfta & mask) 5510 ixgbe_set_vfta(hw, (i<<5)+j, 0, 5511 on, false); 5512 mask <<= 1; 5513 } 5514 } 5515 } 5516 5517 } 5518 5519 static int 5520 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 5521 { 5522 struct ixgbe_hw *hw = 5523 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5524 struct ixgbe_vfta *shadow_vfta = 5525 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5526 uint32_t vid_idx = 0; 5527 uint32_t vid_bit = 0; 5528 int ret = 0; 5529 5530 PMD_INIT_FUNC_TRACE(); 5531 5532 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 5533 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 5534 if (ret) { 5535 PMD_INIT_LOG(ERR, "Unable to set VF
vlan"); 5536 return ret; 5537 } 5538 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 5539 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 5540 5541 /* Save what we set and restore it after device reset */ 5542 if (on) 5543 shadow_vfta->vfta[vid_idx] |= vid_bit; 5544 else 5545 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 5546 5547 return 0; 5548 } 5549 5550 static void 5551 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 5552 { 5553 struct ixgbe_hw *hw = 5554 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5555 uint32_t ctrl; 5556 5557 PMD_INIT_FUNC_TRACE(); 5558 5559 if (queue >= hw->mac.max_rx_queues) 5560 return; 5561 5562 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 5563 if (on) 5564 ctrl |= IXGBE_RXDCTL_VME; 5565 else 5566 ctrl &= ~IXGBE_RXDCTL_VME; 5567 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 5568 5569 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 5570 } 5571 5572 static int 5573 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 5574 { 5575 struct ixgbe_rx_queue *rxq; 5576 uint16_t i; 5577 int on = 0; 5578 5579 /* The VF only supports the HW VLAN strip feature; other offloads are not supported */ 5580 if (mask & ETH_VLAN_STRIP_MASK) { 5581 for (i = 0; i < dev->data->nb_rx_queues; i++) { 5582 rxq = dev->data->rx_queues[i]; 5583 on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 5584 ixgbevf_vlan_strip_queue_set(dev, i, on); 5585 } 5586 } 5587 5588 return 0; 5589 } 5590 5591 static int 5592 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 5593 { 5594 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 5595 5596 ixgbevf_vlan_offload_config(dev, mask); 5597 5598 return 0; 5599 } 5600 5601 int 5602 ixgbe_vt_check(struct ixgbe_hw *hw) 5603 { 5604 uint32_t reg_val; 5605 5606 /* if Virtualization Technology is enabled */ 5607 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 5608 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 5609 PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); 5610 return -1; 5611 } 5612 5613 return 0; 5614 } 5615 5616 static uint32_t 5617 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) 5618 { 5619 uint32_t vector = 0; 5620 5621 switch (hw->mac.mc_filter_type) { 5622 case 0: /* use bits [47:36] of the address */ 5623 vector = ((uc_addr->addr_bytes[4] >> 4) | 5624 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 5625 break; 5626 case 1: /* use bits [46:35] of the address */ 5627 vector = ((uc_addr->addr_bytes[4] >> 3) | 5628 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 5629 break; 5630 case 2: /* use bits [45:34] of the address */ 5631 vector = ((uc_addr->addr_bytes[4] >> 2) | 5632 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 5633 break; 5634 case 3: /* use bits [43:32] of the address */ 5635 vector = ((uc_addr->addr_bytes[4]) | 5636 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 5637 break; 5638 default: /* Invalid mc_filter_type */ 5639 break; 5640 } 5641 5642 /* vector can only be 12-bits or boundary will be exceeded */ 5643 vector &= 0xFFF; 5644 return vector; 5645 } 5646 5647 static int 5648 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, 5649 struct rte_ether_addr *mac_addr, uint8_t on) 5650 { 5651 uint32_t vector; 5652 uint32_t uta_idx; 5653 uint32_t reg_val; 5654 uint32_t uta_shift; 5655 uint32_t rc; 5656 const uint32_t ixgbe_uta_idx_mask = 0x7F; 5657 const uint32_t ixgbe_uta_bit_shift = 5; 5658 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 5659 const uint32_t bit1 = 0x1; 5660 5661 struct ixgbe_hw *hw = 5662 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5663 struct ixgbe_uta_info
*uta_info = 5664 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5665 5666 /* The UTA table only exists on 82599 hardware and newer */ 5667 if (hw->mac.type < ixgbe_mac_82599EB) 5668 return -ENOTSUP; 5669 5670 vector = ixgbe_uta_vector(hw, mac_addr); 5671 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 5672 uta_shift = vector & ixgbe_uta_bit_mask; 5673 5674 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 5675 if (rc == on) 5676 return 0; 5677 5678 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 5679 if (on) { 5680 uta_info->uta_in_use++; 5681 reg_val |= (bit1 << uta_shift); 5682 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 5683 } else { 5684 uta_info->uta_in_use--; 5685 reg_val &= ~(bit1 << uta_shift); 5686 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 5687 } 5688 5689 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 5690 5691 if (uta_info->uta_in_use > 0) 5692 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 5693 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 5694 else 5695 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 5696 5697 return 0; 5698 } 5699 5700 static int 5701 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 5702 { 5703 int i; 5704 struct ixgbe_hw *hw = 5705 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5706 struct ixgbe_uta_info *uta_info = 5707 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5708 5709 /* The UTA table only exists on 82599 hardware and newer */ 5710 if (hw->mac.type < ixgbe_mac_82599EB) 5711 return -ENOTSUP; 5712 5713 if (on) { 5714 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5715 uta_info->uta_shadow[i] = ~0; 5716 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 5717 } 5718 } else { 5719 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5720 uta_info->uta_shadow[i] = 0; 5721 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 5722 } 5723 } 5724 return 0; 5725 5726 } 5727 5728 uint32_t 5729 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 5730 { 5731 uint32_t new_val = orig_val; 5732 5733 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) 5734 new_val |= IXGBE_VMOLR_AUPE; 5735 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) 5736 new_val |= IXGBE_VMOLR_ROMPE; 5737 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) 5738 new_val |= IXGBE_VMOLR_ROPE; 5739 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) 5740 new_val |= IXGBE_VMOLR_BAM; 5741 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) 5742 new_val |= IXGBE_VMOLR_MPE; 5743 5744 return new_val; 5745 } 5746 5747 static int 5748 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5749 { 5750 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5751 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5752 struct ixgbe_interrupt *intr = 5753 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5754 struct ixgbe_hw *hw = 5755 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5756 uint32_t vec = IXGBE_MISC_VEC_ID; 5757 5758 if (rte_intr_allow_others(intr_handle)) 5759 vec = IXGBE_RX_VEC_START; 5760 intr->mask |= (1 << vec); 5761 RTE_SET_USED(queue_id); 5762 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5763 5764 rte_intr_ack(intr_handle); 5765 5766 return 0; 5767 } 5768 5769 static int 5770 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5771 { 5772 struct ixgbe_interrupt *intr = 5773 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5774 struct ixgbe_hw *hw = 5775 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5776 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5777 struct rte_intr_handle *intr_handle = 
&pci_dev->intr_handle; 5778 uint32_t vec = IXGBE_MISC_VEC_ID; 5779 5780 if (rte_intr_allow_others(intr_handle)) 5781 vec = IXGBE_RX_VEC_START; 5782 intr->mask &= ~(1 << vec); 5783 RTE_SET_USED(queue_id); 5784 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5785 5786 return 0; 5787 } 5788 5789 static int 5790 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5791 { 5792 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5793 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5794 uint32_t mask; 5795 struct ixgbe_hw *hw = 5796 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5797 struct ixgbe_interrupt *intr = 5798 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5799 5800 if (queue_id < 16) { 5801 ixgbe_disable_intr(hw); 5802 intr->mask |= (1 << queue_id); 5803 ixgbe_enable_intr(dev); 5804 } else if (queue_id < 32) { 5805 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5806 mask &= (1 << queue_id); 5807 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5808 } else if (queue_id < 64) { 5809 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5810 mask &= (1 << (queue_id - 32)); 5811 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5812 } 5813 rte_intr_ack(intr_handle); 5814 5815 return 0; 5816 } 5817 5818 static int 5819 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5820 { 5821 uint32_t mask; 5822 struct ixgbe_hw *hw = 5823 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5824 struct ixgbe_interrupt *intr = 5825 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5826 5827 if (queue_id < 16) { 5828 ixgbe_disable_intr(hw); 5829 intr->mask &= ~(1 << queue_id); 5830 ixgbe_enable_intr(dev); 5831 } else if (queue_id < 32) { 5832 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5833 mask &= ~(1 << queue_id); 5834 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5835 } else if (queue_id < 64) { 5836 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5837 mask &= ~(1 << (queue_id - 32)); 5838 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5839 } 5840 5841 return 0; 5842 } 5843 5844 static void 5845 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5846 uint8_t queue, uint8_t msix_vector) 5847 { 5848 uint32_t tmp, idx; 5849 5850 if (direction == -1) { 5851 /* other causes */ 5852 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5853 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 5854 tmp &= ~0xFF; 5855 tmp |= msix_vector; 5856 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 5857 } else { 5858 /* rx or tx cause */ 5859 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5860 idx = ((16 * (queue & 1)) + (8 * direction)); 5861 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 5862 tmp &= ~(0xFF << idx); 5863 tmp |= (msix_vector << idx); 5864 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 5865 } 5866 } 5867 5868 /** 5869 * set the IVAR registers, mapping interrupt causes to vectors 5870 * @param hw 5871 * pointer to ixgbe_hw struct 5872 * @direction 5873 * 0 for Rx, 1 for Tx, -1 for other causes 5874 * @queue 5875 * queue to map the corresponding interrupt to 5876 * @msix_vector 5877 * the vector to map to the corresponding queue 5878 */ 5879 static void 5880 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5881 uint8_t queue, uint8_t msix_vector) 5882 { 5883 uint32_t tmp, idx; 5884 5885 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5886 if (hw->mac.type == ixgbe_mac_82598EB) { 5887 if (direction == -1) 5888 direction = 0; 5889 idx = (((direction * 64) + queue) >> 2) & 0x1F; 5890 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 5891 tmp &= ~(0xFF << (8 * (queue & 0x3))); 5892 tmp |= 
(msix_vector << (8 * (queue & 0x3))); 5893 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 5894 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 5895 (hw->mac.type == ixgbe_mac_X540) || 5896 (hw->mac.type == ixgbe_mac_X550) || 5897 (hw->mac.type == ixgbe_mac_X550EM_x)) { 5898 if (direction == -1) { 5899 /* other causes */ 5900 idx = ((queue & 1) * 8); 5901 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5902 tmp &= ~(0xFF << idx); 5903 tmp |= (msix_vector << idx); 5904 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 5905 } else { 5906 /* rx or tx causes */ 5907 idx = ((16 * (queue & 1)) + (8 * direction)); 5908 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 5909 tmp &= ~(0xFF << idx); 5910 tmp |= (msix_vector << idx); 5911 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 5912 } 5913 } 5914 } 5915 5916 static void 5917 ixgbevf_configure_msix(struct rte_eth_dev *dev) 5918 { 5919 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5920 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5921 struct ixgbe_hw *hw = 5922 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5923 uint32_t q_idx; 5924 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 5925 uint32_t base = IXGBE_MISC_VEC_ID; 5926 5927 /* Configure VF other cause ivar */ 5928 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 5929 5930 /* Don't configure the MSI-X register if no mapping is done 5931 * between the intr vector and the event fd. 5932 */ 5933 if (!rte_intr_dp_is_en(intr_handle)) 5934 return; 5935 5936 if (rte_intr_allow_others(intr_handle)) { 5937 base = IXGBE_RX_VEC_START; 5938 vector_idx = IXGBE_RX_VEC_START; 5939 } 5940 5941 /* Configure all RX queues of VF */ 5942 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 5943 /* Force all queues to use vector 0, 5944 * as IXGBE_VF_MAXMSIVECTOR = 1 5945 */ 5946 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 5947 intr_handle->intr_vec[q_idx] = vector_idx; 5948 if (vector_idx < base + intr_handle->nb_efd - 1) 5949 vector_idx++; 5950 } 5951 5952 /* As the RX queue setting above shows, all queues use vector 0. 5953 * Set only the ITR value of IXGBE_MISC_VEC_ID. 5954 */ 5955 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), 5956 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 5957 | IXGBE_EITR_CNT_WDIS); 5958 } 5959 5960 /** 5961 * Sets up the hardware to properly generate MSI-X interrupts 5962 * @hw 5963 * board private structure 5964 */ 5965 static void 5966 ixgbe_configure_msix(struct rte_eth_dev *dev) 5967 { 5968 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5969 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5970 struct ixgbe_hw *hw = 5971 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5972 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 5973 uint32_t vec = IXGBE_MISC_VEC_ID; 5974 uint32_t mask; 5975 uint32_t gpie; 5976 5977 /* Don't configure the MSI-X register if no mapping is done 5978 * between the intr vector and the event fd, 5979 * but if MSI-X has been enabled already, we need to configure 5980 * auto clean, auto mask and throttling.
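 *
 * Illustrative example (assumed queue/vector counts, not taken from a
 * real setup): with rte_intr_allow_others() true, the loop below starts
 * at vec = IXGBE_RX_VEC_START and, for 4 Rx queues with
 * intr_handle->nb_efd == 2, produces
 *   queue 0 -> IXGBE_RX_VEC_START,
 *   queue 1 -> IXGBE_RX_VEC_START + 1,
 *   queues 2 and 3 -> IXGBE_RX_VEC_START + 1,
 * i.e. once the event fds are exhausted the remaining queues share the
 * last vector, while IXGBE_MISC_VEC_ID stays reserved for other causes.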
5981 */ 5982 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5983 if (!rte_intr_dp_is_en(intr_handle) && 5984 !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) 5985 return; 5986 5987 if (rte_intr_allow_others(intr_handle)) 5988 vec = base = IXGBE_RX_VEC_START; 5989 5990 /* setup GPIE for MSI-x mode */ 5991 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5992 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 5993 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 5994 /* auto clearing and auto setting corresponding bits in EIMS 5995 * when MSI-X interrupt is triggered 5996 */ 5997 if (hw->mac.type == ixgbe_mac_82598EB) { 5998 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 5999 } else { 6000 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 6001 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 6002 } 6003 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 6004 6005 /* Populate the IVAR table and set the ITR values to the 6006 * corresponding register. 6007 */ 6008 if (rte_intr_dp_is_en(intr_handle)) { 6009 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 6010 queue_id++) { 6011 /* by default, 1:1 mapping */ 6012 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 6013 intr_handle->intr_vec[queue_id] = vec; 6014 if (vec < base + intr_handle->nb_efd - 1) 6015 vec++; 6016 } 6017 6018 switch (hw->mac.type) { 6019 case ixgbe_mac_82598EB: 6020 ixgbe_set_ivar_map(hw, -1, 6021 IXGBE_IVAR_OTHER_CAUSES_INDEX, 6022 IXGBE_MISC_VEC_ID); 6023 break; 6024 case ixgbe_mac_82599EB: 6025 case ixgbe_mac_X540: 6026 case ixgbe_mac_X550: 6027 case ixgbe_mac_X550EM_x: 6028 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 6029 break; 6030 default: 6031 break; 6032 } 6033 } 6034 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 6035 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6036 | IXGBE_EITR_CNT_WDIS); 6037 6038 /* set up to autoclear timer, and the vectors */ 6039 mask = IXGBE_EIMS_ENABLE_MASK; 6040 mask &= ~(IXGBE_EIMS_OTHER | 6041 IXGBE_EIMS_MAILBOX | 6042 IXGBE_EIMS_LSC); 6043 6044 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 6045 } 6046 6047 int 6048 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 6049 uint16_t queue_idx, uint16_t tx_rate) 6050 { 6051 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6052 struct rte_eth_rxmode *rxmode; 6053 uint32_t rf_dec, rf_int; 6054 uint32_t bcnrc_val; 6055 uint16_t link_speed = dev->data->dev_link.link_speed; 6056 6057 if (queue_idx >= hw->mac.max_tx_queues) 6058 return -EINVAL; 6059 6060 if (tx_rate != 0) { 6061 /* Calculate the rate factor values to set */ 6062 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 6063 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 6064 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 6065 6066 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 6067 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 6068 IXGBE_RTTBCNRC_RF_INT_MASK_M); 6069 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 6070 } else { 6071 bcnrc_val = 0; 6072 } 6073 6074 rxmode = &dev->data->dev_conf.rxmode; 6075 /* 6076 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 6077 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 6078 * set as 0x4. 
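 *
 * Worked example for the rate factor written to RTTBCNRC above
 * (illustrative numbers only): with link_speed = 10000 Mb/s and
 * tx_rate = 3000 Mb/s,
 *   rf_int = 10000 / 3000 = 3
 *   rf_dec = ((10000 % 3000) << IXGBE_RTTBCNRC_RF_INT_SHIFT) / 3000
 * so the programmed factor is roughly 3.33 and the queue is throttled
 * to about link_speed / 3.33, i.e. the requested ~3000 Mb/s.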
6079 */ 6080 if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && 6081 (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) 6082 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 6083 IXGBE_MMW_SIZE_JUMBO_FRAME); 6084 else 6085 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 6086 IXGBE_MMW_SIZE_DEFAULT); 6087 6088 /* Set RTTBCNRC of queue X */ 6089 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 6090 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 6091 IXGBE_WRITE_FLUSH(hw); 6092 6093 return 0; 6094 } 6095 6096 static int 6097 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 6098 __rte_unused uint32_t index, 6099 __rte_unused uint32_t pool) 6100 { 6101 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6102 int diag; 6103 6104 /* 6105 * On an 82599 VF, adding the same MAC addr again is not an idempotent 6106 * operation. Trap this case to avoid exhausting the [very limited] 6107 * set of PF resources used to store VF MAC addresses. 6108 */ 6109 if (memcmp(hw->mac.perm_addr, mac_addr, 6110 sizeof(struct rte_ether_addr)) == 0) 6111 return -1; 6112 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6113 if (diag != 0) 6114 PMD_DRV_LOG(ERR, "Unable to add MAC address " 6115 RTE_ETHER_ADDR_PRT_FMT " - diag=%d", 6116 RTE_ETHER_ADDR_BYTES(mac_addr), diag); 6117 return diag; 6118 } 6119 6120 static void 6121 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 6122 { 6123 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6124 struct rte_ether_addr *perm_addr = 6125 (struct rte_ether_addr *)hw->mac.perm_addr; 6126 struct rte_ether_addr *mac_addr; 6127 uint32_t i; 6128 int diag; 6129 6130 /* 6131 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 6132 * not support the deletion of a given MAC address. 6133 * Instead, it requires deleting all MAC addresses and then re-adding 6134 * all of them, with the exception of the one to be deleted. 6135 */ 6136 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 6137 6138 /* 6139 * Re-add all MAC addresses, with the exception of the deleted one 6140 * and of the permanent MAC address.
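 *
 * Illustrative example (hypothetical address list): if mac_addrs holds
 * { perm, A, B, C } and index == 2 (address B is being removed), the
 * loop below re-issues
 *   ixgbevf_set_uc_addr_vf(hw, 2, A.addr_bytes);
 *   ixgbevf_set_uc_addr_vf(hw, 2, C.addr_bytes);
 * after the "forget everything" request above, so only B disappears
 * from the PF's list.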
6141 */ 6142 for (i = 0, mac_addr = dev->data->mac_addrs; 6143 i < hw->mac.num_rar_entries; i++, mac_addr++) { 6144 /* Skip the deleted MAC address */ 6145 if (i == index) 6146 continue; 6147 /* Skip NULL MAC addresses */ 6148 if (rte_is_zero_ether_addr(mac_addr)) 6149 continue; 6150 /* Skip the permanent MAC address */ 6151 if (memcmp(perm_addr, mac_addr, 6152 sizeof(struct rte_ether_addr)) == 0) 6153 continue; 6154 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6155 if (diag != 0) 6156 PMD_DRV_LOG(ERR, 6157 "Adding again MAC address " 6158 RTE_ETHER_ADDR_PRT_FMT " failed " 6159 "diag=%d", RTE_ETHER_ADDR_BYTES(mac_addr), 6160 diag); 6161 } 6162 } 6163 6164 static int 6165 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 6166 struct rte_ether_addr *addr) 6167 { 6168 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6169 6170 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 6171 6172 return 0; 6173 } 6174 6175 int 6176 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 6177 struct rte_eth_syn_filter *filter, 6178 bool add) 6179 { 6180 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6181 struct ixgbe_filter_info *filter_info = 6182 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6183 uint32_t syn_info; 6184 uint32_t synqf; 6185 6186 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6187 return -EINVAL; 6188 6189 syn_info = filter_info->syn_info; 6190 6191 if (add) { 6192 if (syn_info & IXGBE_SYN_FILTER_ENABLE) 6193 return -EINVAL; 6194 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 6195 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 6196 6197 if (filter->hig_pri) 6198 synqf |= IXGBE_SYN_FILTER_SYNQFP; 6199 else 6200 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 6201 } else { 6202 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6203 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) 6204 return -ENOENT; 6205 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 6206 } 6207 6208 filter_info->syn_info = synqf; 6209 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 6210 IXGBE_WRITE_FLUSH(hw); 6211 return 0; 6212 } 6213 6214 6215 static inline enum ixgbe_5tuple_protocol 6216 convert_protocol_type(uint8_t protocol_value) 6217 { 6218 if (protocol_value == IPPROTO_TCP) 6219 return IXGBE_FILTER_PROTOCOL_TCP; 6220 else if (protocol_value == IPPROTO_UDP) 6221 return IXGBE_FILTER_PROTOCOL_UDP; 6222 else if (protocol_value == IPPROTO_SCTP) 6223 return IXGBE_FILTER_PROTOCOL_SCTP; 6224 else 6225 return IXGBE_FILTER_PROTOCOL_NONE; 6226 } 6227 6228 /* inject a 5-tuple filter to HW */ 6229 static inline void 6230 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, 6231 struct ixgbe_5tuple_filter *filter) 6232 { 6233 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6234 int i; 6235 uint32_t ftqf, sdpqf; 6236 uint32_t l34timir = 0; 6237 uint8_t mask = 0xff; 6238 6239 i = filter->index; 6240 6241 sdpqf = (uint32_t)(filter->filter_info.dst_port << 6242 IXGBE_SDPQF_DSTPORT_SHIFT); 6243 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 6244 6245 ftqf = (uint32_t)(filter->filter_info.proto & 6246 IXGBE_FTQF_PROTOCOL_MASK); 6247 ftqf |= (uint32_t)((filter->filter_info.priority & 6248 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 6249 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
*/ 6250 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 6251 if (filter->filter_info.dst_ip_mask == 0) 6252 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 6253 if (filter->filter_info.src_port_mask == 0) 6254 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 6255 if (filter->filter_info.dst_port_mask == 0) 6256 mask &= IXGBE_FTQF_DEST_PORT_MASK; 6257 if (filter->filter_info.proto_mask == 0) 6258 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 6259 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 6260 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 6261 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 6262 6263 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 6264 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 6265 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 6266 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 6267 6268 l34timir |= IXGBE_L34T_IMIR_RESERVE; 6269 l34timir |= (uint32_t)(filter->queue << 6270 IXGBE_L34T_IMIR_QUEUE_SHIFT); 6271 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 6272 } 6273 6274 /* 6275 * add a 5tuple filter 6276 * 6277 * @param 6278 * dev: Pointer to struct rte_eth_dev. 6279 * index: the index allocated to the filter. 6280 * filter: pointer to the filter that will be added. 6281 * rx_queue: the queue id the filter is assigned to. 6282 * 6283 * @return 6284 * - On success, zero. 6285 * - On failure, a negative value. 6286 */ 6287 static int 6288 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 6289 struct ixgbe_5tuple_filter *filter) 6290 { 6291 struct ixgbe_filter_info *filter_info = 6292 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6293 int i, idx, shift; 6294 6295 /* 6296 * look for an unused 5tuple filter index, 6297 * and insert the filter into the list. 6298 */ 6299 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 6300 idx = i / (sizeof(uint32_t) * NBBY); 6301 shift = i % (sizeof(uint32_t) * NBBY); 6302 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 6303 filter_info->fivetuple_mask[idx] |= 1 << shift; 6304 filter->index = i; 6305 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 6306 filter, 6307 entries); 6308 break; 6309 } 6310 } 6311 if (i >= IXGBE_MAX_FTQF_FILTERS) { 6312 PMD_DRV_LOG(ERR, "5tuple filters are full."); 6313 return -ENOSYS; 6314 } 6315 6316 ixgbe_inject_5tuple_filter(dev, filter); 6317 6318 return 0; 6319 } 6320 6321 /* 6322 * remove a 5tuple filter 6323 * 6324 * @param 6325 * dev: Pointer to struct rte_eth_dev. 6326 * filter: pointer to the filter that will be removed.
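 *
 * (Note: this is normally reached through ixgbe_add_del_ntuple_filter()
 * below with add == false; the slot's DAQF/SAQF/SDPQF/FTQF/L34T_IMIR
 * registers are then zeroed so the hardware stops matching on it.)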
6327 */ 6328 static void 6329 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 6330 struct ixgbe_5tuple_filter *filter) 6331 { 6332 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6333 struct ixgbe_filter_info *filter_info = 6334 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6335 uint16_t index = filter->index; 6336 6337 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 6338 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 6339 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 6340 rte_free(filter); 6341 6342 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 6343 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 6344 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 6345 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 6346 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 6347 } 6348 6349 static int 6350 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 6351 { 6352 struct ixgbe_hw *hw; 6353 uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; 6354 struct rte_eth_dev_data *dev_data = dev->data; 6355 6356 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6357 6358 if (mtu < RTE_ETHER_MIN_MTU || 6359 max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) 6360 return -EINVAL; 6361 6362 /* If device is started, refuse mtu that requires the support of 6363 * scattered packets when this feature has not been enabled before. 6364 */ 6365 if (dev_data->dev_started && !dev_data->scattered_rx && 6366 (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > 6367 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 6368 PMD_INIT_LOG(ERR, "Stop port first."); 6369 return -EINVAL; 6370 } 6371 6372 /* 6373 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 6374 * request of the version 2.0 of the mailbox API. 6375 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 6376 * of the mailbox API. 
6377 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers 6378 * prior to 3.11.33 which contains the following change: 6379 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 6380 */ 6381 if (ixgbevf_rlpml_set_vf(hw, max_frame)) 6382 return -EINVAL; 6383 6384 /* update max frame size */ 6385 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; 6386 return 0; 6387 } 6388 6389 static inline struct ixgbe_5tuple_filter * 6390 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 6391 struct ixgbe_5tuple_filter_info *key) 6392 { 6393 struct ixgbe_5tuple_filter *it; 6394 6395 TAILQ_FOREACH(it, filter_list, entries) { 6396 if (memcmp(key, &it->filter_info, 6397 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 6398 return it; 6399 } 6400 } 6401 return NULL; 6402 } 6403 6404 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ 6405 static inline int 6406 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 6407 struct ixgbe_5tuple_filter_info *filter_info) 6408 { 6409 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 6410 filter->priority > IXGBE_5TUPLE_MAX_PRI || 6411 filter->priority < IXGBE_5TUPLE_MIN_PRI) 6412 return -EINVAL; 6413 6414 switch (filter->dst_ip_mask) { 6415 case UINT32_MAX: 6416 filter_info->dst_ip_mask = 0; 6417 filter_info->dst_ip = filter->dst_ip; 6418 break; 6419 case 0: 6420 filter_info->dst_ip_mask = 1; 6421 break; 6422 default: 6423 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 6424 return -EINVAL; 6425 } 6426 6427 switch (filter->src_ip_mask) { 6428 case UINT32_MAX: 6429 filter_info->src_ip_mask = 0; 6430 filter_info->src_ip = filter->src_ip; 6431 break; 6432 case 0: 6433 filter_info->src_ip_mask = 1; 6434 break; 6435 default: 6436 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 6437 return -EINVAL; 6438 } 6439 6440 switch (filter->dst_port_mask) { 6441 case UINT16_MAX: 6442 filter_info->dst_port_mask = 0; 6443 filter_info->dst_port = filter->dst_port; 6444 break; 6445 case 0: 6446 filter_info->dst_port_mask = 1; 6447 break; 6448 default: 6449 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 6450 return -EINVAL; 6451 } 6452 6453 switch (filter->src_port_mask) { 6454 case UINT16_MAX: 6455 filter_info->src_port_mask = 0; 6456 filter_info->src_port = filter->src_port; 6457 break; 6458 case 0: 6459 filter_info->src_port_mask = 1; 6460 break; 6461 default: 6462 PMD_DRV_LOG(ERR, "invalid src_port mask."); 6463 return -EINVAL; 6464 } 6465 6466 switch (filter->proto_mask) { 6467 case UINT8_MAX: 6468 filter_info->proto_mask = 0; 6469 filter_info->proto = 6470 convert_protocol_type(filter->proto); 6471 break; 6472 case 0: 6473 filter_info->proto_mask = 1; 6474 break; 6475 default: 6476 PMD_DRV_LOG(ERR, "invalid protocol mask."); 6477 return -EINVAL; 6478 } 6479 6480 filter_info->priority = (uint8_t)filter->priority; 6481 return 0; 6482 } 6483 6484 /* 6485 * add or delete a ntuple filter 6486 * 6487 * @param 6488 * dev: Pointer to struct rte_eth_dev. 6489 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6490 * add: if true, add filter, if false, remove filter 6491 * 6492 * @return 6493 * - On success, zero. 6494 * - On failure, a negative value. 
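 *
 * Illustrative usage (hypothetical values; mask semantics follow
 * ntuple_filter_to_5tuple() above, where an all-ones mask means
 * "compare this field" and 0 means "ignore it"):
 *
 *   struct rte_eth_ntuple_filter f = {
 *       .flags = RTE_5TUPLE_FLAGS,
 *       .dst_ip = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
 *       .dst_ip_mask = UINT32_MAX,
 *       .proto = IPPROTO_TCP,
 *       .proto_mask = UINT8_MAX,
 *       .priority = 1,
 *       .queue = 3,
 *   };
 *   ret = ixgbe_add_del_ntuple_filter(dev, &f, true);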
6495 */ 6496 int 6497 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 6498 struct rte_eth_ntuple_filter *ntuple_filter, 6499 bool add) 6500 { 6501 struct ixgbe_filter_info *filter_info = 6502 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6503 struct ixgbe_5tuple_filter_info filter_5tuple; 6504 struct ixgbe_5tuple_filter *filter; 6505 int ret; 6506 6507 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 6508 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 6509 return -EINVAL; 6510 } 6511 6512 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 6513 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 6514 if (ret < 0) 6515 return ret; 6516 6517 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 6518 &filter_5tuple); 6519 if (filter != NULL && add) { 6520 PMD_DRV_LOG(ERR, "filter exists."); 6521 return -EEXIST; 6522 } 6523 if (filter == NULL && !add) { 6524 PMD_DRV_LOG(ERR, "filter doesn't exist."); 6525 return -ENOENT; 6526 } 6527 6528 if (add) { 6529 filter = rte_zmalloc("ixgbe_5tuple_filter", 6530 sizeof(struct ixgbe_5tuple_filter), 0); 6531 if (filter == NULL) 6532 return -ENOMEM; 6533 rte_memcpy(&filter->filter_info, 6534 &filter_5tuple, 6535 sizeof(struct ixgbe_5tuple_filter_info)); 6536 filter->queue = ntuple_filter->queue; 6537 ret = ixgbe_add_5tuple_filter(dev, filter); 6538 if (ret < 0) { 6539 rte_free(filter); 6540 return ret; 6541 } 6542 } else 6543 ixgbe_remove_5tuple_filter(dev, filter); 6544 6545 return 0; 6546 } 6547 6548 int 6549 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 6550 struct rte_eth_ethertype_filter *filter, 6551 bool add) 6552 { 6553 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6554 struct ixgbe_filter_info *filter_info = 6555 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6556 uint32_t etqf = 0; 6557 uint32_t etqs = 0; 6558 int ret; 6559 struct ixgbe_ethertype_filter ethertype_filter; 6560 6561 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6562 return -EINVAL; 6563 6564 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 6565 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 6566 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 6567 " ethertype filter.", filter->ether_type); 6568 return -EINVAL; 6569 } 6570 6571 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 6572 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 6573 return -EINVAL; 6574 } 6575 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 6576 PMD_DRV_LOG(ERR, "drop option is unsupported."); 6577 return -EINVAL; 6578 } 6579 6580 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 6581 if (ret >= 0 && add) { 6582 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 6583 filter->ether_type); 6584 return -EEXIST; 6585 } 6586 if (ret < 0 && !add) { 6587 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 6588 filter->ether_type); 6589 return -ENOENT; 6590 } 6591 6592 if (add) { 6593 etqf = IXGBE_ETQF_FILTER_EN; 6594 etqf |= (uint32_t)filter->ether_type; 6595 etqs |= (uint32_t)((filter->queue << 6596 IXGBE_ETQS_RX_QUEUE_SHIFT) & 6597 IXGBE_ETQS_RX_QUEUE); 6598 etqs |= IXGBE_ETQS_QUEUE_EN; 6599 6600 ethertype_filter.ethertype = filter->ether_type; 6601 ethertype_filter.etqf = etqf; 6602 ethertype_filter.etqs = etqs; 6603 ethertype_filter.conf = FALSE; 6604 ret = ixgbe_ethertype_filter_insert(filter_info, 6605 &ethertype_filter); 6606 if (ret < 0) { 6607 PMD_DRV_LOG(ERR, "ethertype filters are full."); 6608 return -ENOSPC; 6609 } 6610 } else { 6611 ret = ixgbe_ethertype_filter_remove(filter_info,
(uint8_t)ret); 6612 if (ret < 0) 6613 return -ENOSYS; 6614 } 6615 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 6616 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 6617 IXGBE_WRITE_FLUSH(hw); 6618 6619 return 0; 6620 } 6621 6622 static int 6623 ixgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev, 6624 const struct rte_flow_ops **ops) 6625 { 6626 *ops = &ixgbe_flow_ops; 6627 return 0; 6628 } 6629 6630 static u8 * 6631 ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw, 6632 u8 **mc_addr_ptr, u32 *vmdq) 6633 { 6634 u8 *mc_addr; 6635 6636 *vmdq = 0; 6637 mc_addr = *mc_addr_ptr; 6638 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 6639 return mc_addr; 6640 } 6641 6642 static int 6643 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 6644 struct rte_ether_addr *mc_addr_set, 6645 uint32_t nb_mc_addr) 6646 { 6647 struct ixgbe_hw *hw; 6648 u8 *mc_addr_list; 6649 6650 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6651 mc_addr_list = (u8 *)mc_addr_set; 6652 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 6653 ixgbe_dev_addr_list_itr, TRUE); 6654 } 6655 6656 static uint64_t 6657 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 6658 { 6659 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6660 uint64_t systime_cycles; 6661 6662 switch (hw->mac.type) { 6663 case ixgbe_mac_X550: 6664 case ixgbe_mac_X550EM_x: 6665 case ixgbe_mac_X550EM_a: 6666 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 6667 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6668 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6669 * NSEC_PER_SEC; 6670 break; 6671 default: 6672 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6673 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6674 << 32; 6675 } 6676 6677 return systime_cycles; 6678 } 6679 6680 static uint64_t 6681 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6682 { 6683 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6684 uint64_t rx_tstamp_cycles; 6685 6686 switch (hw->mac.type) { 6687 case ixgbe_mac_X550: 6688 case ixgbe_mac_X550EM_x: 6689 case ixgbe_mac_X550EM_a: 6690 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6691 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6692 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6693 * NSEC_PER_SEC; 6694 break; 6695 default: 6696 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6697 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6698 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6699 << 32; 6700 } 6701 6702 return rx_tstamp_cycles; 6703 } 6704 6705 static uint64_t 6706 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6707 { 6708 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6709 uint64_t tx_tstamp_cycles; 6710 6711 switch (hw->mac.type) { 6712 case ixgbe_mac_X550: 6713 case ixgbe_mac_X550EM_x: 6714 case ixgbe_mac_X550EM_a: 6715 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6716 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6717 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6718 * NSEC_PER_SEC; 6719 break; 6720 default: 6721 /* TXSTMPL stores ns and TXSTMPH stores seconds. 
*/ 6722 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6723 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6724 << 32; 6725 } 6726 6727 return tx_tstamp_cycles; 6728 } 6729 6730 static void 6731 ixgbe_start_timecounters(struct rte_eth_dev *dev) 6732 { 6733 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6734 struct ixgbe_adapter *adapter = dev->data->dev_private; 6735 struct rte_eth_link link; 6736 uint32_t incval = 0; 6737 uint32_t shift = 0; 6738 6739 /* Get current link speed. */ 6740 ixgbe_dev_link_update(dev, 1); 6741 rte_eth_linkstatus_get(dev, &link); 6742 6743 switch (link.link_speed) { 6744 case ETH_SPEED_NUM_100M: 6745 incval = IXGBE_INCVAL_100; 6746 shift = IXGBE_INCVAL_SHIFT_100; 6747 break; 6748 case ETH_SPEED_NUM_1G: 6749 incval = IXGBE_INCVAL_1GB; 6750 shift = IXGBE_INCVAL_SHIFT_1GB; 6751 break; 6752 case ETH_SPEED_NUM_10G: 6753 default: 6754 incval = IXGBE_INCVAL_10GB; 6755 shift = IXGBE_INCVAL_SHIFT_10GB; 6756 break; 6757 } 6758 6759 switch (hw->mac.type) { 6760 case ixgbe_mac_X550: 6761 case ixgbe_mac_X550EM_x: 6762 case ixgbe_mac_X550EM_a: 6763 /* Independent of link speed. */ 6764 incval = 1; 6765 /* Cycles read will be interpreted as ns. */ 6766 shift = 0; 6767 /* Fall-through */ 6768 case ixgbe_mac_X540: 6769 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 6770 break; 6771 case ixgbe_mac_82599EB: 6772 incval >>= IXGBE_INCVAL_SHIFT_82599; 6773 shift -= IXGBE_INCVAL_SHIFT_82599; 6774 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 6775 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 6776 break; 6777 default: 6778 /* Not supported. */ 6779 return; 6780 } 6781 6782 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 6783 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6784 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6785 6786 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6787 adapter->systime_tc.cc_shift = shift; 6788 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 6789 6790 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6791 adapter->rx_tstamp_tc.cc_shift = shift; 6792 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6793 6794 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6795 adapter->tx_tstamp_tc.cc_shift = shift; 6796 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6797 } 6798 6799 static int 6800 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 6801 { 6802 struct ixgbe_adapter *adapter = dev->data->dev_private; 6803 6804 adapter->systime_tc.nsec += delta; 6805 adapter->rx_tstamp_tc.nsec += delta; 6806 adapter->tx_tstamp_tc.nsec += delta; 6807 6808 return 0; 6809 } 6810 6811 static int 6812 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 6813 { 6814 uint64_t ns; 6815 struct ixgbe_adapter *adapter = dev->data->dev_private; 6816 6817 ns = rte_timespec_to_ns(ts); 6818 /* Set the timecounters to a new value. 
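 *
 * (Illustrative application-level usage of the generic ethdev timesync
 * API that lands in these PMD callbacks; rte_eth_timesync_* are the
 * standard ethdev calls, the values are made up:)
 *
 *   struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
 *   rte_eth_timesync_enable(port_id);
 *   rte_eth_timesync_write_time(port_id, &ts);
 *   rte_eth_timesync_adjust_time(port_id, 1000);
 *   rte_eth_timesync_read_time(port_id, &ts);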
*/ 6819 adapter->systime_tc.nsec = ns; 6820 adapter->rx_tstamp_tc.nsec = ns; 6821 adapter->tx_tstamp_tc.nsec = ns; 6822 6823 return 0; 6824 } 6825 6826 static int 6827 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 6828 { 6829 uint64_t ns, systime_cycles; 6830 struct ixgbe_adapter *adapter = dev->data->dev_private; 6831 6832 systime_cycles = ixgbe_read_systime_cyclecounter(dev); 6833 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 6834 *ts = rte_ns_to_timespec(ns); 6835 6836 return 0; 6837 } 6838 6839 static int 6840 ixgbe_timesync_enable(struct rte_eth_dev *dev) 6841 { 6842 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6843 uint32_t tsync_ctl; 6844 uint32_t tsauxc; 6845 6846 /* Stop the timesync system time. */ 6847 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); 6848 /* Reset the timesync system time value. */ 6849 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); 6850 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); 6851 6852 /* Enable system time for platforms where it isn't on by default. */ 6853 tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); 6854 tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; 6855 IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); 6856 6857 ixgbe_start_timecounters(dev); 6858 6859 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6860 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 6861 (RTE_ETHER_TYPE_1588 | 6862 IXGBE_ETQF_FILTER_EN | 6863 IXGBE_ETQF_1588)); 6864 6865 /* Enable timestamping of received PTP packets. */ 6866 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6867 tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; 6868 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6869 6870 /* Enable timestamping of transmitted PTP packets. */ 6871 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6872 tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; 6873 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6874 6875 IXGBE_WRITE_FLUSH(hw); 6876 6877 return 0; 6878 } 6879 6880 static int 6881 ixgbe_timesync_disable(struct rte_eth_dev *dev) 6882 { 6883 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6884 uint32_t tsync_ctl; 6885 6886 /* Disable timestamping of transmitted PTP packets. */ 6887 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6888 tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; 6889 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6890 6891 /* Disable timestamping of received PTP packets. */ 6892 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6893 tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; 6894 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6895 6896 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6897 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 6898 6899 /* Stop incrementing the System Time registers.
*/ 6900 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); 6901 6902 return 0; 6903 } 6904 6905 static int 6906 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 6907 struct timespec *timestamp, 6908 uint32_t flags __rte_unused) 6909 { 6910 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6911 struct ixgbe_adapter *adapter = dev->data->dev_private; 6912 uint32_t tsync_rxctl; 6913 uint64_t rx_tstamp_cycles; 6914 uint64_t ns; 6915 6916 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6917 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) 6918 return -EINVAL; 6919 6920 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); 6921 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 6922 *timestamp = rte_ns_to_timespec(ns); 6923 6924 return 0; 6925 } 6926 6927 static int 6928 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 6929 struct timespec *timestamp) 6930 { 6931 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6932 struct ixgbe_adapter *adapter = dev->data->dev_private; 6933 uint32_t tsync_txctl; 6934 uint64_t tx_tstamp_cycles; 6935 uint64_t ns; 6936 6937 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6938 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) 6939 return -EINVAL; 6940 6941 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); 6942 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 6943 *timestamp = rte_ns_to_timespec(ns); 6944 6945 return 0; 6946 } 6947 6948 static int 6949 ixgbe_get_reg_length(struct rte_eth_dev *dev) 6950 { 6951 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6952 int count = 0; 6953 int g_ind = 0; 6954 const struct reg_info *reg_group; 6955 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 6956 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6957 6958 while ((reg_group = reg_set[g_ind++])) 6959 count += ixgbe_regs_group_count(reg_group); 6960 6961 return count; 6962 } 6963 6964 static int 6965 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 6966 { 6967 int count = 0; 6968 int g_ind = 0; 6969 const struct reg_info *reg_group; 6970 6971 while ((reg_group = ixgbevf_regs[g_ind++])) 6972 count += ixgbe_regs_group_count(reg_group); 6973 6974 return count; 6975 } 6976 6977 static int 6978 ixgbe_get_regs(struct rte_eth_dev *dev, 6979 struct rte_dev_reg_info *regs) 6980 { 6981 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6982 uint32_t *data = regs->data; 6983 int g_ind = 0; 6984 int count = 0; 6985 const struct reg_info *reg_group; 6986 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
6987 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6988 6989 if (data == NULL) { 6990 regs->length = ixgbe_get_reg_length(dev); 6991 regs->width = sizeof(uint32_t); 6992 return 0; 6993 } 6994 6995 /* Support only full register dump */ 6996 if ((regs->length == 0) || 6997 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 6998 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 6999 hw->device_id; 7000 while ((reg_group = reg_set[g_ind++])) 7001 count += ixgbe_read_regs_group(dev, &data[count], 7002 reg_group); 7003 return 0; 7004 } 7005 7006 return -ENOTSUP; 7007 } 7008 7009 static int 7010 ixgbevf_get_regs(struct rte_eth_dev *dev, 7011 struct rte_dev_reg_info *regs) 7012 { 7013 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7014 uint32_t *data = regs->data; 7015 int g_ind = 0; 7016 int count = 0; 7017 const struct reg_info *reg_group; 7018 7019 if (data == NULL) { 7020 regs->length = ixgbevf_get_reg_length(dev); 7021 regs->width = sizeof(uint32_t); 7022 return 0; 7023 } 7024 7025 /* Support only full register dump */ 7026 if ((regs->length == 0) || 7027 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 7028 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7029 hw->device_id; 7030 while ((reg_group = ixgbevf_regs[g_ind++])) 7031 count += ixgbe_read_regs_group(dev, &data[count], 7032 reg_group); 7033 return 0; 7034 } 7035 7036 return -ENOTSUP; 7037 } 7038 7039 static int 7040 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 7041 { 7042 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7043 7044 /* Return unit is byte count */ 7045 return hw->eeprom.word_size * 2; 7046 } 7047 7048 static int 7049 ixgbe_get_eeprom(struct rte_eth_dev *dev, 7050 struct rte_dev_eeprom_info *in_eeprom) 7051 { 7052 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7053 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7054 uint16_t *data = in_eeprom->data; 7055 int first, length; 7056 7057 first = in_eeprom->offset >> 1; 7058 length = in_eeprom->length >> 1; 7059 if ((first > hw->eeprom.word_size) || 7060 ((first + length) > hw->eeprom.word_size)) 7061 return -EINVAL; 7062 7063 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7064 7065 return eeprom->ops.read_buffer(hw, first, length, data); 7066 } 7067 7068 static int 7069 ixgbe_set_eeprom(struct rte_eth_dev *dev, 7070 struct rte_dev_eeprom_info *in_eeprom) 7071 { 7072 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7073 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7074 uint16_t *data = in_eeprom->data; 7075 int first, length; 7076 7077 first = in_eeprom->offset >> 1; 7078 length = in_eeprom->length >> 1; 7079 if ((first > hw->eeprom.word_size) || 7080 ((first + length) > hw->eeprom.word_size)) 7081 return -EINVAL; 7082 7083 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7084 7085 return eeprom->ops.write_buffer(hw, first, length, data); 7086 } 7087 7088 static int 7089 ixgbe_get_module_info(struct rte_eth_dev *dev, 7090 struct rte_eth_dev_module_info *modinfo) 7091 { 7092 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7093 uint32_t status; 7094 uint8_t sff8472_rev, addr_mode; 7095 bool page_swap = false; 7096 7097 /* Check whether we support SFF-8472 or not */ 7098 status = hw->phy.ops.read_i2c_eeprom(hw, 7099 IXGBE_SFF_SFF_8472_COMP, 7100 &sff8472_rev); 7101 if (status != 0) 7102 return -EIO; 7103 7104 /* addressing mode is not supported */ 7105 status = hw->phy.ops.read_i2c_eeprom(hw, 7106 
IXGBE_SFF_SFF_8472_SWAP, 7107 &addr_mode); 7108 if (status != 0) 7109 return -EIO; 7110 7111 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 7112 PMD_DRV_LOG(ERR, 7113 "Address change required to access page 0xA2, " 7114 "but not supported. Please report the module " 7115 "type to the driver maintainers."); 7116 page_swap = true; 7117 } 7118 7119 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 7120 /* We have a SFP, but it does not support SFF-8472 */ 7121 modinfo->type = RTE_ETH_MODULE_SFF_8079; 7122 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 7123 } else { 7124 /* We have a SFP which supports a revision of SFF-8472. */ 7125 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7126 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7127 } 7128 7129 return 0; 7130 } 7131 7132 static int 7133 ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 7134 struct rte_dev_eeprom_info *info) 7135 { 7136 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7137 uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; 7138 uint8_t databyte = 0xFF; 7139 uint8_t *data = info->data; 7140 uint32_t i = 0; 7141 7142 for (i = info->offset; i < info->offset + info->length; i++) { 7143 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 7144 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 7145 else 7146 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 7147 7148 if (status != 0) 7149 return -EIO; 7150 7151 data[i - info->offset] = databyte; 7152 } 7153 7154 return 0; 7155 } 7156 7157 uint16_t 7158 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 7159 switch (mac_type) { 7160 case ixgbe_mac_X550: 7161 case ixgbe_mac_X550EM_x: 7162 case ixgbe_mac_X550EM_a: 7163 return ETH_RSS_RETA_SIZE_512; 7164 case ixgbe_mac_X550_vf: 7165 case ixgbe_mac_X550EM_x_vf: 7166 case ixgbe_mac_X550EM_a_vf: 7167 return ETH_RSS_RETA_SIZE_64; 7168 case ixgbe_mac_X540_vf: 7169 case ixgbe_mac_82599_vf: 7170 return 0; 7171 default: 7172 return ETH_RSS_RETA_SIZE_128; 7173 } 7174 } 7175 7176 uint32_t 7177 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 7178 switch (mac_type) { 7179 case ixgbe_mac_X550: 7180 case ixgbe_mac_X550EM_x: 7181 case ixgbe_mac_X550EM_a: 7182 if (reta_idx < ETH_RSS_RETA_SIZE_128) 7183 return IXGBE_RETA(reta_idx >> 2); 7184 else 7185 return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); 7186 case ixgbe_mac_X550_vf: 7187 case ixgbe_mac_X550EM_x_vf: 7188 case ixgbe_mac_X550EM_a_vf: 7189 return IXGBE_VFRETA(reta_idx >> 2); 7190 default: 7191 return IXGBE_RETA(reta_idx >> 2); 7192 } 7193 } 7194 7195 uint32_t 7196 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 7197 switch (mac_type) { 7198 case ixgbe_mac_X550_vf: 7199 case ixgbe_mac_X550EM_x_vf: 7200 case ixgbe_mac_X550EM_a_vf: 7201 return IXGBE_VFMRQC; 7202 default: 7203 return IXGBE_MRQC; 7204 } 7205 } 7206 7207 uint32_t 7208 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 7209 switch (mac_type) { 7210 case ixgbe_mac_X550_vf: 7211 case ixgbe_mac_X550EM_x_vf: 7212 case ixgbe_mac_X550EM_a_vf: 7213 return IXGBE_VFRSSRK(i); 7214 default: 7215 return IXGBE_RSSRK(i); 7216 } 7217 } 7218 7219 bool 7220 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 7221 switch (mac_type) { 7222 case ixgbe_mac_82599_vf: 7223 case ixgbe_mac_X540_vf: 7224 return 0; 7225 default: 7226 return 1; 7227 } 7228 } 7229 7230 static int 7231 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 7232 struct rte_eth_dcb_info *dcb_info) 7233 { 7234 struct ixgbe_dcb_config *dcb_config = 7235 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 7236 struct 
ixgbe_dcb_tc_config *tc; 7237 struct rte_eth_dcb_tc_queue_mapping *tc_queue; 7238 uint8_t nb_tcs; 7239 uint8_t i, j; 7240 7241 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) 7242 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 7243 else 7244 dcb_info->nb_tcs = 1; 7245 7246 tc_queue = &dcb_info->tc_queue; 7247 nb_tcs = dcb_info->nb_tcs; 7248 7249 if (dcb_config->vt_mode) { /* vt is enabled*/ 7250 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 7251 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 7252 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 7253 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 7254 if (RTE_ETH_DEV_SRIOV(dev).active > 0) { 7255 for (j = 0; j < nb_tcs; j++) { 7256 tc_queue->tc_rxq[0][j].base = j; 7257 tc_queue->tc_rxq[0][j].nb_queue = 1; 7258 tc_queue->tc_txq[0][j].base = j; 7259 tc_queue->tc_txq[0][j].nb_queue = 1; 7260 } 7261 } else { 7262 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 7263 for (j = 0; j < nb_tcs; j++) { 7264 tc_queue->tc_rxq[i][j].base = 7265 i * nb_tcs + j; 7266 tc_queue->tc_rxq[i][j].nb_queue = 1; 7267 tc_queue->tc_txq[i][j].base = 7268 i * nb_tcs + j; 7269 tc_queue->tc_txq[i][j].nb_queue = 1; 7270 } 7271 } 7272 } 7273 } else { /* vt is disabled*/ 7274 struct rte_eth_dcb_rx_conf *rx_conf = 7275 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 7276 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 7277 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 7278 if (dcb_info->nb_tcs == ETH_4_TCS) { 7279 for (i = 0; i < dcb_info->nb_tcs; i++) { 7280 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 7281 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7282 } 7283 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7284 dcb_info->tc_queue.tc_txq[0][1].base = 64; 7285 dcb_info->tc_queue.tc_txq[0][2].base = 96; 7286 dcb_info->tc_queue.tc_txq[0][3].base = 112; 7287 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 7288 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7289 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7290 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7291 } else if (dcb_info->nb_tcs == ETH_8_TCS) { 7292 for (i = 0; i < dcb_info->nb_tcs; i++) { 7293 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 7294 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7295 } 7296 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7297 dcb_info->tc_queue.tc_txq[0][1].base = 32; 7298 dcb_info->tc_queue.tc_txq[0][2].base = 64; 7299 dcb_info->tc_queue.tc_txq[0][3].base = 80; 7300 dcb_info->tc_queue.tc_txq[0][4].base = 96; 7301 dcb_info->tc_queue.tc_txq[0][5].base = 104; 7302 dcb_info->tc_queue.tc_txq[0][6].base = 112; 7303 dcb_info->tc_queue.tc_txq[0][7].base = 120; 7304 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 7305 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7306 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7307 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7308 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 7309 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 7310 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 7311 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 7312 } 7313 } 7314 for (i = 0; i < dcb_info->nb_tcs; i++) { 7315 tc = &dcb_config->tc_config[i]; 7316 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; 7317 } 7318 return 0; 7319 } 7320 7321 /* Update e-tag ether type */ 7322 static int 7323 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, 7324 uint16_t ether_type) 7325 { 7326 uint32_t etag_etype; 7327 7328 if (hw->mac.type != ixgbe_mac_X550 && 7329 hw->mac.type != ixgbe_mac_X550EM_x && 7330 hw->mac.type != ixgbe_mac_X550EM_a) { 7331 return -ENOTSUP; 7332 } 
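/*
 * Only the X550 family implements E-tag (IEEE 802.1BR) offload; other MAC
 * types return -ENOTSUP above. The read-modify-write below replaces only the
 * low 16 EtherType bits of the ETAG_ETYPE register (IXGBE_ETAG_ETYPE_MASK)
 * and leaves the remaining bits, such as the VALID flag, untouched.
 */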
7333 7334 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7335 etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; 7336 etag_etype |= ether_type; 7337 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7338 IXGBE_WRITE_FLUSH(hw); 7339 7340 return 0; 7341 } 7342 7343 /* Enable e-tag tunnel */ 7344 static int 7345 ixgbe_e_tag_enable(struct ixgbe_hw *hw) 7346 { 7347 uint32_t etag_etype; 7348 7349 if (hw->mac.type != ixgbe_mac_X550 && 7350 hw->mac.type != ixgbe_mac_X550EM_x && 7351 hw->mac.type != ixgbe_mac_X550EM_a) { 7352 return -ENOTSUP; 7353 } 7354 7355 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7356 etag_etype |= IXGBE_ETAG_ETYPE_VALID; 7357 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7358 IXGBE_WRITE_FLUSH(hw); 7359 7360 return 0; 7361 } 7362 7363 static int 7364 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, 7365 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7366 { 7367 int ret = 0; 7368 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7369 uint32_t i, rar_entries; 7370 uint32_t rar_low, rar_high; 7371 7372 if (hw->mac.type != ixgbe_mac_X550 && 7373 hw->mac.type != ixgbe_mac_X550EM_x && 7374 hw->mac.type != ixgbe_mac_X550EM_a) { 7375 return -ENOTSUP; 7376 } 7377 7378 rar_entries = ixgbe_get_num_rx_addrs(hw); 7379 7380 for (i = 1; i < rar_entries; i++) { 7381 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7382 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 7383 if ((rar_high & IXGBE_RAH_AV) && 7384 (rar_high & IXGBE_RAH_ADTYPE) && 7385 ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == 7386 l2_tunnel->tunnel_id)) { 7387 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 7388 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 7389 7390 ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); 7391 7392 return ret; 7393 } 7394 } 7395 7396 return ret; 7397 } 7398 7399 static int 7400 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, 7401 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7402 { 7403 int ret = 0; 7404 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7405 uint32_t i, rar_entries; 7406 uint32_t rar_low, rar_high; 7407 7408 if (hw->mac.type != ixgbe_mac_X550 && 7409 hw->mac.type != ixgbe_mac_X550EM_x && 7410 hw->mac.type != ixgbe_mac_X550EM_a) { 7411 return -ENOTSUP; 7412 } 7413 7414 /* One entry for one tunnel. Try to remove potential existing entry. */ 7415 ixgbe_e_tag_filter_del(dev, l2_tunnel); 7416 7417 rar_entries = ixgbe_get_num_rx_addrs(hw); 7418 7419 for (i = 1; i < rar_entries; i++) { 7420 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7421 if (rar_high & IXGBE_RAH_AV) { 7422 continue; 7423 } else { 7424 ixgbe_set_vmdq(hw, i, l2_tunnel->pool); 7425 rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; 7426 rar_low = l2_tunnel->tunnel_id; 7427 7428 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); 7429 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); 7430 7431 return ret; 7432 } 7433 } 7434 7435 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
7436 " Please remove a rule before adding a new one."); 7437 return -EINVAL; 7438 } 7439 7440 static inline struct ixgbe_l2_tn_filter * 7441 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, 7442 struct ixgbe_l2_tn_key *key) 7443 { 7444 int ret; 7445 7446 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 7447 if (ret < 0) 7448 return NULL; 7449 7450 return l2_tn_info->hash_map[ret]; 7451 } 7452 7453 static inline int 7454 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7455 struct ixgbe_l2_tn_filter *l2_tn_filter) 7456 { 7457 int ret; 7458 7459 ret = rte_hash_add_key(l2_tn_info->hash_handle, 7460 &l2_tn_filter->key); 7461 7462 if (ret < 0) { 7463 PMD_DRV_LOG(ERR, 7464 "Failed to insert L2 tunnel filter" 7465 " to hash table %d!", 7466 ret); 7467 return ret; 7468 } 7469 7470 l2_tn_info->hash_map[ret] = l2_tn_filter; 7471 7472 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7473 7474 return 0; 7475 } 7476 7477 static inline int 7478 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7479 struct ixgbe_l2_tn_key *key) 7480 { 7481 int ret; 7482 struct ixgbe_l2_tn_filter *l2_tn_filter; 7483 7484 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 7485 7486 if (ret < 0) { 7487 PMD_DRV_LOG(ERR, 7488 "No such L2 tunnel filter to delete %d!", 7489 ret); 7490 return ret; 7491 } 7492 7493 l2_tn_filter = l2_tn_info->hash_map[ret]; 7494 l2_tn_info->hash_map[ret] = NULL; 7495 7496 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7497 rte_free(l2_tn_filter); 7498 7499 return 0; 7500 } 7501 7502 /* Add l2 tunnel filter */ 7503 int 7504 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 7505 struct ixgbe_l2_tunnel_conf *l2_tunnel, 7506 bool restore) 7507 { 7508 int ret; 7509 struct ixgbe_l2_tn_info *l2_tn_info = 7510 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7511 struct ixgbe_l2_tn_key key; 7512 struct ixgbe_l2_tn_filter *node; 7513 7514 if (!restore) { 7515 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7516 key.tn_id = l2_tunnel->tunnel_id; 7517 7518 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); 7519 7520 if (node) { 7521 PMD_DRV_LOG(ERR, 7522 "The L2 tunnel filter already exists!"); 7523 return -EINVAL; 7524 } 7525 7526 node = rte_zmalloc("ixgbe_l2_tn", 7527 sizeof(struct ixgbe_l2_tn_filter), 7528 0); 7529 if (!node) 7530 return -ENOMEM; 7531 7532 rte_memcpy(&node->key, 7533 &key, 7534 sizeof(struct ixgbe_l2_tn_key)); 7535 node->pool = l2_tunnel->pool; 7536 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); 7537 if (ret < 0) { 7538 rte_free(node); 7539 return ret; 7540 } 7541 } 7542 7543 switch (l2_tunnel->l2_tunnel_type) { 7544 case RTE_L2_TUNNEL_TYPE_E_TAG: 7545 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 7546 break; 7547 default: 7548 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7549 ret = -EINVAL; 7550 break; 7551 } 7552 7553 if ((!restore) && (ret < 0)) 7554 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7555 7556 return ret; 7557 } 7558 7559 /* Delete l2 tunnel filter */ 7560 int 7561 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 7562 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7563 { 7564 int ret; 7565 struct ixgbe_l2_tn_info *l2_tn_info = 7566 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7567 struct ixgbe_l2_tn_key key; 7568 7569 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7570 key.tn_id = l2_tunnel->tunnel_id; 7571 ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7572 if (ret < 0) 7573 return ret; 7574 7575 switch (l2_tunnel->l2_tunnel_type) { 7576 case 
RTE_L2_TUNNEL_TYPE_E_TAG: 7577 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); 7578 break; 7579 default: 7580 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7581 ret = -EINVAL; 7582 break; 7583 } 7584 7585 return ret; 7586 } 7587 7588 static int 7589 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 7590 { 7591 int ret = 0; 7592 uint32_t ctrl; 7593 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7594 7595 if (hw->mac.type != ixgbe_mac_X550 && 7596 hw->mac.type != ixgbe_mac_X550EM_x && 7597 hw->mac.type != ixgbe_mac_X550EM_a) { 7598 return -ENOTSUP; 7599 } 7600 7601 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 7602 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 7603 if (en) 7604 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; 7605 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 7606 7607 return ret; 7608 } 7609 7610 static int 7611 ixgbe_update_vxlan_port(struct ixgbe_hw *hw, 7612 uint16_t port) 7613 { 7614 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); 7615 IXGBE_WRITE_FLUSH(hw); 7616 7617 return 0; 7618 } 7619 7620 /* There's only one register for VxLAN UDP port. 7621 * So, we cannot add several ports. Will update it. 7622 */ 7623 static int 7624 ixgbe_add_vxlan_port(struct ixgbe_hw *hw, 7625 uint16_t port) 7626 { 7627 if (port == 0) { 7628 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 7629 return -EINVAL; 7630 } 7631 7632 return ixgbe_update_vxlan_port(hw, port); 7633 } 7634 7635 /* We cannot delete the VxLAN port. For there's a register for VxLAN 7636 * UDP port, it must have a value. 7637 * So, will reset it to the original value 0. 7638 */ 7639 static int 7640 ixgbe_del_vxlan_port(struct ixgbe_hw *hw, 7641 uint16_t port) 7642 { 7643 uint16_t cur_port; 7644 7645 cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); 7646 7647 if (cur_port != port) { 7648 PMD_DRV_LOG(ERR, "Port %u does not exist.", port); 7649 return -EINVAL; 7650 } 7651 7652 return ixgbe_update_vxlan_port(hw, 0); 7653 } 7654 7655 /* Add UDP tunneling port */ 7656 static int 7657 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 7658 struct rte_eth_udp_tunnel *udp_tunnel) 7659 { 7660 int ret = 0; 7661 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7662 7663 if (hw->mac.type != ixgbe_mac_X550 && 7664 hw->mac.type != ixgbe_mac_X550EM_x && 7665 hw->mac.type != ixgbe_mac_X550EM_a) { 7666 return -ENOTSUP; 7667 } 7668 7669 if (udp_tunnel == NULL) 7670 return -EINVAL; 7671 7672 switch (udp_tunnel->prot_type) { 7673 case RTE_TUNNEL_TYPE_VXLAN: 7674 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); 7675 break; 7676 7677 case RTE_TUNNEL_TYPE_GENEVE: 7678 case RTE_TUNNEL_TYPE_TEREDO: 7679 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7680 ret = -EINVAL; 7681 break; 7682 7683 default: 7684 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7685 ret = -EINVAL; 7686 break; 7687 } 7688 7689 return ret; 7690 } 7691 7692 /* Remove UDP tunneling port */ 7693 static int 7694 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 7695 struct rte_eth_udp_tunnel *udp_tunnel) 7696 { 7697 int ret = 0; 7698 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7699 7700 if (hw->mac.type != ixgbe_mac_X550 && 7701 hw->mac.type != ixgbe_mac_X550EM_x && 7702 hw->mac.type != ixgbe_mac_X550EM_a) { 7703 return -ENOTSUP; 7704 } 7705 7706 if (udp_tunnel == NULL) 7707 return -EINVAL; 7708 7709 switch (udp_tunnel->prot_type) { 7710 case RTE_TUNNEL_TYPE_VXLAN: 7711 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); 7712 break; 7713 case RTE_TUNNEL_TYPE_GENEVE: 7714 case RTE_TUNNEL_TYPE_TEREDO: 
7715 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7716 ret = -EINVAL; 7717 break; 7718 default: 7719 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7720 ret = -EINVAL; 7721 break; 7722 } 7723 7724 return ret; 7725 } 7726 7727 static int 7728 ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) 7729 { 7730 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7731 int ret; 7732 7733 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) { 7734 case IXGBE_SUCCESS: 7735 ret = 0; 7736 break; 7737 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7738 ret = -ENOTSUP; 7739 break; 7740 default: 7741 ret = -EAGAIN; 7742 break; 7743 } 7744 7745 return ret; 7746 } 7747 7748 static int 7749 ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) 7750 { 7751 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7752 int ret; 7753 7754 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) { 7755 case IXGBE_SUCCESS: 7756 ret = 0; 7757 break; 7758 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7759 ret = -ENOTSUP; 7760 break; 7761 default: 7762 ret = -EAGAIN; 7763 break; 7764 } 7765 7766 return ret; 7767 } 7768 7769 static int 7770 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) 7771 { 7772 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7773 int ret; 7774 int mode = IXGBEVF_XCAST_MODE_ALLMULTI; 7775 7776 switch (hw->mac.ops.update_xcast_mode(hw, mode)) { 7777 case IXGBE_SUCCESS: 7778 ret = 0; 7779 break; 7780 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7781 ret = -ENOTSUP; 7782 break; 7783 default: 7784 ret = -EAGAIN; 7785 break; 7786 } 7787 7788 return ret; 7789 } 7790 7791 static int 7792 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) 7793 { 7794 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7795 int ret; 7796 7797 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { 7798 case IXGBE_SUCCESS: 7799 ret = 0; 7800 break; 7801 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7802 ret = -ENOTSUP; 7803 break; 7804 default: 7805 ret = -EAGAIN; 7806 break; 7807 } 7808 7809 return ret; 7810 } 7811 7812 static void ixgbevf_mbx_process(struct rte_eth_dev *dev) 7813 { 7814 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7815 u32 in_msg = 0; 7816 7817 /* peek the message first */ 7818 in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); 7819 7820 /* PF reset VF event */ 7821 if (in_msg == IXGBE_PF_CONTROL_MSG) { 7822 /* dummy mbx read to ack pf */ 7823 if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) 7824 return; 7825 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 7826 NULL); 7827 } 7828 } 7829 7830 static int 7831 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) 7832 { 7833 uint32_t eicr; 7834 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7835 struct ixgbe_interrupt *intr = 7836 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7837 ixgbevf_intr_disable(dev); 7838 7839 /* read-on-clear nic registers here */ 7840 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); 7841 intr->flags = 0; 7842 7843 /* only one misc vector supported - mailbox */ 7844 eicr &= IXGBE_VTEICR_MASK; 7845 if (eicr == IXGBE_MISC_VEC_ID) 7846 intr->flags |= IXGBE_FLAG_MAILBOX; 7847 7848 return 0; 7849 } 7850 7851 static int 7852 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) 7853 { 7854 struct ixgbe_interrupt *intr = 7855 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7856 7857 if (intr->flags & IXGBE_FLAG_MAILBOX) { 7858 ixgbevf_mbx_process(dev); 7859 intr->flags &= 
~IXGBE_FLAG_MAILBOX; 7860 } 7861 7862 ixgbevf_intr_enable(dev); 7863 7864 return 0; 7865 } 7866 7867 static void 7868 ixgbevf_dev_interrupt_handler(void *param) 7869 { 7870 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 7871 7872 ixgbevf_dev_interrupt_get_status(dev); 7873 ixgbevf_dev_interrupt_action(dev); 7874 } 7875 7876 /** 7877 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path 7878 * @hw: pointer to hardware structure 7879 * 7880 * Stops the transmit data path and waits for the HW to internally empty 7881 * the Tx security block 7882 **/ 7883 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) 7884 { 7885 #define IXGBE_MAX_SECTX_POLL 40 7886 7887 int i; 7888 int sectxreg; 7889 7890 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 7891 sectxreg |= IXGBE_SECTXCTRL_TX_DIS; 7892 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 7893 for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { 7894 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); 7895 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) 7896 break; 7897 /* Use interrupt-safe sleep just in case */ 7898 usec_delay(1000); 7899 } 7900 7901 /* For informational purposes only */ 7902 if (i >= IXGBE_MAX_SECTX_POLL) 7903 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " 7904 "path fully disabled. Continuing with init."); 7905 7906 return IXGBE_SUCCESS; 7907 } 7908 7909 /** 7910 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path 7911 * @hw: pointer to hardware structure 7912 * 7913 * Enables the transmit data path. 7914 **/ 7915 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) 7916 { 7917 uint32_t sectxreg; 7918 7919 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 7920 sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; 7921 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 7922 IXGBE_WRITE_FLUSH(hw); 7923 7924 return IXGBE_SUCCESS; 7925 } 7926 7927 /* restore n-tuple filter */ 7928 static inline void 7929 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) 7930 { 7931 struct ixgbe_filter_info *filter_info = 7932 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 7933 struct ixgbe_5tuple_filter *node; 7934 7935 TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { 7936 ixgbe_inject_5tuple_filter(dev, node); 7937 } 7938 } 7939 7940 /* restore ethernet type filter */ 7941 static inline void 7942 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) 7943 { 7944 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7945 struct ixgbe_filter_info *filter_info = 7946 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 7947 int i; 7948 7949 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 7950 if (filter_info->ethertype_mask & (1 << i)) { 7951 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 7952 filter_info->ethertype_filters[i].etqf); 7953 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 7954 filter_info->ethertype_filters[i].etqs); 7955 IXGBE_WRITE_FLUSH(hw); 7956 } 7957 } 7958 } 7959 7960 /* restore SYN filter */ 7961 static inline void 7962 ixgbe_syn_filter_restore(struct rte_eth_dev *dev) 7963 { 7964 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7965 struct ixgbe_filter_info *filter_info = 7966 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 7967 uint32_t synqf; 7968 7969 synqf = filter_info->syn_info; 7970 7971 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 7972 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 7973 IXGBE_WRITE_FLUSH(hw); 7974 } 7975 } 7976 7977 /* restore L2 tunnel filter */ 7978 static inline void 7979 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) 7980 { 7981 
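/*
 * Each software L2 tunnel (E-tag) filter is tracked twice: in the rte_hash
 * table (key -> index, with l2_tn_info->hash_map[index] pointing at the
 * node) for lookups, and on the l2_tn_list TAILQ for iteration. Walk the
 * list and re-program every saved filter into hardware; passing TRUE for
 * restore makes ixgbe_dev_l2_tunnel_filter_add() skip the redundant
 * software insertion.
 */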
struct ixgbe_l2_tn_info *l2_tn_info = 7982 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7983 struct ixgbe_l2_tn_filter *node; 7984 struct ixgbe_l2_tunnel_conf l2_tn_conf; 7985 7986 TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { 7987 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; 7988 l2_tn_conf.tunnel_id = node->key.tn_id; 7989 l2_tn_conf.pool = node->pool; 7990 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); 7991 } 7992 } 7993 7994 /* restore rss filter */ 7995 static inline void 7996 ixgbe_rss_filter_restore(struct rte_eth_dev *dev) 7997 { 7998 struct ixgbe_filter_info *filter_info = 7999 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8000 8001 if (filter_info->rss_info.conf.queue_num) 8002 ixgbe_config_rss_filter(dev, 8003 &filter_info->rss_info, TRUE); 8004 } 8005 8006 static int 8007 ixgbe_filter_restore(struct rte_eth_dev *dev) 8008 { 8009 ixgbe_ntuple_filter_restore(dev); 8010 ixgbe_ethertype_filter_restore(dev); 8011 ixgbe_syn_filter_restore(dev); 8012 ixgbe_fdir_filter_restore(dev); 8013 ixgbe_l2_tn_filter_restore(dev); 8014 ixgbe_rss_filter_restore(dev); 8015 8016 return 0; 8017 } 8018 8019 static void 8020 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) 8021 { 8022 struct ixgbe_l2_tn_info *l2_tn_info = 8023 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8024 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8025 8026 if (l2_tn_info->e_tag_en) 8027 (void)ixgbe_e_tag_enable(hw); 8028 8029 if (l2_tn_info->e_tag_fwd_en) 8030 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); 8031 8032 (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); 8033 } 8034 8035 /* remove all the n-tuple filters */ 8036 void 8037 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) 8038 { 8039 struct ixgbe_filter_info *filter_info = 8040 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8041 struct ixgbe_5tuple_filter *p_5tuple; 8042 8043 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) 8044 ixgbe_remove_5tuple_filter(dev, p_5tuple); 8045 } 8046 8047 /* remove all the ether type filters */ 8048 void 8049 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) 8050 { 8051 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8052 struct ixgbe_filter_info *filter_info = 8053 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8054 int i; 8055 8056 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8057 if (filter_info->ethertype_mask & (1 << i) && 8058 !filter_info->ethertype_filters[i].conf) { 8059 (void)ixgbe_ethertype_filter_remove(filter_info, 8060 (uint8_t)i); 8061 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); 8062 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); 8063 IXGBE_WRITE_FLUSH(hw); 8064 } 8065 } 8066 } 8067 8068 /* remove the SYN filter */ 8069 void 8070 ixgbe_clear_syn_filter(struct rte_eth_dev *dev) 8071 { 8072 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8073 struct ixgbe_filter_info *filter_info = 8074 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8075 8076 if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { 8077 filter_info->syn_info = 0; 8078 8079 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); 8080 IXGBE_WRITE_FLUSH(hw); 8081 } 8082 } 8083 8084 /* remove all the L2 tunnel filters */ 8085 int 8086 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) 8087 { 8088 struct ixgbe_l2_tn_info *l2_tn_info = 8089 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8090 struct ixgbe_l2_tn_filter *l2_tn_filter; 8091 struct ixgbe_l2_tunnel_conf 
l2_tn_conf;
8092 int ret = 0;
8093
8094 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
8095 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
8096 l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
8097 l2_tn_conf.pool = l2_tn_filter->pool;
8098 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
8099 if (ret < 0)
8100 return ret;
8101 }
8102
8103 return 0;
8104 }
8105
8106 void
8107 ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev,
8108 struct ixgbe_macsec_setting *macsec_setting)
8109 {
8110 struct ixgbe_macsec_setting *macsec =
8111 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
8112
8113 macsec->offload_en = macsec_setting->offload_en;
8114 macsec->encrypt_en = macsec_setting->encrypt_en;
8115 macsec->replayprotect_en = macsec_setting->replayprotect_en;
8116 }
8117
8118 void
8119 ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev)
8120 {
8121 struct ixgbe_macsec_setting *macsec =
8122 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
8123
8124 macsec->offload_en = 0;
8125 macsec->encrypt_en = 0;
8126 macsec->replayprotect_en = 0;
8127 }
8128
8129 void
8130 ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
8131 struct ixgbe_macsec_setting *macsec_setting)
8132 {
8133 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8134 uint32_t ctrl;
8135 uint8_t en = macsec_setting->encrypt_en;
8136 uint8_t rp = macsec_setting->replayprotect_en;
8137
8138 /**
8139 * Workaround:
8140 * The base code implements no Tx equivalent of
8141 * ixgbe_disable_sec_rx_path() and the base code
8142 * must not be modified in DPDK, so call the
8143 * locally implemented helper directly for now.
8144 * Hardware support has already been checked by
8145 * ixgbe_disable_sec_rx_path().
8146 */
8147 ixgbe_disable_sec_tx_path_generic(hw);
8148
8149 /* Enable Ethernet CRC (required by MACsec offload) */
8150 ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
8151 ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
8152 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
8153
8154 /* Enable the TX and RX crypto engines */
8155 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8156 ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
8157 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
8158
8159 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
8160 ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
8161 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
8162
8163 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
8164 ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
8165 ctrl |= 0x3;
8166 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
8167
8168 /* Enable SA lookup */
8169 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
8170 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
8171 ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
8172 IXGBE_LSECTXCTRL_AUTH;
8173 ctrl |= IXGBE_LSECTXCTRL_AISCI;
8174 ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
8175 ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
8176 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
8177
8178 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
8179 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
8180 ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
8181 ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
8182 if (rp)
8183 ctrl |= IXGBE_LSECRXCTRL_RP;
8184 else
8185 ctrl &= ~IXGBE_LSECRXCTRL_RP;
8186 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
8187
8188 /* Start the data paths */
8189 ixgbe_enable_sec_rx_path(hw);
8190 /**
8191 * Workaround:
8192 * The base code implements no Tx equivalent of
8193 * ixgbe_enable_sec_rx_path() and the base code
8194 * must not be modified in DPDK, so call the
8195 * locally implemented helper directly for now.
8196 */
8197 ixgbe_enable_sec_tx_path_generic(hw);
8198 }
8199
8200 void
8201 ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev)
8202 {
8203 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8204 uint32_t ctrl;
8205
8206 /**
8207 * Workaround:
8208 * The base code implements no Tx equivalent of
8209 * ixgbe_disable_sec_rx_path() and the base code
8210 * must not be modified in DPDK, so call the
8211 * locally implemented helper directly for now.
8212 * Hardware support has already been checked by
8213 * ixgbe_disable_sec_rx_path().
8214 */
8215 ixgbe_disable_sec_tx_path_generic(hw);
8216
8217 /* Disable the TX and RX crypto engines */
8218 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8219 ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
8220 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
8221
8222 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
8223 ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
8224 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
8225
8226 /* Disable SA lookup */
8227 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
8228 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
8229 ctrl |= IXGBE_LSECTXCTRL_DISABLE;
8230 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
8231
8232 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
8233 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
8234 ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
8235 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
8236
8237 /* Start the data paths */
8238 ixgbe_enable_sec_rx_path(hw);
8239 /**
8240 * Workaround:
8241 * The base code implements no Tx equivalent of
8242 * ixgbe_enable_sec_rx_path() and the base code
8243 * must not be modified in DPDK, so call the
8244 * locally implemented helper directly for now.
8245 */
8246 ixgbe_enable_sec_tx_path_generic(hw);
8247 }
8248
8249 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
8250 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
8251 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
8252 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
8253 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
8254 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
8255 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
8256 IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
8257
8258 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_init, init, NOTICE);
8259 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_driver, driver, NOTICE);
8260
8261 #ifdef RTE_ETHDEV_DEBUG_RX
8262 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_rx, rx, DEBUG);
8263 #endif
8264 #ifdef RTE_ETHDEV_DEBUG_TX
8265 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_tx, tx, DEBUG);
8266 #endif
8267
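
/*
 * Illustrative sketch, not built by default: how an application could
 * exercise the get_module_info/get_module_eeprom callbacks implemented
 * above through the generic ethdev API to identify a plugged SFP/SFP+
 * module. The guard macro IXGBE_EXAMPLE_CODE and the function name are
 * placeholders for documentation purposes only and are not part of the
 * driver.
 */
#ifdef IXGBE_EXAMPLE_CODE
static int
ixgbe_example_dump_module_id(uint16_t port_id)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo;
	uint8_t buf[RTE_ETH_MODULE_SFF_8079_LEN];
	int ret;

	/* Ask the PMD whether the module maps as SFF-8079 or SFF-8472. */
	ret = rte_eth_dev_get_module_info(port_id, &minfo);
	if (ret != 0)
		return ret;

	memset(&einfo, 0, sizeof(einfo));
	einfo.offset = 0;
	einfo.length = RTE_MIN(minfo.eeprom_len, (uint32_t)sizeof(buf));
	einfo.data = buf;

	/* Reads are served one byte at a time by ixgbe_get_module_eeprom(). */
	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
	if (ret != 0)
		return ret;

	/* Byte 0 of the SFF map is the identifier (0x03 means SFP/SFP+). */
	printf("port %u module identifier: 0x%02x\n",
		(unsigned int)port_id, (unsigned int)buf[0]);
	return 0;
}
#endif /* IXGBE_EXAMPLE_CODE */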