1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <sys/queue.h> 6 #include <stdio.h> 7 #include <errno.h> 8 #include <stdint.h> 9 #include <string.h> 10 #include <unistd.h> 11 #include <stdarg.h> 12 #include <inttypes.h> 13 #include <rte_string_fns.h> 14 #include <rte_byteorder.h> 15 #include <rte_common.h> 16 #include <rte_cycles.h> 17 18 #include <rte_interrupts.h> 19 #include <rte_log.h> 20 #include <rte_debug.h> 21 #include <rte_pci.h> 22 #include <rte_bus_pci.h> 23 #include <rte_branch_prediction.h> 24 #include <rte_memory.h> 25 #include <rte_kvargs.h> 26 #include <rte_eal.h> 27 #include <rte_alarm.h> 28 #include <rte_ether.h> 29 #include <ethdev_driver.h> 30 #include <ethdev_pci.h> 31 #include <rte_malloc.h> 32 #include <rte_random.h> 33 #include <rte_dev.h> 34 #include <rte_hash_crc.h> 35 #ifdef RTE_LIB_SECURITY 36 #include <rte_security_driver.h> 37 #endif 38 39 #include "ixgbe_logs.h" 40 #include "base/ixgbe_api.h" 41 #include "base/ixgbe_vf.h" 42 #include "base/ixgbe_common.h" 43 #include "ixgbe_ethdev.h" 44 #include "ixgbe_bypass.h" 45 #include "ixgbe_rxtx.h" 46 #include "base/ixgbe_type.h" 47 #include "base/ixgbe_phy.h" 48 #include "base/ixgbe_osdep.h" 49 #include "ixgbe_regs.h" 50 51 /* 52 * High threshold controlling when to start sending XOFF frames. Must be at 53 * least 8 bytes less than receive packet buffer size. This value is in units 54 * of 1024 bytes. 55 */ 56 #define IXGBE_FC_HI 0x80 57 58 /* 59 * Low threshold controlling when to start sending XON frames. This value is 60 * in units of 1024 bytes. 61 */ 62 #define IXGBE_FC_LO 0x40 63 64 /* Timer value included in XOFF frames. */ 65 #define IXGBE_FC_PAUSE 0x680 66 67 /*Default value of Max Rx Queue*/ 68 #define IXGBE_MAX_RX_QUEUE_NUM 128 69 70 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ 71 #define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ 72 #define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */ 73 74 #define IXGBE_MMW_SIZE_DEFAULT 0x4 75 #define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14 76 #define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */ 77 78 /* 79 * Default values for RX/TX configuration 80 */ 81 #define IXGBE_DEFAULT_RX_FREE_THRESH 32 82 #define IXGBE_DEFAULT_RX_PTHRESH 8 83 #define IXGBE_DEFAULT_RX_HTHRESH 8 84 #define IXGBE_DEFAULT_RX_WTHRESH 0 85 86 #define IXGBE_DEFAULT_TX_FREE_THRESH 32 87 #define IXGBE_DEFAULT_TX_PTHRESH 32 88 #define IXGBE_DEFAULT_TX_HTHRESH 0 89 #define IXGBE_DEFAULT_TX_WTHRESH 0 90 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32 91 92 /* Bit shift and mask */ 93 #define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2) 94 #define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) 95 #define IXGBE_8_BIT_WIDTH CHAR_BIT 96 #define IXGBE_8_BIT_MASK UINT8_MAX 97 98 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */ 99 100 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) 101 102 /* Additional timesync values. 
*/ 103 #define NSEC_PER_SEC 1000000000L 104 #define IXGBE_INCVAL_10GB 0x66666666 105 #define IXGBE_INCVAL_1GB 0x40000000 106 #define IXGBE_INCVAL_100 0x50000000 107 #define IXGBE_INCVAL_SHIFT_10GB 28 108 #define IXGBE_INCVAL_SHIFT_1GB 24 109 #define IXGBE_INCVAL_SHIFT_100 21 110 #define IXGBE_INCVAL_SHIFT_82599 7 111 #define IXGBE_INCPER_SHIFT_82599 24 112 113 #define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL 114 115 #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 116 #define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 117 #define IXGBE_ETAG_ETYPE 0x00005084 118 #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff 119 #define IXGBE_ETAG_ETYPE_VALID 0x80000000 120 #define IXGBE_RAH_ADTYPE 0x40000000 121 #define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff 122 #define IXGBE_VMVIR_TAGA_MASK 0x18000000 123 #define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 124 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ 125 #define IXGBE_QDE_STRIP_TAG 0x00000004 126 #define IXGBE_VTEICR_MASK 0x07 127 128 #define IXGBE_EXVET_VET_EXT_SHIFT 16 129 #define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 130 131 #define IXGBEVF_DEVARG_PFLINK_FULLCHK "pflink_fullchk" 132 133 static const char * const ixgbevf_valid_arguments[] = { 134 IXGBEVF_DEVARG_PFLINK_FULLCHK, 135 NULL 136 }; 137 138 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); 139 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); 140 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); 141 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); 142 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev); 143 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev); 144 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev); 145 static int ixgbe_dev_configure(struct rte_eth_dev *dev); 146 static int ixgbe_dev_start(struct rte_eth_dev *dev); 147 static int ixgbe_dev_stop(struct rte_eth_dev *dev); 148 static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); 149 static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); 150 static int ixgbe_dev_close(struct rte_eth_dev *dev); 151 static int ixgbe_dev_reset(struct rte_eth_dev *dev); 152 static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); 153 static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); 154 static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); 155 static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); 156 static int ixgbe_dev_link_update(struct rte_eth_dev *dev, 157 int wait_to_complete); 158 static int ixgbe_dev_stats_get(struct rte_eth_dev *dev, 159 struct rte_eth_stats *stats); 160 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, 161 struct rte_eth_xstat *xstats, unsigned n); 162 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, 163 struct rte_eth_xstat *xstats, unsigned n); 164 static int 165 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 166 uint64_t *values, unsigned int n); 167 static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev); 168 static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); 169 static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev, 170 struct rte_eth_xstat_name *xstats_names, 171 unsigned int size); 172 static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev, 173 struct rte_eth_xstat_name *xstats_names, unsigned limit); 174 static int ixgbe_dev_xstats_get_names_by_id( 175 struct rte_eth_dev *dev, 176 const uint64_t *ids, 177 struct rte_eth_xstat_name *xstats_names, 178 
unsigned int limit); 179 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 180 uint16_t queue_id, 181 uint8_t stat_idx, 182 uint8_t is_rx); 183 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 184 size_t fw_size); 185 static int ixgbe_dev_info_get(struct rte_eth_dev *dev, 186 struct rte_eth_dev_info *dev_info); 187 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); 188 static int ixgbevf_dev_info_get(struct rte_eth_dev *dev, 189 struct rte_eth_dev_info *dev_info); 190 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 191 192 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, 193 uint16_t vlan_id, int on); 194 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 195 enum rte_vlan_type vlan_type, 196 uint16_t tpid_id); 197 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, 198 uint16_t queue, bool on); 199 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, 200 int on); 201 static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, 202 int mask); 203 static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask); 204 static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); 205 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); 206 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); 207 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); 208 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev); 209 210 static int ixgbe_dev_led_on(struct rte_eth_dev *dev); 211 static int ixgbe_dev_led_off(struct rte_eth_dev *dev); 212 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, 213 struct rte_eth_fc_conf *fc_conf); 214 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, 215 struct rte_eth_fc_conf *fc_conf); 216 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 217 struct rte_eth_pfc_conf *pfc_conf); 218 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 219 struct rte_eth_rss_reta_entry64 *reta_conf, 220 uint16_t reta_size); 221 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 222 struct rte_eth_rss_reta_entry64 *reta_conf, 223 uint16_t reta_size); 224 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); 225 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); 226 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); 227 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); 228 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); 229 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); 230 static void ixgbe_dev_interrupt_handler(void *param); 231 static void ixgbe_dev_interrupt_delayed_handler(void *param); 232 static void *ixgbe_dev_setup_link_thread_handler(void *param); 233 static int ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, 234 uint32_t timeout_ms); 235 236 static int ixgbe_add_rar(struct rte_eth_dev *dev, 237 struct rte_ether_addr *mac_addr, 238 uint32_t index, uint32_t pool); 239 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); 240 static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, 241 struct rte_ether_addr *mac_addr); 242 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); 243 static bool is_device_supported(struct rte_eth_dev *dev, 244 struct rte_pci_driver *drv); 245 246 /* For Virtual Function 
support */ 247 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); 248 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); 249 static int ixgbevf_dev_configure(struct rte_eth_dev *dev); 250 static int ixgbevf_dev_start(struct rte_eth_dev *dev); 251 static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, 252 int wait_to_complete); 253 static int ixgbevf_dev_stop(struct rte_eth_dev *dev); 254 static int ixgbevf_dev_close(struct rte_eth_dev *dev); 255 static int ixgbevf_dev_reset(struct rte_eth_dev *dev); 256 static void ixgbevf_intr_disable(struct rte_eth_dev *dev); 257 static void ixgbevf_intr_enable(struct rte_eth_dev *dev); 258 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, 259 struct rte_eth_stats *stats); 260 static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); 261 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, 262 uint16_t vlan_id, int on); 263 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, 264 uint16_t queue, int on); 265 static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); 266 static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); 267 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); 268 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 269 uint16_t queue_id); 270 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 271 uint16_t queue_id); 272 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 273 uint8_t queue, uint8_t msix_vector); 274 static void ixgbevf_configure_msix(struct rte_eth_dev *dev); 275 static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev); 276 static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev); 277 static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); 278 static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); 279 280 /* For Eth VMDQ APIs support */ 281 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct 282 rte_ether_addr * mac_addr, uint8_t on); 283 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); 284 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 285 uint16_t queue_id); 286 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 287 uint16_t queue_id); 288 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 289 uint8_t queue, uint8_t msix_vector); 290 static void ixgbe_configure_msix(struct rte_eth_dev *dev); 291 292 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, 293 struct rte_ether_addr *mac_addr, 294 uint32_t index, uint32_t pool); 295 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); 296 static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 297 struct rte_ether_addr *mac_addr); 298 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 299 struct ixgbe_5tuple_filter *filter); 300 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 301 struct ixgbe_5tuple_filter *filter); 302 static int ixgbe_dev_flow_ops_get(struct rte_eth_dev *dev, 303 const struct rte_flow_ops **ops); 304 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); 305 306 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 307 struct rte_ether_addr *mc_addr_set, 308 uint32_t nb_mc_addr); 309 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 310 struct rte_eth_dcb_info *dcb_info); 311 312 static int ixgbe_get_reg_length(struct rte_eth_dev *dev); 313 static int 
ixgbe_get_regs(struct rte_eth_dev *dev, 314 struct rte_dev_reg_info *regs); 315 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev); 316 static int ixgbe_get_eeprom(struct rte_eth_dev *dev, 317 struct rte_dev_eeprom_info *eeprom); 318 static int ixgbe_set_eeprom(struct rte_eth_dev *dev, 319 struct rte_dev_eeprom_info *eeprom); 320 321 static int ixgbe_get_module_info(struct rte_eth_dev *dev, 322 struct rte_eth_dev_module_info *modinfo); 323 static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 324 struct rte_dev_eeprom_info *info); 325 326 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); 327 static int ixgbevf_get_regs(struct rte_eth_dev *dev, 328 struct rte_dev_reg_info *regs); 329 330 static int ixgbe_timesync_enable(struct rte_eth_dev *dev); 331 static int ixgbe_timesync_disable(struct rte_eth_dev *dev); 332 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 333 struct timespec *timestamp, 334 uint32_t flags); 335 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 336 struct timespec *timestamp); 337 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 338 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, 339 struct timespec *timestamp); 340 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, 341 const struct timespec *timestamp); 342 static void ixgbevf_dev_interrupt_handler(void *param); 343 344 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 345 struct rte_eth_udp_tunnel *udp_tunnel); 346 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 347 struct rte_eth_udp_tunnel *udp_tunnel); 348 static int ixgbe_filter_restore(struct rte_eth_dev *dev); 349 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev); 350 static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw); 351 352 /* 353 * Define VF Stats MACRO for Non "cleared on read" register 354 */ 355 #define UPDATE_VF_STAT(reg, last, cur) \ 356 { \ 357 uint32_t latest = IXGBE_READ_REG(hw, reg); \ 358 cur += (latest - last) & UINT_MAX; \ 359 last = latest; \ 360 } 361 362 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \ 363 { \ 364 u64 new_lsb = IXGBE_READ_REG(hw, lsb); \ 365 u64 new_msb = IXGBE_READ_REG(hw, msb); \ 366 u64 latest = ((new_msb << 32) | new_lsb); \ 367 cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \ 368 last = latest; \ 369 } 370 371 #define IXGBE_SET_HWSTRIP(h, q) do {\ 372 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 373 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 374 (h)->bitmap[idx] |= 1 << bit;\ 375 } while (0) 376 377 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\ 378 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 379 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 380 (h)->bitmap[idx] &= ~(1 << bit);\ 381 } while (0) 382 383 #define IXGBE_GET_HWSTRIP(h, q, r) do {\ 384 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 385 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 386 (r) = (h)->bitmap[idx] >> bit & 1;\ 387 } while (0) 388 389 /* 390 * The set of PCI devices this driver supports 391 */ 392 static const struct rte_pci_id pci_id_ixgbe_map[] = { 393 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) }, 394 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) }, 395 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) }, 396 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) }, 397 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) }, 398 { 
RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) }, 399 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) }, 400 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) }, 401 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) }, 402 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) }, 403 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) }, 404 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) }, 405 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) }, 406 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) }, 407 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) }, 408 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) }, 409 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) }, 410 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) }, 411 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) }, 412 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) }, 413 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) }, 414 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) }, 415 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) }, 416 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) }, 417 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) }, 418 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) }, 419 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) }, 420 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) }, 421 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) }, 422 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) }, 423 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) }, 424 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) }, 425 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) }, 426 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) }, 427 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) }, 428 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) }, 429 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) }, 430 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) }, 431 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) }, 432 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) }, 433 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) }, 434 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) }, 435 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) }, 436 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) }, 437 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) }, 438 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) }, 439 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) }, 440 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) }, 441 #ifdef RTE_LIBRTE_IXGBE_BYPASS 442 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) }, 443 #endif 444 { .vendor_id = 0, /* sentinel */ }, 445 }; 446 447 /* 448 * The set of PCI devices this driver supports (for 82599 VF) 449 */ 450 static const struct rte_pci_id pci_id_ixgbevf_map[] = { 451 { 
RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) }, 452 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) }, 453 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) }, 454 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) }, 455 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) }, 456 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) }, 457 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) }, 458 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) }, 459 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) }, 460 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) }, 461 { .vendor_id = 0, /* sentinel */ }, 462 }; 463 464 static const struct rte_eth_desc_lim rx_desc_lim = { 465 .nb_max = IXGBE_MAX_RING_DESC, 466 .nb_min = IXGBE_MIN_RING_DESC, 467 .nb_align = IXGBE_RXD_ALIGN, 468 }; 469 470 static const struct rte_eth_desc_lim tx_desc_lim = { 471 .nb_max = IXGBE_MAX_RING_DESC, 472 .nb_min = IXGBE_MIN_RING_DESC, 473 .nb_align = IXGBE_TXD_ALIGN, 474 .nb_seg_max = IXGBE_TX_MAX_SEG, 475 .nb_mtu_seg_max = IXGBE_TX_MAX_SEG, 476 }; 477 478 static const struct eth_dev_ops ixgbe_eth_dev_ops = { 479 .dev_configure = ixgbe_dev_configure, 480 .dev_start = ixgbe_dev_start, 481 .dev_stop = ixgbe_dev_stop, 482 .dev_set_link_up = ixgbe_dev_set_link_up, 483 .dev_set_link_down = ixgbe_dev_set_link_down, 484 .dev_close = ixgbe_dev_close, 485 .dev_reset = ixgbe_dev_reset, 486 .promiscuous_enable = ixgbe_dev_promiscuous_enable, 487 .promiscuous_disable = ixgbe_dev_promiscuous_disable, 488 .allmulticast_enable = ixgbe_dev_allmulticast_enable, 489 .allmulticast_disable = ixgbe_dev_allmulticast_disable, 490 .link_update = ixgbe_dev_link_update, 491 .stats_get = ixgbe_dev_stats_get, 492 .xstats_get = ixgbe_dev_xstats_get, 493 .xstats_get_by_id = ixgbe_dev_xstats_get_by_id, 494 .stats_reset = ixgbe_dev_stats_reset, 495 .xstats_reset = ixgbe_dev_xstats_reset, 496 .xstats_get_names = ixgbe_dev_xstats_get_names, 497 .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id, 498 .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, 499 .fw_version_get = ixgbe_fw_version_get, 500 .dev_infos_get = ixgbe_dev_info_get, 501 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 502 .mtu_set = ixgbe_dev_mtu_set, 503 .vlan_filter_set = ixgbe_vlan_filter_set, 504 .vlan_tpid_set = ixgbe_vlan_tpid_set, 505 .vlan_offload_set = ixgbe_vlan_offload_set, 506 .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set, 507 .rx_queue_start = ixgbe_dev_rx_queue_start, 508 .rx_queue_stop = ixgbe_dev_rx_queue_stop, 509 .tx_queue_start = ixgbe_dev_tx_queue_start, 510 .tx_queue_stop = ixgbe_dev_tx_queue_stop, 511 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 512 .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable, 513 .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable, 514 .rx_queue_release = ixgbe_dev_rx_queue_release, 515 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 516 .tx_queue_release = ixgbe_dev_tx_queue_release, 517 .dev_led_on = ixgbe_dev_led_on, 518 .dev_led_off = ixgbe_dev_led_off, 519 .flow_ctrl_get = ixgbe_flow_ctrl_get, 520 .flow_ctrl_set = ixgbe_flow_ctrl_set, 521 .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set, 522 .mac_addr_add = ixgbe_add_rar, 523 .mac_addr_remove = ixgbe_remove_rar, 524 .mac_addr_set = ixgbe_set_default_mac_addr, 525 .uc_hash_table_set = ixgbe_uc_hash_table_set, 526 .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, 527 .set_queue_rate_limit = 
ixgbe_set_queue_rate_limit, 528 .reta_update = ixgbe_dev_rss_reta_update, 529 .reta_query = ixgbe_dev_rss_reta_query, 530 .rss_hash_update = ixgbe_dev_rss_hash_update, 531 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 532 .flow_ops_get = ixgbe_dev_flow_ops_get, 533 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 534 .rxq_info_get = ixgbe_rxq_info_get, 535 .txq_info_get = ixgbe_txq_info_get, 536 .timesync_enable = ixgbe_timesync_enable, 537 .timesync_disable = ixgbe_timesync_disable, 538 .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, 539 .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, 540 .get_reg = ixgbe_get_regs, 541 .get_eeprom_length = ixgbe_get_eeprom_length, 542 .get_eeprom = ixgbe_get_eeprom, 543 .set_eeprom = ixgbe_set_eeprom, 544 .get_module_info = ixgbe_get_module_info, 545 .get_module_eeprom = ixgbe_get_module_eeprom, 546 .get_dcb_info = ixgbe_dev_get_dcb_info, 547 .timesync_adjust_time = ixgbe_timesync_adjust_time, 548 .timesync_read_time = ixgbe_timesync_read_time, 549 .timesync_write_time = ixgbe_timesync_write_time, 550 .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, 551 .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, 552 .tm_ops_get = ixgbe_tm_ops_get, 553 .tx_done_cleanup = ixgbe_dev_tx_done_cleanup, 554 .get_monitor_addr = ixgbe_get_monitor_addr, 555 }; 556 557 /* 558 * dev_ops for virtual function, bare necessities for basic vf 559 * operation have been implemented 560 */ 561 static const struct eth_dev_ops ixgbevf_eth_dev_ops = { 562 .dev_configure = ixgbevf_dev_configure, 563 .dev_start = ixgbevf_dev_start, 564 .dev_stop = ixgbevf_dev_stop, 565 .link_update = ixgbevf_dev_link_update, 566 .stats_get = ixgbevf_dev_stats_get, 567 .xstats_get = ixgbevf_dev_xstats_get, 568 .stats_reset = ixgbevf_dev_stats_reset, 569 .xstats_reset = ixgbevf_dev_stats_reset, 570 .xstats_get_names = ixgbevf_dev_xstats_get_names, 571 .dev_close = ixgbevf_dev_close, 572 .dev_reset = ixgbevf_dev_reset, 573 .promiscuous_enable = ixgbevf_dev_promiscuous_enable, 574 .promiscuous_disable = ixgbevf_dev_promiscuous_disable, 575 .allmulticast_enable = ixgbevf_dev_allmulticast_enable, 576 .allmulticast_disable = ixgbevf_dev_allmulticast_disable, 577 .dev_infos_get = ixgbevf_dev_info_get, 578 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 579 .mtu_set = ixgbevf_dev_set_mtu, 580 .vlan_filter_set = ixgbevf_vlan_filter_set, 581 .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, 582 .vlan_offload_set = ixgbevf_vlan_offload_set, 583 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 584 .rx_queue_release = ixgbe_dev_rx_queue_release, 585 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 586 .tx_queue_release = ixgbe_dev_tx_queue_release, 587 .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable, 588 .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable, 589 .mac_addr_add = ixgbevf_add_mac_addr, 590 .mac_addr_remove = ixgbevf_remove_mac_addr, 591 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 592 .rxq_info_get = ixgbe_rxq_info_get, 593 .txq_info_get = ixgbe_txq_info_get, 594 .mac_addr_set = ixgbevf_set_default_mac_addr, 595 .get_reg = ixgbevf_get_regs, 596 .reta_update = ixgbe_dev_rss_reta_update, 597 .reta_query = ixgbe_dev_rss_reta_query, 598 .rss_hash_update = ixgbe_dev_rss_hash_update, 599 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 600 .tx_done_cleanup = ixgbe_dev_tx_done_cleanup, 601 .get_monitor_addr = ixgbe_get_monitor_addr, 602 }; 603 604 /* store statistics names and its offset in stats structure */ 605 struct rte_ixgbe_xstats_name_off 
{ 606 char name[RTE_ETH_XSTATS_NAME_SIZE]; 607 unsigned offset; 608 }; 609 610 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = { 611 {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)}, 612 {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)}, 613 {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)}, 614 {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)}, 615 {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)}, 616 {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)}, 617 {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)}, 618 {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)}, 619 {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)}, 620 {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)}, 621 {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)}, 622 {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)}, 623 {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)}, 624 {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)}, 625 {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 626 prc1023)}, 627 {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 628 prc1522)}, 629 {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)}, 630 {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)}, 631 {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)}, 632 {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)}, 633 {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)}, 634 {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)}, 635 {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)}, 636 {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)}, 637 {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)}, 638 {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)}, 639 {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)}, 640 {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)}, 641 {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)}, 642 {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)}, 643 {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)}, 644 {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)}, 645 {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 646 ptc1023)}, 647 {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 648 ptc1522)}, 649 {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)}, 650 {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)}, 651 {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)}, 652 {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)}, 653 654 {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, 655 fdirustat_add)}, 656 {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, 657 fdirustat_remove)}, 658 {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, 659 fdirfstat_fadd)}, 660 {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, 661 fdirfstat_fremove)}, 662 {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, 663 fdirmatch)}, 664 {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, 665 fdirmiss)}, 666 667 {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)}, 668 {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)}, 669 
{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, 670 fclast)}, 671 {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)}, 672 {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)}, 673 {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)}, 674 {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)}, 675 {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, 676 fcoe_noddp)}, 677 {"rx_fcoe_no_direct_data_placement_ext_buff", 678 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)}, 679 680 {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 681 lxontxc)}, 682 {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 683 lxonrxc)}, 684 {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 685 lxofftxc)}, 686 {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 687 lxoffrxc)}, 688 {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)}, 689 }; 690 691 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ 692 sizeof(rte_ixgbe_stats_strings[0])) 693 694 /* MACsec statistics */ 695 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = { 696 {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats, 697 out_pkts_untagged)}, 698 {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats, 699 out_pkts_encrypted)}, 700 {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats, 701 out_pkts_protected)}, 702 {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats, 703 out_octets_encrypted)}, 704 {"out_octets_protected", offsetof(struct ixgbe_macsec_stats, 705 out_octets_protected)}, 706 {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats, 707 in_pkts_untagged)}, 708 {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats, 709 in_pkts_badtag)}, 710 {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats, 711 in_pkts_nosci)}, 712 {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats, 713 in_pkts_unknownsci)}, 714 {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats, 715 in_octets_decrypted)}, 716 {"in_octets_validated", offsetof(struct ixgbe_macsec_stats, 717 in_octets_validated)}, 718 {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats, 719 in_pkts_unchecked)}, 720 {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats, 721 in_pkts_delayed)}, 722 {"in_pkts_late", offsetof(struct ixgbe_macsec_stats, 723 in_pkts_late)}, 724 {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats, 725 in_pkts_ok)}, 726 {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats, 727 in_pkts_invalid)}, 728 {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats, 729 in_pkts_notvalid)}, 730 {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats, 731 in_pkts_unusedsa)}, 732 {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats, 733 in_pkts_notusingsa)}, 734 }; 735 736 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \ 737 sizeof(rte_ixgbe_macsec_strings[0])) 738 739 /* Per-queue statistics */ 740 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { 741 {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, 742 {"dropped", offsetof(struct ixgbe_hw_stats, mpc)}, 743 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)}, 744 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)}, 745 }; 746 747 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ 748 sizeof(rte_ixgbe_rxq_strings[0])) 749 #define IXGBE_NB_RXQ_PRIO_VALUES 8 750 751 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { 
752 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, 753 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)}, 754 {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, 755 pxon2offc)}, 756 }; 757 758 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ 759 sizeof(rte_ixgbe_txq_strings[0])) 760 #define IXGBE_NB_TXQ_PRIO_VALUES 8 761 762 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { 763 {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, 764 }; 765 766 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ 767 sizeof(rte_ixgbevf_stats_strings[0])) 768 769 /* 770 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. 771 */ 772 static inline int 773 ixgbe_is_sfp(struct ixgbe_hw *hw) 774 { 775 switch (hw->phy.type) { 776 case ixgbe_phy_sfp_avago: 777 case ixgbe_phy_sfp_ftl: 778 case ixgbe_phy_sfp_intel: 779 case ixgbe_phy_sfp_unknown: 780 case ixgbe_phy_sfp_passive_tyco: 781 case ixgbe_phy_sfp_passive_unknown: 782 return 1; 783 default: 784 return 0; 785 } 786 } 787 788 static inline int32_t 789 ixgbe_pf_reset_hw(struct ixgbe_hw *hw) 790 { 791 uint32_t ctrl_ext; 792 int32_t status; 793 794 status = ixgbe_reset_hw(hw); 795 796 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 797 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 798 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 799 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 800 IXGBE_WRITE_FLUSH(hw); 801 802 if (status == IXGBE_ERR_SFP_NOT_PRESENT) 803 status = IXGBE_SUCCESS; 804 return status; 805 } 806 807 static inline void 808 ixgbe_enable_intr(struct rte_eth_dev *dev) 809 { 810 struct ixgbe_interrupt *intr = 811 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 812 struct ixgbe_hw *hw = 813 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 814 815 IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); 816 IXGBE_WRITE_FLUSH(hw); 817 } 818 819 /* 820 * This function is based on ixgbe_disable_intr() in base/ixgbe.h. 821 */ 822 static void 823 ixgbe_disable_intr(struct ixgbe_hw *hw) 824 { 825 PMD_INIT_FUNC_TRACE(); 826 827 if (hw->mac.type == ixgbe_mac_82598EB) { 828 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); 829 } else { 830 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); 831 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); 832 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); 833 } 834 IXGBE_WRITE_FLUSH(hw); 835 } 836 837 /* 838 * This function resets queue statistics mapping registers. 839 * From Niantic datasheet, Initialization of Statistics section: 840 * "...if software requires the queue counters, the RQSMR and TQSM registers 841 * must be re-programmed following a device reset. 
842 */ 843 static void 844 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) 845 { 846 uint32_t i; 847 848 for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { 849 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); 850 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); 851 } 852 } 853 854 855 static int 856 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 857 uint16_t queue_id, 858 uint8_t stat_idx, 859 uint8_t is_rx) 860 { 861 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8 862 #define NB_QMAP_FIELDS_PER_QSM_REG 4 863 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f 864 865 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 866 struct ixgbe_stat_mapping_registers *stat_mappings = 867 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private); 868 uint32_t qsmr_mask = 0; 869 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK; 870 uint32_t q_map; 871 uint8_t n, offset; 872 873 if ((hw->mac.type != ixgbe_mac_82599EB) && 874 (hw->mac.type != ixgbe_mac_X540) && 875 (hw->mac.type != ixgbe_mac_X550) && 876 (hw->mac.type != ixgbe_mac_X550EM_x) && 877 (hw->mac.type != ixgbe_mac_X550EM_a)) 878 return -ENOSYS; 879 880 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", 881 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 882 queue_id, stat_idx); 883 884 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG); 885 if (n >= IXGBE_NB_STAT_MAPPING_REGS) { 886 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded"); 887 return -EIO; 888 } 889 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG); 890 891 /* Now clear any previous stat_idx set */ 892 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 893 if (!is_rx) 894 stat_mappings->tqsm[n] &= ~clearing_mask; 895 else 896 stat_mappings->rqsmr[n] &= ~clearing_mask; 897 898 q_map = (uint32_t)stat_idx; 899 q_map &= QMAP_FIELD_RESERVED_BITS_MASK; 900 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 901 if (!is_rx) 902 stat_mappings->tqsm[n] |= qsmr_mask; 903 else 904 stat_mappings->rqsmr[n] |= qsmr_mask; 905 906 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d", 907 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 908 queue_id, stat_idx); 909 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n, 910 is_rx ? 
stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); 911 912 /* Now write the mapping in the appropriate register */ 913 if (is_rx) { 914 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", 915 stat_mappings->rqsmr[n], n); 916 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); 917 } else { 918 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", 919 stat_mappings->tqsm[n], n); 920 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); 921 } 922 return 0; 923 } 924 925 static void 926 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) 927 { 928 struct ixgbe_stat_mapping_registers *stat_mappings = 929 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); 930 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 931 int i; 932 933 /* write whatever was in stat mapping table to the NIC */ 934 for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) { 935 /* rx */ 936 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]); 937 938 /* tx */ 939 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]); 940 } 941 } 942 943 static void 944 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) 945 { 946 uint8_t i; 947 struct ixgbe_dcb_tc_config *tc; 948 uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS; 949 950 dcb_config->num_tcs.pg_tcs = dcb_max_tc; 951 dcb_config->num_tcs.pfc_tcs = dcb_max_tc; 952 for (i = 0; i < dcb_max_tc; i++) { 953 tc = &dcb_config->tc_config[i]; 954 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i; 955 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 956 (uint8_t)(100/dcb_max_tc + (i & 1)); 957 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i; 958 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 959 (uint8_t)(100/dcb_max_tc + (i & 1)); 960 tc->pfc = ixgbe_dcb_pfc_disabled; 961 } 962 963 /* Initialize default user to priority mapping, UPx->TC0 */ 964 tc = &dcb_config->tc_config[0]; 965 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; 966 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; 967 for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) { 968 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; 969 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; 970 } 971 dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal; 972 dcb_config->pfc_mode_enable = false; 973 dcb_config->vt_mode = true; 974 dcb_config->round_robin_enable = false; 975 /* support all DCB capabilities in 82599 */ 976 dcb_config->support.capabilities = 0xFF; 977 978 /*we only support 4 Tcs for X540, X550 */ 979 if (hw->mac.type == ixgbe_mac_X540 || 980 hw->mac.type == ixgbe_mac_X550 || 981 hw->mac.type == ixgbe_mac_X550EM_x || 982 hw->mac.type == ixgbe_mac_X550EM_a) { 983 dcb_config->num_tcs.pg_tcs = 4; 984 dcb_config->num_tcs.pfc_tcs = 4; 985 } 986 } 987 988 /* 989 * Ensure that all locks are released before first NVM or PHY access 990 */ 991 static void 992 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) 993 { 994 uint16_t mask; 995 996 /* 997 * Phy lock should not fail in this early stage. If this is the case, 998 * it is due to an improper exit of the application. 999 * So force the release of the faulty lock. Release of common lock 1000 * is done automatically by swfw_sync function. 
1001 */ 1002 mask = IXGBE_GSSR_PHY0_SM << hw->bus.func; 1003 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1004 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func); 1005 } 1006 ixgbe_release_swfw_semaphore(hw, mask); 1007 1008 /* 1009 * These ones are more tricky since they are common to all ports; but 1010 * swfw_sync retries last long enough (1s) to be almost sure that if 1011 * lock can not be taken it is due to an improper lock of the 1012 * semaphore. 1013 */ 1014 mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; 1015 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1016 PMD_DRV_LOG(DEBUG, "SWFW common locks released"); 1017 } 1018 ixgbe_release_swfw_semaphore(hw, mask); 1019 } 1020 1021 /* 1022 * This function is based on code in ixgbe_attach() in base/ixgbe.c. 1023 * It returns 0 on success. 1024 */ 1025 static int 1026 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) 1027 { 1028 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1029 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1030 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1031 struct ixgbe_hw *hw = 1032 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1033 struct ixgbe_vfta *shadow_vfta = 1034 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1035 struct ixgbe_hwstrip *hwstrip = 1036 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1037 struct ixgbe_dcb_config *dcb_config = 1038 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); 1039 struct ixgbe_filter_info *filter_info = 1040 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 1041 struct ixgbe_bw_conf *bw_conf = 1042 IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private); 1043 uint32_t ctrl_ext; 1044 uint16_t csum; 1045 int diag, i, ret; 1046 1047 PMD_INIT_FUNC_TRACE(); 1048 1049 ixgbe_dev_macsec_setting_reset(eth_dev); 1050 1051 eth_dev->dev_ops = &ixgbe_eth_dev_ops; 1052 eth_dev->rx_queue_count = ixgbe_dev_rx_queue_count; 1053 eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status; 1054 eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status; 1055 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1056 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1057 eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts; 1058 1059 /* 1060 * For secondary processes, we don't initialise any further as primary 1061 * has already done this work. Only check we don't need a different 1062 * RX and TX function. 1063 */ 1064 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1065 struct ixgbe_tx_queue *txq; 1066 /* TX queue function in primary, set by last queue initialized 1067 * Tx queue may not initialized by primary process 1068 */ 1069 if (eth_dev->data->tx_queues) { 1070 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; 1071 ixgbe_set_tx_function(eth_dev, txq); 1072 } else { 1073 /* Use default TX function if we get here */ 1074 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. 
" 1075 "Using default TX function."); 1076 } 1077 1078 ixgbe_set_rx_function(eth_dev); 1079 1080 return 0; 1081 } 1082 1083 rte_atomic32_clear(&ad->link_thread_running); 1084 rte_eth_copy_pci_info(eth_dev, pci_dev); 1085 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1086 1087 /* Vendor and Device ID need to be set before init of shared code */ 1088 hw->device_id = pci_dev->id.device_id; 1089 hw->vendor_id = pci_dev->id.vendor_id; 1090 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1091 hw->allow_unsupported_sfp = 1; 1092 1093 /* Initialize the shared code (base driver) */ 1094 #ifdef RTE_LIBRTE_IXGBE_BYPASS 1095 diag = ixgbe_bypass_init_shared_code(hw); 1096 #else 1097 diag = ixgbe_init_shared_code(hw); 1098 #endif /* RTE_LIBRTE_IXGBE_BYPASS */ 1099 1100 if (diag != IXGBE_SUCCESS) { 1101 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); 1102 return -EIO; 1103 } 1104 1105 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { 1106 PMD_INIT_LOG(ERR, "\nERROR: " 1107 "Firmware recovery mode detected. Limiting functionality.\n" 1108 "Refer to the Intel(R) Ethernet Adapters and Devices " 1109 "User Guide for details on firmware recovery mode."); 1110 return -EIO; 1111 } 1112 1113 /* pick up the PCI bus settings for reporting later */ 1114 ixgbe_get_bus_info(hw); 1115 1116 /* Unlock any pending hardware semaphore */ 1117 ixgbe_swfw_lock_reset(hw); 1118 1119 #ifdef RTE_LIB_SECURITY 1120 /* Initialize security_ctx only for primary process*/ 1121 if (ixgbe_ipsec_ctx_create(eth_dev)) 1122 return -ENOMEM; 1123 #endif 1124 1125 /* Initialize DCB configuration*/ 1126 memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); 1127 ixgbe_dcb_init(hw, dcb_config); 1128 /* Get Hardware Flow Control setting */ 1129 hw->fc.requested_mode = ixgbe_fc_none; 1130 hw->fc.current_mode = ixgbe_fc_none; 1131 hw->fc.pause_time = IXGBE_FC_PAUSE; 1132 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 1133 hw->fc.low_water[i] = IXGBE_FC_LO; 1134 hw->fc.high_water[i] = IXGBE_FC_HI; 1135 } 1136 hw->fc.send_xon = 1; 1137 1138 /* Make sure we have a good EEPROM before we read from it */ 1139 diag = ixgbe_validate_eeprom_checksum(hw, &csum); 1140 if (diag != IXGBE_SUCCESS) { 1141 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); 1142 return -EIO; 1143 } 1144 1145 #ifdef RTE_LIBRTE_IXGBE_BYPASS 1146 diag = ixgbe_bypass_init_hw(hw); 1147 #else 1148 diag = ixgbe_init_hw(hw); 1149 #endif /* RTE_LIBRTE_IXGBE_BYPASS */ 1150 1151 /* 1152 * Devices with copper phys will fail to initialise if ixgbe_init_hw() 1153 * is called too soon after the kernel driver unbinding/binding occurs. 1154 * The failure occurs in ixgbe_identify_phy_generic() for all devices, 1155 * but for non-copper devies, ixgbe_identify_sfp_module_generic() is 1156 * also called. See ixgbe_identify_phy_82599(). The reason for the 1157 * failure is not known, and only occuts when virtualisation features 1158 * are disabled in the bios. A delay of 100ms was found to be enough by 1159 * trial-and-error, and is doubled to be safe. 1160 */ 1161 if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { 1162 rte_delay_ms(200); 1163 diag = ixgbe_init_hw(hw); 1164 } 1165 1166 if (diag == IXGBE_ERR_SFP_NOT_PRESENT) 1167 diag = IXGBE_SUCCESS; 1168 1169 if (diag == IXGBE_ERR_EEPROM_VERSION) { 1170 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" 1171 "LOM. 
Please be aware there may be issues associated " 1172 "with your hardware."); 1173 PMD_INIT_LOG(ERR, "If you are experiencing problems " 1174 "please contact your Intel or hardware representative " 1175 "who provided you with this hardware."); 1176 } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) 1177 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); 1178 if (diag) { 1179 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); 1180 return -EIO; 1181 } 1182 1183 /* Reset the hw statistics */ 1184 ixgbe_dev_stats_reset(eth_dev); 1185 1186 /* disable interrupt */ 1187 ixgbe_disable_intr(hw); 1188 1189 /* reset mappings for queue statistics hw counters*/ 1190 ixgbe_reset_qstat_mappings(hw); 1191 1192 /* Allocate memory for storing MAC addresses */ 1193 eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN * 1194 hw->mac.num_rar_entries, 0); 1195 if (eth_dev->data->mac_addrs == NULL) { 1196 PMD_INIT_LOG(ERR, 1197 "Failed to allocate %u bytes needed to store " 1198 "MAC addresses", 1199 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1200 return -ENOMEM; 1201 } 1202 /* Copy the permanent MAC address */ 1203 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, 1204 ð_dev->data->mac_addrs[0]); 1205 1206 /* Allocate memory for storing hash filter MAC addresses */ 1207 eth_dev->data->hash_mac_addrs = rte_zmalloc( 1208 "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0); 1209 if (eth_dev->data->hash_mac_addrs == NULL) { 1210 PMD_INIT_LOG(ERR, 1211 "Failed to allocate %d bytes needed to store MAC addresses", 1212 RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); 1213 rte_free(eth_dev->data->mac_addrs); 1214 eth_dev->data->mac_addrs = NULL; 1215 return -ENOMEM; 1216 } 1217 1218 /* initialize the vfta */ 1219 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1220 1221 /* initialize the hw strip bitmap*/ 1222 memset(hwstrip, 0, sizeof(*hwstrip)); 1223 1224 /* initialize PF if max_vfs not zero */ 1225 ret = ixgbe_pf_host_init(eth_dev); 1226 if (ret) 1227 goto err_pf_host_init; 1228 1229 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 1230 /* let hardware know driver is loaded */ 1231 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 1232 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 1233 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 1234 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 1235 IXGBE_WRITE_FLUSH(hw); 1236 1237 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 1238 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d", 1239 (int) hw->mac.type, (int) hw->phy.type, 1240 (int) hw->phy.sfp_type); 1241 else 1242 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d", 1243 (int) hw->mac.type, (int) hw->phy.type); 1244 1245 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", 1246 eth_dev->data->port_id, pci_dev->id.vendor_id, 1247 pci_dev->id.device_id); 1248 1249 rte_intr_callback_register(intr_handle, 1250 ixgbe_dev_interrupt_handler, eth_dev); 1251 1252 /* enable uio/vfio intr/eventfd mapping */ 1253 rte_intr_enable(intr_handle); 1254 1255 /* enable support intr */ 1256 ixgbe_enable_intr(eth_dev); 1257 1258 /* initialize filter info */ 1259 memset(filter_info, 0, 1260 sizeof(struct ixgbe_filter_info)); 1261 1262 /* initialize 5tuple filter list */ 1263 TAILQ_INIT(&filter_info->fivetuple_list); 1264 1265 /* initialize flow director filter list & hash */ 1266 ret = ixgbe_fdir_filter_init(eth_dev); 1267 if (ret) 1268 goto err_fdir_filter_init; 1269 1270 /* initialize l2 tunnel filter list & hash */ 1271 ret = ixgbe_l2_tn_filter_init(eth_dev); 1272 if (ret) 1273 goto err_l2_tn_filter_init; 
1274 1275 /* initialize flow filter lists */ 1276 ixgbe_filterlist_init(); 1277 1278 /* initialize bandwidth configuration info */ 1279 memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf)); 1280 1281 /* initialize Traffic Manager configuration */ 1282 ixgbe_tm_conf_init(eth_dev); 1283 1284 return 0; 1285 1286 err_l2_tn_filter_init: 1287 ixgbe_fdir_filter_uninit(eth_dev); 1288 err_fdir_filter_init: 1289 ixgbe_disable_intr(hw); 1290 rte_intr_disable(intr_handle); 1291 rte_intr_callback_unregister(intr_handle, 1292 ixgbe_dev_interrupt_handler, eth_dev); 1293 ixgbe_pf_host_uninit(eth_dev); 1294 err_pf_host_init: 1295 rte_free(eth_dev->data->mac_addrs); 1296 eth_dev->data->mac_addrs = NULL; 1297 rte_free(eth_dev->data->hash_mac_addrs); 1298 eth_dev->data->hash_mac_addrs = NULL; 1299 return ret; 1300 } 1301 1302 static int 1303 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) 1304 { 1305 PMD_INIT_FUNC_TRACE(); 1306 1307 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1308 return 0; 1309 1310 ixgbe_dev_close(eth_dev); 1311 1312 return 0; 1313 } 1314 1315 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) 1316 { 1317 struct ixgbe_filter_info *filter_info = 1318 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 1319 struct ixgbe_5tuple_filter *p_5tuple; 1320 1321 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { 1322 TAILQ_REMOVE(&filter_info->fivetuple_list, 1323 p_5tuple, 1324 entries); 1325 rte_free(p_5tuple); 1326 } 1327 memset(filter_info->fivetuple_mask, 0, 1328 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); 1329 1330 return 0; 1331 } 1332 1333 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev) 1334 { 1335 struct ixgbe_hw_fdir_info *fdir_info = 1336 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); 1337 struct ixgbe_fdir_filter *fdir_filter; 1338 1339 rte_free(fdir_info->hash_map); 1340 rte_hash_free(fdir_info->hash_handle); 1341 1342 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { 1343 TAILQ_REMOVE(&fdir_info->fdir_list, 1344 fdir_filter, 1345 entries); 1346 rte_free(fdir_filter); 1347 } 1348 1349 return 0; 1350 } 1351 1352 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) 1353 { 1354 struct ixgbe_l2_tn_info *l2_tn_info = 1355 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1356 struct ixgbe_l2_tn_filter *l2_tn_filter; 1357 1358 rte_free(l2_tn_info->hash_map); 1359 rte_hash_free(l2_tn_info->hash_handle); 1360 1361 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 1362 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, 1363 l2_tn_filter, 1364 entries); 1365 rte_free(l2_tn_filter); 1366 } 1367 1368 return 0; 1369 } 1370 1371 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) 1372 { 1373 struct ixgbe_hw_fdir_info *fdir_info = 1374 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); 1375 char fdir_hash_name[RTE_HASH_NAMESIZE]; 1376 struct rte_hash_parameters fdir_hash_params = { 1377 .name = fdir_hash_name, 1378 .entries = IXGBE_MAX_FDIR_FILTER_NUM, 1379 .key_len = sizeof(union ixgbe_atr_input), 1380 .hash_func = rte_hash_crc, 1381 .hash_func_init_val = 0, 1382 .socket_id = rte_socket_id(), 1383 }; 1384 1385 TAILQ_INIT(&fdir_info->fdir_list); 1386 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, 1387 "fdir_%s", eth_dev->device->name); 1388 fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); 1389 if (!fdir_info->hash_handle) { 1390 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); 1391 return -EINVAL; 1392 } 1393 fdir_info->hash_map = rte_zmalloc("ixgbe", 1394 
sizeof(struct ixgbe_fdir_filter *) * 1395 IXGBE_MAX_FDIR_FILTER_NUM, 1396 0); 1397 if (!fdir_info->hash_map) { 1398 PMD_INIT_LOG(ERR, 1399 "Failed to allocate memory for fdir hash map!"); 1400 rte_hash_free(fdir_info->hash_handle); 1401 return -ENOMEM; 1402 } 1403 fdir_info->mask_added = FALSE; 1404 1405 return 0; 1406 } 1407 1408 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) 1409 { 1410 struct ixgbe_l2_tn_info *l2_tn_info = 1411 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1412 char l2_tn_hash_name[RTE_HASH_NAMESIZE]; 1413 struct rte_hash_parameters l2_tn_hash_params = { 1414 .name = l2_tn_hash_name, 1415 .entries = IXGBE_MAX_L2_TN_FILTER_NUM, 1416 .key_len = sizeof(struct ixgbe_l2_tn_key), 1417 .hash_func = rte_hash_crc, 1418 .hash_func_init_val = 0, 1419 .socket_id = rte_socket_id(), 1420 }; 1421 1422 TAILQ_INIT(&l2_tn_info->l2_tn_list); 1423 snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, 1424 "l2_tn_%s", eth_dev->device->name); 1425 l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); 1426 if (!l2_tn_info->hash_handle) { 1427 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); 1428 return -EINVAL; 1429 } 1430 l2_tn_info->hash_map = rte_zmalloc("ixgbe", 1431 sizeof(struct ixgbe_l2_tn_filter *) * 1432 IXGBE_MAX_L2_TN_FILTER_NUM, 1433 0); 1434 if (!l2_tn_info->hash_map) { 1435 PMD_INIT_LOG(ERR, 1436 "Failed to allocate memory for L2 TN hash map!"); 1437 rte_hash_free(l2_tn_info->hash_handle); 1438 return -ENOMEM; 1439 } 1440 l2_tn_info->e_tag_en = FALSE; 1441 l2_tn_info->e_tag_fwd_en = FALSE; 1442 l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; 1443 1444 return 0; 1445 } 1446 /* 1447 * Negotiate mailbox API version with the PF. 1448 * After reset API version is always set to the basic one (ixgbe_mbox_api_10). 1449 * Then we try to negotiate starting with the most recent one. 1450 * If all negotiation attempts fail, then we will proceed with 1451 * the default one (ixgbe_mbox_api_10). 1452 */ 1453 static void 1454 ixgbevf_negotiate_api(struct ixgbe_hw *hw) 1455 { 1456 int32_t i; 1457 1458 /* start with highest supported, proceed down */ 1459 static const enum ixgbe_pfvf_api_rev sup_ver[] = { 1460 ixgbe_mbox_api_13, 1461 ixgbe_mbox_api_12, 1462 ixgbe_mbox_api_11, 1463 ixgbe_mbox_api_10, 1464 }; 1465 1466 for (i = 0; 1467 i != RTE_DIM(sup_ver) && 1468 ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; 1469 i++) 1470 ; 1471 } 1472 1473 static void 1474 generate_random_mac_addr(struct rte_ether_addr *mac_addr) 1475 { 1476 uint64_t random; 1477 1478 /* Set Organizationally Unique Identifier (OUI) prefix. */ 1479 mac_addr->addr_bytes[0] = 0x00; 1480 mac_addr->addr_bytes[1] = 0x09; 1481 mac_addr->addr_bytes[2] = 0xC0; 1482 /* Force indication of locally assigned MAC address. */ 1483 mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; 1484 /* Generate the last 3 bytes of the MAC address with a random number. 
*/ 1485 random = rte_rand(); 1486 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1487 } 1488 1489 static int 1490 devarg_handle_int(__rte_unused const char *key, const char *value, 1491 void *extra_args) 1492 { 1493 uint16_t *n = extra_args; 1494 1495 if (value == NULL || extra_args == NULL) 1496 return -EINVAL; 1497 1498 *n = (uint16_t)strtoul(value, NULL, 0); 1499 if (*n == USHRT_MAX && errno == ERANGE) 1500 return -1; 1501 1502 return 0; 1503 } 1504 1505 static void 1506 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, 1507 struct rte_devargs *devargs) 1508 { 1509 struct rte_kvargs *kvlist; 1510 uint16_t pflink_fullchk; 1511 1512 if (devargs == NULL) 1513 return; 1514 1515 kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); 1516 if (kvlist == NULL) 1517 return; 1518 1519 if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && 1520 rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, 1521 devarg_handle_int, &pflink_fullchk) == 0 && 1522 pflink_fullchk == 1) 1523 adapter->pflink_fullchk = 1; 1524 1525 rte_kvargs_free(kvlist); 1526 } 1527 1528 /* 1529 * Virtual Function device init 1530 */ 1531 static int 1532 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1533 { 1534 int diag; 1535 uint32_t tc, tcs; 1536 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1537 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1538 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1539 struct ixgbe_hw *hw = 1540 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1541 struct ixgbe_vfta *shadow_vfta = 1542 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1543 struct ixgbe_hwstrip *hwstrip = 1544 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1545 struct rte_ether_addr *perm_addr = 1546 (struct rte_ether_addr *)hw->mac.perm_addr; 1547 1548 PMD_INIT_FUNC_TRACE(); 1549 1550 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1551 eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status; 1552 eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status; 1553 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1554 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1555 1556 /* for secondary processes, we don't initialise any further as primary 1557 * has already done this work. Only check we don't need a different 1558 * RX function 1559 */ 1560 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1561 struct ixgbe_tx_queue *txq; 1562 /* TX queue function in primary, set by last queue initialized 1563 * Tx queue may not initialized by primary process 1564 */ 1565 if (eth_dev->data->tx_queues) { 1566 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1567 ixgbe_set_tx_function(eth_dev, txq); 1568 } else { 1569 /* Use default TX function if we get here */ 1570 PMD_INIT_LOG(NOTICE, 1571 "No TX queues configured yet. 
Using default TX function."); 1572 } 1573 1574 ixgbe_set_rx_function(eth_dev); 1575 1576 return 0; 1577 } 1578 1579 rte_atomic32_clear(&ad->link_thread_running); 1580 ixgbevf_parse_devargs(eth_dev->data->dev_private, 1581 pci_dev->device.devargs); 1582 1583 rte_eth_copy_pci_info(eth_dev, pci_dev); 1584 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1585 1586 hw->device_id = pci_dev->id.device_id; 1587 hw->vendor_id = pci_dev->id.vendor_id; 1588 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1589 1590 /* initialize the vfta */ 1591 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1592 1593 /* initialize the hw strip bitmap*/ 1594 memset(hwstrip, 0, sizeof(*hwstrip)); 1595 1596 /* Initialize the shared code (base driver) */ 1597 diag = ixgbe_init_shared_code(hw); 1598 if (diag != IXGBE_SUCCESS) { 1599 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1600 return -EIO; 1601 } 1602 1603 /* init_mailbox_params */ 1604 hw->mbx.ops.init_params(hw); 1605 1606 /* Reset the hw statistics */ 1607 ixgbevf_dev_stats_reset(eth_dev); 1608 1609 /* Disable the interrupts for VF */ 1610 ixgbevf_intr_disable(eth_dev); 1611 1612 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1613 diag = hw->mac.ops.reset_hw(hw); 1614 1615 /* 1616 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1617 * the underlying PF driver has not assigned a MAC address to the VF. 1618 * In this case, assign a random MAC address. 1619 */ 1620 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1621 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1622 /* 1623 * This error code will be propagated to the app by 1624 * rte_eth_dev_reset, so use a public error code rather than 1625 * the internal-only IXGBE_ERR_RESET_FAILED 1626 */ 1627 return -EAGAIN; 1628 } 1629 1630 /* negotiate mailbox API version to use with the PF. */ 1631 ixgbevf_negotiate_api(hw); 1632 1633 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1634 ixgbevf_get_queues(hw, &tcs, &tc); 1635 1636 /* Allocate memory for storing MAC addresses */ 1637 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * 1638 hw->mac.num_rar_entries, 0); 1639 if (eth_dev->data->mac_addrs == NULL) { 1640 PMD_INIT_LOG(ERR, 1641 "Failed to allocate %u bytes needed to store " 1642 "MAC addresses", 1643 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1644 return -ENOMEM; 1645 } 1646 1647 /* Generate a random MAC address, if none was assigned by PF. 
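 * (in that case reset_hw above returned IXGBE_ERR_INVALID_MAC_ADDR and
 * perm_addr is still all zeros); the generated address is then pushed to
 * the PF over the VF mailbox by ixgbe_set_rar_vf() below.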
*/ 1648 if (rte_is_zero_ether_addr(perm_addr)) {
1649 generate_random_mac_addr(perm_addr);
1650 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1651 if (diag) {
1652 rte_free(eth_dev->data->mac_addrs);
1653 eth_dev->data->mac_addrs = NULL;
1654 return diag;
1655 }
1656 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1657 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1658 RTE_ETHER_ADDR_PRT_FMT,
1659 RTE_ETHER_ADDR_BYTES(perm_addr));
1660 }
1661
1662 /* Copy the permanent MAC address */
1663 rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1664
1665 /* reset the hardware with the new settings */
1666 diag = hw->mac.ops.start_hw(hw);
1667 switch (diag) {
1668 case 0:
1669 break;
1670
1671 default:
1672 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1673 rte_free(eth_dev->data->mac_addrs);
1674 eth_dev->data->mac_addrs = NULL;
1675 return -EIO;
1676 }
1677
1678 rte_intr_callback_register(intr_handle,
1679 ixgbevf_dev_interrupt_handler, eth_dev);
1680 rte_intr_enable(intr_handle);
1681 ixgbevf_intr_enable(eth_dev);
1682
1683 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1684 eth_dev->data->port_id, pci_dev->id.vendor_id,
1685 pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1686
1687 return 0;
1688 }
1689
1690 /* Virtual Function device uninit */
1691
1692 static int
1693 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1694 {
1695 PMD_INIT_FUNC_TRACE();
1696
1697 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1698 return 0;
1699
1700 ixgbevf_dev_close(eth_dev);
1701
1702 return 0;
1703 }
1704
1705 static int
1706 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1707 struct rte_pci_device *pci_dev)
1708 {
1709 char name[RTE_ETH_NAME_MAX_LEN];
1710 struct rte_eth_dev *pf_ethdev;
1711 struct rte_eth_devargs eth_da;
1712 int i, retval;
1713
1714 if (pci_dev->device.devargs) {
1715 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
1716 &eth_da);
1717 if (retval)
1718 return retval;
1719 } else
1720 memset(&eth_da, 0, sizeof(eth_da));
1721
1722 if (eth_da.nb_representor_ports > 0 &&
1723 eth_da.type != RTE_ETH_REPRESENTOR_VF) {
1724 PMD_DRV_LOG(ERR, "unsupported representor type: %s\n",
1725 pci_dev->device.devargs->args);
1726 return -ENOTSUP;
1727 }
1728
1729 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
1730 sizeof(struct ixgbe_adapter),
1731 eth_dev_pci_specific_init, pci_dev,
1732 eth_ixgbe_dev_init, NULL);
1733
1734 if (retval || eth_da.nb_representor_ports < 1)
1735 return retval;
1736
1737 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1738 if (pf_ethdev == NULL)
1739 return -ENODEV;
1740
1741 /* probe VF representor ports */
1742 for (i = 0; i < eth_da.nb_representor_ports; i++) {
1743 struct ixgbe_vf_info *vfinfo;
1744 struct ixgbe_vf_representor representor;
1745
1746 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
1747 pf_ethdev->data->dev_private);
1748 if (vfinfo == NULL) {
1749 PMD_DRV_LOG(ERR,
1750 "no virtual functions supported by PF");
1751 break;
1752 }
1753
1754 representor.vf_id = eth_da.representor_ports[i];
1755 representor.switch_domain_id = vfinfo->switch_domain_id;
1756 representor.pf_ethdev = pf_ethdev;
1757
1758 /* representor port net_bdf_port */
1759 snprintf(name, sizeof(name), "net_%s_representor_%d",
1760 pci_dev->device.name,
1761 eth_da.representor_ports[i]);
1762
1763 retval = rte_eth_dev_create(&pci_dev->device, name,
1764 sizeof(struct ixgbe_vf_representor), NULL, NULL,
1765 ixgbe_vf_representor_init, &representor);
1766
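/*
 * Usage note: representor ports are requested through the generic ethdev
 * devargs parsed above, e.g. allowing the PF with
 * "-a 0000:01:00.0,representor=[0-3]" on the EAL command line. Each port
 * created here appears as an ethdev named
 * "net_<pci_bdf>_representor_<vf_id>", as built by the snprintf() above.
 */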
1767 if (retval) 1768 PMD_DRV_LOG(ERR, "failed to create ixgbe vf " 1769 "representor %s.", name); 1770 } 1771 1772 return 0; 1773 } 1774 1775 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) 1776 { 1777 struct rte_eth_dev *ethdev; 1778 1779 ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1780 if (!ethdev) 1781 return 0; 1782 1783 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 1784 return rte_eth_dev_pci_generic_remove(pci_dev, 1785 ixgbe_vf_representor_uninit); 1786 else 1787 return rte_eth_dev_pci_generic_remove(pci_dev, 1788 eth_ixgbe_dev_uninit); 1789 } 1790 1791 static struct rte_pci_driver rte_ixgbe_pmd = { 1792 .id_table = pci_id_ixgbe_map, 1793 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1794 .probe = eth_ixgbe_pci_probe, 1795 .remove = eth_ixgbe_pci_remove, 1796 }; 1797 1798 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1799 struct rte_pci_device *pci_dev) 1800 { 1801 return rte_eth_dev_pci_generic_probe(pci_dev, 1802 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); 1803 } 1804 1805 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) 1806 { 1807 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); 1808 } 1809 1810 /* 1811 * virtual function driver struct 1812 */ 1813 static struct rte_pci_driver rte_ixgbevf_pmd = { 1814 .id_table = pci_id_ixgbevf_map, 1815 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 1816 .probe = eth_ixgbevf_pci_probe, 1817 .remove = eth_ixgbevf_pci_remove, 1818 }; 1819 1820 static int 1821 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1822 { 1823 struct ixgbe_hw *hw = 1824 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1825 struct ixgbe_vfta *shadow_vfta = 1826 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1827 uint32_t vfta; 1828 uint32_t vid_idx; 1829 uint32_t vid_bit; 1830 1831 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1832 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1833 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1834 if (on) 1835 vfta |= vid_bit; 1836 else 1837 vfta &= ~vid_bit; 1838 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1839 1840 /* update local VFTA copy */ 1841 shadow_vfta->vfta[vid_idx] = vfta; 1842 1843 return 0; 1844 } 1845 1846 static void 1847 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1848 { 1849 if (on) 1850 ixgbe_vlan_hw_strip_enable(dev, queue); 1851 else 1852 ixgbe_vlan_hw_strip_disable(dev, queue); 1853 } 1854 1855 static int 1856 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1857 enum rte_vlan_type vlan_type, 1858 uint16_t tpid) 1859 { 1860 struct ixgbe_hw *hw = 1861 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1862 int ret = 0; 1863 uint32_t reg; 1864 uint32_t qinq; 1865 1866 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1867 qinq &= IXGBE_DMATXCTL_GDV; 1868 1869 switch (vlan_type) { 1870 case RTE_ETH_VLAN_TYPE_INNER: 1871 if (qinq) { 1872 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1873 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1874 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1875 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1876 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1877 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1878 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1879 } else { 1880 ret = -ENOTSUP; 1881 PMD_DRV_LOG(ERR, "Inner type is not supported" 1882 " by single VLAN"); 1883 } 1884 break; 1885 case RTE_ETH_VLAN_TYPE_OUTER: 1886 if (qinq) { 1887 /* Only the high 16-bits is valid */ 1888 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 
1889 IXGBE_EXVET_VET_EXT_SHIFT); 1890 } else { 1891 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1892 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1893 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1894 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1895 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1896 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1897 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1898 } 1899 1900 break; 1901 default: 1902 ret = -EINVAL; 1903 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1904 break; 1905 } 1906 1907 return ret; 1908 } 1909 1910 void 1911 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1912 { 1913 struct ixgbe_hw *hw = 1914 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1915 uint32_t vlnctrl; 1916 1917 PMD_INIT_FUNC_TRACE(); 1918 1919 /* Filter Table Disable */ 1920 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1921 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1922 1923 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1924 } 1925 1926 void 1927 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1928 { 1929 struct ixgbe_hw *hw = 1930 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1931 struct ixgbe_vfta *shadow_vfta = 1932 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1933 uint32_t vlnctrl; 1934 uint16_t i; 1935 1936 PMD_INIT_FUNC_TRACE(); 1937 1938 /* Filter Table Enable */ 1939 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1940 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1941 vlnctrl |= IXGBE_VLNCTRL_VFE; 1942 1943 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1944 1945 /* write whatever is in local vfta copy */ 1946 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1947 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 1948 } 1949 1950 static void 1951 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 1952 { 1953 struct ixgbe_hwstrip *hwstrip = 1954 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 1955 struct ixgbe_rx_queue *rxq; 1956 1957 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 1958 return; 1959 1960 if (on) 1961 IXGBE_SET_HWSTRIP(hwstrip, queue); 1962 else 1963 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 1964 1965 if (queue >= dev->data->nb_rx_queues) 1966 return; 1967 1968 rxq = dev->data->rx_queues[queue]; 1969 1970 if (on) { 1971 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 1972 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 1973 } else { 1974 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN; 1975 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 1976 } 1977 } 1978 1979 static void 1980 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 1981 { 1982 struct ixgbe_hw *hw = 1983 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1984 uint32_t ctrl; 1985 1986 PMD_INIT_FUNC_TRACE(); 1987 1988 if (hw->mac.type == ixgbe_mac_82598EB) { 1989 /* No queue level support */ 1990 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 1991 return; 1992 } 1993 1994 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1995 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 1996 ctrl &= ~IXGBE_RXDCTL_VME; 1997 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 1998 1999 /* record those setting for HW strip per queue */ 2000 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 2001 } 2002 2003 static void 2004 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 2005 { 2006 struct ixgbe_hw *hw = 2007 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2008 uint32_t ctrl; 2009 2010 PMD_INIT_FUNC_TRACE(); 2011 2012 if (hw->mac.type == ixgbe_mac_82598EB) { 2013 /* No queue level supported */ 2014 
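/* (on 82598 VLAN stripping can only be toggled globally through VLNCTRL,
 * handled in ixgbe_vlan_hw_strip_config(); per-queue control via
 * RXDCTL.VME exists on later MAC types only)
 */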
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
2015 return;
2016 }
2017
2018 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
2019 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2020 ctrl |= IXGBE_RXDCTL_VME;
2021 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2022
2023 /* record those setting for HW strip per queue */
2024 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2025 }
2026
2027 static void
2028 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2029 {
2030 struct ixgbe_hw *hw =
2031 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2032 uint32_t ctrl;
2033
2034 PMD_INIT_FUNC_TRACE();
2035
2036 /* DMATXCTL: Generic Double VLAN Disable */
2037 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2038 ctrl &= ~IXGBE_DMATXCTL_GDV;
2039 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2040
2041 /* CTRL_EXT: Global Double VLAN Disable */
2042 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2043 ctrl &= ~IXGBE_EXTENDED_VLAN;
2044 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2045
2046 }
2047
2048 static void
2049 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2050 {
2051 struct ixgbe_hw *hw =
2052 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2053 uint32_t ctrl;
2054
2055 PMD_INIT_FUNC_TRACE();
2056
2057 /* DMATXCTL: Generic Double VLAN Enable */
2058 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2059 ctrl |= IXGBE_DMATXCTL_GDV;
2060 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2061
2062 /* CTRL_EXT: Global Double VLAN Enable */
2063 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2064 ctrl |= IXGBE_EXTENDED_VLAN;
2065 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2066
2067 /* Clear pooling mode of PFVTCTL. It's required by X550. */
2068 if (hw->mac.type == ixgbe_mac_X550 ||
2069 hw->mac.type == ixgbe_mac_X550EM_x ||
2070 hw->mac.type == ixgbe_mac_X550EM_a) {
2071 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2072 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2073 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2074 }
2075
2076 /*
2077 * VET EXT field in the EXVET register = 0x8100 by default
2078 * So no need to change.
Same to VT field of DMATXCTL register 2079 */ 2080 } 2081 2082 void 2083 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) 2084 { 2085 struct ixgbe_hw *hw = 2086 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2087 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 2088 uint32_t ctrl; 2089 uint16_t i; 2090 struct ixgbe_rx_queue *rxq; 2091 bool on; 2092 2093 PMD_INIT_FUNC_TRACE(); 2094 2095 if (hw->mac.type == ixgbe_mac_82598EB) { 2096 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { 2097 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2098 ctrl |= IXGBE_VLNCTRL_VME; 2099 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2100 } else { 2101 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2102 ctrl &= ~IXGBE_VLNCTRL_VME; 2103 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2104 } 2105 } else { 2106 /* 2107 * Other 10G NIC, the VLAN strip can be setup 2108 * per queue in RXDCTL 2109 */ 2110 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2111 rxq = dev->data->rx_queues[i]; 2112 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 2113 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { 2114 ctrl |= IXGBE_RXDCTL_VME; 2115 on = TRUE; 2116 } else { 2117 ctrl &= ~IXGBE_RXDCTL_VME; 2118 on = FALSE; 2119 } 2120 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 2121 2122 /* record those setting for HW strip per queue */ 2123 ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); 2124 } 2125 } 2126 } 2127 2128 static void 2129 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) 2130 { 2131 uint16_t i; 2132 struct rte_eth_rxmode *rxmode; 2133 struct ixgbe_rx_queue *rxq; 2134 2135 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2136 rxmode = &dev->data->dev_conf.rxmode; 2137 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 2138 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2139 rxq = dev->data->rx_queues[i]; 2140 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 2141 } 2142 else 2143 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2144 rxq = dev->data->rx_queues[i]; 2145 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 2146 } 2147 } 2148 } 2149 2150 static int 2151 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2152 { 2153 struct rte_eth_rxmode *rxmode; 2154 rxmode = &dev->data->dev_conf.rxmode; 2155 2156 if (mask & RTE_ETH_VLAN_STRIP_MASK) 2157 ixgbe_vlan_hw_strip_config(dev); 2158 2159 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2160 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 2161 ixgbe_vlan_hw_filter_enable(dev); 2162 else 2163 ixgbe_vlan_hw_filter_disable(dev); 2164 } 2165 2166 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2167 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2168 ixgbe_vlan_hw_extend_enable(dev); 2169 else 2170 ixgbe_vlan_hw_extend_disable(dev); 2171 } 2172 2173 return 0; 2174 } 2175 2176 static int 2177 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2178 { 2179 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 2180 2181 ixgbe_vlan_offload_config(dev, mask); 2182 2183 return 0; 2184 } 2185 2186 static void 2187 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2188 { 2189 struct ixgbe_hw *hw = 2190 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2191 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 2192 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2193 2194 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 2195 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 2196 } 2197 2198 static int 2199 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 2200 { 2201 struct rte_pci_device *pci_dev 
= RTE_ETH_DEV_TO_PCI(dev); 2202 2203 switch (nb_rx_q) { 2204 case 1: 2205 case 2: 2206 RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS; 2207 break; 2208 case 4: 2209 RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS; 2210 break; 2211 default: 2212 return -EINVAL; 2213 } 2214 2215 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 2216 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2217 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 2218 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2219 return 0; 2220 } 2221 2222 static int 2223 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 2224 { 2225 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 2226 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2227 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2228 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2229 2230 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 2231 /* check multi-queue mode */ 2232 switch (dev_conf->rxmode.mq_mode) { 2233 case RTE_ETH_MQ_RX_VMDQ_DCB: 2234 PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); 2235 break; 2236 case RTE_ETH_MQ_RX_VMDQ_DCB_RSS: 2237 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ 2238 PMD_INIT_LOG(ERR, "SRIOV active," 2239 " unsupported mq_mode rx %d.", 2240 dev_conf->rxmode.mq_mode); 2241 return -EINVAL; 2242 case RTE_ETH_MQ_RX_RSS: 2243 case RTE_ETH_MQ_RX_VMDQ_RSS: 2244 dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS; 2245 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 2246 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 2247 PMD_INIT_LOG(ERR, "SRIOV is active," 2248 " invalid queue number" 2249 " for VMDQ RSS, allowed" 2250 " value are 1, 2 or 4."); 2251 return -EINVAL; 2252 } 2253 break; 2254 case RTE_ETH_MQ_RX_VMDQ_ONLY: 2255 case RTE_ETH_MQ_RX_NONE: 2256 /* if nothing mq mode configure, use default scheme */ 2257 dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY; 2258 break; 2259 default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/ 2260 /* SRIOV only works in VMDq enable mode */ 2261 PMD_INIT_LOG(ERR, "SRIOV is active," 2262 " wrong mq_mode rx %d.", 2263 dev_conf->rxmode.mq_mode); 2264 return -EINVAL; 2265 } 2266 2267 switch (dev_conf->txmode.mq_mode) { 2268 case RTE_ETH_MQ_TX_VMDQ_DCB: 2269 PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); 2270 dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB; 2271 break; 2272 default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */ 2273 dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY; 2274 break; 2275 } 2276 2277 /* check valid queue number */ 2278 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 2279 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 2280 PMD_INIT_LOG(ERR, "SRIOV is active," 2281 " nb_rx_q=%d nb_tx_q=%d queue number" 2282 " must be less than or equal to %d.", 2283 nb_rx_q, nb_tx_q, 2284 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2285 return -EINVAL; 2286 } 2287 } else { 2288 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) { 2289 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2290 " not supported."); 2291 return -EINVAL; 2292 } 2293 /* check configuration for vmdb+dcb mode */ 2294 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) { 2295 const struct rte_eth_vmdq_dcb_conf *conf; 2296 2297 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2298 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2299 IXGBE_VMDQ_DCB_NB_QUEUES); 2300 return -EINVAL; 2301 } 2302 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2303 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || 2304 
conf->nb_queue_pools == RTE_ETH_32_POOLS)) { 2305 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2306 " nb_queue_pools must be %d or %d.", 2307 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); 2308 return -EINVAL; 2309 } 2310 } 2311 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) { 2312 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2313 2314 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2315 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2316 IXGBE_VMDQ_DCB_NB_QUEUES); 2317 return -EINVAL; 2318 } 2319 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2320 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || 2321 conf->nb_queue_pools == RTE_ETH_32_POOLS)) { 2322 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2323 " nb_queue_pools != %d and" 2324 " nb_queue_pools != %d.", 2325 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); 2326 return -EINVAL; 2327 } 2328 } 2329 2330 /* For DCB mode check our configuration before we go further */ 2331 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) { 2332 const struct rte_eth_dcb_rx_conf *conf; 2333 2334 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2335 if (!(conf->nb_tcs == RTE_ETH_4_TCS || 2336 conf->nb_tcs == RTE_ETH_8_TCS)) { 2337 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2338 " and nb_tcs != %d.", 2339 RTE_ETH_4_TCS, RTE_ETH_8_TCS); 2340 return -EINVAL; 2341 } 2342 } 2343 2344 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) { 2345 const struct rte_eth_dcb_tx_conf *conf; 2346 2347 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2348 if (!(conf->nb_tcs == RTE_ETH_4_TCS || 2349 conf->nb_tcs == RTE_ETH_8_TCS)) { 2350 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2351 " and nb_tcs != %d.", 2352 RTE_ETH_4_TCS, RTE_ETH_8_TCS); 2353 return -EINVAL; 2354 } 2355 } 2356 2357 /* 2358 * When DCB/VT is off, maximum number of queues changes, 2359 * except for 82598EB, which remains constant. 2360 */ 2361 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE && 2362 hw->mac.type != ixgbe_mac_82598EB) { 2363 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2364 PMD_INIT_LOG(ERR, 2365 "Neither VT nor DCB are enabled, " 2366 "nb_tx_q > %d.", 2367 IXGBE_NONE_MODE_TX_NB_QUEUES); 2368 return -EINVAL; 2369 } 2370 } 2371 } 2372 return 0; 2373 } 2374 2375 static int 2376 ixgbe_dev_configure(struct rte_eth_dev *dev) 2377 { 2378 struct ixgbe_interrupt *intr = 2379 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2380 struct ixgbe_adapter *adapter = dev->data->dev_private; 2381 int ret; 2382 2383 PMD_INIT_FUNC_TRACE(); 2384 2385 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 2386 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2387 2388 /* multiple queue mode checking */ 2389 ret = ixgbe_check_mq_mode(dev); 2390 if (ret != 0) { 2391 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2392 ret); 2393 return ret; 2394 } 2395 2396 /* set flag to update link status after init */ 2397 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2398 2399 /* 2400 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 2401 * allocation or vector Rx preconditions we will reset it. 
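 * The two flags are consumed later, when ixgbe_set_rx_function() selects
 * the Rx burst routine for this port.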
2402 */ 2403 adapter->rx_bulk_alloc_allowed = true; 2404 adapter->rx_vec_allowed = true; 2405 2406 return 0; 2407 } 2408 2409 static void 2410 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2411 { 2412 struct ixgbe_hw *hw = 2413 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2414 struct ixgbe_interrupt *intr = 2415 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2416 uint32_t gpie; 2417 2418 /* only set up it on X550EM_X */ 2419 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2420 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2421 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2422 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2423 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2424 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2425 } 2426 } 2427 2428 int 2429 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 2430 uint16_t tx_rate, uint64_t q_msk) 2431 { 2432 struct ixgbe_hw *hw; 2433 struct ixgbe_vf_info *vfinfo; 2434 struct rte_eth_link link; 2435 uint8_t nb_q_per_pool; 2436 uint32_t queue_stride; 2437 uint32_t queue_idx, idx = 0, vf_idx; 2438 uint32_t queue_end; 2439 uint16_t total_rate = 0; 2440 struct rte_pci_device *pci_dev; 2441 int ret; 2442 2443 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2444 ret = rte_eth_link_get_nowait(dev->data->port_id, &link); 2445 if (ret < 0) 2446 return ret; 2447 2448 if (vf >= pci_dev->max_vfs) 2449 return -EINVAL; 2450 2451 if (tx_rate > link.link_speed) 2452 return -EINVAL; 2453 2454 if (q_msk == 0) 2455 return 0; 2456 2457 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2458 vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 2459 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2460 queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2461 queue_idx = vf * queue_stride; 2462 queue_end = queue_idx + nb_q_per_pool - 1; 2463 if (queue_end >= hw->mac.max_tx_queues) 2464 return -EINVAL; 2465 2466 if (vfinfo) { 2467 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { 2468 if (vf_idx == vf) 2469 continue; 2470 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 2471 idx++) 2472 total_rate += vfinfo[vf_idx].tx_rate[idx]; 2473 } 2474 } else { 2475 return -EINVAL; 2476 } 2477 2478 /* Store tx_rate for this vf. */ 2479 for (idx = 0; idx < nb_q_per_pool; idx++) { 2480 if (((uint64_t)0x1 << idx) & q_msk) { 2481 if (vfinfo[vf].tx_rate[idx] != tx_rate) 2482 vfinfo[vf].tx_rate[idx] = tx_rate; 2483 total_rate += tx_rate; 2484 } 2485 } 2486 2487 if (total_rate > dev->data->dev_link.link_speed) { 2488 /* Reset stored TX rate of the VF if it causes exceed 2489 * link speed. 
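 * total_rate computed above already includes the rates stored for every
 * other VF, so the check below rejects any request whose aggregate would
 * exceed the current link speed.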
2490 */ 2491 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 2492 return -EINVAL; 2493 } 2494 2495 /* Set RTTBCNRC of each queue/pool for vf X */ 2496 for (; queue_idx <= queue_end; queue_idx++) { 2497 if (0x1 & q_msk) 2498 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 2499 q_msk = q_msk >> 1; 2500 } 2501 2502 return 0; 2503 } 2504 2505 static int 2506 ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw) 2507 { 2508 struct ixgbe_adapter *adapter = dev->data->dev_private; 2509 int err; 2510 uint32_t mflcn; 2511 2512 ixgbe_setup_fc(hw); 2513 2514 err = ixgbe_fc_enable(hw); 2515 2516 /* Not negotiated is not an error case */ 2517 if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) { 2518 /* 2519 *check if we want to forward MAC frames - driver doesn't 2520 *have native capability to do that, 2521 *so we'll write the registers ourselves 2522 */ 2523 2524 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2525 2526 /* set or clear MFLCN.PMCF bit depending on configuration */ 2527 if (adapter->mac_ctrl_frame_fwd != 0) 2528 mflcn |= IXGBE_MFLCN_PMCF; 2529 else 2530 mflcn &= ~IXGBE_MFLCN_PMCF; 2531 2532 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 2533 IXGBE_WRITE_FLUSH(hw); 2534 2535 return 0; 2536 } 2537 return err; 2538 } 2539 2540 /* 2541 * Configure device link speed and setup link. 2542 * It returns 0 on success. 2543 */ 2544 static int 2545 ixgbe_dev_start(struct rte_eth_dev *dev) 2546 { 2547 struct ixgbe_hw *hw = 2548 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2549 struct ixgbe_vf_info *vfinfo = 2550 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2551 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2552 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2553 uint32_t intr_vector = 0; 2554 int err; 2555 bool link_up = false, negotiate = 0; 2556 uint32_t speed = 0; 2557 uint32_t allowed_speeds = 0; 2558 int mask = 0; 2559 int status; 2560 uint16_t vf, idx; 2561 uint32_t *link_speeds; 2562 struct ixgbe_tm_conf *tm_conf = 2563 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2564 struct ixgbe_macsec_setting *macsec_setting = 2565 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 2566 2567 PMD_INIT_FUNC_TRACE(); 2568 2569 /* Stop the link setup handler before resetting the HW. 
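 * ixgbe_dev_wait_setup_link_complete() below waits for any background
 * link-setup thread to finish, so it cannot race with the reset that
 * follows.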
*/ 2570 ixgbe_dev_wait_setup_link_complete(dev, 0); 2571 2572 /* disable uio/vfio intr/eventfd mapping */ 2573 rte_intr_disable(intr_handle); 2574 2575 /* stop adapter */ 2576 hw->adapter_stopped = 0; 2577 ixgbe_stop_adapter(hw); 2578 2579 /* reinitialize adapter 2580 * this calls reset and start 2581 */ 2582 status = ixgbe_pf_reset_hw(hw); 2583 if (status != 0) 2584 return -1; 2585 hw->mac.ops.start_hw(hw); 2586 hw->mac.get_link_status = true; 2587 2588 /* configure PF module if SRIOV enabled */ 2589 ixgbe_pf_host_configure(dev); 2590 2591 ixgbe_dev_phy_intr_setup(dev); 2592 2593 /* check and configure queue intr-vector mapping */ 2594 if ((rte_intr_cap_multiple(intr_handle) || 2595 !RTE_ETH_DEV_SRIOV(dev).active) && 2596 dev->data->dev_conf.intr_conf.rxq != 0) { 2597 intr_vector = dev->data->nb_rx_queues; 2598 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2599 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2600 IXGBE_MAX_INTR_QUEUE_NUM); 2601 return -ENOTSUP; 2602 } 2603 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2604 return -1; 2605 } 2606 2607 if (rte_intr_dp_is_en(intr_handle)) { 2608 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 2609 dev->data->nb_rx_queues)) { 2610 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2611 " intr_vec", dev->data->nb_rx_queues); 2612 return -ENOMEM; 2613 } 2614 } 2615 2616 /* configure MSI-X for sleep until Rx interrupt */ 2617 ixgbe_configure_msix(dev); 2618 2619 /* initialize transmission unit */ 2620 ixgbe_dev_tx_init(dev); 2621 2622 /* This can fail when allocating mbufs for descriptor rings */ 2623 err = ixgbe_dev_rx_init(dev); 2624 if (err) { 2625 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2626 goto error; 2627 } 2628 2629 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | 2630 RTE_ETH_VLAN_EXTEND_MASK; 2631 err = ixgbe_vlan_offload_config(dev, mask); 2632 if (err) { 2633 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 2634 goto error; 2635 } 2636 2637 if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) { 2638 /* Enable vlan filtering for VMDq */ 2639 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2640 } 2641 2642 /* Configure DCB hw */ 2643 ixgbe_configure_dcb(dev); 2644 2645 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { 2646 err = ixgbe_fdir_configure(dev); 2647 if (err) 2648 goto error; 2649 } 2650 2651 /* Restore vf rate limit */ 2652 if (vfinfo != NULL) { 2653 for (vf = 0; vf < pci_dev->max_vfs; vf++) 2654 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2655 if (vfinfo[vf].tx_rate[idx] != 0) 2656 ixgbe_set_vf_rate_limit( 2657 dev, vf, 2658 vfinfo[vf].tx_rate[idx], 2659 1 << idx); 2660 } 2661 2662 ixgbe_restore_statistics_mapping(dev); 2663 2664 err = ixgbe_flow_ctrl_enable(dev, hw); 2665 if (err < 0) { 2666 PMD_INIT_LOG(ERR, "enable flow ctrl err"); 2667 goto error; 2668 } 2669 2670 err = ixgbe_dev_rxtx_start(dev); 2671 if (err < 0) { 2672 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2673 goto error; 2674 } 2675 2676 /* Skip link setup if loopback mode is enabled. 
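 * Only a single Tx->Rx loopback mode is supported, and only on certain MAC
 * types; anything else is rejected by ixgbe_check_supported_loopback_mode()
 * below.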
*/ 2677 if (dev->data->dev_conf.lpbk_mode != 0) { 2678 err = ixgbe_check_supported_loopback_mode(dev); 2679 if (err < 0) { 2680 PMD_INIT_LOG(ERR, "Unsupported loopback mode"); 2681 goto error; 2682 } else { 2683 goto skip_link_setup; 2684 } 2685 } 2686 2687 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2688 err = hw->mac.ops.setup_sfp(hw); 2689 if (err) 2690 goto error; 2691 } 2692 2693 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2694 /* Turn on the copper */ 2695 ixgbe_set_phy_power(hw, true); 2696 } else { 2697 /* Turn on the laser */ 2698 ixgbe_enable_tx_laser(hw); 2699 } 2700 2701 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2702 if (err) 2703 goto error; 2704 dev->data->dev_link.link_status = link_up; 2705 2706 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2707 if (err) 2708 goto error; 2709 2710 switch (hw->mac.type) { 2711 case ixgbe_mac_X550: 2712 case ixgbe_mac_X550EM_x: 2713 case ixgbe_mac_X550EM_a: 2714 allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G | 2715 RTE_ETH_LINK_SPEED_2_5G | RTE_ETH_LINK_SPEED_5G | 2716 RTE_ETH_LINK_SPEED_10G; 2717 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 2718 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 2719 allowed_speeds = RTE_ETH_LINK_SPEED_10M | 2720 RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G; 2721 break; 2722 default: 2723 allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G | 2724 RTE_ETH_LINK_SPEED_10G; 2725 } 2726 2727 link_speeds = &dev->data->dev_conf.link_speeds; 2728 2729 /* Ignore autoneg flag bit and check the validity of 2730 * link_speed 2731 */ 2732 if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) { 2733 PMD_INIT_LOG(ERR, "Invalid link setting"); 2734 goto error; 2735 } 2736 2737 speed = 0x0; 2738 if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) { 2739 switch (hw->mac.type) { 2740 case ixgbe_mac_82598EB: 2741 speed = IXGBE_LINK_SPEED_82598_AUTONEG; 2742 break; 2743 case ixgbe_mac_82599EB: 2744 case ixgbe_mac_X540: 2745 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2746 break; 2747 case ixgbe_mac_X550: 2748 case ixgbe_mac_X550EM_x: 2749 case ixgbe_mac_X550EM_a: 2750 speed = IXGBE_LINK_SPEED_X550_AUTONEG; 2751 break; 2752 default: 2753 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2754 } 2755 } else { 2756 if (*link_speeds & RTE_ETH_LINK_SPEED_10G) 2757 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2758 if (*link_speeds & RTE_ETH_LINK_SPEED_5G) 2759 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2760 if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G) 2761 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2762 if (*link_speeds & RTE_ETH_LINK_SPEED_1G) 2763 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2764 if (*link_speeds & RTE_ETH_LINK_SPEED_100M) 2765 speed |= IXGBE_LINK_SPEED_100_FULL; 2766 if (*link_speeds & RTE_ETH_LINK_SPEED_10M) 2767 speed |= IXGBE_LINK_SPEED_10_FULL; 2768 } 2769 2770 err = ixgbe_setup_link(hw, speed, link_up); 2771 if (err) 2772 goto error; 2773 2774 skip_link_setup: 2775 2776 if (rte_intr_allow_others(intr_handle)) { 2777 /* check if lsc interrupt is enabled */ 2778 if (dev->data->dev_conf.intr_conf.lsc != 0) 2779 ixgbe_dev_lsc_interrupt_setup(dev, TRUE); 2780 else 2781 ixgbe_dev_lsc_interrupt_setup(dev, FALSE); 2782 ixgbe_dev_macsec_interrupt_setup(dev); 2783 } else { 2784 rte_intr_callback_unregister(intr_handle, 2785 ixgbe_dev_interrupt_handler, dev); 2786 if (dev->data->dev_conf.intr_conf.lsc != 0) 2787 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2788 " no intr multiplex"); 2789 } 2790 2791 /* check if rxq interrupt is enabled */ 2792 if (dev->data->dev_conf.intr_conf.rxq 
!= 0 && 2793 rte_intr_dp_is_en(intr_handle)) 2794 ixgbe_dev_rxq_interrupt_setup(dev); 2795 2796 /* enable uio/vfio intr/eventfd mapping */ 2797 rte_intr_enable(intr_handle); 2798 2799 /* resume enabled intr since hw reset */ 2800 ixgbe_enable_intr(dev); 2801 ixgbe_l2_tunnel_conf(dev); 2802 ixgbe_filter_restore(dev); 2803 2804 if (tm_conf->root && !tm_conf->committed) 2805 PMD_DRV_LOG(WARNING, 2806 "please call hierarchy_commit() " 2807 "before starting the port"); 2808 2809 /* wait for the controller to acquire link */ 2810 err = ixgbe_wait_for_link_up(hw); 2811 if (err) 2812 goto error; 2813 2814 /* 2815 * Update link status right before return, because it may 2816 * start link configuration process in a separate thread. 2817 */ 2818 ixgbe_dev_link_update(dev, 0); 2819 2820 /* setup the macsec setting register */ 2821 if (macsec_setting->offload_en) 2822 ixgbe_dev_macsec_register_enable(dev, macsec_setting); 2823 2824 return 0; 2825 2826 error: 2827 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2828 ixgbe_dev_clear_queues(dev); 2829 return -EIO; 2830 } 2831 2832 /* 2833 * Stop device: disable rx and tx functions to allow for reconfiguring. 2834 */ 2835 static int 2836 ixgbe_dev_stop(struct rte_eth_dev *dev) 2837 { 2838 struct rte_eth_link link; 2839 struct ixgbe_adapter *adapter = dev->data->dev_private; 2840 struct ixgbe_hw *hw = 2841 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2842 struct ixgbe_vf_info *vfinfo = 2843 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2844 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2845 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2846 int vf; 2847 struct ixgbe_tm_conf *tm_conf = 2848 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2849 2850 if (hw->adapter_stopped) 2851 return 0; 2852 2853 PMD_INIT_FUNC_TRACE(); 2854 2855 ixgbe_dev_wait_setup_link_complete(dev, 0); 2856 2857 /* disable interrupts */ 2858 ixgbe_disable_intr(hw); 2859 2860 /* reset the NIC */ 2861 ixgbe_pf_reset_hw(hw); 2862 hw->adapter_stopped = 0; 2863 2864 /* stop adapter */ 2865 ixgbe_stop_adapter(hw); 2866 2867 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) 2868 vfinfo[vf].clear_to_send = false; 2869 2870 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2871 /* Turn off the copper */ 2872 ixgbe_set_phy_power(hw, false); 2873 } else { 2874 /* Turn off the laser */ 2875 ixgbe_disable_tx_laser(hw); 2876 } 2877 2878 ixgbe_dev_clear_queues(dev); 2879 2880 /* Clear stored conf */ 2881 dev->data->scattered_rx = 0; 2882 dev->data->lro = 0; 2883 2884 /* Clear recorded link status */ 2885 memset(&link, 0, sizeof(link)); 2886 rte_eth_linkstatus_set(dev, &link); 2887 2888 if (!rte_intr_allow_others(intr_handle)) 2889 /* resume to the default handler */ 2890 rte_intr_callback_register(intr_handle, 2891 ixgbe_dev_interrupt_handler, 2892 (void *)dev); 2893 2894 /* Clean datapath event and queue/vec mapping */ 2895 rte_intr_efd_disable(intr_handle); 2896 rte_intr_vec_list_free(intr_handle); 2897 2898 /* reset hierarchy commit */ 2899 tm_conf->committed = false; 2900 2901 adapter->rss_reta_updated = 0; 2902 2903 hw->adapter_stopped = true; 2904 dev->data->dev_started = 0; 2905 2906 return 0; 2907 } 2908 2909 /* 2910 * Set device link up: enable tx. 
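 * On copper ports this powers the PHY back up; on fiber ports it re-enables
 * the Tx laser.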
2911 */ 2912 static int 2913 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2914 { 2915 struct ixgbe_hw *hw = 2916 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2917 if (hw->mac.type == ixgbe_mac_82599EB) { 2918 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2919 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2920 /* Not supported in bypass mode */ 2921 PMD_INIT_LOG(ERR, "Set link up is not supported " 2922 "by device id 0x%x", hw->device_id); 2923 return -ENOTSUP; 2924 } 2925 #endif 2926 } 2927 2928 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2929 /* Turn on the copper */ 2930 ixgbe_set_phy_power(hw, true); 2931 } else { 2932 /* Turn on the laser */ 2933 ixgbe_enable_tx_laser(hw); 2934 ixgbe_dev_link_update(dev, 0); 2935 } 2936 2937 return 0; 2938 } 2939 2940 /* 2941 * Set device link down: disable tx. 2942 */ 2943 static int 2944 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 2945 { 2946 struct ixgbe_hw *hw = 2947 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2948 if (hw->mac.type == ixgbe_mac_82599EB) { 2949 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2950 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2951 /* Not supported in bypass mode */ 2952 PMD_INIT_LOG(ERR, "Set link down is not supported " 2953 "by device id 0x%x", hw->device_id); 2954 return -ENOTSUP; 2955 } 2956 #endif 2957 } 2958 2959 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2960 /* Turn off the copper */ 2961 ixgbe_set_phy_power(hw, false); 2962 } else { 2963 /* Turn off the laser */ 2964 ixgbe_disable_tx_laser(hw); 2965 ixgbe_dev_link_update(dev, 0); 2966 } 2967 2968 return 0; 2969 } 2970 2971 /* 2972 * Reset and stop device. 2973 */ 2974 static int 2975 ixgbe_dev_close(struct rte_eth_dev *dev) 2976 { 2977 struct ixgbe_hw *hw = 2978 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2979 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2980 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2981 int retries = 0; 2982 int ret; 2983 2984 PMD_INIT_FUNC_TRACE(); 2985 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2986 return 0; 2987 2988 ixgbe_pf_reset_hw(hw); 2989 2990 ret = ixgbe_dev_stop(dev); 2991 2992 ixgbe_dev_free_queues(dev); 2993 2994 ixgbe_disable_pcie_master(hw); 2995 2996 /* reprogram the RAR[0] in case user changed it. 
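 * RAR[0] is rewritten with hw->mac.addr and marked valid (IXGBE_RAH_AV) so
 * the default MAC filter is left in a consistent state before the port is
 * released.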
*/ 2997 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 2998 2999 /* Unlock any pending hardware semaphore */ 3000 ixgbe_swfw_lock_reset(hw); 3001 3002 /* disable uio intr before callback unregister */ 3003 rte_intr_disable(intr_handle); 3004 3005 do { 3006 ret = rte_intr_callback_unregister(intr_handle, 3007 ixgbe_dev_interrupt_handler, dev); 3008 if (ret >= 0 || ret == -ENOENT) { 3009 break; 3010 } else if (ret != -EAGAIN) { 3011 PMD_INIT_LOG(ERR, 3012 "intr callback unregister failed: %d", 3013 ret); 3014 } 3015 rte_delay_ms(100); 3016 } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); 3017 3018 /* cancel the delay handler before remove dev */ 3019 rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev); 3020 3021 /* uninitialize PF if max_vfs not zero */ 3022 ixgbe_pf_host_uninit(dev); 3023 3024 /* remove all the fdir filters & hash */ 3025 ixgbe_fdir_filter_uninit(dev); 3026 3027 /* remove all the L2 tunnel filters & hash */ 3028 ixgbe_l2_tn_filter_uninit(dev); 3029 3030 /* Remove all ntuple filters of the device */ 3031 ixgbe_ntuple_filter_uninit(dev); 3032 3033 /* clear all the filters list */ 3034 ixgbe_filterlist_flush(); 3035 3036 /* Remove all Traffic Manager configuration */ 3037 ixgbe_tm_conf_uninit(dev); 3038 3039 #ifdef RTE_LIB_SECURITY 3040 rte_free(dev->security_ctx); 3041 dev->security_ctx = NULL; 3042 #endif 3043 3044 return ret; 3045 } 3046 3047 /* 3048 * Reset PF device. 3049 */ 3050 static int 3051 ixgbe_dev_reset(struct rte_eth_dev *dev) 3052 { 3053 int ret; 3054 3055 /* When a DPDK PMD PF begin to reset PF port, it should notify all 3056 * its VF to make them align with it. The detailed notification 3057 * mechanism is PMD specific. As to ixgbe PF, it is rather complex. 3058 * To avoid unexpected behavior in VF, currently reset of PF with 3059 * SR-IOV activation is not supported. It might be supported later. 3060 */ 3061 if (dev->data->sriov.active) 3062 return -ENOTSUP; 3063 3064 ret = eth_ixgbe_dev_uninit(dev); 3065 if (ret) 3066 return ret; 3067 3068 ret = eth_ixgbe_dev_init(dev, NULL); 3069 3070 return ret; 3071 } 3072 3073 static void 3074 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 3075 struct ixgbe_hw_stats *hw_stats, 3076 struct ixgbe_macsec_stats *macsec_stats, 3077 uint64_t *total_missed_rx, uint64_t *total_qbrc, 3078 uint64_t *total_qprc, uint64_t *total_qprdc) 3079 { 3080 uint32_t bprc, lxon, lxoff, total; 3081 uint32_t delta_gprc = 0; 3082 unsigned i; 3083 /* Workaround for RX byte count not including CRC bytes when CRC 3084 * strip is enabled. CRC bytes are removed from counters when crc_strip 3085 * is disabled. 
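 * When HLREG0.RXCRCSTRP is clear, RTE_ETHER_CRC_LEN bytes per received
 * packet are subtracted from the byte counters below, so both
 * configurations report the same totals.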
3086 */ 3087 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 3088 IXGBE_HLREG0_RXCRCSTRP); 3089 3090 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 3091 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 3092 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 3093 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 3094 3095 for (i = 0; i < 8; i++) { 3096 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 3097 3098 /* global total per queue */ 3099 hw_stats->mpc[i] += mp; 3100 /* Running comprehensive total for stats display */ 3101 *total_missed_rx += hw_stats->mpc[i]; 3102 if (hw->mac.type == ixgbe_mac_82598EB) { 3103 hw_stats->rnbc[i] += 3104 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3105 hw_stats->pxonrxc[i] += 3106 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 3107 hw_stats->pxoffrxc[i] += 3108 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 3109 } else { 3110 hw_stats->pxonrxc[i] += 3111 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 3112 hw_stats->pxoffrxc[i] += 3113 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 3114 hw_stats->pxon2offc[i] += 3115 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 3116 } 3117 hw_stats->pxontxc[i] += 3118 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 3119 hw_stats->pxofftxc[i] += 3120 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 3121 } 3122 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3123 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3124 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3125 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3126 3127 delta_gprc += delta_qprc; 3128 3129 hw_stats->qprc[i] += delta_qprc; 3130 hw_stats->qptc[i] += delta_qptc; 3131 3132 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 3133 hw_stats->qbrc[i] += 3134 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 3135 if (crc_strip == 0) 3136 hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; 3137 3138 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 3139 hw_stats->qbtc[i] += 3140 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 3141 3142 hw_stats->qprdc[i] += delta_qprdc; 3143 *total_qprdc += hw_stats->qprdc[i]; 3144 3145 *total_qprc += hw_stats->qprc[i]; 3146 *total_qbrc += hw_stats->qbrc[i]; 3147 } 3148 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 3149 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 3150 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3151 3152 /* 3153 * An errata states that gprc actually counts good + missed packets: 3154 * Workaround to set gprc to summated queue packet receives 3155 */ 3156 hw_stats->gprc = *total_qprc; 3157 3158 if (hw->mac.type != ixgbe_mac_82598EB) { 3159 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 3160 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 3161 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 3162 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 3163 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 3164 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 3165 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 3166 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 3167 } else { 3168 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3169 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3170 /* 82598 only has a counter in the high register */ 3171 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3172 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3173 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3174 } 3175 uint64_t old_tpr = hw_stats->tpr; 3176 3177 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 
3178 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3179 3180 if (crc_strip == 0) 3181 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; 3182 3183 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 3184 hw_stats->gptc += delta_gptc; 3185 hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; 3186 hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 3187 3188 /* 3189 * Workaround: mprc hardware is incorrectly counting 3190 * broadcasts, so for now we subtract those. 3191 */ 3192 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3193 hw_stats->bprc += bprc; 3194 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3195 if (hw->mac.type == ixgbe_mac_82598EB) 3196 hw_stats->mprc -= bprc; 3197 3198 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3199 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3200 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3201 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3202 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3203 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3204 3205 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3206 hw_stats->lxontxc += lxon; 3207 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3208 hw_stats->lxofftxc += lxoff; 3209 total = lxon + lxoff; 3210 3211 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3212 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3213 hw_stats->gptc -= total; 3214 hw_stats->mptc -= total; 3215 hw_stats->ptc64 -= total; 3216 hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; 3217 3218 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3219 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3220 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3221 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3222 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3223 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3224 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3225 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3226 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3227 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3228 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3229 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3230 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3231 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3232 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3233 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3234 /* Only read FCOE on 82599 */ 3235 if (hw->mac.type != ixgbe_mac_82598EB) { 3236 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3237 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3238 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3239 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3240 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3241 } 3242 3243 /* Flow Director Stats registers */ 3244 if (hw->mac.type != ixgbe_mac_82598EB) { 3245 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 3246 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 3247 hw_stats->fdirustat_add += IXGBE_READ_REG(hw, 3248 IXGBE_FDIRUSTAT) & 0xFFFF; 3249 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, 3250 IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; 3251 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, 3252 IXGBE_FDIRFSTAT) & 0xFFFF; 3253 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, 3254 IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; 3255 } 3256 /* MACsec Stats registers */ 3257 macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); 3258 macsec_stats->out_pkts_encrypted += 
3259 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); 3260 macsec_stats->out_pkts_protected += 3261 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); 3262 macsec_stats->out_octets_encrypted += 3263 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); 3264 macsec_stats->out_octets_protected += 3265 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); 3266 macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); 3267 macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); 3268 macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); 3269 macsec_stats->in_pkts_unknownsci += 3270 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); 3271 macsec_stats->in_octets_decrypted += 3272 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); 3273 macsec_stats->in_octets_validated += 3274 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); 3275 macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); 3276 macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); 3277 macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); 3278 for (i = 0; i < 2; i++) { 3279 macsec_stats->in_pkts_ok += 3280 IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); 3281 macsec_stats->in_pkts_invalid += 3282 IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); 3283 macsec_stats->in_pkts_notvalid += 3284 IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); 3285 } 3286 macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); 3287 macsec_stats->in_pkts_notusingsa += 3288 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); 3289 } 3290 3291 /* 3292 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 3293 */ 3294 static int 3295 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3296 { 3297 struct ixgbe_hw *hw = 3298 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3299 struct ixgbe_hw_stats *hw_stats = 3300 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3301 struct ixgbe_macsec_stats *macsec_stats = 3302 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3303 dev->data->dev_private); 3304 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3305 unsigned i; 3306 3307 total_missed_rx = 0; 3308 total_qbrc = 0; 3309 total_qprc = 0; 3310 total_qprdc = 0; 3311 3312 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3313 &total_qbrc, &total_qprc, &total_qprdc); 3314 3315 if (stats == NULL) 3316 return -EINVAL; 3317 3318 /* Fill out the rte_eth_stats statistics structure */ 3319 stats->ipackets = total_qprc; 3320 stats->ibytes = total_qbrc; 3321 stats->opackets = hw_stats->gptc; 3322 stats->obytes = hw_stats->gotc; 3323 3324 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3325 stats->q_ipackets[i] = hw_stats->qprc[i]; 3326 stats->q_opackets[i] = hw_stats->qptc[i]; 3327 stats->q_ibytes[i] = hw_stats->qbrc[i]; 3328 stats->q_obytes[i] = hw_stats->qbtc[i]; 3329 stats->q_errors[i] = hw_stats->qprdc[i]; 3330 } 3331 3332 /* Rx Errors */ 3333 stats->imissed = total_missed_rx; 3334 stats->ierrors = hw_stats->crcerrs + 3335 hw_stats->mspdc + 3336 hw_stats->rlec + 3337 hw_stats->ruc + 3338 hw_stats->roc + 3339 hw_stats->illerrc + 3340 hw_stats->errbc + 3341 hw_stats->rfc + 3342 hw_stats->fccrc + 3343 hw_stats->fclast; 3344 3345 /* 3346 * 82599 errata, UDP frames with a 0 checksum can be marked as checksum 3347 * errors. 
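 * For that reason the XEC (checksum error) counter is folded into ierrors
 * only on MAC types other than 82599, as done just below.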
3348 */ 3349 if (hw->mac.type != ixgbe_mac_82599EB) 3350 stats->ierrors += hw_stats->xec; 3351 3352 /* Tx Errors */ 3353 stats->oerrors = 0; 3354 return 0; 3355 } 3356 3357 static int 3358 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 3359 { 3360 struct ixgbe_hw_stats *stats = 3361 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3362 3363 /* HW registers are cleared on read */ 3364 ixgbe_dev_stats_get(dev, NULL); 3365 3366 /* Reset software totals */ 3367 memset(stats, 0, sizeof(*stats)); 3368 3369 return 0; 3370 } 3371 3372 /* This function calculates the number of xstats based on the current config */ 3373 static unsigned 3374 ixgbe_xstats_calc_num(void) { 3375 return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + 3376 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 3377 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 3378 } 3379 3380 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3381 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) 3382 { 3383 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 3384 unsigned stat, i, count; 3385 3386 if (xstats_names != NULL) { 3387 count = 0; 3388 3389 /* Note: limit >= cnt_stats checked upstream 3390 * in rte_eth_xstats_names() 3391 */ 3392 3393 /* Extended stats from ixgbe_hw_stats */ 3394 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3395 strlcpy(xstats_names[count].name, 3396 rte_ixgbe_stats_strings[i].name, 3397 sizeof(xstats_names[count].name)); 3398 count++; 3399 } 3400 3401 /* MACsec Stats */ 3402 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3403 strlcpy(xstats_names[count].name, 3404 rte_ixgbe_macsec_strings[i].name, 3405 sizeof(xstats_names[count].name)); 3406 count++; 3407 } 3408 3409 /* RX Priority Stats */ 3410 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3411 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3412 snprintf(xstats_names[count].name, 3413 sizeof(xstats_names[count].name), 3414 "rx_priority%u_%s", i, 3415 rte_ixgbe_rxq_strings[stat].name); 3416 count++; 3417 } 3418 } 3419 3420 /* TX Priority Stats */ 3421 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3422 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3423 snprintf(xstats_names[count].name, 3424 sizeof(xstats_names[count].name), 3425 "tx_priority%u_%s", i, 3426 rte_ixgbe_txq_strings[stat].name); 3427 count++; 3428 } 3429 } 3430 } 3431 return cnt_stats; 3432 } 3433 3434 static int ixgbe_dev_xstats_get_names_by_id( 3435 struct rte_eth_dev *dev, 3436 const uint64_t *ids, 3437 struct rte_eth_xstat_name *xstats_names, 3438 unsigned int limit) 3439 { 3440 if (!ids) { 3441 const unsigned int cnt_stats = ixgbe_xstats_calc_num(); 3442 unsigned int stat, i, count; 3443 3444 if (xstats_names != NULL) { 3445 count = 0; 3446 3447 /* Note: limit >= cnt_stats checked upstream 3448 * in rte_eth_xstats_names() 3449 */ 3450 3451 /* Extended stats from ixgbe_hw_stats */ 3452 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3453 strlcpy(xstats_names[count].name, 3454 rte_ixgbe_stats_strings[i].name, 3455 sizeof(xstats_names[count].name)); 3456 count++; 3457 } 3458 3459 /* MACsec Stats */ 3460 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3461 strlcpy(xstats_names[count].name, 3462 rte_ixgbe_macsec_strings[i].name, 3463 sizeof(xstats_names[count].name)); 3464 count++; 3465 } 3466 3467 /* RX Priority Stats */ 3468 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3469 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3470 snprintf(xstats_names[count].name, 3471 sizeof(xstats_names[count].name), 3472 
"rx_priority%u_%s", i, 3473 rte_ixgbe_rxq_strings[stat].name); 3474 count++; 3475 } 3476 } 3477 3478 /* TX Priority Stats */ 3479 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3480 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3481 snprintf(xstats_names[count].name, 3482 sizeof(xstats_names[count].name), 3483 "tx_priority%u_%s", i, 3484 rte_ixgbe_txq_strings[stat].name); 3485 count++; 3486 } 3487 } 3488 } 3489 return cnt_stats; 3490 } 3491 3492 uint16_t i; 3493 uint16_t size = ixgbe_xstats_calc_num(); 3494 struct rte_eth_xstat_name xstats_names_copy[size]; 3495 3496 ixgbe_dev_xstats_get_names_by_id(dev, NULL, xstats_names_copy, 3497 size); 3498 3499 for (i = 0; i < limit; i++) { 3500 if (ids[i] >= size) { 3501 PMD_INIT_LOG(ERR, "id value isn't valid"); 3502 return -1; 3503 } 3504 strcpy(xstats_names[i].name, 3505 xstats_names_copy[ids[i]].name); 3506 } 3507 return limit; 3508 } 3509 3510 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3511 struct rte_eth_xstat_name *xstats_names, unsigned limit) 3512 { 3513 unsigned i; 3514 3515 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 3516 return -ENOMEM; 3517 3518 if (xstats_names != NULL) 3519 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 3520 strlcpy(xstats_names[i].name, 3521 rte_ixgbevf_stats_strings[i].name, 3522 sizeof(xstats_names[i].name)); 3523 return IXGBEVF_NB_XSTATS; 3524 } 3525 3526 static int 3527 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3528 unsigned n) 3529 { 3530 struct ixgbe_hw *hw = 3531 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3532 struct ixgbe_hw_stats *hw_stats = 3533 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3534 struct ixgbe_macsec_stats *macsec_stats = 3535 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3536 dev->data->dev_private); 3537 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3538 unsigned i, stat, count = 0; 3539 3540 count = ixgbe_xstats_calc_num(); 3541 3542 if (n < count) 3543 return count; 3544 3545 total_missed_rx = 0; 3546 total_qbrc = 0; 3547 total_qprc = 0; 3548 total_qprdc = 0; 3549 3550 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3551 &total_qbrc, &total_qprc, &total_qprdc); 3552 3553 /* If this is a reset xstats is NULL, and we have cleared the 3554 * registers by reading them. 
3555 */ 3556 if (!xstats) 3557 return 0; 3558 3559 /* Extended stats from ixgbe_hw_stats */ 3560 count = 0; 3561 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3562 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3563 rte_ixgbe_stats_strings[i].offset); 3564 xstats[count].id = count; 3565 count++; 3566 } 3567 3568 /* MACsec Stats */ 3569 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3570 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + 3571 rte_ixgbe_macsec_strings[i].offset); 3572 xstats[count].id = count; 3573 count++; 3574 } 3575 3576 /* RX Priority Stats */ 3577 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3578 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3579 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3580 rte_ixgbe_rxq_strings[stat].offset + 3581 (sizeof(uint64_t) * i)); 3582 xstats[count].id = count; 3583 count++; 3584 } 3585 } 3586 3587 /* TX Priority Stats */ 3588 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3589 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3590 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3591 rte_ixgbe_txq_strings[stat].offset + 3592 (sizeof(uint64_t) * i)); 3593 xstats[count].id = count; 3594 count++; 3595 } 3596 } 3597 return count; 3598 } 3599 3600 static int 3601 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 3602 uint64_t *values, unsigned int n) 3603 { 3604 if (!ids) { 3605 struct ixgbe_hw *hw = 3606 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3607 struct ixgbe_hw_stats *hw_stats = 3608 IXGBE_DEV_PRIVATE_TO_STATS( 3609 dev->data->dev_private); 3610 struct ixgbe_macsec_stats *macsec_stats = 3611 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3612 dev->data->dev_private); 3613 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3614 unsigned int i, stat, count = 0; 3615 3616 count = ixgbe_xstats_calc_num(); 3617 3618 if (!ids && n < count) 3619 return count; 3620 3621 total_missed_rx = 0; 3622 total_qbrc = 0; 3623 total_qprc = 0; 3624 total_qprdc = 0; 3625 3626 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, 3627 &total_missed_rx, &total_qbrc, &total_qprc, 3628 &total_qprdc); 3629 3630 /* If this is a reset xstats is NULL, and we have cleared the 3631 * registers by reading them. 
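 * A hedged application-side sketch of the by-id path handled below: resolve
 * a counter name (any name reported by rte_eth_xstats_get_names()) to its
 * id, then fetch only that value ("port_id" is hypothetical):
 *
 *   #include <stdint.h>
 *   #include <rte_ethdev.h>
 *
 *   static int
 *   read_one_xstat(uint16_t port_id, const char *name, uint64_t *value)
 *   {
 *       uint64_t id;
 *
 *       // Resolve the name to an id, then fetch just that counter.
 *       if (rte_eth_xstats_get_id_by_name(port_id, name, &id) != 0)
 *           return -1;
 *       if (rte_eth_xstats_get_by_id(port_id, &id, value, 1) < 0)
 *           return -1;
 *       return 0;
 *   }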
3632 */ 3633 if (!ids && !values) 3634 return 0; 3635 3636 /* Extended stats from ixgbe_hw_stats */ 3637 count = 0; 3638 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3639 values[count] = *(uint64_t *)(((char *)hw_stats) + 3640 rte_ixgbe_stats_strings[i].offset); 3641 count++; 3642 } 3643 3644 /* MACsec Stats */ 3645 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3646 values[count] = *(uint64_t *)(((char *)macsec_stats) + 3647 rte_ixgbe_macsec_strings[i].offset); 3648 count++; 3649 } 3650 3651 /* RX Priority Stats */ 3652 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3653 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3654 values[count] = 3655 *(uint64_t *)(((char *)hw_stats) + 3656 rte_ixgbe_rxq_strings[stat].offset + 3657 (sizeof(uint64_t) * i)); 3658 count++; 3659 } 3660 } 3661 3662 /* TX Priority Stats */ 3663 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3664 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3665 values[count] = 3666 *(uint64_t *)(((char *)hw_stats) + 3667 rte_ixgbe_txq_strings[stat].offset + 3668 (sizeof(uint64_t) * i)); 3669 count++; 3670 } 3671 } 3672 return count; 3673 } 3674 3675 uint16_t i; 3676 uint16_t size = ixgbe_xstats_calc_num(); 3677 uint64_t values_copy[size]; 3678 3679 ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); 3680 3681 for (i = 0; i < n; i++) { 3682 if (ids[i] >= size) { 3683 PMD_INIT_LOG(ERR, "id value isn't valid"); 3684 return -1; 3685 } 3686 values[i] = values_copy[ids[i]]; 3687 } 3688 return n; 3689 } 3690 3691 static int 3692 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) 3693 { 3694 struct ixgbe_hw_stats *stats = 3695 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3696 struct ixgbe_macsec_stats *macsec_stats = 3697 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3698 dev->data->dev_private); 3699 3700 unsigned count = ixgbe_xstats_calc_num(); 3701 3702 /* HW registers are cleared on read */ 3703 ixgbe_dev_xstats_get(dev, NULL, count); 3704 3705 /* Reset software totals */ 3706 memset(stats, 0, sizeof(*stats)); 3707 memset(macsec_stats, 0, sizeof(*macsec_stats)); 3708 3709 return 0; 3710 } 3711 3712 static void 3713 ixgbevf_update_stats(struct rte_eth_dev *dev) 3714 { 3715 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3716 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3717 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3718 3719 /* Good Rx packet, include VF loopback */ 3720 UPDATE_VF_STAT(IXGBE_VFGPRC, 3721 hw_stats->last_vfgprc, hw_stats->vfgprc); 3722 3723 /* Good Rx octets, include VF loopback */ 3724 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 3725 hw_stats->last_vfgorc, hw_stats->vfgorc); 3726 3727 /* Good Tx packet, include VF loopback */ 3728 UPDATE_VF_STAT(IXGBE_VFGPTC, 3729 hw_stats->last_vfgptc, hw_stats->vfgptc); 3730 3731 /* Good Tx octets, include VF loopback */ 3732 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 3733 hw_stats->last_vfgotc, hw_stats->vfgotc); 3734 3735 /* Rx Multicst Packet */ 3736 UPDATE_VF_STAT(IXGBE_VFMPRC, 3737 hw_stats->last_vfmprc, hw_stats->vfmprc); 3738 } 3739 3740 static int 3741 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3742 unsigned n) 3743 { 3744 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3745 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3746 unsigned i; 3747 3748 if (n < IXGBEVF_NB_XSTATS) 3749 return IXGBEVF_NB_XSTATS; 3750 3751 ixgbevf_update_stats(dev); 3752 3753 if (!xstats) 3754 return 0; 3755 3756 /* Extended stats */ 3757 for (i = 0; i < 
IXGBEVF_NB_XSTATS; i++) { 3758 xstats[i].id = i; 3759 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 3760 rte_ixgbevf_stats_strings[i].offset); 3761 } 3762 3763 return IXGBEVF_NB_XSTATS; 3764 } 3765 3766 static int 3767 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3768 { 3769 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3770 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3771 3772 ixgbevf_update_stats(dev); 3773 3774 if (stats == NULL) 3775 return -EINVAL; 3776 3777 stats->ipackets = hw_stats->vfgprc; 3778 stats->ibytes = hw_stats->vfgorc; 3779 stats->opackets = hw_stats->vfgptc; 3780 stats->obytes = hw_stats->vfgotc; 3781 return 0; 3782 } 3783 3784 static int 3785 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 3786 { 3787 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3788 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3789 3790 /* Sync HW register to the last stats */ 3791 ixgbevf_dev_stats_get(dev, NULL); 3792 3793 /* reset HW current stats*/ 3794 hw_stats->vfgprc = 0; 3795 hw_stats->vfgorc = 0; 3796 hw_stats->vfgptc = 0; 3797 hw_stats->vfgotc = 0; 3798 hw_stats->vfmprc = 0; 3799 3800 return 0; 3801 } 3802 3803 static int 3804 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 3805 { 3806 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3807 u16 eeprom_verh, eeprom_verl; 3808 u32 etrack_id; 3809 int ret; 3810 3811 ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); 3812 ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); 3813 3814 etrack_id = (eeprom_verh << 16) | eeprom_verl; 3815 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); 3816 if (ret < 0) 3817 return -EINVAL; 3818 3819 ret += 1; /* add the size of '\0' */ 3820 if (fw_size < (size_t)ret) 3821 return ret; 3822 else 3823 return 0; 3824 } 3825 3826 static int 3827 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3828 { 3829 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3830 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3831 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3832 3833 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3834 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3835 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3836 /* 3837 * When DCB/VT is off, maximum number of queues changes, 3838 * except for 82598EB, which remains constant. 
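 * Applications typically consult this limit through the generic API before
 * configuring queues; a sketch with hypothetical "port_id", "nb_rxq",
 * "nb_txq" and "port_conf":
 *
 *   struct rte_eth_dev_info info;
 *
 *   if (rte_eth_dev_info_get(port_id, &info) == 0 &&
 *       nb_rxq <= info.max_rx_queues && nb_txq <= info.max_tx_queues)
 *       rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);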
3839 */ 3840 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE && 3841 hw->mac.type != ixgbe_mac_82598EB) 3842 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3843 } 3844 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3845 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3846 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3847 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3848 dev_info->max_vfs = pci_dev->max_vfs; 3849 if (hw->mac.type == ixgbe_mac_82598EB) 3850 dev_info->max_vmdq_pools = RTE_ETH_16_POOLS; 3851 else 3852 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; 3853 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3854 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3855 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3856 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3857 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3858 dev_info->rx_queue_offload_capa); 3859 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3860 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3861 3862 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3863 .rx_thresh = { 3864 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3865 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3866 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3867 }, 3868 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3869 .rx_drop_en = 0, 3870 .offloads = 0, 3871 }; 3872 3873 dev_info->default_txconf = (struct rte_eth_txconf) { 3874 .tx_thresh = { 3875 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3876 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3877 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3878 }, 3879 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3880 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3881 .offloads = 0, 3882 }; 3883 3884 dev_info->rx_desc_lim = rx_desc_lim; 3885 dev_info->tx_desc_lim = tx_desc_lim; 3886 3887 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3888 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3889 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3890 3891 dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G; 3892 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3893 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 3894 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M | 3895 RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G; 3896 3897 if (hw->mac.type == ixgbe_mac_X540 || 3898 hw->mac.type == ixgbe_mac_X540_vf || 3899 hw->mac.type == ixgbe_mac_X550 || 3900 hw->mac.type == ixgbe_mac_X550_vf) { 3901 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M; 3902 } 3903 if (hw->mac.type == ixgbe_mac_X550) { 3904 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G; 3905 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G; 3906 } 3907 3908 /* Driver-preferred Rx/Tx parameters */ 3909 dev_info->default_rxportconf.burst_size = 32; 3910 dev_info->default_txportconf.burst_size = 32; 3911 dev_info->default_rxportconf.nb_queues = 1; 3912 dev_info->default_txportconf.nb_queues = 1; 3913 dev_info->default_rxportconf.ring_size = 256; 3914 dev_info->default_txportconf.ring_size = 256; 3915 3916 return 0; 3917 } 3918 3919 static const uint32_t * 3920 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 3921 { 3922 static const uint32_t ptypes[] = { 3923 /* For non-vec functions, 3924 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 3925 * for vec functions, 3926 * refers to _recv_raw_pkts_vec(). 
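 * An application can query this list at run time, e.g. (sketch, hypothetical
 * "port_id"):
 *
 *   uint32_t pt[32];
 *   int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *                                            pt, RTE_DIM(pt));
 *
 *   for (int i = 0; i < n && i < (int)RTE_DIM(pt); i++)
 *       printf("supported ptype 0x%08x\n", pt[i]);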
3927 */ 3928 RTE_PTYPE_L2_ETHER, 3929 RTE_PTYPE_L3_IPV4, 3930 RTE_PTYPE_L3_IPV4_EXT, 3931 RTE_PTYPE_L3_IPV6, 3932 RTE_PTYPE_L3_IPV6_EXT, 3933 RTE_PTYPE_L4_SCTP, 3934 RTE_PTYPE_L4_TCP, 3935 RTE_PTYPE_L4_UDP, 3936 RTE_PTYPE_TUNNEL_IP, 3937 RTE_PTYPE_INNER_L3_IPV6, 3938 RTE_PTYPE_INNER_L3_IPV6_EXT, 3939 RTE_PTYPE_INNER_L4_TCP, 3940 RTE_PTYPE_INNER_L4_UDP, 3941 RTE_PTYPE_UNKNOWN 3942 }; 3943 3944 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 3945 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 3946 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 3947 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) 3948 return ptypes; 3949 3950 #if defined(RTE_ARCH_X86) || defined(__ARM_NEON) 3951 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || 3952 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) 3953 return ptypes; 3954 #endif 3955 return NULL; 3956 } 3957 3958 static int 3959 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 3960 struct rte_eth_dev_info *dev_info) 3961 { 3962 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3963 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3964 3965 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3966 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3967 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 3968 dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ 3969 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3970 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3971 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3972 dev_info->max_vfs = pci_dev->max_vfs; 3973 if (hw->mac.type == ixgbe_mac_82598EB) 3974 dev_info->max_vmdq_pools = RTE_ETH_16_POOLS; 3975 else 3976 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; 3977 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3978 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3979 dev_info->rx_queue_offload_capa); 3980 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3981 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3982 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3983 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3984 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3985 3986 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3987 .rx_thresh = { 3988 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3989 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3990 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3991 }, 3992 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3993 .rx_drop_en = 0, 3994 .offloads = 0, 3995 }; 3996 3997 dev_info->default_txconf = (struct rte_eth_txconf) { 3998 .tx_thresh = { 3999 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 4000 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 4001 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 4002 }, 4003 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 4004 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 4005 .offloads = 0, 4006 }; 4007 4008 dev_info->rx_desc_lim = rx_desc_lim; 4009 dev_info->tx_desc_lim = tx_desc_lim; 4010 4011 return 0; 4012 } 4013 4014 static int 4015 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 4016 bool *link_up, int wait_to_complete) 4017 { 4018 struct ixgbe_adapter *adapter = container_of(hw, 4019 struct ixgbe_adapter, hw); 4020 struct ixgbe_mbx_info *mbx = &hw->mbx; 4021 struct ixgbe_mac_info *mac = &hw->mac; 4022 uint32_t links_reg, in_msg; 4023 int ret_val = 0; 4024 4025 /* If we were hit with a reset drop the link */ 4026 if (!mbx->ops.check_for_rst(hw, 0) || 
!mbx->timeout) 4027 mac->get_link_status = true; 4028 4029 if (!mac->get_link_status) 4030 goto out; 4031 4032 /* if link status is down no point in checking to see if pf is up */ 4033 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4034 if (!(links_reg & IXGBE_LINKS_UP)) 4035 goto out; 4036 4037 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs 4038 * before the link status is correct 4039 */ 4040 if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { 4041 int i; 4042 4043 for (i = 0; i < 5; i++) { 4044 rte_delay_us(100); 4045 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4046 4047 if (!(links_reg & IXGBE_LINKS_UP)) 4048 goto out; 4049 } 4050 } 4051 4052 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 4053 case IXGBE_LINKS_SPEED_10G_82599: 4054 *speed = IXGBE_LINK_SPEED_10GB_FULL; 4055 if (hw->mac.type >= ixgbe_mac_X550) { 4056 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4057 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 4058 } 4059 break; 4060 case IXGBE_LINKS_SPEED_1G_82599: 4061 *speed = IXGBE_LINK_SPEED_1GB_FULL; 4062 break; 4063 case IXGBE_LINKS_SPEED_100_82599: 4064 *speed = IXGBE_LINK_SPEED_100_FULL; 4065 if (hw->mac.type == ixgbe_mac_X550) { 4066 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4067 *speed = IXGBE_LINK_SPEED_5GB_FULL; 4068 } 4069 break; 4070 case IXGBE_LINKS_SPEED_10_X550EM_A: 4071 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4072 /* Since Reserved in older MAC's */ 4073 if (hw->mac.type >= ixgbe_mac_X550) 4074 *speed = IXGBE_LINK_SPEED_10_FULL; 4075 break; 4076 default: 4077 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4078 } 4079 4080 if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) { 4081 if (*speed == IXGBE_LINK_SPEED_UNKNOWN) 4082 mac->get_link_status = true; 4083 else 4084 mac->get_link_status = false; 4085 4086 goto out; 4087 } 4088 4089 /* if the read failed it could just be a mailbox collision, best wait 4090 * until we are called again and don't report an error 4091 */ 4092 if (mbx->ops.read(hw, &in_msg, 1, 0)) 4093 goto out; 4094 4095 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { 4096 /* msg is not CTS and is NACK we must have lost CTS status */ 4097 if (in_msg & IXGBE_VT_MSGTYPE_NACK) 4098 mac->get_link_status = false; 4099 goto out; 4100 } 4101 4102 /* the pf is talking, if we timed out in the past we reinit */ 4103 if (!mbx->timeout) { 4104 ret_val = -1; 4105 goto out; 4106 } 4107 4108 /* if we passed all the tests above then the link is up and we no 4109 * longer need to check for link 4110 */ 4111 mac->get_link_status = false; 4112 4113 out: 4114 *link_up = !mac->get_link_status; 4115 return ret_val; 4116 } 4117 4118 /* 4119 * If @timeout_ms was 0, it means that it will not return until link complete. 4120 * It returns 1 on complete, return 0 on timeout. 4121 */ 4122 static int 4123 ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms) 4124 { 4125 #define WARNING_TIMEOUT 9000 /* 9s in total */ 4126 struct ixgbe_adapter *ad = dev->data->dev_private; 4127 uint32_t timeout = timeout_ms ? 
timeout_ms : WARNING_TIMEOUT; 4128 4129 while (rte_atomic32_read(&ad->link_thread_running)) { 4130 msec_delay(1); 4131 timeout--; 4132 4133 if (timeout_ms) { 4134 if (!timeout) 4135 return 0; 4136 } else if (!timeout) { 4137 /* It will not return until link complete */ 4138 timeout = WARNING_TIMEOUT; 4139 PMD_DRV_LOG(ERR, "IXGBE link thread not complete too long time!"); 4140 } 4141 } 4142 4143 return 1; 4144 } 4145 4146 static void * 4147 ixgbe_dev_setup_link_thread_handler(void *param) 4148 { 4149 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4150 struct ixgbe_adapter *ad = dev->data->dev_private; 4151 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4152 struct ixgbe_interrupt *intr = 4153 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4154 u32 speed; 4155 bool autoneg = false; 4156 4157 pthread_detach(pthread_self()); 4158 speed = hw->phy.autoneg_advertised; 4159 if (!speed) 4160 ixgbe_get_link_capabilities(hw, &speed, &autoneg); 4161 4162 ixgbe_setup_link(hw, speed, true); 4163 4164 intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 4165 rte_atomic32_clear(&ad->link_thread_running); 4166 return NULL; 4167 } 4168 4169 /* 4170 * In freebsd environment, nic_uio drivers do not support interrupts, 4171 * rte_intr_callback_register() will fail to register interrupts. 4172 * We can not make link status to change from down to up by interrupt 4173 * callback. So we need to wait for the controller to acquire link 4174 * when ports start. 4175 * It returns 0 on link up. 4176 */ 4177 static int 4178 ixgbe_wait_for_link_up(struct ixgbe_hw *hw) 4179 { 4180 #ifdef RTE_EXEC_ENV_FREEBSD 4181 int err, i; 4182 bool link_up = false; 4183 uint32_t speed = 0; 4184 const int nb_iter = 25; 4185 4186 for (i = 0; i < nb_iter; i++) { 4187 err = ixgbe_check_link(hw, &speed, &link_up, 0); 4188 if (err) 4189 return err; 4190 if (link_up) 4191 return 0; 4192 msec_delay(200); 4193 } 4194 4195 return 0; 4196 #else 4197 RTE_SET_USED(hw); 4198 return 0; 4199 #endif 4200 } 4201 4202 /* return 0 means link status changed, -1 means not changed */ 4203 int 4204 ixgbe_dev_link_update_share(struct rte_eth_dev *dev, 4205 int wait_to_complete, int vf) 4206 { 4207 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4208 struct ixgbe_adapter *ad = dev->data->dev_private; 4209 struct rte_eth_link link; 4210 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4211 struct ixgbe_interrupt *intr = 4212 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4213 bool link_up; 4214 int diag; 4215 int wait = 1; 4216 u32 esdp_reg; 4217 4218 memset(&link, 0, sizeof(link)); 4219 link.link_status = RTE_ETH_LINK_DOWN; 4220 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 4221 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 4222 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 4223 RTE_ETH_LINK_SPEED_FIXED); 4224 4225 hw->mac.get_link_status = true; 4226 4227 if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) 4228 return rte_eth_linkstatus_set(dev, &link); 4229 4230 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 4231 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 4232 wait = 0; 4233 4234 /* BSD has no interrupt mechanism, so force NIC status synchronization. 
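 * In either environment an application normally reaches this path through
 * the generic link API; a non-blocking poll looks like (sketch, hypothetical
 * "port_id"):
 *
 *   struct rte_eth_link link_info;
 *
 *   if (rte_eth_link_get_nowait(port_id, &link_info) == 0 &&
 *       link_info.link_status == RTE_ETH_LINK_UP)
 *       printf("port %u: %u Mbps\n", port_id, link_info.link_speed);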
*/ 4235 #ifdef RTE_EXEC_ENV_FREEBSD 4236 wait = 1; 4237 #endif 4238 4239 if (vf) 4240 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); 4241 else 4242 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); 4243 4244 if (diag != 0) { 4245 link.link_speed = RTE_ETH_SPEED_NUM_100M; 4246 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 4247 return rte_eth_linkstatus_set(dev, &link); 4248 } 4249 4250 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4251 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 4252 if ((esdp_reg & IXGBE_ESDP_SDP3)) 4253 link_up = 0; 4254 } 4255 4256 if (link_up == 0) { 4257 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4258 ixgbe_dev_wait_setup_link_complete(dev, 0); 4259 if (rte_atomic32_test_and_set(&ad->link_thread_running)) { 4260 /* To avoid race condition between threads, set 4261 * the IXGBE_FLAG_NEED_LINK_CONFIG flag only 4262 * when there is no link thread running. 4263 */ 4264 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 4265 if (rte_ctrl_thread_create(&ad->link_thread_tid, 4266 "ixgbe-link-handler", 4267 NULL, 4268 ixgbe_dev_setup_link_thread_handler, 4269 dev) < 0) { 4270 PMD_DRV_LOG(ERR, 4271 "Create link thread failed!"); 4272 rte_atomic32_clear(&ad->link_thread_running); 4273 } 4274 } else { 4275 PMD_DRV_LOG(ERR, 4276 "Other link thread is running now!"); 4277 } 4278 } 4279 return rte_eth_linkstatus_set(dev, &link); 4280 } 4281 4282 link.link_status = RTE_ETH_LINK_UP; 4283 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 4284 4285 switch (link_speed) { 4286 default: 4287 case IXGBE_LINK_SPEED_UNKNOWN: 4288 link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; 4289 break; 4290 4291 case IXGBE_LINK_SPEED_10_FULL: 4292 link.link_speed = RTE_ETH_SPEED_NUM_10M; 4293 break; 4294 4295 case IXGBE_LINK_SPEED_100_FULL: 4296 link.link_speed = RTE_ETH_SPEED_NUM_100M; 4297 break; 4298 4299 case IXGBE_LINK_SPEED_1GB_FULL: 4300 link.link_speed = RTE_ETH_SPEED_NUM_1G; 4301 break; 4302 4303 case IXGBE_LINK_SPEED_2_5GB_FULL: 4304 link.link_speed = RTE_ETH_SPEED_NUM_2_5G; 4305 break; 4306 4307 case IXGBE_LINK_SPEED_5GB_FULL: 4308 link.link_speed = RTE_ETH_SPEED_NUM_5G; 4309 break; 4310 4311 case IXGBE_LINK_SPEED_10GB_FULL: 4312 link.link_speed = RTE_ETH_SPEED_NUM_10G; 4313 break; 4314 } 4315 4316 return rte_eth_linkstatus_set(dev, &link); 4317 } 4318 4319 static int 4320 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4321 { 4322 return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); 4323 } 4324 4325 static int 4326 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4327 { 4328 return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); 4329 } 4330 4331 static int 4332 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 4333 { 4334 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4335 uint32_t fctrl; 4336 4337 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4338 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4339 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4340 4341 return 0; 4342 } 4343 4344 static int 4345 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 4346 { 4347 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4348 uint32_t fctrl; 4349 4350 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4351 fctrl &= (~IXGBE_FCTRL_UPE); 4352 if (dev->data->all_multicast == 1) 4353 fctrl |= IXGBE_FCTRL_MPE; 4354 else 4355 fctrl &= (~IXGBE_FCTRL_MPE); 4356 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4357 4358 return 0; 4359 } 4360 4361 static int 4362 ixgbe_dev_allmulticast_enable(struct 
rte_eth_dev *dev) 4363 { 4364 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4365 uint32_t fctrl; 4366 4367 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4368 fctrl |= IXGBE_FCTRL_MPE; 4369 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4370 4371 return 0; 4372 } 4373 4374 static int 4375 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 4376 { 4377 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4378 uint32_t fctrl; 4379 4380 if (dev->data->promiscuous == 1) 4381 return 0; /* must remain in all_multicast mode */ 4382 4383 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4384 fctrl &= (~IXGBE_FCTRL_MPE); 4385 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4386 4387 return 0; 4388 } 4389 4390 /** 4391 * It clears the interrupt causes and enables the interrupt. 4392 * It will be called once only during nic initialized. 4393 * 4394 * @param dev 4395 * Pointer to struct rte_eth_dev. 4396 * @param on 4397 * Enable or Disable. 4398 * 4399 * @return 4400 * - On success, zero. 4401 * - On failure, a negative value. 4402 */ 4403 static int 4404 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 4405 { 4406 struct ixgbe_interrupt *intr = 4407 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4408 4409 ixgbe_dev_link_status_print(dev); 4410 if (on) 4411 intr->mask |= IXGBE_EICR_LSC; 4412 else 4413 intr->mask &= ~IXGBE_EICR_LSC; 4414 4415 return 0; 4416 } 4417 4418 /** 4419 * It clears the interrupt causes and enables the interrupt. 4420 * It will be called once only during nic initialized. 4421 * 4422 * @param dev 4423 * Pointer to struct rte_eth_dev. 4424 * 4425 * @return 4426 * - On success, zero. 4427 * - On failure, a negative value. 4428 */ 4429 static int 4430 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) 4431 { 4432 struct ixgbe_interrupt *intr = 4433 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4434 4435 intr->mask |= IXGBE_EICR_RTX_QUEUE; 4436 4437 return 0; 4438 } 4439 4440 /** 4441 * It clears the interrupt causes and enables the interrupt. 4442 * It will be called once only during nic initialized. 4443 * 4444 * @param dev 4445 * Pointer to struct rte_eth_dev. 4446 * 4447 * @return 4448 * - On success, zero. 4449 * - On failure, a negative value. 4450 */ 4451 static int 4452 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) 4453 { 4454 struct ixgbe_interrupt *intr = 4455 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4456 4457 intr->mask |= IXGBE_EICR_LINKSEC; 4458 4459 return 0; 4460 } 4461 4462 /* 4463 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. 4464 * 4465 * @param dev 4466 * Pointer to struct rte_eth_dev. 4467 * 4468 * @return 4469 * - On success, zero. 4470 * - On failure, a negative value. 
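 * The LSC flag set here ultimately surfaces to the application as an
 * RTE_ETH_EVENT_INTR_LSC callback. A minimal registration sketch (names are
 * hypothetical; assumes dev_conf.intr_conf.lsc was enabled):
 *
 *   static int
 *   lsc_event_cb(uint16_t port_id, enum rte_eth_event_type type,
 *                void *cb_arg, void *ret_param)
 *   {
 *       RTE_SET_USED(cb_arg);
 *       RTE_SET_USED(ret_param);
 *       printf("port %u: link state change event %d\n", port_id, type);
 *       return 0;
 *   }
 *
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 lsc_event_cb, NULL);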
4471 */ 4472 static int 4473 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 4474 { 4475 uint32_t eicr; 4476 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4477 struct ixgbe_interrupt *intr = 4478 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4479 4480 /* clear all cause mask */ 4481 ixgbe_disable_intr(hw); 4482 4483 /* read-on-clear nic registers here */ 4484 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4485 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 4486 4487 intr->flags = 0; 4488 4489 /* set flag for async link update */ 4490 if (eicr & IXGBE_EICR_LSC) 4491 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4492 4493 if (eicr & IXGBE_EICR_MAILBOX) 4494 intr->flags |= IXGBE_FLAG_MAILBOX; 4495 4496 if (eicr & IXGBE_EICR_LINKSEC) 4497 intr->flags |= IXGBE_FLAG_MACSEC; 4498 4499 if (hw->mac.type == ixgbe_mac_X550EM_x && 4500 hw->phy.type == ixgbe_phy_x550em_ext_t && 4501 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) 4502 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; 4503 4504 return 0; 4505 } 4506 4507 /** 4508 * It gets and then prints the link status. 4509 * 4510 * @param dev 4511 * Pointer to struct rte_eth_dev. 4512 * 4513 * @return 4514 * - On success, zero. 4515 * - On failure, a negative value. 4516 */ 4517 static void 4518 ixgbe_dev_link_status_print(struct rte_eth_dev *dev) 4519 { 4520 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4521 struct rte_eth_link link; 4522 4523 rte_eth_linkstatus_get(dev, &link); 4524 4525 if (link.link_status) { 4526 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 4527 (int)(dev->data->port_id), 4528 (unsigned)link.link_speed, 4529 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? 4530 "full-duplex" : "half-duplex"); 4531 } else { 4532 PMD_INIT_LOG(INFO, " Port %d: Link Down", 4533 (int)(dev->data->port_id)); 4534 } 4535 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 4536 pci_dev->addr.domain, 4537 pci_dev->addr.bus, 4538 pci_dev->addr.devid, 4539 pci_dev->addr.function); 4540 } 4541 4542 /* 4543 * It executes link_update after knowing an interrupt occurred. 4544 * 4545 * @param dev 4546 * Pointer to struct rte_eth_dev. 4547 * 4548 * @return 4549 * - On success, zero. 4550 * - On failure, a negative value. 
4551 */ 4552 static int 4553 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) 4554 { 4555 struct ixgbe_interrupt *intr = 4556 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4557 int64_t timeout; 4558 struct ixgbe_hw *hw = 4559 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4560 4561 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); 4562 4563 if (intr->flags & IXGBE_FLAG_MAILBOX) { 4564 ixgbe_pf_mbx_process(dev); 4565 intr->flags &= ~IXGBE_FLAG_MAILBOX; 4566 } 4567 4568 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4569 ixgbe_handle_lasi(hw); 4570 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4571 } 4572 4573 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4574 struct rte_eth_link link; 4575 4576 /* get the link status before link update, for predicting later */ 4577 rte_eth_linkstatus_get(dev, &link); 4578 4579 ixgbe_dev_link_update(dev, 0); 4580 4581 /* likely to up */ 4582 if (!link.link_status) 4583 /* handle it 1 sec later, wait it being stable */ 4584 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; 4585 /* likely to down */ 4586 else 4587 /* handle it 4 sec later, wait it being stable */ 4588 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; 4589 4590 ixgbe_dev_link_status_print(dev); 4591 if (rte_eal_alarm_set(timeout * 1000, 4592 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) 4593 PMD_DRV_LOG(ERR, "Error setting alarm"); 4594 else { 4595 /* remember original mask */ 4596 intr->mask_original = intr->mask; 4597 /* only disable lsc interrupt */ 4598 intr->mask &= ~IXGBE_EIMS_LSC; 4599 } 4600 } 4601 4602 PMD_DRV_LOG(DEBUG, "enable intr immediately"); 4603 ixgbe_enable_intr(dev); 4604 4605 return 0; 4606 } 4607 4608 /** 4609 * Interrupt handler which shall be registered for alarm callback for delayed 4610 * handling specific interrupt to wait for the stable nic state. As the 4611 * NIC interrupt state is not stable for ixgbe after link is just down, 4612 * it needs to wait 4 seconds to get the stable status. 4613 * 4614 * @param handle 4615 * Pointer to interrupt handle. 4616 * @param param 4617 * The address of parameter (struct rte_eth_dev *) registered before. 
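 *
 * The alarm is armed by ixgbe_dev_interrupt_action() with a 1 second delay
 * when the link was previously down (likely coming up) and with a 4 second
 * delay when it was previously up, via IXGBE_LINK_UP_CHECK_TIMEOUT and
 * IXGBE_LINK_DOWN_CHECK_TIMEOUT respectively.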
4618 * 4619 * @return 4620 * void 4621 */ 4622 static void 4623 ixgbe_dev_interrupt_delayed_handler(void *param) 4624 { 4625 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4626 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4627 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 4628 struct ixgbe_interrupt *intr = 4629 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4630 struct ixgbe_hw *hw = 4631 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4632 uint32_t eicr; 4633 4634 ixgbe_disable_intr(hw); 4635 4636 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4637 if (eicr & IXGBE_EICR_MAILBOX) 4638 ixgbe_pf_mbx_process(dev); 4639 4640 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4641 ixgbe_handle_lasi(hw); 4642 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4643 } 4644 4645 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4646 ixgbe_dev_link_update(dev, 0); 4647 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 4648 ixgbe_dev_link_status_print(dev); 4649 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 4650 } 4651 4652 if (intr->flags & IXGBE_FLAG_MACSEC) { 4653 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, NULL); 4654 intr->flags &= ~IXGBE_FLAG_MACSEC; 4655 } 4656 4657 /* restore original mask */ 4658 intr->mask = intr->mask_original; 4659 intr->mask_original = 0; 4660 4661 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 4662 ixgbe_enable_intr(dev); 4663 rte_intr_ack(intr_handle); 4664 } 4665 4666 /** 4667 * Interrupt handler triggered by NIC for handling 4668 * specific interrupt. 4669 * 4670 * @param handle 4671 * Pointer to interrupt handle. 4672 * @param param 4673 * The address of parameter (struct rte_eth_dev *) registered before. 4674 * 4675 * @return 4676 * void 4677 */ 4678 static void 4679 ixgbe_dev_interrupt_handler(void *param) 4680 { 4681 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4682 4683 ixgbe_dev_interrupt_get_status(dev); 4684 ixgbe_dev_interrupt_action(dev); 4685 } 4686 4687 static int 4688 ixgbe_dev_led_on(struct rte_eth_dev *dev) 4689 { 4690 struct ixgbe_hw *hw; 4691 4692 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4693 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4694 } 4695 4696 static int 4697 ixgbe_dev_led_off(struct rte_eth_dev *dev) 4698 { 4699 struct ixgbe_hw *hw; 4700 4701 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4702 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4703 } 4704 4705 static int 4706 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4707 { 4708 struct ixgbe_hw *hw; 4709 uint32_t mflcn_reg; 4710 uint32_t fccfg_reg; 4711 int rx_pause; 4712 int tx_pause; 4713 4714 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4715 4716 fc_conf->pause_time = hw->fc.pause_time; 4717 fc_conf->high_water = hw->fc.high_water[0]; 4718 fc_conf->low_water = hw->fc.low_water[0]; 4719 fc_conf->send_xon = hw->fc.send_xon; 4720 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 4721 4722 /* 4723 * Return rx_pause status according to actual setting of 4724 * MFLCN register. 4725 */ 4726 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4727 if (mflcn_reg & IXGBE_MFLCN_PMCF) 4728 fc_conf->mac_ctrl_frame_fwd = 1; 4729 else 4730 fc_conf->mac_ctrl_frame_fwd = 0; 4731 4732 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) 4733 rx_pause = 1; 4734 else 4735 rx_pause = 0; 4736 4737 /* 4738 * Return tx_pause status according to actual setting of 4739 * FCCFG register. 
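 * Both this getter and ixgbe_flow_ctrl_set() below are reached through the
 * generic flow-control API; a typical read-modify-write from the application
 * looks like (sketch, hypothetical "port_id"):
 *
 *   struct rte_eth_fc_conf fc;
 *
 *   if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *       fc.mode = RTE_ETH_FC_FULL;
 *       rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *   }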
4740 */ 4741 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4742 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 4743 tx_pause = 1; 4744 else 4745 tx_pause = 0; 4746 4747 if (rx_pause && tx_pause) 4748 fc_conf->mode = RTE_ETH_FC_FULL; 4749 else if (rx_pause) 4750 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 4751 else if (tx_pause) 4752 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 4753 else 4754 fc_conf->mode = RTE_ETH_FC_NONE; 4755 4756 return 0; 4757 } 4758 4759 static int 4760 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4761 { 4762 struct ixgbe_hw *hw; 4763 struct ixgbe_adapter *adapter = dev->data->dev_private; 4764 int err; 4765 uint32_t rx_buf_size; 4766 uint32_t max_high_water; 4767 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4768 ixgbe_fc_none, 4769 ixgbe_fc_rx_pause, 4770 ixgbe_fc_tx_pause, 4771 ixgbe_fc_full 4772 }; 4773 4774 PMD_INIT_FUNC_TRACE(); 4775 4776 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4777 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 4778 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4779 4780 /* 4781 * At least reserve one Ethernet frame for watermark 4782 * high_water/low_water in kilo bytes for ixgbe 4783 */ 4784 max_high_water = (rx_buf_size - 4785 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4786 if ((fc_conf->high_water > max_high_water) || 4787 (fc_conf->high_water < fc_conf->low_water)) { 4788 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4789 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4790 return -EINVAL; 4791 } 4792 4793 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 4794 hw->fc.pause_time = fc_conf->pause_time; 4795 hw->fc.high_water[0] = fc_conf->high_water; 4796 hw->fc.low_water[0] = fc_conf->low_water; 4797 hw->fc.send_xon = fc_conf->send_xon; 4798 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 4799 adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 4800 4801 err = ixgbe_flow_ctrl_enable(dev, hw); 4802 if (err < 0) { 4803 PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err); 4804 return -EIO; 4805 } 4806 return err; 4807 } 4808 4809 /** 4810 * ixgbe_pfc_enable_generic - Enable flow control 4811 * @hw: pointer to hardware structure 4812 * @tc_num: traffic class number 4813 * Enable flow control according to the current settings. 
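 * This path is driven from the application through the generic PFC API; a
 * sketch with purely illustrative values ("port_id" is hypothetical, water
 * marks are in KB):
 *
 *   struct rte_eth_pfc_conf pfc = {
 *       .fc = {
 *           .mode = RTE_ETH_FC_FULL,
 *           .high_water = 0x80,
 *           .low_water = 0x40,
 *           .pause_time = 0x680,
 *           .send_xon = 1,
 *       },
 *       .priority = 0,
 *   };
 *
 *   rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc);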
4814 */ 4815 static int 4816 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) 4817 { 4818 int ret_val = 0; 4819 uint32_t mflcn_reg, fccfg_reg; 4820 uint32_t reg; 4821 uint32_t fcrtl, fcrth; 4822 uint8_t i; 4823 uint8_t nb_rx_en; 4824 4825 /* Validate the water mark configuration */ 4826 if (!hw->fc.pause_time) { 4827 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4828 goto out; 4829 } 4830 4831 /* Low water mark of zero causes XOFF floods */ 4832 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 4833 /* High/Low water can not be 0 */ 4834 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { 4835 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4836 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4837 goto out; 4838 } 4839 4840 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { 4841 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4842 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4843 goto out; 4844 } 4845 } 4846 /* Negotiate the fc mode to use */ 4847 ixgbe_fc_autoneg(hw); 4848 4849 /* Disable any previous flow control settings */ 4850 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4851 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); 4852 4853 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4854 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 4855 4856 switch (hw->fc.current_mode) { 4857 case ixgbe_fc_none: 4858 /* 4859 * If the count of enabled RX Priority Flow control >1, 4860 * and the TX pause can not be disabled 4861 */ 4862 nb_rx_en = 0; 4863 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4864 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4865 if (reg & IXGBE_FCRTH_FCEN) 4866 nb_rx_en++; 4867 } 4868 if (nb_rx_en > 1) 4869 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4870 break; 4871 case ixgbe_fc_rx_pause: 4872 /* 4873 * Rx Flow control is enabled and Tx Flow control is 4874 * disabled by software override. Since there really 4875 * isn't a way to advertise that we are capable of RX 4876 * Pause ONLY, we will advertise that we support both 4877 * symmetric and asymmetric Rx PAUSE. Later, we will 4878 * disable the adapter's ability to send PAUSE frames. 4879 */ 4880 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4881 /* 4882 * If the count of enabled RX Priority Flow control >1, 4883 * and the TX pause can not be disabled 4884 */ 4885 nb_rx_en = 0; 4886 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4887 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4888 if (reg & IXGBE_FCRTH_FCEN) 4889 nb_rx_en++; 4890 } 4891 if (nb_rx_en > 1) 4892 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4893 break; 4894 case ixgbe_fc_tx_pause: 4895 /* 4896 * Tx Flow control is enabled, and Rx Flow control is 4897 * disabled by software override. 4898 */ 4899 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4900 break; 4901 case ixgbe_fc_full: 4902 /* Flow control (both Rx and Tx) is enabled by SW override. */ 4903 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4904 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4905 break; 4906 default: 4907 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); 4908 ret_val = IXGBE_ERR_CONFIG; 4909 goto out; 4910 } 4911 4912 /* Set 802.3x based flow control settings. */ 4913 mflcn_reg |= IXGBE_MFLCN_DPF; 4914 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 4915 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 4916 4917 /* Set up and enable Rx high/low water mark thresholds, enable XON. 
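 * The water marks are kept in 1 KB units, so they are shifted left by 10
 * (multiplied by 1024) to form the byte thresholds written to FCRTL/FCRTH;
 * e.g. a high_water of 0x80 programs 0x80 << 10 = 131072 bytes before the
 * FCEN enable bit is OR-ed in.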
*/ 4918 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 4919 hw->fc.high_water[tc_num]) { 4920 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 4921 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 4922 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 4923 } else { 4924 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 4925 /* 4926 * In order to prevent Tx hangs when the internal Tx 4927 * switch is enabled we must set the high water mark 4928 * to the maximum FCRTH value. This allows the Tx 4929 * switch to function even under heavy Rx workloads. 4930 */ 4931 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 4932 } 4933 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 4934 4935 /* Configure pause time (2 TCs per register) */ 4936 reg = hw->fc.pause_time * 0x00010001; 4937 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 4938 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 4939 4940 /* Configure flow control refresh threshold value */ 4941 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 4942 4943 out: 4944 return ret_val; 4945 } 4946 4947 static int 4948 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 4949 { 4950 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4951 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 4952 4953 if (hw->mac.type != ixgbe_mac_82598EB) { 4954 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 4955 } 4956 return ret_val; 4957 } 4958 4959 static int 4960 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 4961 { 4962 int err; 4963 uint32_t rx_buf_size; 4964 uint32_t max_high_water; 4965 uint8_t tc_num; 4966 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 4967 struct ixgbe_hw *hw = 4968 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4969 struct ixgbe_dcb_config *dcb_config = 4970 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 4971 4972 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4973 ixgbe_fc_none, 4974 ixgbe_fc_rx_pause, 4975 ixgbe_fc_tx_pause, 4976 ixgbe_fc_full 4977 }; 4978 4979 PMD_INIT_FUNC_TRACE(); 4980 4981 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 4982 tc_num = map[pfc_conf->priority]; 4983 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 4984 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4985 /* 4986 * At least reserve one Ethernet frame for watermark 4987 * high_water/low_water in kilo bytes for ixgbe 4988 */ 4989 max_high_water = (rx_buf_size - 4990 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4991 if ((pfc_conf->fc.high_water > max_high_water) || 4992 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 4993 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4994 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4995 return -EINVAL; 4996 } 4997 4998 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 4999 hw->fc.pause_time = pfc_conf->fc.pause_time; 5000 hw->fc.send_xon = pfc_conf->fc.send_xon; 5001 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 5002 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 5003 5004 err = ixgbe_dcb_pfc_enable(dev, tc_num); 5005 5006 /* Not negotiated is not an error case */ 5007 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 5008 return 0; 5009 5010 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 5011 return -EIO; 5012 } 5013 5014 static int 5015 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 5016 struct rte_eth_rss_reta_entry64 *reta_conf, 5017 uint16_t 
reta_size) 5018 { 5019 uint16_t i, sp_reta_size; 5020 uint8_t j, mask; 5021 uint32_t reta, r; 5022 uint16_t idx, shift; 5023 struct ixgbe_adapter *adapter = dev->data->dev_private; 5024 struct rte_eth_dev_data *dev_data = dev->data; 5025 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5026 uint32_t reta_reg; 5027 5028 PMD_INIT_FUNC_TRACE(); 5029 5030 if (!dev_data->dev_started) { 5031 PMD_DRV_LOG(ERR, 5032 "port %d must be started before rss reta update", 5033 dev_data->port_id); 5034 return -EIO; 5035 } 5036 5037 if (!ixgbe_rss_update_sp(hw->mac.type)) { 5038 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 5039 "NIC."); 5040 return -ENOTSUP; 5041 } 5042 5043 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5044 if (reta_size != sp_reta_size) { 5045 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5046 "(%d) doesn't match the number hardware can supported " 5047 "(%d)", reta_size, sp_reta_size); 5048 return -EINVAL; 5049 } 5050 5051 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5052 idx = i / RTE_ETH_RETA_GROUP_SIZE; 5053 shift = i % RTE_ETH_RETA_GROUP_SIZE; 5054 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5055 IXGBE_4_BIT_MASK); 5056 if (!mask) 5057 continue; 5058 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5059 if (mask == IXGBE_4_BIT_MASK) 5060 r = 0; 5061 else 5062 r = IXGBE_READ_REG(hw, reta_reg); 5063 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5064 if (mask & (0x1 << j)) 5065 reta |= reta_conf[idx].reta[shift + j] << 5066 (CHAR_BIT * j); 5067 else 5068 reta |= r & (IXGBE_8_BIT_MASK << 5069 (CHAR_BIT * j)); 5070 } 5071 IXGBE_WRITE_REG(hw, reta_reg, reta); 5072 } 5073 adapter->rss_reta_updated = 1; 5074 5075 return 0; 5076 } 5077 5078 static int 5079 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 5080 struct rte_eth_rss_reta_entry64 *reta_conf, 5081 uint16_t reta_size) 5082 { 5083 uint16_t i, sp_reta_size; 5084 uint8_t j, mask; 5085 uint32_t reta; 5086 uint16_t idx, shift; 5087 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5088 uint32_t reta_reg; 5089 5090 PMD_INIT_FUNC_TRACE(); 5091 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5092 if (reta_size != sp_reta_size) { 5093 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5094 "(%d) doesn't match the number hardware can supported " 5095 "(%d)", reta_size, sp_reta_size); 5096 return -EINVAL; 5097 } 5098 5099 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5100 idx = i / RTE_ETH_RETA_GROUP_SIZE; 5101 shift = i % RTE_ETH_RETA_GROUP_SIZE; 5102 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5103 IXGBE_4_BIT_MASK); 5104 if (!mask) 5105 continue; 5106 5107 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5108 reta = IXGBE_READ_REG(hw, reta_reg); 5109 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5110 if (mask & (0x1 << j)) 5111 reta_conf[idx].reta[shift + j] = 5112 ((reta >> (CHAR_BIT * j)) & 5113 IXGBE_8_BIT_MASK); 5114 } 5115 } 5116 5117 return 0; 5118 } 5119 5120 static int 5121 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 5122 uint32_t index, uint32_t pool) 5123 { 5124 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5125 uint32_t enable_addr = 1; 5126 5127 return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, 5128 pool, enable_addr); 5129 } 5130 5131 static void 5132 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 5133 { 5134 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5135 5136 ixgbe_clear_rar(hw, index); 5137 } 5138 5139 static int 
5140 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 5141 { 5142 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5143 5144 ixgbe_remove_rar(dev, 0); 5145 ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); 5146 5147 return 0; 5148 } 5149 5150 static bool 5151 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 5152 { 5153 if (strcmp(dev->device->driver->name, drv->driver.name)) 5154 return false; 5155 5156 return true; 5157 } 5158 5159 bool 5160 is_ixgbe_supported(struct rte_eth_dev *dev) 5161 { 5162 return is_device_supported(dev, &rte_ixgbe_pmd); 5163 } 5164 5165 static int 5166 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 5167 { 5168 uint32_t hlreg0; 5169 uint32_t maxfrs; 5170 struct ixgbe_hw *hw; 5171 struct rte_eth_dev_info dev_info; 5172 uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; 5173 int ret; 5174 5175 ret = ixgbe_dev_info_get(dev, &dev_info); 5176 if (ret != 0) 5177 return ret; 5178 5179 /* check that mtu is within the allowed range */ 5180 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 5181 return -EINVAL; 5182 5183 /* If device is started, refuse mtu that requires the support of 5184 * scattered packets when this feature has not been enabled before. 5185 */ 5186 if (dev->data->dev_started && !dev->data->scattered_rx && 5187 frame_size + 2 * RTE_VLAN_HLEN > 5188 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 5189 PMD_INIT_LOG(ERR, "Stop port first."); 5190 return -EINVAL; 5191 } 5192 5193 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5194 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 5195 5196 /* switch to jumbo mode if needed */ 5197 if (mtu > RTE_ETHER_MTU) 5198 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 5199 else 5200 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 5201 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 5202 5203 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 5204 maxfrs &= 0x0000FFFF; 5205 maxfrs |= (frame_size << 16); 5206 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 5207 5208 return 0; 5209 } 5210 5211 /* 5212 * Virtual Function operations 5213 */ 5214 static void 5215 ixgbevf_intr_disable(struct rte_eth_dev *dev) 5216 { 5217 struct ixgbe_interrupt *intr = 5218 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5219 struct ixgbe_hw *hw = 5220 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5221 5222 PMD_INIT_FUNC_TRACE(); 5223 5224 /* Clear interrupt mask to stop from interrupts being generated */ 5225 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 5226 5227 IXGBE_WRITE_FLUSH(hw); 5228 5229 /* Clear mask value. */ 5230 intr->mask = 0; 5231 } 5232 5233 static void 5234 ixgbevf_intr_enable(struct rte_eth_dev *dev) 5235 { 5236 struct ixgbe_interrupt *intr = 5237 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5238 struct ixgbe_hw *hw = 5239 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5240 5241 PMD_INIT_FUNC_TRACE(); 5242 5243 /* VF enable interrupt autoclean */ 5244 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 5245 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 5246 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 5247 5248 IXGBE_WRITE_FLUSH(hw); 5249 5250 /* Save IXGBE_VTEIMS value to mask. 
*/ 5251 intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; 5252 } 5253 5254 static int 5255 ixgbevf_dev_configure(struct rte_eth_dev *dev) 5256 { 5257 struct rte_eth_conf *conf = &dev->data->dev_conf; 5258 struct ixgbe_adapter *adapter = dev->data->dev_private; 5259 5260 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 5261 dev->data->port_id); 5262 5263 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 5264 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 5265 5266 /* 5267 * VF has no ability to enable/disable HW CRC 5268 * Keep the persistent behavior the same as Host PF 5269 */ 5270 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 5271 if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) { 5272 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 5273 conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC; 5274 } 5275 #else 5276 if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) { 5277 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 5278 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; 5279 } 5280 #endif 5281 5282 /* 5283 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 5284 * allocation or vector Rx preconditions we will reset it. 5285 */ 5286 adapter->rx_bulk_alloc_allowed = true; 5287 adapter->rx_vec_allowed = true; 5288 5289 return 0; 5290 } 5291 5292 static int 5293 ixgbevf_dev_start(struct rte_eth_dev *dev) 5294 { 5295 struct ixgbe_hw *hw = 5296 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5297 uint32_t intr_vector = 0; 5298 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5299 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5300 5301 int err, mask = 0; 5302 5303 PMD_INIT_FUNC_TRACE(); 5304 5305 /* Stop the link setup handler before resetting the HW. */ 5306 ixgbe_dev_wait_setup_link_complete(dev, 0); 5307 5308 err = hw->mac.ops.reset_hw(hw); 5309 5310 /** 5311 * In this case, reuses the MAC address assigned by VF 5312 * initialization. 5313 */ 5314 if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) { 5315 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); 5316 return err; 5317 } 5318 5319 hw->mac.get_link_status = true; 5320 5321 /* negotiate mailbox API version to use with the PF. 
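 * Editorial note, not in the original comment: ixgbevf_negotiate_api()
 * proposes mailbox API versions to the PF from newest to oldest and keeps
 * the first one the PF acknowledges; the exact version list lives in the
 * shared base code, so treat this description as an assumption rather than
 * a guarantee.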
*/ 5322 ixgbevf_negotiate_api(hw); 5323 5324 ixgbevf_dev_tx_init(dev); 5325 5326 /* This can fail when allocating mbufs for descriptor rings */ 5327 err = ixgbevf_dev_rx_init(dev); 5328 if (err) { 5329 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); 5330 ixgbe_dev_clear_queues(dev); 5331 return err; 5332 } 5333 5334 /* Set vfta */ 5335 ixgbevf_set_vfta_all(dev, 1); 5336 5337 /* Set HW strip */ 5338 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | 5339 RTE_ETH_VLAN_EXTEND_MASK; 5340 err = ixgbevf_vlan_offload_config(dev, mask); 5341 if (err) { 5342 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); 5343 ixgbe_dev_clear_queues(dev); 5344 return err; 5345 } 5346 5347 ixgbevf_dev_rxtx_start(dev); 5348 5349 /* check and configure queue intr-vector mapping */ 5350 if (rte_intr_cap_multiple(intr_handle) && 5351 dev->data->dev_conf.intr_conf.rxq) { 5352 /* According to datasheet, only vector 0/1/2 can be used, 5353 * now only one vector is used for Rx queue 5354 */ 5355 intr_vector = 1; 5356 if (rte_intr_efd_enable(intr_handle, intr_vector)) { 5357 ixgbe_dev_clear_queues(dev); 5358 return -1; 5359 } 5360 } 5361 5362 if (rte_intr_dp_is_en(intr_handle)) { 5363 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 5364 dev->data->nb_rx_queues)) { 5365 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 5366 " intr_vec", dev->data->nb_rx_queues); 5367 ixgbe_dev_clear_queues(dev); 5368 return -ENOMEM; 5369 } 5370 } 5371 ixgbevf_configure_msix(dev); 5372 5373 /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt 5374 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ). 5375 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( ) 5376 * is not cleared, it will fail when following rte_intr_enable( ) tries 5377 * to map Rx queue interrupt to other VFIO vectors. 5378 * So clear uio/vfio intr/evevnfd first to avoid failure. 5379 */ 5380 rte_intr_disable(intr_handle); 5381 5382 rte_intr_enable(intr_handle); 5383 5384 /* Re-enable interrupt for VF */ 5385 ixgbevf_intr_enable(dev); 5386 5387 /* 5388 * Update link status right before return, because it may 5389 * start link configuration process in a separate thread. 
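 * Editorial addition, not from the original comment: because the link may
 * still be settling when this function returns, an application should poll
 * the link rather than assume it is up right after rte_eth_dev_start(),
 * for example:
 *
 *     struct rte_eth_link link;
 *     rte_eth_link_get_nowait(port_id, &link);
 *     while (link.link_status == RTE_ETH_LINK_DOWN) {
 *         rte_delay_ms(100);
 *         rte_eth_link_get_nowait(port_id, &link);
 *     }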
5390 */ 5391 ixgbevf_dev_link_update(dev, 0); 5392 5393 hw->adapter_stopped = false; 5394 5395 return 0; 5396 } 5397 5398 static int 5399 ixgbevf_dev_stop(struct rte_eth_dev *dev) 5400 { 5401 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5402 struct ixgbe_adapter *adapter = dev->data->dev_private; 5403 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5404 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5405 5406 if (hw->adapter_stopped) 5407 return 0; 5408 5409 PMD_INIT_FUNC_TRACE(); 5410 5411 ixgbe_dev_wait_setup_link_complete(dev, 0); 5412 5413 ixgbevf_intr_disable(dev); 5414 5415 dev->data->dev_started = 0; 5416 hw->adapter_stopped = 1; 5417 ixgbe_stop_adapter(hw); 5418 5419 /* 5420 * Clear what we set, but we still keep shadow_vfta to 5421 * restore after device starts 5422 */ 5423 ixgbevf_set_vfta_all(dev, 0); 5424 5425 /* Clear stored conf */ 5426 dev->data->scattered_rx = 0; 5427 5428 ixgbe_dev_clear_queues(dev); 5429 5430 /* Clean datapath event and queue/vec mapping */ 5431 rte_intr_efd_disable(intr_handle); 5432 rte_intr_vec_list_free(intr_handle); 5433 5434 adapter->rss_reta_updated = 0; 5435 5436 return 0; 5437 } 5438 5439 static int 5440 ixgbevf_dev_close(struct rte_eth_dev *dev) 5441 { 5442 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5443 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5444 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5445 int ret; 5446 5447 PMD_INIT_FUNC_TRACE(); 5448 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5449 return 0; 5450 5451 ixgbe_reset_hw(hw); 5452 5453 ret = ixgbevf_dev_stop(dev); 5454 5455 ixgbe_dev_free_queues(dev); 5456 5457 /** 5458 * Remove the VF MAC address ro ensure 5459 * that the VF traffic goes to the PF 5460 * after stop, close and detach of the VF 5461 **/ 5462 ixgbevf_remove_mac_addr(dev, 0); 5463 5464 rte_intr_disable(intr_handle); 5465 rte_intr_callback_unregister(intr_handle, 5466 ixgbevf_dev_interrupt_handler, dev); 5467 5468 return ret; 5469 } 5470 5471 /* 5472 * Reset VF device 5473 */ 5474 static int 5475 ixgbevf_dev_reset(struct rte_eth_dev *dev) 5476 { 5477 int ret; 5478 5479 ret = eth_ixgbevf_dev_uninit(dev); 5480 if (ret) 5481 return ret; 5482 5483 ret = eth_ixgbevf_dev_init(dev); 5484 5485 return ret; 5486 } 5487 5488 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 5489 { 5490 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5491 struct ixgbe_vfta *shadow_vfta = 5492 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5493 int i = 0, j = 0, vfta = 0, mask = 1; 5494 5495 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 5496 vfta = shadow_vfta->vfta[i]; 5497 if (vfta) { 5498 mask = 1; 5499 for (j = 0; j < 32; j++) { 5500 if (vfta & mask) 5501 ixgbe_set_vfta(hw, (i<<5)+j, 0, 5502 on, false); 5503 mask <<= 1; 5504 } 5505 } 5506 } 5507 5508 } 5509 5510 static int 5511 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 5512 { 5513 struct ixgbe_hw *hw = 5514 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5515 struct ixgbe_vfta *shadow_vfta = 5516 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5517 uint32_t vid_idx = 0; 5518 uint32_t vid_bit = 0; 5519 int ret = 0; 5520 5521 PMD_INIT_FUNC_TRACE(); 5522 5523 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 5524 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 5525 if (ret) { 5526 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 5527 return ret; 5528 } 5529 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 
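	/*
	 * Editorial note with a worked example (not from the original source):
	 * the shadow VFTA is an array of IXGBE_VFTA_SIZE 32-bit words, so a
	 * VLAN id selects word (vlan_id >> 5) and bit (vlan_id & 0x1F).
	 * For vlan_id = 100:
	 *     vid_idx = (100 >> 5) & 0x7F = 3
	 *     vid_bit = 1 << (100 & 0x1F) = 1 << 4 = 0x10
	 */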
5530 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 5531 5532 /* Save what we set and retore it after device reset */ 5533 if (on) 5534 shadow_vfta->vfta[vid_idx] |= vid_bit; 5535 else 5536 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 5537 5538 return 0; 5539 } 5540 5541 static void 5542 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 5543 { 5544 struct ixgbe_hw *hw = 5545 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5546 uint32_t ctrl; 5547 5548 PMD_INIT_FUNC_TRACE(); 5549 5550 if (queue >= hw->mac.max_rx_queues) 5551 return; 5552 5553 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 5554 if (on) 5555 ctrl |= IXGBE_RXDCTL_VME; 5556 else 5557 ctrl &= ~IXGBE_RXDCTL_VME; 5558 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 5559 5560 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 5561 } 5562 5563 static int 5564 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 5565 { 5566 struct ixgbe_rx_queue *rxq; 5567 uint16_t i; 5568 int on = 0; 5569 5570 /* VF function only support hw strip feature, others are not support */ 5571 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 5572 for (i = 0; i < dev->data->nb_rx_queues; i++) { 5573 rxq = dev->data->rx_queues[i]; 5574 on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 5575 ixgbevf_vlan_strip_queue_set(dev, i, on); 5576 } 5577 } 5578 5579 return 0; 5580 } 5581 5582 static int 5583 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 5584 { 5585 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 5586 5587 ixgbevf_vlan_offload_config(dev, mask); 5588 5589 return 0; 5590 } 5591 5592 int 5593 ixgbe_vt_check(struct ixgbe_hw *hw) 5594 { 5595 uint32_t reg_val; 5596 5597 /* if Virtualization Technology is enabled */ 5598 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 5599 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 5600 PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); 5601 return -1; 5602 } 5603 5604 return 0; 5605 } 5606 5607 static uint32_t 5608 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) 5609 { 5610 uint32_t vector = 0; 5611 5612 switch (hw->mac.mc_filter_type) { 5613 case 0: /* use bits [47:36] of the address */ 5614 vector = ((uc_addr->addr_bytes[4] >> 4) | 5615 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 5616 break; 5617 case 1: /* use bits [46:35] of the address */ 5618 vector = ((uc_addr->addr_bytes[4] >> 3) | 5619 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 5620 break; 5621 case 2: /* use bits [45:34] of the address */ 5622 vector = ((uc_addr->addr_bytes[4] >> 2) | 5623 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 5624 break; 5625 case 3: /* use bits [43:32] of the address */ 5626 vector = ((uc_addr->addr_bytes[4]) | 5627 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 5628 break; 5629 default: /* Invalid mc_filter_type */ 5630 break; 5631 } 5632 5633 /* vector can only be 12-bits or boundary will be exceeded */ 5634 vector &= 0xFFF; 5635 return vector; 5636 } 5637 5638 static int 5639 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, 5640 struct rte_ether_addr *mac_addr, uint8_t on) 5641 { 5642 uint32_t vector; 5643 uint32_t uta_idx; 5644 uint32_t reg_val; 5645 uint32_t uta_shift; 5646 uint32_t rc; 5647 const uint32_t ixgbe_uta_idx_mask = 0x7F; 5648 const uint32_t ixgbe_uta_bit_shift = 5; 5649 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 5650 const uint32_t bit1 = 0x1; 5651 5652 struct ixgbe_hw *hw = 5653 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5654 struct ixgbe_uta_info *uta_info = 5655 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5656 5657 /* 
The UTA table only exists on 82599 hardware and newer */ 5658 if (hw->mac.type < ixgbe_mac_82599EB) 5659 return -ENOTSUP; 5660 5661 vector = ixgbe_uta_vector(hw, mac_addr); 5662 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 5663 uta_shift = vector & ixgbe_uta_bit_mask; 5664 5665 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 5666 if (rc == on) 5667 return 0; 5668 5669 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 5670 if (on) { 5671 uta_info->uta_in_use++; 5672 reg_val |= (bit1 << uta_shift); 5673 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 5674 } else { 5675 uta_info->uta_in_use--; 5676 reg_val &= ~(bit1 << uta_shift); 5677 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 5678 } 5679 5680 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 5681 5682 if (uta_info->uta_in_use > 0) 5683 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 5684 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 5685 else 5686 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 5687 5688 return 0; 5689 } 5690 5691 static int 5692 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 5693 { 5694 int i; 5695 struct ixgbe_hw *hw = 5696 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5697 struct ixgbe_uta_info *uta_info = 5698 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5699 5700 /* The UTA table only exists on 82599 hardware and newer */ 5701 if (hw->mac.type < ixgbe_mac_82599EB) 5702 return -ENOTSUP; 5703 5704 if (on) { 5705 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5706 uta_info->uta_shadow[i] = ~0; 5707 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 5708 } 5709 } else { 5710 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5711 uta_info->uta_shadow[i] = 0; 5712 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 5713 } 5714 } 5715 return 0; 5716 5717 } 5718 5719 uint32_t 5720 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 5721 { 5722 uint32_t new_val = orig_val; 5723 5724 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) 5725 new_val |= IXGBE_VMOLR_AUPE; 5726 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC) 5727 new_val |= IXGBE_VMOLR_ROMPE; 5728 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC) 5729 new_val |= IXGBE_VMOLR_ROPE; 5730 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST) 5731 new_val |= IXGBE_VMOLR_BAM; 5732 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST) 5733 new_val |= IXGBE_VMOLR_MPE; 5734 5735 return new_val; 5736 } 5737 5738 static int 5739 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5740 { 5741 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5742 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5743 struct ixgbe_interrupt *intr = 5744 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5745 struct ixgbe_hw *hw = 5746 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5747 uint32_t vec = IXGBE_MISC_VEC_ID; 5748 5749 if (rte_intr_allow_others(intr_handle)) 5750 vec = IXGBE_RX_VEC_START; 5751 intr->mask |= (1 << vec); 5752 RTE_SET_USED(queue_id); 5753 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5754 5755 rte_intr_ack(intr_handle); 5756 5757 return 0; 5758 } 5759 5760 static int 5761 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5762 { 5763 struct ixgbe_interrupt *intr = 5764 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5765 struct ixgbe_hw *hw = 5766 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5767 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5768 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5769 uint32_t vec = 
IXGBE_MISC_VEC_ID; 5770 5771 if (rte_intr_allow_others(intr_handle)) 5772 vec = IXGBE_RX_VEC_START; 5773 intr->mask &= ~(1 << vec); 5774 RTE_SET_USED(queue_id); 5775 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5776 5777 return 0; 5778 } 5779 5780 static int 5781 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5782 { 5783 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5784 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5785 uint32_t mask; 5786 struct ixgbe_hw *hw = 5787 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5788 struct ixgbe_interrupt *intr = 5789 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5790 5791 if (queue_id < 16) { 5792 ixgbe_disable_intr(hw); 5793 intr->mask |= (1 << queue_id); 5794 ixgbe_enable_intr(dev); 5795 } else if (queue_id < 32) { 5796 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5797 mask &= (1 << queue_id); 5798 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5799 } else if (queue_id < 64) { 5800 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5801 mask &= (1 << (queue_id - 32)); 5802 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5803 } 5804 rte_intr_ack(intr_handle); 5805 5806 return 0; 5807 } 5808 5809 static int 5810 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5811 { 5812 uint32_t mask; 5813 struct ixgbe_hw *hw = 5814 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5815 struct ixgbe_interrupt *intr = 5816 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5817 5818 if (queue_id < 16) { 5819 ixgbe_disable_intr(hw); 5820 intr->mask &= ~(1 << queue_id); 5821 ixgbe_enable_intr(dev); 5822 } else if (queue_id < 32) { 5823 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5824 mask &= ~(1 << queue_id); 5825 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5826 } else if (queue_id < 64) { 5827 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5828 mask &= ~(1 << (queue_id - 32)); 5829 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5830 } 5831 5832 return 0; 5833 } 5834 5835 static void 5836 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5837 uint8_t queue, uint8_t msix_vector) 5838 { 5839 uint32_t tmp, idx; 5840 5841 if (direction == -1) { 5842 /* other causes */ 5843 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5844 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 5845 tmp &= ~0xFF; 5846 tmp |= msix_vector; 5847 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 5848 } else { 5849 /* rx or tx cause */ 5850 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5851 idx = ((16 * (queue & 1)) + (8 * direction)); 5852 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 5853 tmp &= ~(0xFF << idx); 5854 tmp |= (msix_vector << idx); 5855 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 5856 } 5857 } 5858 5859 /** 5860 * set the IVAR registers, mapping interrupt causes to vectors 5861 * @param hw 5862 * pointer to ixgbe_hw struct 5863 * @direction 5864 * 0 for Rx, 1 for Tx, -1 for other causes 5865 * @queue 5866 * queue to map the corresponding interrupt to 5867 * @msix_vector 5868 * the vector to map to the corresponding queue 5869 */ 5870 static void 5871 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5872 uint8_t queue, uint8_t msix_vector) 5873 { 5874 uint32_t tmp, idx; 5875 5876 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5877 if (hw->mac.type == ixgbe_mac_82598EB) { 5878 if (direction == -1) 5879 direction = 0; 5880 idx = (((direction * 64) + queue) >> 2) & 0x1F; 5881 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 5882 tmp &= ~(0xFF << (8 * (queue & 0x3))); 5883 tmp |= (msix_vector << (8 * (queue & 0x3))); 5884 
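	/*
	 * Editorial note with a worked example (not from the original source):
	 * on 82598 each 32-bit IVAR register holds four 8-bit entries, hence
	 * the (... >> 2) register index and the 8 * (queue & 0x3) byte lane.
	 * For an Rx cause (direction = 0) on queue 5:
	 *     idx = ((0 * 64 + 5) >> 2) & 0x1F = 1
	 *     byte lane = 8 * (5 & 0x3) = 8, i.e. bits 15:8 of IVAR(1)
	 */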
IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 5885 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 5886 (hw->mac.type == ixgbe_mac_X540) || 5887 (hw->mac.type == ixgbe_mac_X550) || 5888 (hw->mac.type == ixgbe_mac_X550EM_x)) { 5889 if (direction == -1) { 5890 /* other causes */ 5891 idx = ((queue & 1) * 8); 5892 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5893 tmp &= ~(0xFF << idx); 5894 tmp |= (msix_vector << idx); 5895 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 5896 } else { 5897 /* rx or tx causes */ 5898 idx = ((16 * (queue & 1)) + (8 * direction)); 5899 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 5900 tmp &= ~(0xFF << idx); 5901 tmp |= (msix_vector << idx); 5902 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 5903 } 5904 } 5905 } 5906 5907 static void 5908 ixgbevf_configure_msix(struct rte_eth_dev *dev) 5909 { 5910 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5911 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5912 struct ixgbe_hw *hw = 5913 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5914 uint32_t q_idx; 5915 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 5916 uint32_t base = IXGBE_MISC_VEC_ID; 5917 5918 /* Configure VF other cause ivar */ 5919 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 5920 5921 /* won't configure msix register if no mapping is done 5922 * between intr vector and event fd. 5923 */ 5924 if (!rte_intr_dp_is_en(intr_handle)) 5925 return; 5926 5927 if (rte_intr_allow_others(intr_handle)) { 5928 base = IXGBE_RX_VEC_START; 5929 vector_idx = IXGBE_RX_VEC_START; 5930 } 5931 5932 /* Configure all RX queues of VF */ 5933 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 5934 /* Force all queue use vector 0, 5935 * as IXGBE_VF_MAXMSIVECTOR = 1 5936 */ 5937 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 5938 rte_intr_vec_list_index_set(intr_handle, q_idx, 5939 vector_idx); 5940 if (vector_idx < base + rte_intr_nb_efd_get(intr_handle) 5941 - 1) 5942 vector_idx++; 5943 } 5944 5945 /* As RX queue setting above show, all queues use the vector 0. 5946 * Set only the ITR value of IXGBE_MISC_VEC_ID. 5947 */ 5948 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), 5949 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 5950 | IXGBE_EITR_CNT_WDIS); 5951 } 5952 5953 /** 5954 * Sets up the hardware to properly generate MSI-X interrupts 5955 * @hw 5956 * board private structure 5957 */ 5958 static void 5959 ixgbe_configure_msix(struct rte_eth_dev *dev) 5960 { 5961 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5962 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5963 struct ixgbe_hw *hw = 5964 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5965 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 5966 uint32_t vec = IXGBE_MISC_VEC_ID; 5967 uint32_t mask; 5968 uint32_t gpie; 5969 5970 /* won't configure msix register if no mapping is done 5971 * between intr vector and event fd 5972 * but if misx has been enabled already, need to configure 5973 * auto clean, auto mask and throttling. 
5974 */ 5975 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5976 if (!rte_intr_dp_is_en(intr_handle) && 5977 !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) 5978 return; 5979 5980 if (rte_intr_allow_others(intr_handle)) 5981 vec = base = IXGBE_RX_VEC_START; 5982 5983 /* setup GPIE for MSI-x mode */ 5984 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5985 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 5986 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 5987 /* auto clearing and auto setting corresponding bits in EIMS 5988 * when MSI-X interrupt is triggered 5989 */ 5990 if (hw->mac.type == ixgbe_mac_82598EB) { 5991 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 5992 } else { 5993 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 5994 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 5995 } 5996 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 5997 5998 /* Populate the IVAR table and set the ITR values to the 5999 * corresponding register. 6000 */ 6001 if (rte_intr_dp_is_en(intr_handle)) { 6002 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 6003 queue_id++) { 6004 /* by default, 1:1 mapping */ 6005 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 6006 rte_intr_vec_list_index_set(intr_handle, 6007 queue_id, vec); 6008 if (vec < base + rte_intr_nb_efd_get(intr_handle) 6009 - 1) 6010 vec++; 6011 } 6012 6013 switch (hw->mac.type) { 6014 case ixgbe_mac_82598EB: 6015 ixgbe_set_ivar_map(hw, -1, 6016 IXGBE_IVAR_OTHER_CAUSES_INDEX, 6017 IXGBE_MISC_VEC_ID); 6018 break; 6019 case ixgbe_mac_82599EB: 6020 case ixgbe_mac_X540: 6021 case ixgbe_mac_X550: 6022 case ixgbe_mac_X550EM_x: 6023 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 6024 break; 6025 default: 6026 break; 6027 } 6028 } 6029 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 6030 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6031 | IXGBE_EITR_CNT_WDIS); 6032 6033 /* set up to autoclear timer, and the vectors */ 6034 mask = IXGBE_EIMS_ENABLE_MASK; 6035 mask &= ~(IXGBE_EIMS_OTHER | 6036 IXGBE_EIMS_MAILBOX | 6037 IXGBE_EIMS_LSC); 6038 6039 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 6040 } 6041 6042 int 6043 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 6044 uint16_t queue_idx, uint16_t tx_rate) 6045 { 6046 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6047 uint32_t rf_dec, rf_int; 6048 uint32_t bcnrc_val; 6049 uint16_t link_speed = dev->data->dev_link.link_speed; 6050 6051 if (queue_idx >= hw->mac.max_tx_queues) 6052 return -EINVAL; 6053 6054 if (tx_rate != 0) { 6055 /* Calculate the rate factor values to set */ 6056 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 6057 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 6058 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 6059 6060 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 6061 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 6062 IXGBE_RTTBCNRC_RF_INT_MASK_M); 6063 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 6064 } else { 6065 bcnrc_val = 0; 6066 } 6067 6068 /* 6069 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 6070 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 6071 * set as 0x4. 
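 *
 * Editorial aside, not from the original comment: the RTTBCNRC value built
 * above encodes the limit as an integer/fractional divisor of the link
 * speed.  Assuming IXGBE_RTTBCNRC_RF_INT_SHIFT is 14, a 10G link limited
 * to tx_rate = 3000 Mb/s gives
 *     rf_int = 10000 / 3000 = 3
 *     rf_dec = ((10000 % 3000) << 14) / 3000 = 5461
 * i.e. an effective divisor of about 3.33.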
6072 */ 6073 if (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE) 6074 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME); 6075 else 6076 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_DEFAULT); 6077 6078 /* Set RTTBCNRC of queue X */ 6079 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 6080 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 6081 IXGBE_WRITE_FLUSH(hw); 6082 6083 return 0; 6084 } 6085 6086 static int 6087 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 6088 __rte_unused uint32_t index, 6089 __rte_unused uint32_t pool) 6090 { 6091 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6092 int diag; 6093 6094 /* 6095 * On a 82599 VF, adding again the same MAC addr is not an idempotent 6096 * operation. Trap this case to avoid exhausting the [very limited] 6097 * set of PF resources used to store VF MAC addresses. 6098 */ 6099 if (memcmp(hw->mac.perm_addr, mac_addr, 6100 sizeof(struct rte_ether_addr)) == 0) 6101 return -1; 6102 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6103 if (diag != 0) 6104 PMD_DRV_LOG(ERR, "Unable to add MAC address " 6105 RTE_ETHER_ADDR_PRT_FMT " - diag=%d", 6106 RTE_ETHER_ADDR_BYTES(mac_addr), diag); 6107 return diag; 6108 } 6109 6110 static void 6111 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 6112 { 6113 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6114 struct rte_ether_addr *perm_addr = 6115 (struct rte_ether_addr *)hw->mac.perm_addr; 6116 struct rte_ether_addr *mac_addr; 6117 uint32_t i; 6118 int diag; 6119 6120 /* 6121 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 6122 * not support the deletion of a given MAC address. 6123 * Instead, it imposes to delete all MAC addresses, then to add again 6124 * all MAC addresses with the exception of the one to be deleted. 6125 */ 6126 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 6127 6128 /* 6129 * Add again all MAC addresses, with the exception of the deleted one 6130 * and of the permanent MAC address. 
6131 */ 6132 for (i = 0, mac_addr = dev->data->mac_addrs; 6133 i < hw->mac.num_rar_entries; i++, mac_addr++) { 6134 /* Skip the deleted MAC address */ 6135 if (i == index) 6136 continue; 6137 /* Skip NULL MAC addresses */ 6138 if (rte_is_zero_ether_addr(mac_addr)) 6139 continue; 6140 /* Skip the permanent MAC address */ 6141 if (memcmp(perm_addr, mac_addr, 6142 sizeof(struct rte_ether_addr)) == 0) 6143 continue; 6144 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6145 if (diag != 0) 6146 PMD_DRV_LOG(ERR, 6147 "Adding again MAC address " 6148 RTE_ETHER_ADDR_PRT_FMT " failed " 6149 "diag=%d", RTE_ETHER_ADDR_BYTES(mac_addr), 6150 diag); 6151 } 6152 } 6153 6154 static int 6155 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 6156 struct rte_ether_addr *addr) 6157 { 6158 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6159 6160 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 6161 6162 return 0; 6163 } 6164 6165 int 6166 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 6167 struct rte_eth_syn_filter *filter, 6168 bool add) 6169 { 6170 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6171 struct ixgbe_filter_info *filter_info = 6172 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6173 uint32_t syn_info; 6174 uint32_t synqf; 6175 6176 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6177 return -EINVAL; 6178 6179 syn_info = filter_info->syn_info; 6180 6181 if (add) { 6182 if (syn_info & IXGBE_SYN_FILTER_ENABLE) 6183 return -EINVAL; 6184 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 6185 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 6186 6187 if (filter->hig_pri) 6188 synqf |= IXGBE_SYN_FILTER_SYNQFP; 6189 else 6190 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 6191 } else { 6192 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6193 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) 6194 return -ENOENT; 6195 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 6196 } 6197 6198 filter_info->syn_info = synqf; 6199 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 6200 IXGBE_WRITE_FLUSH(hw); 6201 return 0; 6202 } 6203 6204 6205 static inline enum ixgbe_5tuple_protocol 6206 convert_protocol_type(uint8_t protocol_value) 6207 { 6208 if (protocol_value == IPPROTO_TCP) 6209 return IXGBE_FILTER_PROTOCOL_TCP; 6210 else if (protocol_value == IPPROTO_UDP) 6211 return IXGBE_FILTER_PROTOCOL_UDP; 6212 else if (protocol_value == IPPROTO_SCTP) 6213 return IXGBE_FILTER_PROTOCOL_SCTP; 6214 else 6215 return IXGBE_FILTER_PROTOCOL_NONE; 6216 } 6217 6218 /* inject a 5-tuple filter to HW */ 6219 static inline void 6220 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, 6221 struct ixgbe_5tuple_filter *filter) 6222 { 6223 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6224 int i; 6225 uint32_t ftqf, sdpqf; 6226 uint32_t l34timir = 0; 6227 uint8_t mask = 0xff; 6228 6229 i = filter->index; 6230 6231 sdpqf = (uint32_t)(filter->filter_info.dst_port << 6232 IXGBE_SDPQF_DSTPORT_SHIFT); 6233 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 6234 6235 ftqf = (uint32_t)(filter->filter_info.proto & 6236 IXGBE_FTQF_PROTOCOL_MASK); 6237 ftqf |= (uint32_t)((filter->filter_info.priority & 6238 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 6239 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
*/ 6240 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 6241 if (filter->filter_info.dst_ip_mask == 0) 6242 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 6243 if (filter->filter_info.src_port_mask == 0) 6244 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 6245 if (filter->filter_info.dst_port_mask == 0) 6246 mask &= IXGBE_FTQF_DEST_PORT_MASK; 6247 if (filter->filter_info.proto_mask == 0) 6248 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 6249 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 6250 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 6251 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 6252 6253 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 6254 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 6255 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 6256 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 6257 6258 l34timir |= IXGBE_L34T_IMIR_RESERVE; 6259 l34timir |= (uint32_t)(filter->queue << 6260 IXGBE_L34T_IMIR_QUEUE_SHIFT); 6261 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 6262 } 6263 6264 /* 6265 * add a 5tuple filter 6266 * 6267 * @param 6268 * dev: Pointer to struct rte_eth_dev. 6269 * index: the index the filter allocates. 6270 * filter: pointer to the filter that will be added. 6271 * rx_queue: the queue id the filter assigned to. 6272 * 6273 * @return 6274 * - On success, zero. 6275 * - On failure, a negative value. 6276 */ 6277 static int 6278 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 6279 struct ixgbe_5tuple_filter *filter) 6280 { 6281 struct ixgbe_filter_info *filter_info = 6282 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6283 int i, idx, shift; 6284 6285 /* 6286 * look for an unused 5tuple filter index, 6287 * and insert the filter to list. 6288 */ 6289 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 6290 idx = i / (sizeof(uint32_t) * NBBY); 6291 shift = i % (sizeof(uint32_t) * NBBY); 6292 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 6293 filter_info->fivetuple_mask[idx] |= 1 << shift; 6294 filter->index = i; 6295 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 6296 filter, 6297 entries); 6298 break; 6299 } 6300 } 6301 if (i >= IXGBE_MAX_FTQF_FILTERS) { 6302 PMD_DRV_LOG(ERR, "5tuple filters are full."); 6303 return -ENOSYS; 6304 } 6305 6306 ixgbe_inject_5tuple_filter(dev, filter); 6307 6308 return 0; 6309 } 6310 6311 /* 6312 * remove a 5tuple filter 6313 * 6314 * @param 6315 * dev: Pointer to struct rte_eth_dev. 6316 * filter: the pointer of the filter will be removed. 
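 *
 * Editorial note, not from the original comment: besides unlinking and
 * freeing the software entry, the body also zeroes the matching
 * DAQF/SAQF/SDPQF/FTQF/L34T_IMIR registers so the hardware slot can be
 * reused by a later ixgbe_add_5tuple_filter() call.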
6317 */ 6318 static void 6319 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 6320 struct ixgbe_5tuple_filter *filter) 6321 { 6322 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6323 struct ixgbe_filter_info *filter_info = 6324 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6325 uint16_t index = filter->index; 6326 6327 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 6328 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 6329 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 6330 rte_free(filter); 6331 6332 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 6333 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 6334 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 6335 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 6336 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 6337 } 6338 6339 static int 6340 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 6341 { 6342 struct ixgbe_hw *hw; 6343 uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; 6344 struct rte_eth_dev_data *dev_data = dev->data; 6345 6346 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6347 6348 if (mtu < RTE_ETHER_MIN_MTU || max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) 6349 return -EINVAL; 6350 6351 /* If device is started, refuse mtu that requires the support of 6352 * scattered packets when this feature has not been enabled before. 6353 */ 6354 if (dev_data->dev_started && !dev_data->scattered_rx && 6355 (max_frame + 2 * RTE_VLAN_HLEN > 6356 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 6357 PMD_INIT_LOG(ERR, "Stop port first."); 6358 return -EINVAL; 6359 } 6360 6361 /* 6362 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 6363 * request of the version 2.0 of the mailbox API. 6364 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 6365 * of the mailbox API. 
6366 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers 6367 * prior to 3.11.33 which contains the following change: 6368 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 6369 */ 6370 if (ixgbevf_rlpml_set_vf(hw, max_frame)) 6371 return -EINVAL; 6372 6373 return 0; 6374 } 6375 6376 static inline struct ixgbe_5tuple_filter * 6377 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 6378 struct ixgbe_5tuple_filter_info *key) 6379 { 6380 struct ixgbe_5tuple_filter *it; 6381 6382 TAILQ_FOREACH(it, filter_list, entries) { 6383 if (memcmp(key, &it->filter_info, 6384 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 6385 return it; 6386 } 6387 } 6388 return NULL; 6389 } 6390 6391 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ 6392 static inline int 6393 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 6394 struct ixgbe_5tuple_filter_info *filter_info) 6395 { 6396 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 6397 filter->priority > IXGBE_5TUPLE_MAX_PRI || 6398 filter->priority < IXGBE_5TUPLE_MIN_PRI) 6399 return -EINVAL; 6400 6401 switch (filter->dst_ip_mask) { 6402 case UINT32_MAX: 6403 filter_info->dst_ip_mask = 0; 6404 filter_info->dst_ip = filter->dst_ip; 6405 break; 6406 case 0: 6407 filter_info->dst_ip_mask = 1; 6408 break; 6409 default: 6410 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 6411 return -EINVAL; 6412 } 6413 6414 switch (filter->src_ip_mask) { 6415 case UINT32_MAX: 6416 filter_info->src_ip_mask = 0; 6417 filter_info->src_ip = filter->src_ip; 6418 break; 6419 case 0: 6420 filter_info->src_ip_mask = 1; 6421 break; 6422 default: 6423 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 6424 return -EINVAL; 6425 } 6426 6427 switch (filter->dst_port_mask) { 6428 case UINT16_MAX: 6429 filter_info->dst_port_mask = 0; 6430 filter_info->dst_port = filter->dst_port; 6431 break; 6432 case 0: 6433 filter_info->dst_port_mask = 1; 6434 break; 6435 default: 6436 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 6437 return -EINVAL; 6438 } 6439 6440 switch (filter->src_port_mask) { 6441 case UINT16_MAX: 6442 filter_info->src_port_mask = 0; 6443 filter_info->src_port = filter->src_port; 6444 break; 6445 case 0: 6446 filter_info->src_port_mask = 1; 6447 break; 6448 default: 6449 PMD_DRV_LOG(ERR, "invalid src_port mask."); 6450 return -EINVAL; 6451 } 6452 6453 switch (filter->proto_mask) { 6454 case UINT8_MAX: 6455 filter_info->proto_mask = 0; 6456 filter_info->proto = 6457 convert_protocol_type(filter->proto); 6458 break; 6459 case 0: 6460 filter_info->proto_mask = 1; 6461 break; 6462 default: 6463 PMD_DRV_LOG(ERR, "invalid protocol mask."); 6464 return -EINVAL; 6465 } 6466 6467 filter_info->priority = (uint8_t)filter->priority; 6468 return 0; 6469 } 6470 6471 /* 6472 * add or delete a ntuple filter 6473 * 6474 * @param 6475 * dev: Pointer to struct rte_eth_dev. 6476 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6477 * add: if true, add filter, if false, remove filter 6478 * 6479 * @return 6480 * - On success, zero. 6481 * - On failure, a negative value. 
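 *
 * Illustrative usage sketch, not from the original comment; the field
 * values mirror the checks in ntuple_filter_to_5tuple() above, and the
 * address, port, priority and queue are placeholders:
 *
 *     struct rte_eth_ntuple_filter f = {
 *         .flags = RTE_5TUPLE_FLAGS,
 *         .dst_ip = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
 *         .dst_ip_mask = UINT32_MAX,
 *         .src_ip_mask = 0,
 *         .dst_port = rte_cpu_to_be_16(80),
 *         .dst_port_mask = UINT16_MAX,
 *         .src_port_mask = 0,
 *         .proto = IPPROTO_TCP,
 *         .proto_mask = UINT8_MAX,
 *         .priority = 1,
 *         .queue = 0,
 *     };
 *     ixgbe_add_del_ntuple_filter(dev, &f, true);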
6482 */ 6483 int 6484 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 6485 struct rte_eth_ntuple_filter *ntuple_filter, 6486 bool add) 6487 { 6488 struct ixgbe_filter_info *filter_info = 6489 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6490 struct ixgbe_5tuple_filter_info filter_5tuple; 6491 struct ixgbe_5tuple_filter *filter; 6492 int ret; 6493 6494 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 6495 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 6496 return -EINVAL; 6497 } 6498 6499 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 6500 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 6501 if (ret < 0) 6502 return ret; 6503 6504 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 6505 &filter_5tuple); 6506 if (filter != NULL && add) { 6507 PMD_DRV_LOG(ERR, "filter exists."); 6508 return -EEXIST; 6509 } 6510 if (filter == NULL && !add) { 6511 PMD_DRV_LOG(ERR, "filter doesn't exist."); 6512 return -ENOENT; 6513 } 6514 6515 if (add) { 6516 filter = rte_zmalloc("ixgbe_5tuple_filter", 6517 sizeof(struct ixgbe_5tuple_filter), 0); 6518 if (filter == NULL) 6519 return -ENOMEM; 6520 rte_memcpy(&filter->filter_info, 6521 &filter_5tuple, 6522 sizeof(struct ixgbe_5tuple_filter_info)); 6523 filter->queue = ntuple_filter->queue; 6524 ret = ixgbe_add_5tuple_filter(dev, filter); 6525 if (ret < 0) { 6526 rte_free(filter); 6527 return ret; 6528 } 6529 } else 6530 ixgbe_remove_5tuple_filter(dev, filter); 6531 6532 return 0; 6533 } 6534 6535 int 6536 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 6537 struct rte_eth_ethertype_filter *filter, 6538 bool add) 6539 { 6540 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6541 struct ixgbe_filter_info *filter_info = 6542 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6543 uint32_t etqf = 0; 6544 uint32_t etqs = 0; 6545 int ret; 6546 struct ixgbe_ethertype_filter ethertype_filter; 6547 6548 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6549 return -EINVAL; 6550 6551 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 6552 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 6553 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 6554 " ethertype filter.", filter->ether_type); 6555 return -EINVAL; 6556 } 6557 6558 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 6559 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 6560 return -EINVAL; 6561 } 6562 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 6563 PMD_DRV_LOG(ERR, "drop option is unsupported."); 6564 return -EINVAL; 6565 } 6566 6567 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 6568 if (ret >= 0 && add) { 6569 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 6570 filter->ether_type); 6571 return -EEXIST; 6572 } 6573 if (ret < 0 && !add) { 6574 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 6575 filter->ether_type); 6576 return -ENOENT; 6577 } 6578 6579 if (add) { 6580 etqf = IXGBE_ETQF_FILTER_EN; 6581 etqf |= (uint32_t)filter->ether_type; 6582 etqs |= (uint32_t)((filter->queue << 6583 IXGBE_ETQS_RX_QUEUE_SHIFT) & 6584 IXGBE_ETQS_RX_QUEUE); 6585 etqs |= IXGBE_ETQS_QUEUE_EN; 6586 6587 ethertype_filter.ethertype = filter->ether_type; 6588 ethertype_filter.etqf = etqf; 6589 ethertype_filter.etqs = etqs; 6590 ethertype_filter.conf = FALSE; 6591 ret = ixgbe_ethertype_filter_insert(filter_info, 6592 ðertype_filter); 6593 if (ret < 0) { 6594 PMD_DRV_LOG(ERR, "ethertype filters are full."); 6595 return -ENOSPC; 6596 } 6597 } else { 6598 ret = ixgbe_ethertype_filter_remove(filter_info, 
(uint8_t)ret); 6599 if (ret < 0) 6600 return -ENOSYS; 6601 } 6602 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 6603 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 6604 IXGBE_WRITE_FLUSH(hw); 6605 6606 return 0; 6607 } 6608 6609 static int 6610 ixgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev, 6611 const struct rte_flow_ops **ops) 6612 { 6613 *ops = &ixgbe_flow_ops; 6614 return 0; 6615 } 6616 6617 static u8 * 6618 ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw, 6619 u8 **mc_addr_ptr, u32 *vmdq) 6620 { 6621 u8 *mc_addr; 6622 6623 *vmdq = 0; 6624 mc_addr = *mc_addr_ptr; 6625 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 6626 return mc_addr; 6627 } 6628 6629 static int 6630 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 6631 struct rte_ether_addr *mc_addr_set, 6632 uint32_t nb_mc_addr) 6633 { 6634 struct ixgbe_hw *hw; 6635 u8 *mc_addr_list; 6636 6637 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6638 mc_addr_list = (u8 *)mc_addr_set; 6639 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 6640 ixgbe_dev_addr_list_itr, TRUE); 6641 } 6642 6643 static uint64_t 6644 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 6645 { 6646 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6647 uint64_t systime_cycles; 6648 6649 switch (hw->mac.type) { 6650 case ixgbe_mac_X550: 6651 case ixgbe_mac_X550EM_x: 6652 case ixgbe_mac_X550EM_a: 6653 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 6654 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6655 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6656 * NSEC_PER_SEC; 6657 break; 6658 default: 6659 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6660 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6661 << 32; 6662 } 6663 6664 return systime_cycles; 6665 } 6666 6667 static uint64_t 6668 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6669 { 6670 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6671 uint64_t rx_tstamp_cycles; 6672 6673 switch (hw->mac.type) { 6674 case ixgbe_mac_X550: 6675 case ixgbe_mac_X550EM_x: 6676 case ixgbe_mac_X550EM_a: 6677 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6678 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6679 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6680 * NSEC_PER_SEC; 6681 break; 6682 default: 6683 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6684 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6685 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6686 << 32; 6687 } 6688 6689 return rx_tstamp_cycles; 6690 } 6691 6692 static uint64_t 6693 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6694 { 6695 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6696 uint64_t tx_tstamp_cycles; 6697 6698 switch (hw->mac.type) { 6699 case ixgbe_mac_X550: 6700 case ixgbe_mac_X550EM_x: 6701 case ixgbe_mac_X550EM_a: 6702 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6703 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6704 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6705 * NSEC_PER_SEC; 6706 break; 6707 default: 6708 /* TXSTMPL stores ns and TXSTMPH stores seconds. 
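 * (Editorial clarification, not from the original comment: on the older
 * MAC types handled by this default branch the two registers are read
 * back as a single 64-bit cycle count, low word OR'd with the high word
 * shifted left by 32, exactly as the statements below do.)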
*/ 6709 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6710 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6711 << 32; 6712 } 6713 6714 return tx_tstamp_cycles; 6715 } 6716 6717 static void 6718 ixgbe_start_timecounters(struct rte_eth_dev *dev) 6719 { 6720 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6721 struct ixgbe_adapter *adapter = dev->data->dev_private; 6722 struct rte_eth_link link; 6723 uint32_t incval = 0; 6724 uint32_t shift = 0; 6725 6726 /* Get current link speed. */ 6727 ixgbe_dev_link_update(dev, 1); 6728 rte_eth_linkstatus_get(dev, &link); 6729 6730 switch (link.link_speed) { 6731 case RTE_ETH_SPEED_NUM_100M: 6732 incval = IXGBE_INCVAL_100; 6733 shift = IXGBE_INCVAL_SHIFT_100; 6734 break; 6735 case RTE_ETH_SPEED_NUM_1G: 6736 incval = IXGBE_INCVAL_1GB; 6737 shift = IXGBE_INCVAL_SHIFT_1GB; 6738 break; 6739 case RTE_ETH_SPEED_NUM_10G: 6740 default: 6741 incval = IXGBE_INCVAL_10GB; 6742 shift = IXGBE_INCVAL_SHIFT_10GB; 6743 break; 6744 } 6745 6746 switch (hw->mac.type) { 6747 case ixgbe_mac_X550: 6748 case ixgbe_mac_X550EM_x: 6749 case ixgbe_mac_X550EM_a: 6750 /* Independent of link speed. */ 6751 incval = 1; 6752 /* Cycles read will be interpreted as ns. */ 6753 shift = 0; 6754 /* Fall-through */ 6755 case ixgbe_mac_X540: 6756 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 6757 break; 6758 case ixgbe_mac_82599EB: 6759 incval >>= IXGBE_INCVAL_SHIFT_82599; 6760 shift -= IXGBE_INCVAL_SHIFT_82599; 6761 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 6762 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 6763 break; 6764 default: 6765 /* Not supported. */ 6766 return; 6767 } 6768 6769 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 6770 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6771 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6772 6773 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6774 adapter->systime_tc.cc_shift = shift; 6775 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 6776 6777 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6778 adapter->rx_tstamp_tc.cc_shift = shift; 6779 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6780 6781 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6782 adapter->tx_tstamp_tc.cc_shift = shift; 6783 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6784 } 6785 6786 static int 6787 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 6788 { 6789 struct ixgbe_adapter *adapter = dev->data->dev_private; 6790 6791 adapter->systime_tc.nsec += delta; 6792 adapter->rx_tstamp_tc.nsec += delta; 6793 adapter->tx_tstamp_tc.nsec += delta; 6794 6795 return 0; 6796 } 6797 6798 static int 6799 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 6800 { 6801 uint64_t ns; 6802 struct ixgbe_adapter *adapter = dev->data->dev_private; 6803 6804 ns = rte_timespec_to_ns(ts); 6805 /* Set the timecounters to a new value. 
*/ 6806 adapter->systime_tc.nsec = ns; 6807 adapter->rx_tstamp_tc.nsec = ns; 6808 adapter->tx_tstamp_tc.nsec = ns; 6809 6810 return 0; 6811 } 6812 6813 static int 6814 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 6815 { 6816 uint64_t ns, systime_cycles; 6817 struct ixgbe_adapter *adapter = dev->data->dev_private; 6818 6819 systime_cycles = ixgbe_read_systime_cyclecounter(dev); 6820 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 6821 *ts = rte_ns_to_timespec(ns); 6822 6823 return 0; 6824 } 6825 6826 static int 6827 ixgbe_timesync_enable(struct rte_eth_dev *dev) 6828 { 6829 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6830 uint32_t tsync_ctl; 6831 uint32_t tsauxc; 6832 6833 /* Stop the timesync system time. */ 6834 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); 6835 /* Reset the timesync system time value. */ 6836 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); 6837 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); 6838 6839 /* Enable system time for platforms where it isn't on by default. */ 6840 tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); 6841 tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; 6842 IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); 6843 6844 ixgbe_start_timecounters(dev); 6845 6846 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6847 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 6848 (RTE_ETHER_TYPE_1588 | 6849 IXGBE_ETQF_FILTER_EN | 6850 IXGBE_ETQF_1588)); 6851 6852 /* Enable timestamping of received PTP packets. */ 6853 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6854 tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; 6855 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6856 6857 /* Enable timestamping of transmitted PTP packets. */ 6858 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6859 tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; 6860 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6861 6862 IXGBE_WRITE_FLUSH(hw); 6863 6864 return 0; 6865 } 6866 6867 static int 6868 ixgbe_timesync_disable(struct rte_eth_dev *dev) 6869 { 6870 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6871 uint32_t tsync_ctl; 6872 6873 /* Disable timestamping of transmitted PTP packets. */ 6874 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6875 tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; 6876 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6877 6878 /* Disable timestamping of received PTP packets. */ 6879 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6880 tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; 6881 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6882 6883 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6884 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 6885 6886 /* Stop incrementing the System Time registers. 
*/ 6887 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); 6888 6889 return 0; 6890 } 6891 6892 static int 6893 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 6894 struct timespec *timestamp, 6895 uint32_t flags __rte_unused) 6896 { 6897 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6898 struct ixgbe_adapter *adapter = dev->data->dev_private; 6899 uint32_t tsync_rxctl; 6900 uint64_t rx_tstamp_cycles; 6901 uint64_t ns; 6902 6903 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6904 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) 6905 return -EINVAL; 6906 6907 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); 6908 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 6909 *timestamp = rte_ns_to_timespec(ns); 6910 6911 return 0; 6912 } 6913 6914 static int 6915 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 6916 struct timespec *timestamp) 6917 { 6918 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6919 struct ixgbe_adapter *adapter = dev->data->dev_private; 6920 uint32_t tsync_txctl; 6921 uint64_t tx_tstamp_cycles; 6922 uint64_t ns; 6923 6924 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6925 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) 6926 return -EINVAL; 6927 6928 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); 6929 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 6930 *timestamp = rte_ns_to_timespec(ns); 6931 6932 return 0; 6933 } 6934 6935 static int 6936 ixgbe_get_reg_length(struct rte_eth_dev *dev) 6937 { 6938 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6939 int count = 0; 6940 int g_ind = 0; 6941 const struct reg_info *reg_group; 6942 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 6943 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6944 6945 while ((reg_group = reg_set[g_ind++])) 6946 count += ixgbe_regs_group_count(reg_group); 6947 6948 return count; 6949 } 6950 6951 static int 6952 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 6953 { 6954 int count = 0; 6955 int g_ind = 0; 6956 const struct reg_info *reg_group; 6957 6958 while ((reg_group = ixgbevf_regs[g_ind++])) 6959 count += ixgbe_regs_group_count(reg_group); 6960 6961 return count; 6962 } 6963 6964 static int 6965 ixgbe_get_regs(struct rte_eth_dev *dev, 6966 struct rte_dev_reg_info *regs) 6967 { 6968 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6969 uint32_t *data = regs->data; 6970 int g_ind = 0; 6971 int count = 0; 6972 const struct reg_info *reg_group; 6973 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
6974 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6975 6976 if (data == NULL) { 6977 regs->length = ixgbe_get_reg_length(dev); 6978 regs->width = sizeof(uint32_t); 6979 return 0; 6980 } 6981 6982 /* Support only full register dump */ 6983 if ((regs->length == 0) || 6984 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 6985 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 6986 hw->device_id; 6987 while ((reg_group = reg_set[g_ind++])) 6988 count += ixgbe_read_regs_group(dev, &data[count], 6989 reg_group); 6990 return 0; 6991 } 6992 6993 return -ENOTSUP; 6994 } 6995 6996 static int 6997 ixgbevf_get_regs(struct rte_eth_dev *dev, 6998 struct rte_dev_reg_info *regs) 6999 { 7000 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7001 uint32_t *data = regs->data; 7002 int g_ind = 0; 7003 int count = 0; 7004 const struct reg_info *reg_group; 7005 7006 if (data == NULL) { 7007 regs->length = ixgbevf_get_reg_length(dev); 7008 regs->width = sizeof(uint32_t); 7009 return 0; 7010 } 7011 7012 /* Support only full register dump */ 7013 if ((regs->length == 0) || 7014 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 7015 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7016 hw->device_id; 7017 while ((reg_group = ixgbevf_regs[g_ind++])) 7018 count += ixgbe_read_regs_group(dev, &data[count], 7019 reg_group); 7020 return 0; 7021 } 7022 7023 return -ENOTSUP; 7024 } 7025 7026 static int 7027 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 7028 { 7029 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7030 7031 /* Return unit is byte count */ 7032 return hw->eeprom.word_size * 2; 7033 } 7034 7035 static int 7036 ixgbe_get_eeprom(struct rte_eth_dev *dev, 7037 struct rte_dev_eeprom_info *in_eeprom) 7038 { 7039 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7040 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7041 uint16_t *data = in_eeprom->data; 7042 int first, length; 7043 7044 first = in_eeprom->offset >> 1; 7045 length = in_eeprom->length >> 1; 7046 if ((first > hw->eeprom.word_size) || 7047 ((first + length) > hw->eeprom.word_size)) 7048 return -EINVAL; 7049 7050 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7051 7052 return eeprom->ops.read_buffer(hw, first, length, data); 7053 } 7054 7055 static int 7056 ixgbe_set_eeprom(struct rte_eth_dev *dev, 7057 struct rte_dev_eeprom_info *in_eeprom) 7058 { 7059 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7060 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7061 uint16_t *data = in_eeprom->data; 7062 int first, length; 7063 7064 first = in_eeprom->offset >> 1; 7065 length = in_eeprom->length >> 1; 7066 if ((first > hw->eeprom.word_size) || 7067 ((first + length) > hw->eeprom.word_size)) 7068 return -EINVAL; 7069 7070 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7071 7072 return eeprom->ops.write_buffer(hw, first, length, data); 7073 } 7074 7075 static int 7076 ixgbe_get_module_info(struct rte_eth_dev *dev, 7077 struct rte_eth_dev_module_info *modinfo) 7078 { 7079 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7080 uint32_t status; 7081 uint8_t sff8472_rev, addr_mode; 7082 bool page_swap = false; 7083 7084 /* Check whether we support SFF-8472 or not */ 7085 status = hw->phy.ops.read_i2c_eeprom(hw, 7086 IXGBE_SFF_SFF_8472_COMP, 7087 &sff8472_rev); 7088 if (status != 0) 7089 return -EIO; 7090 7091 /* addressing mode is not supported */ 7092 status = hw->phy.ops.read_i2c_eeprom(hw, 7093 
IXGBE_SFF_SFF_8472_SWAP, 7094 &addr_mode); 7095 if (status != 0) 7096 return -EIO; 7097 7098 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 7099 PMD_DRV_LOG(ERR, 7100 "Address change required to access page 0xA2, " 7101 "but not supported. Please report the module " 7102 "type to the driver maintainers."); 7103 page_swap = true; 7104 } 7105 7106 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 7107 /* We have a SFP, but it does not support SFF-8472 */ 7108 modinfo->type = RTE_ETH_MODULE_SFF_8079; 7109 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 7110 } else { 7111 /* We have a SFP which supports a revision of SFF-8472. */ 7112 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7113 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7114 } 7115 7116 return 0; 7117 } 7118 7119 static int 7120 ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 7121 struct rte_dev_eeprom_info *info) 7122 { 7123 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7124 uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; 7125 uint8_t databyte = 0xFF; 7126 uint8_t *data = info->data; 7127 uint32_t i = 0; 7128 7129 for (i = info->offset; i < info->offset + info->length; i++) { 7130 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 7131 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 7132 else 7133 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 7134 7135 if (status != 0) 7136 return -EIO; 7137 7138 data[i - info->offset] = databyte; 7139 } 7140 7141 return 0; 7142 } 7143 7144 uint16_t 7145 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 7146 switch (mac_type) { 7147 case ixgbe_mac_X550: 7148 case ixgbe_mac_X550EM_x: 7149 case ixgbe_mac_X550EM_a: 7150 return RTE_ETH_RSS_RETA_SIZE_512; 7151 case ixgbe_mac_X550_vf: 7152 case ixgbe_mac_X550EM_x_vf: 7153 case ixgbe_mac_X550EM_a_vf: 7154 return RTE_ETH_RSS_RETA_SIZE_64; 7155 case ixgbe_mac_X540_vf: 7156 case ixgbe_mac_82599_vf: 7157 return 0; 7158 default: 7159 return RTE_ETH_RSS_RETA_SIZE_128; 7160 } 7161 } 7162 7163 uint32_t 7164 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 7165 switch (mac_type) { 7166 case ixgbe_mac_X550: 7167 case ixgbe_mac_X550EM_x: 7168 case ixgbe_mac_X550EM_a: 7169 if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128) 7170 return IXGBE_RETA(reta_idx >> 2); 7171 else 7172 return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2); 7173 case ixgbe_mac_X550_vf: 7174 case ixgbe_mac_X550EM_x_vf: 7175 case ixgbe_mac_X550EM_a_vf: 7176 return IXGBE_VFRETA(reta_idx >> 2); 7177 default: 7178 return IXGBE_RETA(reta_idx >> 2); 7179 } 7180 } 7181 7182 uint32_t 7183 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 7184 switch (mac_type) { 7185 case ixgbe_mac_X550_vf: 7186 case ixgbe_mac_X550EM_x_vf: 7187 case ixgbe_mac_X550EM_a_vf: 7188 return IXGBE_VFMRQC; 7189 default: 7190 return IXGBE_MRQC; 7191 } 7192 } 7193 7194 uint32_t 7195 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 7196 switch (mac_type) { 7197 case ixgbe_mac_X550_vf: 7198 case ixgbe_mac_X550EM_x_vf: 7199 case ixgbe_mac_X550EM_a_vf: 7200 return IXGBE_VFRSSRK(i); 7201 default: 7202 return IXGBE_RSSRK(i); 7203 } 7204 } 7205 7206 bool 7207 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 7208 switch (mac_type) { 7209 case ixgbe_mac_82599_vf: 7210 case ixgbe_mac_X540_vf: 7211 return 0; 7212 default: 7213 return 1; 7214 } 7215 } 7216 7217 static int 7218 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 7219 struct rte_eth_dcb_info *dcb_info) 7220 { 7221 struct ixgbe_dcb_config *dcb_config = 7222 
IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 7223 struct ixgbe_dcb_tc_config *tc; 7224 struct rte_eth_dcb_tc_queue_mapping *tc_queue; 7225 uint8_t nb_tcs; 7226 uint8_t i, j; 7227 7228 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) 7229 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 7230 else 7231 dcb_info->nb_tcs = 1; 7232 7233 tc_queue = &dcb_info->tc_queue; 7234 nb_tcs = dcb_info->nb_tcs; 7235 7236 if (dcb_config->vt_mode) { /* vt is enabled*/ 7237 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 7238 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 7239 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) 7240 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 7241 if (RTE_ETH_DEV_SRIOV(dev).active > 0) { 7242 for (j = 0; j < nb_tcs; j++) { 7243 tc_queue->tc_rxq[0][j].base = j; 7244 tc_queue->tc_rxq[0][j].nb_queue = 1; 7245 tc_queue->tc_txq[0][j].base = j; 7246 tc_queue->tc_txq[0][j].nb_queue = 1; 7247 } 7248 } else { 7249 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 7250 for (j = 0; j < nb_tcs; j++) { 7251 tc_queue->tc_rxq[i][j].base = 7252 i * nb_tcs + j; 7253 tc_queue->tc_rxq[i][j].nb_queue = 1; 7254 tc_queue->tc_txq[i][j].base = 7255 i * nb_tcs + j; 7256 tc_queue->tc_txq[i][j].nb_queue = 1; 7257 } 7258 } 7259 } 7260 } else { /* vt is disabled*/ 7261 struct rte_eth_dcb_rx_conf *rx_conf = 7262 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 7263 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) 7264 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 7265 if (dcb_info->nb_tcs == RTE_ETH_4_TCS) { 7266 for (i = 0; i < dcb_info->nb_tcs; i++) { 7267 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 7268 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7269 } 7270 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7271 dcb_info->tc_queue.tc_txq[0][1].base = 64; 7272 dcb_info->tc_queue.tc_txq[0][2].base = 96; 7273 dcb_info->tc_queue.tc_txq[0][3].base = 112; 7274 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 7275 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7276 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7277 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7278 } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) { 7279 for (i = 0; i < dcb_info->nb_tcs; i++) { 7280 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 7281 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7282 } 7283 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7284 dcb_info->tc_queue.tc_txq[0][1].base = 32; 7285 dcb_info->tc_queue.tc_txq[0][2].base = 64; 7286 dcb_info->tc_queue.tc_txq[0][3].base = 80; 7287 dcb_info->tc_queue.tc_txq[0][4].base = 96; 7288 dcb_info->tc_queue.tc_txq[0][5].base = 104; 7289 dcb_info->tc_queue.tc_txq[0][6].base = 112; 7290 dcb_info->tc_queue.tc_txq[0][7].base = 120; 7291 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 7292 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7293 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7294 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7295 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 7296 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 7297 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 7298 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 7299 } 7300 } 7301 for (i = 0; i < dcb_info->nb_tcs; i++) { 7302 tc = &dcb_config->tc_config[i]; 7303 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; 7304 } 7305 return 0; 7306 } 7307 7308 /* Update e-tag ether type */ 7309 static int 7310 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, 7311 uint16_t ether_type) 7312 { 7313 uint32_t etag_etype; 7314 7315 if (hw->mac.type != ixgbe_mac_X550 && 7316 hw->mac.type != 
ixgbe_mac_X550EM_x && 7317 hw->mac.type != ixgbe_mac_X550EM_a) { 7318 return -ENOTSUP; 7319 } 7320 7321 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7322 etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; 7323 etag_etype |= ether_type; 7324 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7325 IXGBE_WRITE_FLUSH(hw); 7326 7327 return 0; 7328 } 7329 7330 /* Enable e-tag tunnel */ 7331 static int 7332 ixgbe_e_tag_enable(struct ixgbe_hw *hw) 7333 { 7334 uint32_t etag_etype; 7335 7336 if (hw->mac.type != ixgbe_mac_X550 && 7337 hw->mac.type != ixgbe_mac_X550EM_x && 7338 hw->mac.type != ixgbe_mac_X550EM_a) { 7339 return -ENOTSUP; 7340 } 7341 7342 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7343 etag_etype |= IXGBE_ETAG_ETYPE_VALID; 7344 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7345 IXGBE_WRITE_FLUSH(hw); 7346 7347 return 0; 7348 } 7349 7350 static int 7351 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, 7352 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7353 { 7354 int ret = 0; 7355 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7356 uint32_t i, rar_entries; 7357 uint32_t rar_low, rar_high; 7358 7359 if (hw->mac.type != ixgbe_mac_X550 && 7360 hw->mac.type != ixgbe_mac_X550EM_x && 7361 hw->mac.type != ixgbe_mac_X550EM_a) { 7362 return -ENOTSUP; 7363 } 7364 7365 rar_entries = ixgbe_get_num_rx_addrs(hw); 7366 7367 for (i = 1; i < rar_entries; i++) { 7368 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7369 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 7370 if ((rar_high & IXGBE_RAH_AV) && 7371 (rar_high & IXGBE_RAH_ADTYPE) && 7372 ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == 7373 l2_tunnel->tunnel_id)) { 7374 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 7375 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 7376 7377 ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); 7378 7379 return ret; 7380 } 7381 } 7382 7383 return ret; 7384 } 7385 7386 static int 7387 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, 7388 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7389 { 7390 int ret = 0; 7391 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7392 uint32_t i, rar_entries; 7393 uint32_t rar_low, rar_high; 7394 7395 if (hw->mac.type != ixgbe_mac_X550 && 7396 hw->mac.type != ixgbe_mac_X550EM_x && 7397 hw->mac.type != ixgbe_mac_X550EM_a) { 7398 return -ENOTSUP; 7399 } 7400 7401 /* One entry for one tunnel. Try to remove potential existing entry. */ 7402 ixgbe_e_tag_filter_del(dev, l2_tunnel); 7403 7404 rar_entries = ixgbe_get_num_rx_addrs(hw); 7405 7406 for (i = 1; i < rar_entries; i++) { 7407 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7408 if (rar_high & IXGBE_RAH_AV) { 7409 continue; 7410 } else { 7411 ixgbe_set_vmdq(hw, i, l2_tunnel->pool); 7412 rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; 7413 rar_low = l2_tunnel->tunnel_id; 7414 7415 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); 7416 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); 7417 7418 return ret; 7419 } 7420 } 7421 7422 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
7423 " Please remove a rule before adding a new one."); 7424 return -EINVAL; 7425 } 7426 7427 static inline struct ixgbe_l2_tn_filter * 7428 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, 7429 struct ixgbe_l2_tn_key *key) 7430 { 7431 int ret; 7432 7433 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 7434 if (ret < 0) 7435 return NULL; 7436 7437 return l2_tn_info->hash_map[ret]; 7438 } 7439 7440 static inline int 7441 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7442 struct ixgbe_l2_tn_filter *l2_tn_filter) 7443 { 7444 int ret; 7445 7446 ret = rte_hash_add_key(l2_tn_info->hash_handle, 7447 &l2_tn_filter->key); 7448 7449 if (ret < 0) { 7450 PMD_DRV_LOG(ERR, 7451 "Failed to insert L2 tunnel filter" 7452 " to hash table %d!", 7453 ret); 7454 return ret; 7455 } 7456 7457 l2_tn_info->hash_map[ret] = l2_tn_filter; 7458 7459 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7460 7461 return 0; 7462 } 7463 7464 static inline int 7465 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7466 struct ixgbe_l2_tn_key *key) 7467 { 7468 int ret; 7469 struct ixgbe_l2_tn_filter *l2_tn_filter; 7470 7471 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 7472 7473 if (ret < 0) { 7474 PMD_DRV_LOG(ERR, 7475 "No such L2 tunnel filter to delete %d!", 7476 ret); 7477 return ret; 7478 } 7479 7480 l2_tn_filter = l2_tn_info->hash_map[ret]; 7481 l2_tn_info->hash_map[ret] = NULL; 7482 7483 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7484 rte_free(l2_tn_filter); 7485 7486 return 0; 7487 } 7488 7489 /* Add l2 tunnel filter */ 7490 int 7491 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 7492 struct ixgbe_l2_tunnel_conf *l2_tunnel, 7493 bool restore) 7494 { 7495 int ret; 7496 struct ixgbe_l2_tn_info *l2_tn_info = 7497 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7498 struct ixgbe_l2_tn_key key; 7499 struct ixgbe_l2_tn_filter *node; 7500 7501 if (!restore) { 7502 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7503 key.tn_id = l2_tunnel->tunnel_id; 7504 7505 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); 7506 7507 if (node) { 7508 PMD_DRV_LOG(ERR, 7509 "The L2 tunnel filter already exists!"); 7510 return -EINVAL; 7511 } 7512 7513 node = rte_zmalloc("ixgbe_l2_tn", 7514 sizeof(struct ixgbe_l2_tn_filter), 7515 0); 7516 if (!node) 7517 return -ENOMEM; 7518 7519 rte_memcpy(&node->key, 7520 &key, 7521 sizeof(struct ixgbe_l2_tn_key)); 7522 node->pool = l2_tunnel->pool; 7523 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); 7524 if (ret < 0) { 7525 rte_free(node); 7526 return ret; 7527 } 7528 } 7529 7530 switch (l2_tunnel->l2_tunnel_type) { 7531 case RTE_ETH_L2_TUNNEL_TYPE_E_TAG: 7532 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 7533 break; 7534 default: 7535 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7536 ret = -EINVAL; 7537 break; 7538 } 7539 7540 if ((!restore) && (ret < 0)) 7541 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7542 7543 return ret; 7544 } 7545 7546 /* Delete l2 tunnel filter */ 7547 int 7548 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 7549 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7550 { 7551 int ret; 7552 struct ixgbe_l2_tn_info *l2_tn_info = 7553 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7554 struct ixgbe_l2_tn_key key; 7555 7556 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7557 key.tn_id = l2_tunnel->tunnel_id; 7558 ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7559 if (ret < 0) 7560 return ret; 7561 7562 switch (l2_tunnel->l2_tunnel_type) { 7563 case 
RTE_ETH_L2_TUNNEL_TYPE_E_TAG: 7564 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); 7565 break; 7566 default: 7567 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7568 ret = -EINVAL; 7569 break; 7570 } 7571 7572 return ret; 7573 } 7574 7575 static int 7576 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 7577 { 7578 int ret = 0; 7579 uint32_t ctrl; 7580 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7581 7582 if (hw->mac.type != ixgbe_mac_X550 && 7583 hw->mac.type != ixgbe_mac_X550EM_x && 7584 hw->mac.type != ixgbe_mac_X550EM_a) { 7585 return -ENOTSUP; 7586 } 7587 7588 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 7589 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 7590 if (en) 7591 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; 7592 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 7593 7594 return ret; 7595 } 7596 7597 static int 7598 ixgbe_update_vxlan_port(struct ixgbe_hw *hw, 7599 uint16_t port) 7600 { 7601 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); 7602 IXGBE_WRITE_FLUSH(hw); 7603 7604 return 0; 7605 } 7606 7607 /* There's only one register for VxLAN UDP port. 7608 * So, we cannot add several ports. Will update it. 7609 */ 7610 static int 7611 ixgbe_add_vxlan_port(struct ixgbe_hw *hw, 7612 uint16_t port) 7613 { 7614 if (port == 0) { 7615 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 7616 return -EINVAL; 7617 } 7618 7619 return ixgbe_update_vxlan_port(hw, port); 7620 } 7621 7622 /* We cannot delete the VxLAN port. For there's a register for VxLAN 7623 * UDP port, it must have a value. 7624 * So, will reset it to the original value 0. 7625 */ 7626 static int 7627 ixgbe_del_vxlan_port(struct ixgbe_hw *hw, 7628 uint16_t port) 7629 { 7630 uint16_t cur_port; 7631 7632 cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); 7633 7634 if (cur_port != port) { 7635 PMD_DRV_LOG(ERR, "Port %u does not exist.", port); 7636 return -EINVAL; 7637 } 7638 7639 return ixgbe_update_vxlan_port(hw, 0); 7640 } 7641 7642 /* Add UDP tunneling port */ 7643 static int 7644 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 7645 struct rte_eth_udp_tunnel *udp_tunnel) 7646 { 7647 int ret = 0; 7648 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7649 7650 if (hw->mac.type != ixgbe_mac_X550 && 7651 hw->mac.type != ixgbe_mac_X550EM_x && 7652 hw->mac.type != ixgbe_mac_X550EM_a) { 7653 return -ENOTSUP; 7654 } 7655 7656 if (udp_tunnel == NULL) 7657 return -EINVAL; 7658 7659 switch (udp_tunnel->prot_type) { 7660 case RTE_ETH_TUNNEL_TYPE_VXLAN: 7661 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); 7662 break; 7663 7664 case RTE_ETH_TUNNEL_TYPE_GENEVE: 7665 case RTE_ETH_TUNNEL_TYPE_TEREDO: 7666 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7667 ret = -EINVAL; 7668 break; 7669 7670 default: 7671 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7672 ret = -EINVAL; 7673 break; 7674 } 7675 7676 return ret; 7677 } 7678 7679 /* Remove UDP tunneling port */ 7680 static int 7681 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 7682 struct rte_eth_udp_tunnel *udp_tunnel) 7683 { 7684 int ret = 0; 7685 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7686 7687 if (hw->mac.type != ixgbe_mac_X550 && 7688 hw->mac.type != ixgbe_mac_X550EM_x && 7689 hw->mac.type != ixgbe_mac_X550EM_a) { 7690 return -ENOTSUP; 7691 } 7692 7693 if (udp_tunnel == NULL) 7694 return -EINVAL; 7695 7696 switch (udp_tunnel->prot_type) { 7697 case RTE_ETH_TUNNEL_TYPE_VXLAN: 7698 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); 7699 break; 7700 case RTE_ETH_TUNNEL_TYPE_GENEVE: 7701 case 
RTE_ETH_TUNNEL_TYPE_TEREDO: 7702 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7703 ret = -EINVAL; 7704 break; 7705 default: 7706 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7707 ret = -EINVAL; 7708 break; 7709 } 7710 7711 return ret; 7712 } 7713 7714 static int 7715 ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) 7716 { 7717 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7718 int ret; 7719 7720 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) { 7721 case IXGBE_SUCCESS: 7722 ret = 0; 7723 break; 7724 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7725 ret = -ENOTSUP; 7726 break; 7727 default: 7728 ret = -EAGAIN; 7729 break; 7730 } 7731 7732 return ret; 7733 } 7734 7735 static int 7736 ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) 7737 { 7738 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7739 int ret; 7740 7741 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) { 7742 case IXGBE_SUCCESS: 7743 ret = 0; 7744 break; 7745 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7746 ret = -ENOTSUP; 7747 break; 7748 default: 7749 ret = -EAGAIN; 7750 break; 7751 } 7752 7753 return ret; 7754 } 7755 7756 static int 7757 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) 7758 { 7759 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7760 int ret; 7761 int mode = IXGBEVF_XCAST_MODE_ALLMULTI; 7762 7763 switch (hw->mac.ops.update_xcast_mode(hw, mode)) { 7764 case IXGBE_SUCCESS: 7765 ret = 0; 7766 break; 7767 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7768 ret = -ENOTSUP; 7769 break; 7770 default: 7771 ret = -EAGAIN; 7772 break; 7773 } 7774 7775 return ret; 7776 } 7777 7778 static int 7779 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) 7780 { 7781 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7782 int ret; 7783 7784 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { 7785 case IXGBE_SUCCESS: 7786 ret = 0; 7787 break; 7788 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7789 ret = -ENOTSUP; 7790 break; 7791 default: 7792 ret = -EAGAIN; 7793 break; 7794 } 7795 7796 return ret; 7797 } 7798 7799 static void ixgbevf_mbx_process(struct rte_eth_dev *dev) 7800 { 7801 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7802 u32 in_msg = 0; 7803 7804 /* peek the message first */ 7805 in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); 7806 7807 /* PF reset VF event */ 7808 if (in_msg == IXGBE_PF_CONTROL_MSG) { 7809 /* dummy mbx read to ack pf */ 7810 if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) 7811 return; 7812 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 7813 NULL); 7814 } 7815 } 7816 7817 static int 7818 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) 7819 { 7820 uint32_t eicr; 7821 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7822 struct ixgbe_interrupt *intr = 7823 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7824 ixgbevf_intr_disable(dev); 7825 7826 /* read-on-clear nic registers here */ 7827 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); 7828 intr->flags = 0; 7829 7830 /* only one misc vector supported - mailbox */ 7831 eicr &= IXGBE_VTEICR_MASK; 7832 if (eicr == IXGBE_MISC_VEC_ID) 7833 intr->flags |= IXGBE_FLAG_MAILBOX; 7834 7835 return 0; 7836 } 7837 7838 static int 7839 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) 7840 { 7841 struct ixgbe_interrupt *intr = 7842 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7843 7844 if (intr->flags & IXGBE_FLAG_MAILBOX) { 7845 ixgbevf_mbx_process(dev); 
7846 intr->flags &= ~IXGBE_FLAG_MAILBOX; 7847 } 7848 7849 ixgbevf_intr_enable(dev); 7850 7851 return 0; 7852 } 7853 7854 static void 7855 ixgbevf_dev_interrupt_handler(void *param) 7856 { 7857 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 7858 7859 ixgbevf_dev_interrupt_get_status(dev); 7860 ixgbevf_dev_interrupt_action(dev); 7861 } 7862 7863 /** 7864 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path 7865 * @hw: pointer to hardware structure 7866 * 7867 * Stops the transmit data path and waits for the HW to internally empty 7868 * the Tx security block 7869 **/ 7870 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) 7871 { 7872 #define IXGBE_MAX_SECTX_POLL 40 7873 7874 int i; 7875 int sectxreg; 7876 7877 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 7878 sectxreg |= IXGBE_SECTXCTRL_TX_DIS; 7879 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 7880 for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { 7881 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); 7882 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) 7883 break; 7884 /* Use interrupt-safe sleep just in case */ 7885 usec_delay(1000); 7886 } 7887 7888 /* For informational purposes only */ 7889 if (i >= IXGBE_MAX_SECTX_POLL) 7890 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " 7891 "path fully disabled. Continuing with init."); 7892 7893 return IXGBE_SUCCESS; 7894 } 7895 7896 /** 7897 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path 7898 * @hw: pointer to hardware structure 7899 * 7900 * Enables the transmit data path. 7901 **/ 7902 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) 7903 { 7904 uint32_t sectxreg; 7905 7906 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 7907 sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; 7908 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 7909 IXGBE_WRITE_FLUSH(hw); 7910 7911 return IXGBE_SUCCESS; 7912 } 7913 7914 /* restore n-tuple filter */ 7915 static inline void 7916 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) 7917 { 7918 struct ixgbe_filter_info *filter_info = 7919 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 7920 struct ixgbe_5tuple_filter *node; 7921 7922 TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { 7923 ixgbe_inject_5tuple_filter(dev, node); 7924 } 7925 } 7926 7927 /* restore ethernet type filter */ 7928 static inline void 7929 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) 7930 { 7931 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7932 struct ixgbe_filter_info *filter_info = 7933 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 7934 int i; 7935 7936 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 7937 if (filter_info->ethertype_mask & (1 << i)) { 7938 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 7939 filter_info->ethertype_filters[i].etqf); 7940 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 7941 filter_info->ethertype_filters[i].etqs); 7942 IXGBE_WRITE_FLUSH(hw); 7943 } 7944 } 7945 } 7946 7947 /* restore SYN filter */ 7948 static inline void 7949 ixgbe_syn_filter_restore(struct rte_eth_dev *dev) 7950 { 7951 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7952 struct ixgbe_filter_info *filter_info = 7953 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 7954 uint32_t synqf; 7955 7956 synqf = filter_info->syn_info; 7957 7958 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 7959 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 7960 IXGBE_WRITE_FLUSH(hw); 7961 } 7962 } 7963 7964 /* restore L2 tunnel filter */ 7965 static inline void 7966 ixgbe_l2_tn_filter_restore(struct rte_eth_dev 
*dev) 7967 { 7968 struct ixgbe_l2_tn_info *l2_tn_info = 7969 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7970 struct ixgbe_l2_tn_filter *node; 7971 struct ixgbe_l2_tunnel_conf l2_tn_conf; 7972 7973 TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { 7974 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; 7975 l2_tn_conf.tunnel_id = node->key.tn_id; 7976 l2_tn_conf.pool = node->pool; 7977 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); 7978 } 7979 } 7980 7981 /* restore rss filter */ 7982 static inline void 7983 ixgbe_rss_filter_restore(struct rte_eth_dev *dev) 7984 { 7985 struct ixgbe_filter_info *filter_info = 7986 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 7987 7988 if (filter_info->rss_info.conf.queue_num) 7989 ixgbe_config_rss_filter(dev, 7990 &filter_info->rss_info, TRUE); 7991 } 7992 7993 static int 7994 ixgbe_filter_restore(struct rte_eth_dev *dev) 7995 { 7996 ixgbe_ntuple_filter_restore(dev); 7997 ixgbe_ethertype_filter_restore(dev); 7998 ixgbe_syn_filter_restore(dev); 7999 ixgbe_fdir_filter_restore(dev); 8000 ixgbe_l2_tn_filter_restore(dev); 8001 ixgbe_rss_filter_restore(dev); 8002 8003 return 0; 8004 } 8005 8006 static void 8007 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) 8008 { 8009 struct ixgbe_l2_tn_info *l2_tn_info = 8010 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8011 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8012 8013 if (l2_tn_info->e_tag_en) 8014 (void)ixgbe_e_tag_enable(hw); 8015 8016 if (l2_tn_info->e_tag_fwd_en) 8017 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); 8018 8019 (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); 8020 } 8021 8022 /* remove all the n-tuple filters */ 8023 void 8024 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) 8025 { 8026 struct ixgbe_filter_info *filter_info = 8027 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8028 struct ixgbe_5tuple_filter *p_5tuple; 8029 8030 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) 8031 ixgbe_remove_5tuple_filter(dev, p_5tuple); 8032 } 8033 8034 /* remove all the ether type filters */ 8035 void 8036 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) 8037 { 8038 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8039 struct ixgbe_filter_info *filter_info = 8040 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8041 int i; 8042 8043 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8044 if (filter_info->ethertype_mask & (1 << i) && 8045 !filter_info->ethertype_filters[i].conf) { 8046 (void)ixgbe_ethertype_filter_remove(filter_info, 8047 (uint8_t)i); 8048 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); 8049 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); 8050 IXGBE_WRITE_FLUSH(hw); 8051 } 8052 } 8053 } 8054 8055 /* remove the SYN filter */ 8056 void 8057 ixgbe_clear_syn_filter(struct rte_eth_dev *dev) 8058 { 8059 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8060 struct ixgbe_filter_info *filter_info = 8061 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8062 8063 if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { 8064 filter_info->syn_info = 0; 8065 8066 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); 8067 IXGBE_WRITE_FLUSH(hw); 8068 } 8069 } 8070 8071 /* remove all the L2 tunnel filters */ 8072 int 8073 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) 8074 { 8075 struct ixgbe_l2_tn_info *l2_tn_info = 8076 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8077 struct ixgbe_l2_tn_filter *l2_tn_filter; 8078 struct 
ixgbe_l2_tunnel_conf l2_tn_conf; 8079 int ret = 0; 8080 8081 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 8082 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; 8083 l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; 8084 l2_tn_conf.pool = l2_tn_filter->pool; 8085 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); 8086 if (ret < 0) 8087 return ret; 8088 } 8089 8090 return 0; 8091 } 8092 8093 void 8094 ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev, 8095 struct ixgbe_macsec_setting *macsec_setting) 8096 { 8097 struct ixgbe_macsec_setting *macsec = 8098 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 8099 8100 macsec->offload_en = macsec_setting->offload_en; 8101 macsec->encrypt_en = macsec_setting->encrypt_en; 8102 macsec->replayprotect_en = macsec_setting->replayprotect_en; 8103 } 8104 8105 void 8106 ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev) 8107 { 8108 struct ixgbe_macsec_setting *macsec = 8109 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 8110 8111 macsec->offload_en = 0; 8112 macsec->encrypt_en = 0; 8113 macsec->replayprotect_en = 0; 8114 } 8115 8116 void 8117 ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev, 8118 struct ixgbe_macsec_setting *macsec_setting) 8119 { 8120 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8121 uint32_t ctrl; 8122 uint8_t en = macsec_setting->encrypt_en; 8123 uint8_t rp = macsec_setting->replayprotect_en; 8124 8125 /** 8126 * Workaround: 8127 * As no ixgbe_disable_sec_rx_path equivalent is 8128 * implemented for tx in the base code, and we are 8129 * not allowed to modify the base code in DPDK, so 8130 * just call the hand-written one directly for now. 8131 * The hardware support has been checked by 8132 * ixgbe_disable_sec_rx_path(). 8133 */ 8134 ixgbe_disable_sec_tx_path_generic(hw); 8135 8136 /* Enable Ethernet CRC (required by MACsec offload) */ 8137 ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 8138 ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; 8139 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl); 8140 8141 /* Enable the TX and RX crypto engines */ 8142 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8143 ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS; 8144 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); 8145 8146 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 8147 ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS; 8148 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); 8149 8150 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); 8151 ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK; 8152 ctrl |= 0x3; 8153 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl); 8154 8155 /* Enable SA lookup */ 8156 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); 8157 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; 8158 ctrl |= en ? 
IXGBE_LSECTXCTRL_AUTH_ENCRYPT : 8159 IXGBE_LSECTXCTRL_AUTH; 8160 ctrl |= IXGBE_LSECTXCTRL_AISCI; 8161 ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK; 8162 ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK; 8163 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); 8164 8165 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); 8166 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; 8167 ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT; 8168 ctrl &= ~IXGBE_LSECRXCTRL_PLSH; 8169 if (rp) 8170 ctrl |= IXGBE_LSECRXCTRL_RP; 8171 else 8172 ctrl &= ~IXGBE_LSECRXCTRL_RP; 8173 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); 8174 8175 /* Start the data paths */ 8176 ixgbe_enable_sec_rx_path(hw); 8177 /** 8178 * Workaround: 8179 * As no ixgbe_enable_sec_rx_path equivalent is 8180 * implemented for tx in the base code, and we are 8181 * not allowed to modify the base code in DPDK, so 8182 * just call the hand-written one directly for now. 8183 */ 8184 ixgbe_enable_sec_tx_path_generic(hw); 8185 } 8186 8187 void 8188 ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev) 8189 { 8190 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8191 uint32_t ctrl; 8192 8193 /** 8194 * Workaround: 8195 * As no ixgbe_disable_sec_rx_path equivalent is 8196 * implemented for tx in the base code, and we are 8197 * not allowed to modify the base code in DPDK, so 8198 * just call the hand-written one directly for now. 8199 * The hardware support has been checked by 8200 * ixgbe_disable_sec_rx_path(). 8201 */ 8202 ixgbe_disable_sec_tx_path_generic(hw); 8203 8204 /* Disable the TX and RX crypto engines */ 8205 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8206 ctrl |= IXGBE_SECTXCTRL_SECTX_DIS; 8207 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); 8208 8209 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 8210 ctrl |= IXGBE_SECRXCTRL_SECRX_DIS; 8211 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); 8212 8213 /* Disable SA lookup */ 8214 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); 8215 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; 8216 ctrl |= IXGBE_LSECTXCTRL_DISABLE; 8217 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); 8218 8219 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); 8220 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; 8221 ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT; 8222 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); 8223 8224 /* Start the data paths */ 8225 ixgbe_enable_sec_rx_path(hw); 8226 /** 8227 * Workaround: 8228 * As no ixgbe_enable_sec_rx_path equivalent is 8229 * implemented for tx in the base code, and we are 8230 * not allowed to modify the base code in DPDK, so 8231 * just call the hand-written one directly for now. 8232 */ 8233 ixgbe_enable_sec_tx_path_generic(hw); 8234 } 8235 8236 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd); 8237 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map); 8238 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci"); 8239 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd); 8240 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map); 8241 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci"); 8242 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf, 8243 IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>"); 8244 8245 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_init, init, NOTICE); 8246 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_driver, driver, NOTICE); 8247 8248 #ifdef RTE_ETHDEV_DEBUG_RX 8249 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_rx, rx, DEBUG); 8250 #endif 8251 #ifdef RTE_ETHDEV_DEBUG_TX 8252 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_tx, tx, DEBUG); 8253 #endif 8254
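/*
 * Illustrative usage sketch (kept under "#if 0", never compiled into the
 * driver): how an application would exercise the ixgbe_get_regs()/
 * ixgbevf_get_regs() contract above through the generic ethdev API.
 * A first call with data == NULL only reports length and width; the second
 * call must request the full dump (length of 0 or the reported length).
 * The function name, the rte_zmalloc tag and the minimal error handling
 * below are assumptions made for this sketch, not part of the driver.
 */
#if 0
static int
example_ixgbe_dump_regs(uint16_t port_id)
{
	struct rte_dev_reg_info info;
	int ret;

	memset(&info, 0, sizeof(info));

	/* Size query: the driver fills in length (in registers) and width. */
	ret = rte_eth_dev_get_reg_info(port_id, &info);
	if (ret != 0)
		return ret;

	info.data = rte_zmalloc("example_regs",
				(size_t)info.length * info.width, 0);
	if (info.data == NULL)
		return -ENOMEM;

	/* Second call performs the full register dump into info.data. */
	ret = rte_eth_dev_get_reg_info(port_id, &info);

	rte_free(info.data);
	return ret;
}
#endif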
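/*
 * Illustrative sketch (not compiled): reading the SFP module EEPROM that
 * ixgbe_get_module_info()/ixgbe_get_module_eeprom() expose. The driver
 * reports SFF-8079 or SFF-8472 framing together with the matching EEPROM
 * length; the helper name and buffer handling here are assumptions.
 */
#if 0
static int
example_ixgbe_read_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info eeprom;
	int ret;

	memset(&modinfo, 0, sizeof(modinfo));
	memset(&eeprom, 0, sizeof(eeprom));

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	eeprom.offset = 0;
	eeprom.length = modinfo.eeprom_len;
	eeprom.data = rte_zmalloc("example_module_eeprom",
				  modinfo.eeprom_len, 0);
	if (eeprom.data == NULL)
		return -ENOMEM;

	/*
	 * Offsets below the SFF-8079 length are served from page 0xA0,
	 * the remainder through the SFF-8472 read path (see
	 * ixgbe_get_module_eeprom() above).
	 */
	ret = rte_eth_dev_get_module_eeprom(port_id, &eeprom);

	rte_free(eeprom.data);
	return ret;
}
#endif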
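/*
 * Illustrative sketch (not compiled): X550-class devices expose a single
 * VXLAN UDP port register, so ixgbe_add_vxlan_port() overwrites any
 * previously configured port and ixgbe_del_vxlan_port() resets it to 0.
 * The calls below are the generic ethdev entry points that reach those
 * handlers; the function name and the choice of port are assumptions
 * (4789 is simply the IANA-assigned VXLAN port).
 */
#if 0
static int
example_ixgbe_set_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};
	int ret;

	/* Replaces whatever port was programmed before (single register). */
	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
	if (ret != 0)
		return ret;

	/* Deletion must name the currently programmed port value. */
	return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
}
#endif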