/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_kvargs.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI	0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO	0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE	0x680

/* Default value of Max Rx Queue */
#define IXGBE_MAX_RX_QUEUE_NUM	128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT	4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT	1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC		4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT		0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME	0x14
#define IXGBE_MAX_RING_DESC		4096 /* replicate define from rxtx */

/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH	32
#define IXGBE_DEFAULT_RX_PTHRESH	8
#define IXGBE_DEFAULT_RX_HTHRESH	8
#define IXGBE_DEFAULT_RX_WTHRESH	0

#define IXGBE_DEFAULT_TX_FREE_THRESH	32
#define IXGBE_DEFAULT_TX_PTHRESH	32
#define IXGBE_DEFAULT_TX_HTHRESH	0
#define IXGBE_DEFAULT_TX_WTHRESH	0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH	32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH	(CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK	RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH	CHAR_BIT
#define IXGBE_8_BIT_MASK	UINT8_MAX

#define IXGBEVF_PMD_NAME	"rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

/* Additional timesync values. */
#define NSEC_PER_SEC		1000000000L
#define IXGBE_INCVAL_10GB	0x66666666
#define IXGBE_INCVAL_1GB	0x40000000
#define IXGBE_INCVAL_100	0x50000000
#define IXGBE_INCVAL_SHIFT_10GB	28
#define IXGBE_INCVAL_SHIFT_1GB	24
#define IXGBE_INCVAL_SHIFT_100	21
#define IXGBE_INCVAL_SHIFT_82599	7
#define IXGBE_INCPER_SHIFT_82599	24

#define IXGBE_CYCLECOUNTER_MASK	0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK	0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG	0x00010000
#define IXGBE_ETAG_ETYPE		0x00005084
#define IXGBE_ETAG_ETYPE_MASK		0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID		0x80000000
#define IXGBE_RAH_ADTYPE		0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK	0x00003fff
#define IXGBE_VMVIR_TAGA_MASK		0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT	0x08000000
#define IXGBE_VMTIR(_i)	(0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG		0x00000004
#define IXGBE_VTEICR_MASK		0x07

#define IXGBE_EXVET_VET_EXT_SHIFT	16
#define IXGBE_DMATXCTL_VT_MASK		0xFFFF0000

#define IXGBEVF_DEVARG_PFLINK_FULLCHK	"pflink_fullchk"

static const char * const ixgbevf_valid_arguments[] = {
	IXGBEVF_DEVARG_PFLINK_FULLCHK,
	NULL
};

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static int ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int ixgbe_dev_close(struct rte_eth_dev *dev);
static int ixgbe_dev_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
		struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		const uint64_t *ids,
		unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
		uint16_t queue_id,
		uint8_t stat_idx,
		uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		size_t fw_size);
static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
		int on);
static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
		int mask);
static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void *ixgbe_dev_setup_link_thread_handler(void *param);
static int ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev,
		uint32_t timeout_ms);

static int ixgbe_add_rar(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
		struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int ixgbevf_dev_stop(struct rte_eth_dev *dev);
static int ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
		uint16_t queue, int on);
static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
		uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
		rte_ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
		struct rte_eth_mirror_conf *mirror_conf,
		uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
		uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
		uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
		struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
		struct ixgbe_5tuple_filter *filter);
static int ixgbe_dev_flow_ops_get(struct rte_eth_dev *dev,
		const struct rte_flow_ops **ops);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
		struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);

static int ixgbe_get_module_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_module_info *modinfo);
static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *info);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp,
		uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
		struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
		const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw);

/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur) \
{ \
	uint32_t latest = IXGBE_READ_REG(hw, reg); \
	cur += (latest - last) & UINT_MAX; \
	last = latest; \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \
{ \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb); \
	u64 new_msb = IXGBE_READ_REG(hw, msb); \
	u64 latest = ((new_msb << 32) | new_lsb); \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest; \
}

#define IXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
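/*
 * Editor's illustrative sketch (not part of the driver): how the wrap-around
 * arithmetic in UPDATE_VF_STAT behaves. The VF counters are free-running
 * 32-bit registers that are not cleared on read, so only the delta since the
 * last snapshot is accumulated, and the unsigned subtraction masked with
 * UINT_MAX stays correct when the hardware counter wraps past 2^32. The
 * sample values below are chosen by the editor, not taken from hardware.
 */
#if 0	/* example only, never compiled */
	uint32_t last = 0xFFFFFFF0;	/* previous register snapshot */
	uint32_t latest = 0x00000010;	/* register has wrapped past 2^32 */
	uint64_t cur = 0;

	/* (0x10 - 0xFFFFFFF0) & UINT_MAX == 0x20, the true increment */
	cur += (latest - last) & UINT_MAX;

	/* Similarly, IXGBE_SET_HWSTRIP(h, 37) touches bitmap[1], bit 5,
	 * assuming 32-bit bitmap words (NBBY == 8): 37 / 32 == 1, 37 % 32 == 5.
	 */
#endif
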
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
	{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_TXD_ALIGN,
	.nb_seg_max = IXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure = ixgbe_dev_configure,
	.dev_start = ixgbe_dev_start,
	.dev_stop = ixgbe_dev_stop,
	.dev_set_link_up = ixgbe_dev_set_link_up,
	.dev_set_link_down = ixgbe_dev_set_link_down,
	.dev_close = ixgbe_dev_close,
	.dev_reset = ixgbe_dev_reset,
	.promiscuous_enable = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update = ixgbe_dev_link_update,
	.stats_get = ixgbe_dev_stats_get,
	.xstats_get = ixgbe_dev_xstats_get,
	.xstats_get_by_id = ixgbe_dev_xstats_get_by_id,
	.stats_reset = ixgbe_dev_stats_reset,
	.xstats_reset = ixgbe_dev_xstats_reset,
	.xstats_get_names = ixgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
	.fw_version_get = ixgbe_fw_version_get,
	.dev_infos_get = ixgbe_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set = ixgbe_dev_mtu_set,
	.vlan_filter_set = ixgbe_vlan_filter_set,
	.vlan_tpid_set = ixgbe_vlan_tpid_set,
	.vlan_offload_set = ixgbe_vlan_offload_set,
	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
	.rx_queue_start = ixgbe_dev_rx_queue_start,
	.rx_queue_stop = ixgbe_dev_rx_queue_stop,
	.tx_queue_start = ixgbe_dev_tx_queue_start,
	.tx_queue_stop = ixgbe_dev_tx_queue_stop,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
	.rx_queue_release = ixgbe_dev_rx_queue_release,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.tx_queue_release = ixgbe_dev_tx_queue_release,
	.dev_led_on = ixgbe_dev_led_on,
	.dev_led_off = ixgbe_dev_led_off,
	.flow_ctrl_get = ixgbe_flow_ctrl_get,
	.flow_ctrl_set = ixgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
	.mac_addr_add = ixgbe_add_rar,
	.mac_addr_remove = ixgbe_remove_rar,
	.mac_addr_set = ixgbe_set_default_mac_addr,
	.uc_hash_table_set = ixgbe_uc_hash_table_set,
	.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
	.mirror_rule_set = ixgbe_mirror_rule_set,
	.mirror_rule_reset = ixgbe_mirror_rule_reset,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
	.reta_update = ixgbe_dev_rss_reta_update,
	.reta_query = ixgbe_dev_rss_reta_query,
	.rss_hash_update = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
	.flow_ops_get = ixgbe_dev_flow_ops_get,
	.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get = ixgbe_rxq_info_get,
	.txq_info_get = ixgbe_txq_info_get,
	.timesync_enable = ixgbe_timesync_enable,
	.timesync_disable = ixgbe_timesync_disable,
	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
	.get_reg = ixgbe_get_regs,
	.get_eeprom_length = ixgbe_get_eeprom_length,
	.get_eeprom = ixgbe_get_eeprom,
	.set_eeprom = ixgbe_set_eeprom,
	.get_module_info = ixgbe_get_module_info,
	.get_module_eeprom = ixgbe_get_module_eeprom,
	.get_dcb_info = ixgbe_dev_get_dcb_info,
	.timesync_adjust_time = ixgbe_timesync_adjust_time,
	.timesync_read_time = ixgbe_timesync_read_time,
	.timesync_write_time = ixgbe_timesync_write_time,
	.udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del,
	.tm_ops_get = ixgbe_tm_ops_get,
	.tx_done_cleanup = ixgbe_dev_tx_done_cleanup,
	.get_monitor_addr = ixgbe_get_monitor_addr,
};

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure = ixgbevf_dev_configure,
	.dev_start = ixgbevf_dev_start,
	.dev_stop = ixgbevf_dev_stop,
	.link_update = ixgbevf_dev_link_update,
	.stats_get = ixgbevf_dev_stats_get,
	.xstats_get = ixgbevf_dev_xstats_get,
	.stats_reset = ixgbevf_dev_stats_reset,
	.xstats_reset = ixgbevf_dev_stats_reset,
	.xstats_get_names = ixgbevf_dev_xstats_get_names,
	.dev_close = ixgbevf_dev_close,
	.dev_reset = ixgbevf_dev_reset,
	.promiscuous_enable = ixgbevf_dev_promiscuous_enable,
	.promiscuous_disable = ixgbevf_dev_promiscuous_disable,
	.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
	.dev_infos_get = ixgbevf_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set = ixgbevf_dev_set_mtu,
	.vlan_filter_set = ixgbevf_vlan_filter_set,
	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
	.vlan_offload_set = ixgbevf_vlan_offload_set,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.rx_queue_release = ixgbe_dev_rx_queue_release,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.tx_queue_release = ixgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
	.mac_addr_add = ixgbevf_add_mac_addr,
	.mac_addr_remove = ixgbevf_remove_mac_addr,
	.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get = ixgbe_rxq_info_get,
	.txq_info_get = ixgbe_txq_info_get,
	.mac_addr_set = ixgbevf_set_default_mac_addr,
	.get_reg = ixgbevf_get_regs,
	.reta_update = ixgbe_dev_rss_reta_update,
	.reta_query = ixgbe_dev_rss_reta_query,
	.rss_hash_update = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
	.tx_done_cleanup = ixgbe_dev_tx_done_cleanup,
	.get_monitor_addr = ixgbe_get_monitor_addr,
};

/* store statistics names and its offset in stats structure */
struct rte_ixgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, prc1522)},
	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, ptc1522)},
	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, fdirustat_add)},
	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, fdirustat_remove)},
	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, fdirfstat_fadd)},
	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, fdirfstat_fremove)},
	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, fdirmatch)},
	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, fdirmiss)},

	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, fclast)},
	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, fcoe_noddp)},
	{"rx_fcoe_no_direct_data_placement_ext_buff",
		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
			   sizeof(rte_ixgbe_stats_strings[0]))

/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats, out_pkts_untagged)},
	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats, out_pkts_encrypted)},
	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats, out_pkts_protected)},
	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats, out_octets_encrypted)},
	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats, out_octets_protected)},
	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats, in_pkts_untagged)},
	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats, in_pkts_badtag)},
	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats, in_pkts_nosci)},
	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats, in_pkts_unknownsci)},
	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats, in_octets_decrypted)},
	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats, in_octets_validated)},
	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats, in_pkts_unchecked)},
	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats, in_pkts_delayed)},
	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats, in_pkts_late)},
	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats, in_pkts_ok)},
	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats, in_pkts_invalid)},
	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats, in_pkts_notvalid)},
	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats, in_pkts_unusedsa)},
	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats, in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
			       sizeof(rte_ixgbe_macsec_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
				 sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
				 sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
			   sizeof(rte_ixgbevf_stats_strings[0]))

/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		return 0;
	}
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = ixgbe_reset_hw(hw);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
		status = IXGBE_SUCCESS;
	return status;
}

static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset.
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}


static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if ((hw->mac.type != ixgbe_mac_82599EB) &&
	    (hw->mac.type != ixgbe_mac_X540) &&
	    (hw->mac.type != ixgbe_mac_X550) &&
	    (hw->mac.type != ixgbe_mac_X550EM_x) &&
	    (hw->mac.type != ixgbe_mac_X550EM_a))
		return -ENOSYS;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsmr[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsmr[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

	/* Now write the mapping in the appropriate register */
	if (is_rx) {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
	} else {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
	}
	return 0;
}
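/*
 * Editor's note, a worked example of the RQSMR/TQSM field packing done above
 * (illustrative only, values chosen by the editor): each 32-bit mapping
 * register holds four 8-bit fields, so queue_id selects the register with a
 * divide and the field within it with a modulo.
 */
#if 0	/* example only, never compiled */
	uint8_t n, offset;
	uint32_t qsmr_mask;

	/* queue_id = 5, stat_idx = 3, is_rx = 1 */
	n = 5 / NB_QMAP_FIELDS_PER_QSM_REG;	/* = 1, i.e. RQSMR[1]   */
	offset = 5 % NB_QMAP_FIELDS_PER_QSM_REG;	/* = 1, i.e. bits 8..15 */
	qsmr_mask = (3 & QMAP_FIELD_RESERVED_BITS_MASK)
		<< (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);	/* 0x00000300 */
#endif
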
static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i;

	/* write whatever was in stat mapping table to the NIC */
	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
		/* rx */
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

		/* tx */
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
	}
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
	uint8_t i;
	struct ixgbe_dcb_tc_config *tc;
	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
	for (i = 0; i < dcb_max_tc; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
			(uint8_t)(100/dcb_max_tc + (i & 1));
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
			(uint8_t)(100/dcb_max_tc + (i & 1));
		tc->pfc = ixgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
	}
	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities in 82599 */
	dcb_config->support.capabilities = 0xFF;

	/* we only support 4 Tcs for X540, X550 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x ||
	    hw->mac.type == ixgbe_mac_X550EM_a) {
		dcb_config->num_tcs.pg_tcs = 4;
		dcb_config->num_tcs.pfc_tcs = 4;
	}
}
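/*
 * Editor's note on the bwg_percent arithmetic above (illustrative only):
 * assuming IXGBE_DCB_MAX_TRAFFIC_CLASS is 8 as in the base code, 100 / 8 == 12
 * and the (i & 1) term adds 1 to every odd traffic class, so the eight classes
 * receive 12, 13, 12, 13, 12, 13, 12, 13 percent, which sums to exactly 100.
 */
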
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * Phy lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock. Release of common lock
	 * is done automatically by swfw_sync function.
	 */
	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
	}
	ixgbe_release_swfw_semaphore(hw, mask);

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
	}
	ixgbe_release_swfw_semaphore(hw, mask);
}

/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct ixgbe_adapter *ad = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_bw_conf *bw_conf =
		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
	uint32_t ctrl_ext;
	uint16_t csum;
	int diag, i, ret;

	PMD_INIT_FUNC_TRACE();

	ixgbe_dev_macsec_setting_reset(eth_dev);

	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_queue_count = ixgbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done;
	eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not be initialized by primary process
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
			ixgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_atomic32_clear(&ad->link_thread_running);
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Initialize the shared code (base driver) */
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_shared_code(hw);
#else
	diag = ixgbe_init_shared_code(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
		PMD_INIT_LOG(ERR, "\nERROR: "
			     "Firmware recovery mode detected. Limiting functionality.\n"
			     "Refer to the Intel(R) Ethernet Adapters and Devices "
			     "User Guide for details on firmware recovery mode.");
		return -EIO;
	}

	/* pick up the PCI bus settings for reporting later */
	ixgbe_get_bus_info(hw);

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process*/
	if (ixgbe_ipsec_ctx_create(eth_dev))
		return -ENOMEM;
#endif

	/* Initialize DCB configuration*/
	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
	ixgbe_dcb_init(hw, dcb_config);
	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_none;
	hw->fc.current_mode = ixgbe_fc_none;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		hw->fc.low_water[i] = IXGBE_FC_LO;
		hw->fc.high_water[i] = IXGBE_FC_HI;
	}
	hw->fc.send_xon = 1;

	/* Make sure we have a good EEPROM before we read from it */
	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
		return -EIO;
	}

#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_hw(hw);
#else
	diag = ixgbe_init_hw(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	/*
	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
	 * also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the bios. A delay of 100ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
		rte_delay_ms(200);
		diag = ixgbe_init_hw(hw);
	}

	if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
		diag = IXGBE_SUCCESS;

	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your Intel or hardware representative "
			     "who provided you with this hardware.");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	if (diag) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
		return -EIO;
	}

	/* Reset the hw statistics */
	ixgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ixgbe_disable_intr(hw);

	/* reset mappings for queue statistics hw counters*/
	ixgbe_reset_qstat_mappings(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc(
		"ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap*/
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ixgbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* let hardware know driver is loaded */
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ixgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ixgbe_enable_intr(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct ixgbe_filter_info));

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* initialize flow director filter list & hash */
	ixgbe_fdir_filter_init(eth_dev);

	/* initialize l2 tunnel filter list & hash */
	ixgbe_l2_tn_filter_init(eth_dev);

	/* initialize flow filter lists */
	ixgbe_filterlist_init();

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));

	/* initialize Traffic Manager configuration */
	ixgbe_tm_conf_init(eth_dev);

	return 0;
}

static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ixgbe_dev_close(eth_dev);

	return 0;
}

static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple,
			     entries);
		rte_free(p_5tuple);
	}
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);

	return 0;
}

static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	struct ixgbe_fdir_filter *fdir_filter;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_handle)
		rte_hash_free(fdir_info->hash_handle);

	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	return 0;
}

static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;

	if (l2_tn_info->hash_map)
		rte_free(l2_tn_info->hash_map);
	if (l2_tn_info->hash_handle)
		rte_hash_free(l2_tn_info->hash_handle);

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
			     l2_tn_filter,
			     entries);
		rte_free(l2_tn_filter);
	}

	return 0;
}

static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(union ixgbe_atr_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", eth_dev->device->name);
	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ixgbe",
					  sizeof(struct ixgbe_fdir_filter *) *
					  IXGBE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		rte_hash_free(fdir_info->hash_handle);
		return -ENOMEM;
	}
	fdir_info->mask_added = FALSE;

	return 0;
}
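/*
 * Editor's sketch (illustrative only, never called): the usual pattern for a
 * hash handle paired with a hash_map array like the one created above -- the
 * key is inserted with rte_hash_add_key(), and the returned slot index is
 * used to park the filter pointer for later lookups. "some_filter" is a
 * hypothetical object introduced only for this example.
 */
#if 0	/* example only, never compiled */
	union ixgbe_atr_input key;
	struct ixgbe_fdir_filter *filter = some_filter;
	int32_t pos;

	memset(&key, 0, sizeof(key));
	pos = rte_hash_add_key(fdir_info->hash_handle, &key);
	if (pos >= 0)
		fdir_info->hash_map[pos] = filter;
	/* later: pos = rte_hash_lookup(fdir_info->hash_handle, &key); */
#endif
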
0; 1404 } 1405 1406 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) 1407 { 1408 struct ixgbe_l2_tn_info *l2_tn_info = 1409 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1410 char l2_tn_hash_name[RTE_HASH_NAMESIZE]; 1411 struct rte_hash_parameters l2_tn_hash_params = { 1412 .name = l2_tn_hash_name, 1413 .entries = IXGBE_MAX_L2_TN_FILTER_NUM, 1414 .key_len = sizeof(struct ixgbe_l2_tn_key), 1415 .hash_func = rte_hash_crc, 1416 .hash_func_init_val = 0, 1417 .socket_id = rte_socket_id(), 1418 }; 1419 1420 TAILQ_INIT(&l2_tn_info->l2_tn_list); 1421 snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, 1422 "l2_tn_%s", eth_dev->device->name); 1423 l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); 1424 if (!l2_tn_info->hash_handle) { 1425 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); 1426 return -EINVAL; 1427 } 1428 l2_tn_info->hash_map = rte_zmalloc("ixgbe", 1429 sizeof(struct ixgbe_l2_tn_filter *) * 1430 IXGBE_MAX_L2_TN_FILTER_NUM, 1431 0); 1432 if (!l2_tn_info->hash_map) { 1433 PMD_INIT_LOG(ERR, 1434 "Failed to allocate memory for L2 TN hash map!"); 1435 rte_hash_free(l2_tn_info->hash_handle); 1436 return -ENOMEM; 1437 } 1438 l2_tn_info->e_tag_en = FALSE; 1439 l2_tn_info->e_tag_fwd_en = FALSE; 1440 l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; 1441 1442 return 0; 1443 } 1444 /* 1445 * Negotiate mailbox API version with the PF. 1446 * After reset API version is always set to the basic one (ixgbe_mbox_api_10). 1447 * Then we try to negotiate starting with the most recent one. 1448 * If all negotiation attempts fail, then we will proceed with 1449 * the default one (ixgbe_mbox_api_10). 1450 */ 1451 static void 1452 ixgbevf_negotiate_api(struct ixgbe_hw *hw) 1453 { 1454 int32_t i; 1455 1456 /* start with highest supported, proceed down */ 1457 static const enum ixgbe_pfvf_api_rev sup_ver[] = { 1458 ixgbe_mbox_api_13, 1459 ixgbe_mbox_api_12, 1460 ixgbe_mbox_api_11, 1461 ixgbe_mbox_api_10, 1462 }; 1463 1464 for (i = 0; 1465 i != RTE_DIM(sup_ver) && 1466 ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; 1467 i++) 1468 ; 1469 } 1470 1471 static void 1472 generate_random_mac_addr(struct rte_ether_addr *mac_addr) 1473 { 1474 uint64_t random; 1475 1476 /* Set Organizationally Unique Identifier (OUI) prefix. */ 1477 mac_addr->addr_bytes[0] = 0x00; 1478 mac_addr->addr_bytes[1] = 0x09; 1479 mac_addr->addr_bytes[2] = 0xC0; 1480 /* Force indication of locally assigned MAC address. */ 1481 mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; 1482 /* Generate the last 3 bytes of the MAC address with a random number. 
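For example (assuming a little-endian host), a rte_rand() value of 0x112233445566 yields the address 02:09:C0:66:55:44: the three lowest-order bytes of the random value fill addr_bytes[3..5], and the locally-administered bit set above turns the leading 0x00 into 0x02.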
*/ 1483 random = rte_rand(); 1484 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1485 } 1486 1487 static int 1488 devarg_handle_int(__rte_unused const char *key, const char *value, 1489 void *extra_args) 1490 { 1491 uint16_t *n = extra_args; 1492 1493 if (value == NULL || extra_args == NULL) 1494 return -EINVAL; 1495 1496 *n = (uint16_t)strtoul(value, NULL, 0); 1497 if (*n == USHRT_MAX && errno == ERANGE) 1498 return -1; 1499 1500 return 0; 1501 } 1502 1503 static void 1504 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, 1505 struct rte_devargs *devargs) 1506 { 1507 struct rte_kvargs *kvlist; 1508 uint16_t pflink_fullchk; 1509 1510 if (devargs == NULL) 1511 return; 1512 1513 kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); 1514 if (kvlist == NULL) 1515 return; 1516 1517 if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && 1518 rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, 1519 devarg_handle_int, &pflink_fullchk) == 0 && 1520 pflink_fullchk == 1) 1521 adapter->pflink_fullchk = 1; 1522 1523 rte_kvargs_free(kvlist); 1524 } 1525 1526 /* 1527 * Virtual Function device init 1528 */ 1529 static int 1530 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1531 { 1532 int diag; 1533 uint32_t tc, tcs; 1534 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1535 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1536 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1537 struct ixgbe_hw *hw = 1538 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1539 struct ixgbe_vfta *shadow_vfta = 1540 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1541 struct ixgbe_hwstrip *hwstrip = 1542 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1543 struct rte_ether_addr *perm_addr = 1544 (struct rte_ether_addr *)hw->mac.perm_addr; 1545 1546 PMD_INIT_FUNC_TRACE(); 1547 1548 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1549 eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done; 1550 eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status; 1551 eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status; 1552 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1553 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1554 1555 /* for secondary processes, we don't initialise any further as primary 1556 * has already done this work. Only check we don't need a different 1557 * RX function 1558 */ 1559 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1560 struct ixgbe_tx_queue *txq; 1561 /* TX queue function in primary, set by last queue initialized 1562 * Tx queue may not initialized by primary process 1563 */ 1564 if (eth_dev->data->tx_queues) { 1565 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1566 ixgbe_set_tx_function(eth_dev, txq); 1567 } else { 1568 /* Use default TX function if we get here */ 1569 PMD_INIT_LOG(NOTICE, 1570 "No TX queues configured yet. 
Using default TX function."); 1571 } 1572 1573 ixgbe_set_rx_function(eth_dev); 1574 1575 return 0; 1576 } 1577 1578 rte_atomic32_clear(&ad->link_thread_running); 1579 ixgbevf_parse_devargs(eth_dev->data->dev_private, 1580 pci_dev->device.devargs); 1581 1582 rte_eth_copy_pci_info(eth_dev, pci_dev); 1583 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1584 1585 hw->device_id = pci_dev->id.device_id; 1586 hw->vendor_id = pci_dev->id.vendor_id; 1587 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1588 1589 /* initialize the vfta */ 1590 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1591 1592 /* initialize the hw strip bitmap*/ 1593 memset(hwstrip, 0, sizeof(*hwstrip)); 1594 1595 /* Initialize the shared code (base driver) */ 1596 diag = ixgbe_init_shared_code(hw); 1597 if (diag != IXGBE_SUCCESS) { 1598 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1599 return -EIO; 1600 } 1601 1602 /* init_mailbox_params */ 1603 hw->mbx.ops.init_params(hw); 1604 1605 /* Reset the hw statistics */ 1606 ixgbevf_dev_stats_reset(eth_dev); 1607 1608 /* Disable the interrupts for VF */ 1609 ixgbevf_intr_disable(eth_dev); 1610 1611 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1612 diag = hw->mac.ops.reset_hw(hw); 1613 1614 /* 1615 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1616 * the underlying PF driver has not assigned a MAC address to the VF. 1617 * In this case, assign a random MAC address. 1618 */ 1619 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1620 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1621 /* 1622 * This error code will be propagated to the app by 1623 * rte_eth_dev_reset, so use a public error code rather than 1624 * the internal-only IXGBE_ERR_RESET_FAILED 1625 */ 1626 return -EAGAIN; 1627 } 1628 1629 /* negotiate mailbox API version to use with the PF. */ 1630 ixgbevf_negotiate_api(hw); 1631 1632 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1633 ixgbevf_get_queues(hw, &tcs, &tc); 1634 1635 /* Allocate memory for storing MAC addresses */ 1636 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * 1637 hw->mac.num_rar_entries, 0); 1638 if (eth_dev->data->mac_addrs == NULL) { 1639 PMD_INIT_LOG(ERR, 1640 "Failed to allocate %u bytes needed to store " 1641 "MAC addresses", 1642 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1643 return -ENOMEM; 1644 } 1645 1646 /* Generate a random MAC address, if none was assigned by PF. 
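The permanent address is normally obtained from the PF over the mailbox during reset_hw() above; an all-zero perm_addr means the PF did not assign one, so a locally administered address is generated and logged below.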
*/ 1647 if (rte_is_zero_ether_addr(perm_addr)) { 1648 generate_random_mac_addr(perm_addr); 1649 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); 1650 if (diag) { 1651 rte_free(eth_dev->data->mac_addrs); 1652 eth_dev->data->mac_addrs = NULL; 1653 return diag; 1654 } 1655 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 1656 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 1657 RTE_ETHER_ADDR_PRT_FMT, 1658 RTE_ETHER_ADDR_BYTES(perm_addr)); 1659 } 1660 1661 /* Copy the permanent MAC address */ 1662 rte_ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]); 1663 1664 /* reset the hardware with the new settings */ 1665 diag = hw->mac.ops.start_hw(hw); 1666 switch (diag) { 1667 case 0: 1668 break; 1669 1670 default: 1671 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1672 rte_free(eth_dev->data->mac_addrs); 1673 eth_dev->data->mac_addrs = NULL; 1674 return -EIO; 1675 } 1676 1677 rte_intr_callback_register(intr_handle, 1678 ixgbevf_dev_interrupt_handler, eth_dev); 1679 rte_intr_enable(intr_handle); 1680 ixgbevf_intr_enable(eth_dev); 1681 1682 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", 1683 eth_dev->data->port_id, pci_dev->id.vendor_id, 1684 pci_dev->id.device_id, "ixgbe_mac_82599_vf"); 1685 1686 return 0; 1687 } 1688 1689 /* Virtual Function device uninit */ 1690 1691 static int 1692 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) 1693 { 1694 PMD_INIT_FUNC_TRACE(); 1695 1696 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1697 return 0; 1698 1699 ixgbevf_dev_close(eth_dev); 1700 1701 return 0; 1702 } 1703 1704 static int 1705 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1706 struct rte_pci_device *pci_dev) 1707 { 1708 char name[RTE_ETH_NAME_MAX_LEN]; 1709 struct rte_eth_dev *pf_ethdev; 1710 struct rte_eth_devargs eth_da; 1711 int i, retval; 1712 1713 if (pci_dev->device.devargs) { 1714 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, 1715 ð_da); 1716 if (retval) 1717 return retval; 1718 } else 1719 memset(ð_da, 0, sizeof(eth_da)); 1720 1721 if (eth_da.nb_representor_ports > 0 && 1722 eth_da.type != RTE_ETH_REPRESENTOR_VF) { 1723 PMD_DRV_LOG(ERR, "unsupported representor type: %s\n", 1724 pci_dev->device.devargs->args); 1725 return -ENOTSUP; 1726 } 1727 1728 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 1729 sizeof(struct ixgbe_adapter), 1730 eth_dev_pci_specific_init, pci_dev, 1731 eth_ixgbe_dev_init, NULL); 1732 1733 if (retval || eth_da.nb_representor_ports < 1) 1734 return retval; 1735 1736 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1737 if (pf_ethdev == NULL) 1738 return -ENODEV; 1739 1740 /* probe VF representor ports */ 1741 for (i = 0; i < eth_da.nb_representor_ports; i++) { 1742 struct ixgbe_vf_info *vfinfo; 1743 struct ixgbe_vf_representor representor; 1744 1745 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( 1746 pf_ethdev->data->dev_private); 1747 if (vfinfo == NULL) { 1748 PMD_DRV_LOG(ERR, 1749 "no virtual functions supported by PF"); 1750 break; 1751 } 1752 1753 representor.vf_id = eth_da.representor_ports[i]; 1754 representor.switch_domain_id = vfinfo->switch_domain_id; 1755 representor.pf_ethdev = pf_ethdev; 1756 1757 /* representor port net_bdf_port */ 1758 snprintf(name, sizeof(name), "net_%s_representor_%d", 1759 pci_dev->device.name, 1760 eth_da.representor_ports[i]); 1761 1762 retval = rte_eth_dev_create(&pci_dev->device, name, 1763 sizeof(struct ixgbe_vf_representor), NULL, NULL, 1764 ixgbe_vf_representor_init, &representor); 1765 
1766 if (retval) 1767 PMD_DRV_LOG(ERR, "failed to create ixgbe vf " 1768 "representor %s.", name); 1769 } 1770 1771 return 0; 1772 } 1773 1774 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) 1775 { 1776 struct rte_eth_dev *ethdev; 1777 1778 ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1779 if (!ethdev) 1780 return 0; 1781 1782 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 1783 return rte_eth_dev_pci_generic_remove(pci_dev, 1784 ixgbe_vf_representor_uninit); 1785 else 1786 return rte_eth_dev_pci_generic_remove(pci_dev, 1787 eth_ixgbe_dev_uninit); 1788 } 1789 1790 static struct rte_pci_driver rte_ixgbe_pmd = { 1791 .id_table = pci_id_ixgbe_map, 1792 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1793 .probe = eth_ixgbe_pci_probe, 1794 .remove = eth_ixgbe_pci_remove, 1795 }; 1796 1797 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1798 struct rte_pci_device *pci_dev) 1799 { 1800 return rte_eth_dev_pci_generic_probe(pci_dev, 1801 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); 1802 } 1803 1804 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) 1805 { 1806 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); 1807 } 1808 1809 /* 1810 * virtual function driver struct 1811 */ 1812 static struct rte_pci_driver rte_ixgbevf_pmd = { 1813 .id_table = pci_id_ixgbevf_map, 1814 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 1815 .probe = eth_ixgbevf_pci_probe, 1816 .remove = eth_ixgbevf_pci_remove, 1817 }; 1818 1819 static int 1820 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1821 { 1822 struct ixgbe_hw *hw = 1823 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1824 struct ixgbe_vfta *shadow_vfta = 1825 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1826 uint32_t vfta; 1827 uint32_t vid_idx; 1828 uint32_t vid_bit; 1829 1830 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1831 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1832 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1833 if (on) 1834 vfta |= vid_bit; 1835 else 1836 vfta &= ~vid_bit; 1837 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1838 1839 /* update local VFTA copy */ 1840 shadow_vfta->vfta[vid_idx] = vfta; 1841 1842 return 0; 1843 } 1844 1845 static void 1846 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1847 { 1848 if (on) 1849 ixgbe_vlan_hw_strip_enable(dev, queue); 1850 else 1851 ixgbe_vlan_hw_strip_disable(dev, queue); 1852 } 1853 1854 static int 1855 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1856 enum rte_vlan_type vlan_type, 1857 uint16_t tpid) 1858 { 1859 struct ixgbe_hw *hw = 1860 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1861 int ret = 0; 1862 uint32_t reg; 1863 uint32_t qinq; 1864 1865 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1866 qinq &= IXGBE_DMATXCTL_GDV; 1867 1868 switch (vlan_type) { 1869 case ETH_VLAN_TYPE_INNER: 1870 if (qinq) { 1871 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1872 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1873 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1874 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1875 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1876 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1877 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1878 } else { 1879 ret = -ENOTSUP; 1880 PMD_DRV_LOG(ERR, "Inner type is not supported" 1881 " by single VLAN"); 1882 } 1883 break; 1884 case ETH_VLAN_TYPE_OUTER: 1885 if (qinq) { 1886 /* Only the high 16-bits is valid */ 1887 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 1888 
IXGBE_EXVET_VET_EXT_SHIFT); 1889 } else { 1890 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1891 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1892 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1893 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1894 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1895 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1896 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1897 } 1898 1899 break; 1900 default: 1901 ret = -EINVAL; 1902 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1903 break; 1904 } 1905 1906 return ret; 1907 } 1908 1909 void 1910 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1911 { 1912 struct ixgbe_hw *hw = 1913 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1914 uint32_t vlnctrl; 1915 1916 PMD_INIT_FUNC_TRACE(); 1917 1918 /* Filter Table Disable */ 1919 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1920 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1921 1922 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1923 } 1924 1925 void 1926 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1927 { 1928 struct ixgbe_hw *hw = 1929 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1930 struct ixgbe_vfta *shadow_vfta = 1931 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1932 uint32_t vlnctrl; 1933 uint16_t i; 1934 1935 PMD_INIT_FUNC_TRACE(); 1936 1937 /* Filter Table Enable */ 1938 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1939 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1940 vlnctrl |= IXGBE_VLNCTRL_VFE; 1941 1942 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1943 1944 /* write whatever is in local vfta copy */ 1945 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1946 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 1947 } 1948 1949 static void 1950 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 1951 { 1952 struct ixgbe_hwstrip *hwstrip = 1953 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 1954 struct ixgbe_rx_queue *rxq; 1955 1956 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 1957 return; 1958 1959 if (on) 1960 IXGBE_SET_HWSTRIP(hwstrip, queue); 1961 else 1962 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 1963 1964 if (queue >= dev->data->nb_rx_queues) 1965 return; 1966 1967 rxq = dev->data->rx_queues[queue]; 1968 1969 if (on) { 1970 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; 1971 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 1972 } else { 1973 rxq->vlan_flags = PKT_RX_VLAN; 1974 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 1975 } 1976 } 1977 1978 static void 1979 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 1980 { 1981 struct ixgbe_hw *hw = 1982 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1983 uint32_t ctrl; 1984 1985 PMD_INIT_FUNC_TRACE(); 1986 1987 if (hw->mac.type == ixgbe_mac_82598EB) { 1988 /* No queue level support */ 1989 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 1990 return; 1991 } 1992 1993 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1994 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 1995 ctrl &= ~IXGBE_RXDCTL_VME; 1996 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 1997 1998 /* record those setting for HW strip per queue */ 1999 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 2000 } 2001 2002 static void 2003 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 2004 { 2005 struct ixgbe_hw *hw = 2006 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2007 uint32_t ctrl; 2008 2009 PMD_INIT_FUNC_TRACE(); 2010 2011 if (hw->mac.type == ixgbe_mac_82598EB) { 2012 /* No queue level supported */ 2013 PMD_INIT_LOG(NOTICE, "82598EB not support 
queue level hw strip"); 2014 return; 2015 } 2016 2017 /* Other 10G NIC, the VLAN strip can be set up per queue in RXDCTL */ 2018 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2019 ctrl |= IXGBE_RXDCTL_VME; 2020 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2021 2022 /* record this setting for HW strip per queue */ 2023 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); 2024 } 2025 2026 static void 2027 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2028 { 2029 struct ixgbe_hw *hw = 2030 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2031 uint32_t ctrl; 2032 2033 PMD_INIT_FUNC_TRACE(); 2034 2035 /* DMATXCTRL: Generic Double VLAN Disable */ 2036 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2037 ctrl &= ~IXGBE_DMATXCTL_GDV; 2038 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2039 2040 /* CTRL_EXT: Global Double VLAN Disable */ 2041 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2042 ctrl &= ~IXGBE_EXTENDED_VLAN; 2043 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2044 2045 } 2046 2047 static void 2048 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2049 { 2050 struct ixgbe_hw *hw = 2051 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2052 uint32_t ctrl; 2053 2054 PMD_INIT_FUNC_TRACE(); 2055 2056 /* DMATXCTRL: Generic Double VLAN Enable */ 2057 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2058 ctrl |= IXGBE_DMATXCTL_GDV; 2059 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2060 2061 /* CTRL_EXT: Global Double VLAN Enable */ 2062 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2063 ctrl |= IXGBE_EXTENDED_VLAN; 2064 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2065 2066 /* Clear pooling mode of PFVTCTL. It's required by X550. */ 2067 if (hw->mac.type == ixgbe_mac_X550 || 2068 hw->mac.type == ixgbe_mac_X550EM_x || 2069 hw->mac.type == ixgbe_mac_X550EM_a) { 2070 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 2071 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 2072 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 2073 } 2074 2075 /* 2076 * VET EXT field in the EXVET register = 0x8100 by default 2077 * So no need to change.
Same to VT field of DMATXCTL register 2078 */ 2079 } 2080 2081 void 2082 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) 2083 { 2084 struct ixgbe_hw *hw = 2085 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2086 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 2087 uint32_t ctrl; 2088 uint16_t i; 2089 struct ixgbe_rx_queue *rxq; 2090 bool on; 2091 2092 PMD_INIT_FUNC_TRACE(); 2093 2094 if (hw->mac.type == ixgbe_mac_82598EB) { 2095 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 2096 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2097 ctrl |= IXGBE_VLNCTRL_VME; 2098 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2099 } else { 2100 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2101 ctrl &= ~IXGBE_VLNCTRL_VME; 2102 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2103 } 2104 } else { 2105 /* 2106 * Other 10G NIC, the VLAN strip can be setup 2107 * per queue in RXDCTL 2108 */ 2109 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2110 rxq = dev->data->rx_queues[i]; 2111 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 2112 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 2113 ctrl |= IXGBE_RXDCTL_VME; 2114 on = TRUE; 2115 } else { 2116 ctrl &= ~IXGBE_RXDCTL_VME; 2117 on = FALSE; 2118 } 2119 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 2120 2121 /* record those setting for HW strip per queue */ 2122 ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); 2123 } 2124 } 2125 } 2126 2127 static void 2128 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) 2129 { 2130 uint16_t i; 2131 struct rte_eth_rxmode *rxmode; 2132 struct ixgbe_rx_queue *rxq; 2133 2134 if (mask & ETH_VLAN_STRIP_MASK) { 2135 rxmode = &dev->data->dev_conf.rxmode; 2136 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2137 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2138 rxq = dev->data->rx_queues[i]; 2139 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2140 } 2141 else 2142 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2143 rxq = dev->data->rx_queues[i]; 2144 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2145 } 2146 } 2147 } 2148 2149 static int 2150 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2151 { 2152 struct rte_eth_rxmode *rxmode; 2153 rxmode = &dev->data->dev_conf.rxmode; 2154 2155 if (mask & ETH_VLAN_STRIP_MASK) { 2156 ixgbe_vlan_hw_strip_config(dev); 2157 } 2158 2159 if (mask & ETH_VLAN_FILTER_MASK) { 2160 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 2161 ixgbe_vlan_hw_filter_enable(dev); 2162 else 2163 ixgbe_vlan_hw_filter_disable(dev); 2164 } 2165 2166 if (mask & ETH_VLAN_EXTEND_MASK) { 2167 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2168 ixgbe_vlan_hw_extend_enable(dev); 2169 else 2170 ixgbe_vlan_hw_extend_disable(dev); 2171 } 2172 2173 return 0; 2174 } 2175 2176 static int 2177 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2178 { 2179 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 2180 2181 ixgbe_vlan_offload_config(dev, mask); 2182 2183 return 0; 2184 } 2185 2186 static void 2187 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2188 { 2189 struct ixgbe_hw *hw = 2190 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2191 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 2192 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2193 2194 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 2195 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 2196 } 2197 2198 static int 2199 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 2200 { 2201 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2202 
2203 switch (nb_rx_q) { 2204 case 1: 2205 case 2: 2206 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; 2207 break; 2208 case 4: 2209 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; 2210 break; 2211 default: 2212 return -EINVAL; 2213 } 2214 2215 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 2216 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2217 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 2218 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2219 return 0; 2220 } 2221 2222 static int 2223 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 2224 { 2225 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 2226 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2227 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2228 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2229 2230 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 2231 /* check multi-queue mode */ 2232 switch (dev_conf->rxmode.mq_mode) { 2233 case ETH_MQ_RX_VMDQ_DCB: 2234 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); 2235 break; 2236 case ETH_MQ_RX_VMDQ_DCB_RSS: 2237 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ 2238 PMD_INIT_LOG(ERR, "SRIOV active," 2239 " unsupported mq_mode rx %d.", 2240 dev_conf->rxmode.mq_mode); 2241 return -EINVAL; 2242 case ETH_MQ_RX_RSS: 2243 case ETH_MQ_RX_VMDQ_RSS: 2244 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; 2245 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 2246 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 2247 PMD_INIT_LOG(ERR, "SRIOV is active," 2248 " invalid queue number" 2249 " for VMDQ RSS, allowed" 2250 " value are 1, 2 or 4."); 2251 return -EINVAL; 2252 } 2253 break; 2254 case ETH_MQ_RX_VMDQ_ONLY: 2255 case ETH_MQ_RX_NONE: 2256 /* if nothing mq mode configure, use default scheme */ 2257 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; 2258 break; 2259 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ 2260 /* SRIOV only works in VMDq enable mode */ 2261 PMD_INIT_LOG(ERR, "SRIOV is active," 2262 " wrong mq_mode rx %d.", 2263 dev_conf->rxmode.mq_mode); 2264 return -EINVAL; 2265 } 2266 2267 switch (dev_conf->txmode.mq_mode) { 2268 case ETH_MQ_TX_VMDQ_DCB: 2269 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); 2270 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 2271 break; 2272 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ 2273 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; 2274 break; 2275 } 2276 2277 /* check valid queue number */ 2278 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 2279 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 2280 PMD_INIT_LOG(ERR, "SRIOV is active," 2281 " nb_rx_q=%d nb_tx_q=%d queue number" 2282 " must be less than or equal to %d.", 2283 nb_rx_q, nb_tx_q, 2284 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2285 return -EINVAL; 2286 } 2287 } else { 2288 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { 2289 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2290 " not supported."); 2291 return -EINVAL; 2292 } 2293 /* check configuration for vmdb+dcb mode */ 2294 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { 2295 const struct rte_eth_vmdq_dcb_conf *conf; 2296 2297 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2298 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2299 IXGBE_VMDQ_DCB_NB_QUEUES); 2300 return -EINVAL; 2301 } 2302 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2303 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2304 conf->nb_queue_pools == ETH_32_POOLS)) { 2305 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2306 " nb_queue_pools must be %d or %d.", 
2307 ETH_16_POOLS, ETH_32_POOLS); 2308 return -EINVAL; 2309 } 2310 } 2311 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { 2312 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2313 2314 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2315 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2316 IXGBE_VMDQ_DCB_NB_QUEUES); 2317 return -EINVAL; 2318 } 2319 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2320 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2321 conf->nb_queue_pools == ETH_32_POOLS)) { 2322 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2323 " nb_queue_pools != %d and" 2324 " nb_queue_pools != %d.", 2325 ETH_16_POOLS, ETH_32_POOLS); 2326 return -EINVAL; 2327 } 2328 } 2329 2330 /* For DCB mode check our configuration before we go further */ 2331 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { 2332 const struct rte_eth_dcb_rx_conf *conf; 2333 2334 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2335 if (!(conf->nb_tcs == ETH_4_TCS || 2336 conf->nb_tcs == ETH_8_TCS)) { 2337 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2338 " and nb_tcs != %d.", 2339 ETH_4_TCS, ETH_8_TCS); 2340 return -EINVAL; 2341 } 2342 } 2343 2344 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { 2345 const struct rte_eth_dcb_tx_conf *conf; 2346 2347 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2348 if (!(conf->nb_tcs == ETH_4_TCS || 2349 conf->nb_tcs == ETH_8_TCS)) { 2350 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2351 " and nb_tcs != %d.", 2352 ETH_4_TCS, ETH_8_TCS); 2353 return -EINVAL; 2354 } 2355 } 2356 2357 /* 2358 * When DCB/VT is off, maximum number of queues changes, 2359 * except for 82598EB, which remains constant. 2360 */ 2361 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 2362 hw->mac.type != ixgbe_mac_82598EB) { 2363 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2364 PMD_INIT_LOG(ERR, 2365 "Neither VT nor DCB are enabled, " 2366 "nb_tx_q > %d.", 2367 IXGBE_NONE_MODE_TX_NB_QUEUES); 2368 return -EINVAL; 2369 } 2370 } 2371 } 2372 return 0; 2373 } 2374 2375 static int 2376 ixgbe_dev_configure(struct rte_eth_dev *dev) 2377 { 2378 struct ixgbe_interrupt *intr = 2379 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2380 struct ixgbe_adapter *adapter = dev->data->dev_private; 2381 int ret; 2382 2383 PMD_INIT_FUNC_TRACE(); 2384 2385 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 2386 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2387 2388 /* multipe queue mode checking */ 2389 ret = ixgbe_check_mq_mode(dev); 2390 if (ret != 0) { 2391 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2392 ret); 2393 return ret; 2394 } 2395 2396 /* set flag to update link status after init */ 2397 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2398 2399 /* 2400 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 2401 * allocation or vector Rx preconditions we will reset it. 
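* These are only hints at configure time; the Rx queue setup and Rx burst
* function selection paths clear them again for any queue that fails the
* checks and fall back to the scalar receive routine.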
2402 */ 2403 adapter->rx_bulk_alloc_allowed = true; 2404 adapter->rx_vec_allowed = true; 2405 2406 return 0; 2407 } 2408 2409 static void 2410 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2411 { 2412 struct ixgbe_hw *hw = 2413 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2414 struct ixgbe_interrupt *intr = 2415 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2416 uint32_t gpie; 2417 2418 /* only set up it on X550EM_X */ 2419 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2420 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2421 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2422 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2423 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2424 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2425 } 2426 } 2427 2428 int 2429 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 2430 uint16_t tx_rate, uint64_t q_msk) 2431 { 2432 struct ixgbe_hw *hw; 2433 struct ixgbe_vf_info *vfinfo; 2434 struct rte_eth_link link; 2435 uint8_t nb_q_per_pool; 2436 uint32_t queue_stride; 2437 uint32_t queue_idx, idx = 0, vf_idx; 2438 uint32_t queue_end; 2439 uint16_t total_rate = 0; 2440 struct rte_pci_device *pci_dev; 2441 int ret; 2442 2443 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2444 ret = rte_eth_link_get_nowait(dev->data->port_id, &link); 2445 if (ret < 0) 2446 return ret; 2447 2448 if (vf >= pci_dev->max_vfs) 2449 return -EINVAL; 2450 2451 if (tx_rate > link.link_speed) 2452 return -EINVAL; 2453 2454 if (q_msk == 0) 2455 return 0; 2456 2457 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2458 vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 2459 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2460 queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2461 queue_idx = vf * queue_stride; 2462 queue_end = queue_idx + nb_q_per_pool - 1; 2463 if (queue_end >= hw->mac.max_tx_queues) 2464 return -EINVAL; 2465 2466 if (vfinfo) { 2467 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { 2468 if (vf_idx == vf) 2469 continue; 2470 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 2471 idx++) 2472 total_rate += vfinfo[vf_idx].tx_rate[idx]; 2473 } 2474 } else { 2475 return -EINVAL; 2476 } 2477 2478 /* Store tx_rate for this vf. */ 2479 for (idx = 0; idx < nb_q_per_pool; idx++) { 2480 if (((uint64_t)0x1 << idx) & q_msk) { 2481 if (vfinfo[vf].tx_rate[idx] != tx_rate) 2482 vfinfo[vf].tx_rate[idx] = tx_rate; 2483 total_rate += tx_rate; 2484 } 2485 } 2486 2487 if (total_rate > dev->data->dev_link.link_speed) { 2488 /* Reset stored TX rate of the VF if it causes exceed 2489 * link speed. 
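* For example, on a 10G link where other VFs already account for 8000 Mbps,
* a request of four queues at 1000 Mbps each (e.g. via
* rte_pmd_ixgbe_set_vf_rate_limit(port, vf, 1000, 0xf), if that helper is
* used) would push the total to 12000 Mbps and be rejected here, with this
* VF's stored rates cleared.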
2490 */ 2491 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 2492 return -EINVAL; 2493 } 2494 2495 /* Set RTTBCNRC of each queue/pool for vf X */ 2496 for (; queue_idx <= queue_end; queue_idx++) { 2497 if (0x1 & q_msk) 2498 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 2499 q_msk = q_msk >> 1; 2500 } 2501 2502 return 0; 2503 } 2504 2505 static int 2506 ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw) 2507 { 2508 struct ixgbe_adapter *adapter = dev->data->dev_private; 2509 int err; 2510 uint32_t mflcn; 2511 2512 ixgbe_setup_fc(hw); 2513 2514 err = ixgbe_fc_enable(hw); 2515 2516 /* Not negotiated is not an error case */ 2517 if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) { 2518 /* 2519 *check if we want to forward MAC frames - driver doesn't 2520 *have native capability to do that, 2521 *so we'll write the registers ourselves 2522 */ 2523 2524 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2525 2526 /* set or clear MFLCN.PMCF bit depending on configuration */ 2527 if (adapter->mac_ctrl_frame_fwd != 0) 2528 mflcn |= IXGBE_MFLCN_PMCF; 2529 else 2530 mflcn &= ~IXGBE_MFLCN_PMCF; 2531 2532 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 2533 IXGBE_WRITE_FLUSH(hw); 2534 2535 return 0; 2536 } 2537 return err; 2538 } 2539 2540 /* 2541 * Configure device link speed and setup link. 2542 * It returns 0 on success. 2543 */ 2544 static int 2545 ixgbe_dev_start(struct rte_eth_dev *dev) 2546 { 2547 struct ixgbe_hw *hw = 2548 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2549 struct ixgbe_vf_info *vfinfo = 2550 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2551 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2552 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2553 uint32_t intr_vector = 0; 2554 int err; 2555 bool link_up = false, negotiate = 0; 2556 uint32_t speed = 0; 2557 uint32_t allowed_speeds = 0; 2558 int mask = 0; 2559 int status; 2560 uint16_t vf, idx; 2561 uint32_t *link_speeds; 2562 struct ixgbe_tm_conf *tm_conf = 2563 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2564 struct ixgbe_macsec_setting *macsec_setting = 2565 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 2566 2567 PMD_INIT_FUNC_TRACE(); 2568 2569 /* Stop the link setup handler before resetting the HW. 
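A link-setup thread kicked off by an earlier start may still be running; waiting for it here avoids racing it against the adapter reset below.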
*/ 2570 ixgbe_dev_wait_setup_link_complete(dev, 0); 2571 2572 /* disable uio/vfio intr/eventfd mapping */ 2573 rte_intr_disable(intr_handle); 2574 2575 /* stop adapter */ 2576 hw->adapter_stopped = 0; 2577 ixgbe_stop_adapter(hw); 2578 2579 /* reinitialize adapter 2580 * this calls reset and start 2581 */ 2582 status = ixgbe_pf_reset_hw(hw); 2583 if (status != 0) 2584 return -1; 2585 hw->mac.ops.start_hw(hw); 2586 hw->mac.get_link_status = true; 2587 2588 /* configure PF module if SRIOV enabled */ 2589 ixgbe_pf_host_configure(dev); 2590 2591 ixgbe_dev_phy_intr_setup(dev); 2592 2593 /* check and configure queue intr-vector mapping */ 2594 if ((rte_intr_cap_multiple(intr_handle) || 2595 !RTE_ETH_DEV_SRIOV(dev).active) && 2596 dev->data->dev_conf.intr_conf.rxq != 0) { 2597 intr_vector = dev->data->nb_rx_queues; 2598 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2599 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2600 IXGBE_MAX_INTR_QUEUE_NUM); 2601 return -ENOTSUP; 2602 } 2603 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2604 return -1; 2605 } 2606 2607 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 2608 intr_handle->intr_vec = 2609 rte_zmalloc("intr_vec", 2610 dev->data->nb_rx_queues * sizeof(int), 0); 2611 if (intr_handle->intr_vec == NULL) { 2612 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2613 " intr_vec", dev->data->nb_rx_queues); 2614 return -ENOMEM; 2615 } 2616 } 2617 2618 /* confiugre msix for sleep until rx interrupt */ 2619 ixgbe_configure_msix(dev); 2620 2621 /* initialize transmission unit */ 2622 ixgbe_dev_tx_init(dev); 2623 2624 /* This can fail when allocating mbufs for descriptor rings */ 2625 err = ixgbe_dev_rx_init(dev); 2626 if (err) { 2627 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2628 goto error; 2629 } 2630 2631 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 2632 ETH_VLAN_EXTEND_MASK; 2633 err = ixgbe_vlan_offload_config(dev, mask); 2634 if (err) { 2635 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 2636 goto error; 2637 } 2638 2639 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { 2640 /* Enable vlan filtering for VMDq */ 2641 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2642 } 2643 2644 /* Configure DCB hw */ 2645 ixgbe_configure_dcb(dev); 2646 2647 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { 2648 err = ixgbe_fdir_configure(dev); 2649 if (err) 2650 goto error; 2651 } 2652 2653 /* Restore vf rate limit */ 2654 if (vfinfo != NULL) { 2655 for (vf = 0; vf < pci_dev->max_vfs; vf++) 2656 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2657 if (vfinfo[vf].tx_rate[idx] != 0) 2658 ixgbe_set_vf_rate_limit( 2659 dev, vf, 2660 vfinfo[vf].tx_rate[idx], 2661 1 << idx); 2662 } 2663 2664 ixgbe_restore_statistics_mapping(dev); 2665 2666 err = ixgbe_flow_ctrl_enable(dev, hw); 2667 if (err < 0) { 2668 PMD_INIT_LOG(ERR, "enable flow ctrl err"); 2669 goto error; 2670 } 2671 2672 err = ixgbe_dev_rxtx_start(dev); 2673 if (err < 0) { 2674 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2675 goto error; 2676 } 2677 2678 /* Skip link setup if loopback mode is enabled. 
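Loopback keeps traffic inside the MAC, so no PHY or link negotiation is needed; the check below rejects loopback configurations that the current MAC type does not support.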
*/ 2679 if (dev->data->dev_conf.lpbk_mode != 0) { 2680 err = ixgbe_check_supported_loopback_mode(dev); 2681 if (err < 0) { 2682 PMD_INIT_LOG(ERR, "Unsupported loopback mode"); 2683 goto error; 2684 } else { 2685 goto skip_link_setup; 2686 } 2687 } 2688 2689 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2690 err = hw->mac.ops.setup_sfp(hw); 2691 if (err) 2692 goto error; 2693 } 2694 2695 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2696 /* Turn on the copper */ 2697 ixgbe_set_phy_power(hw, true); 2698 } else { 2699 /* Turn on the laser */ 2700 ixgbe_enable_tx_laser(hw); 2701 } 2702 2703 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2704 if (err) 2705 goto error; 2706 dev->data->dev_link.link_status = link_up; 2707 2708 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2709 if (err) 2710 goto error; 2711 2712 switch (hw->mac.type) { 2713 case ixgbe_mac_X550: 2714 case ixgbe_mac_X550EM_x: 2715 case ixgbe_mac_X550EM_a: 2716 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2717 ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G | 2718 ETH_LINK_SPEED_10G; 2719 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 2720 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 2721 allowed_speeds = ETH_LINK_SPEED_10M | 2722 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; 2723 break; 2724 default: 2725 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2726 ETH_LINK_SPEED_10G; 2727 } 2728 2729 link_speeds = &dev->data->dev_conf.link_speeds; 2730 2731 /* Ignore autoneg flag bit and check the validity of 2732 * link_speed 2733 */ 2734 if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) { 2735 PMD_INIT_LOG(ERR, "Invalid link setting"); 2736 goto error; 2737 } 2738 2739 speed = 0x0; 2740 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { 2741 switch (hw->mac.type) { 2742 case ixgbe_mac_82598EB: 2743 speed = IXGBE_LINK_SPEED_82598_AUTONEG; 2744 break; 2745 case ixgbe_mac_82599EB: 2746 case ixgbe_mac_X540: 2747 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2748 break; 2749 case ixgbe_mac_X550: 2750 case ixgbe_mac_X550EM_x: 2751 case ixgbe_mac_X550EM_a: 2752 speed = IXGBE_LINK_SPEED_X550_AUTONEG; 2753 break; 2754 default: 2755 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2756 } 2757 } else { 2758 if (*link_speeds & ETH_LINK_SPEED_10G) 2759 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2760 if (*link_speeds & ETH_LINK_SPEED_5G) 2761 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2762 if (*link_speeds & ETH_LINK_SPEED_2_5G) 2763 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2764 if (*link_speeds & ETH_LINK_SPEED_1G) 2765 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2766 if (*link_speeds & ETH_LINK_SPEED_100M) 2767 speed |= IXGBE_LINK_SPEED_100_FULL; 2768 if (*link_speeds & ETH_LINK_SPEED_10M) 2769 speed |= IXGBE_LINK_SPEED_10_FULL; 2770 } 2771 2772 err = ixgbe_setup_link(hw, speed, link_up); 2773 if (err) 2774 goto error; 2775 2776 skip_link_setup: 2777 2778 if (rte_intr_allow_others(intr_handle)) { 2779 /* check if lsc interrupt is enabled */ 2780 if (dev->data->dev_conf.intr_conf.lsc != 0) 2781 ixgbe_dev_lsc_interrupt_setup(dev, TRUE); 2782 else 2783 ixgbe_dev_lsc_interrupt_setup(dev, FALSE); 2784 ixgbe_dev_macsec_interrupt_setup(dev); 2785 } else { 2786 rte_intr_callback_unregister(intr_handle, 2787 ixgbe_dev_interrupt_handler, dev); 2788 if (dev->data->dev_conf.intr_conf.lsc != 0) 2789 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2790 " no intr multiplex"); 2791 } 2792 2793 /* check if rxq interrupt is enabled */ 2794 if (dev->data->dev_conf.intr_conf.rxq != 0 && 2795 rte_intr_dp_is_en(intr_handle)) 2796 
ixgbe_dev_rxq_interrupt_setup(dev); 2797 2798 /* enable uio/vfio intr/eventfd mapping */ 2799 rte_intr_enable(intr_handle); 2800 2801 /* resume enabled intr since hw reset */ 2802 ixgbe_enable_intr(dev); 2803 ixgbe_l2_tunnel_conf(dev); 2804 ixgbe_filter_restore(dev); 2805 2806 if (tm_conf->root && !tm_conf->committed) 2807 PMD_DRV_LOG(WARNING, 2808 "please call hierarchy_commit() " 2809 "before starting the port"); 2810 2811 /* wait for the controller to acquire link */ 2812 err = ixgbe_wait_for_link_up(hw); 2813 if (err) 2814 goto error; 2815 2816 /* 2817 * Update link status right before return, because it may 2818 * start link configuration process in a separate thread. 2819 */ 2820 ixgbe_dev_link_update(dev, 0); 2821 2822 /* setup the macsec setting register */ 2823 if (macsec_setting->offload_en) 2824 ixgbe_dev_macsec_register_enable(dev, macsec_setting); 2825 2826 return 0; 2827 2828 error: 2829 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2830 ixgbe_dev_clear_queues(dev); 2831 return -EIO; 2832 } 2833 2834 /* 2835 * Stop device: disable rx and tx functions to allow for reconfiguring. 2836 */ 2837 static int 2838 ixgbe_dev_stop(struct rte_eth_dev *dev) 2839 { 2840 struct rte_eth_link link; 2841 struct ixgbe_adapter *adapter = dev->data->dev_private; 2842 struct ixgbe_hw *hw = 2843 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2844 struct ixgbe_vf_info *vfinfo = 2845 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2846 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2847 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2848 int vf; 2849 struct ixgbe_tm_conf *tm_conf = 2850 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2851 2852 if (hw->adapter_stopped) 2853 return 0; 2854 2855 PMD_INIT_FUNC_TRACE(); 2856 2857 ixgbe_dev_wait_setup_link_complete(dev, 0); 2858 2859 /* disable interrupts */ 2860 ixgbe_disable_intr(hw); 2861 2862 /* reset the NIC */ 2863 ixgbe_pf_reset_hw(hw); 2864 hw->adapter_stopped = 0; 2865 2866 /* stop adapter */ 2867 ixgbe_stop_adapter(hw); 2868 2869 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) 2870 vfinfo[vf].clear_to_send = false; 2871 2872 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2873 /* Turn off the copper */ 2874 ixgbe_set_phy_power(hw, false); 2875 } else { 2876 /* Turn off the laser */ 2877 ixgbe_disable_tx_laser(hw); 2878 } 2879 2880 ixgbe_dev_clear_queues(dev); 2881 2882 /* Clear stored conf */ 2883 dev->data->scattered_rx = 0; 2884 dev->data->lro = 0; 2885 2886 /* Clear recorded link status */ 2887 memset(&link, 0, sizeof(link)); 2888 rte_eth_linkstatus_set(dev, &link); 2889 2890 if (!rte_intr_allow_others(intr_handle)) 2891 /* resume to the default handler */ 2892 rte_intr_callback_register(intr_handle, 2893 ixgbe_dev_interrupt_handler, 2894 (void *)dev); 2895 2896 /* Clean datapath event and queue/vec mapping */ 2897 rte_intr_efd_disable(intr_handle); 2898 if (intr_handle->intr_vec != NULL) { 2899 rte_free(intr_handle->intr_vec); 2900 intr_handle->intr_vec = NULL; 2901 } 2902 2903 /* reset hierarchy commit */ 2904 tm_conf->committed = false; 2905 2906 adapter->rss_reta_updated = 0; 2907 2908 hw->adapter_stopped = true; 2909 dev->data->dev_started = 0; 2910 2911 return 0; 2912 } 2913 2914 /* 2915 * Set device link up: enable tx. 
2916 */ 2917 static int 2918 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2919 { 2920 struct ixgbe_hw *hw = 2921 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2922 if (hw->mac.type == ixgbe_mac_82599EB) { 2923 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2924 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2925 /* Not suported in bypass mode */ 2926 PMD_INIT_LOG(ERR, "Set link up is not supported " 2927 "by device id 0x%x", hw->device_id); 2928 return -ENOTSUP; 2929 } 2930 #endif 2931 } 2932 2933 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2934 /* Turn on the copper */ 2935 ixgbe_set_phy_power(hw, true); 2936 } else { 2937 /* Turn on the laser */ 2938 ixgbe_enable_tx_laser(hw); 2939 ixgbe_dev_link_update(dev, 0); 2940 } 2941 2942 return 0; 2943 } 2944 2945 /* 2946 * Set device link down: disable tx. 2947 */ 2948 static int 2949 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 2950 { 2951 struct ixgbe_hw *hw = 2952 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2953 if (hw->mac.type == ixgbe_mac_82599EB) { 2954 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2955 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2956 /* Not suported in bypass mode */ 2957 PMD_INIT_LOG(ERR, "Set link down is not supported " 2958 "by device id 0x%x", hw->device_id); 2959 return -ENOTSUP; 2960 } 2961 #endif 2962 } 2963 2964 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2965 /* Turn off the copper */ 2966 ixgbe_set_phy_power(hw, false); 2967 } else { 2968 /* Turn off the laser */ 2969 ixgbe_disable_tx_laser(hw); 2970 ixgbe_dev_link_update(dev, 0); 2971 } 2972 2973 return 0; 2974 } 2975 2976 /* 2977 * Reset and stop device. 2978 */ 2979 static int 2980 ixgbe_dev_close(struct rte_eth_dev *dev) 2981 { 2982 struct ixgbe_hw *hw = 2983 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2984 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2985 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2986 int retries = 0; 2987 int ret; 2988 2989 PMD_INIT_FUNC_TRACE(); 2990 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2991 return 0; 2992 2993 ixgbe_pf_reset_hw(hw); 2994 2995 ret = ixgbe_dev_stop(dev); 2996 2997 ixgbe_dev_free_queues(dev); 2998 2999 ixgbe_disable_pcie_master(hw); 3000 3001 /* reprogram the RAR[0] in case user changed it. 
*/ 3002 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 3003 3004 /* Unlock any pending hardware semaphore */ 3005 ixgbe_swfw_lock_reset(hw); 3006 3007 /* disable uio intr before callback unregister */ 3008 rte_intr_disable(intr_handle); 3009 3010 do { 3011 ret = rte_intr_callback_unregister(intr_handle, 3012 ixgbe_dev_interrupt_handler, dev); 3013 if (ret >= 0 || ret == -ENOENT) { 3014 break; 3015 } else if (ret != -EAGAIN) { 3016 PMD_INIT_LOG(ERR, 3017 "intr callback unregister failed: %d", 3018 ret); 3019 } 3020 rte_delay_ms(100); 3021 } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); 3022 3023 /* cancel the delay handler before remove dev */ 3024 rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev); 3025 3026 /* uninitialize PF if max_vfs not zero */ 3027 ixgbe_pf_host_uninit(dev); 3028 3029 /* remove all the fdir filters & hash */ 3030 ixgbe_fdir_filter_uninit(dev); 3031 3032 /* remove all the L2 tunnel filters & hash */ 3033 ixgbe_l2_tn_filter_uninit(dev); 3034 3035 /* Remove all ntuple filters of the device */ 3036 ixgbe_ntuple_filter_uninit(dev); 3037 3038 /* clear all the filters list */ 3039 ixgbe_filterlist_flush(); 3040 3041 /* Remove all Traffic Manager configuration */ 3042 ixgbe_tm_conf_uninit(dev); 3043 3044 #ifdef RTE_LIB_SECURITY 3045 rte_free(dev->security_ctx); 3046 #endif 3047 3048 return ret; 3049 } 3050 3051 /* 3052 * Reset PF device. 3053 */ 3054 static int 3055 ixgbe_dev_reset(struct rte_eth_dev *dev) 3056 { 3057 int ret; 3058 3059 /* When a DPDK PMD PF begin to reset PF port, it should notify all 3060 * its VF to make them align with it. The detailed notification 3061 * mechanism is PMD specific. As to ixgbe PF, it is rather complex. 3062 * To avoid unexpected behavior in VF, currently reset of PF with 3063 * SR-IOV activation is not supported. It might be supported later. 3064 */ 3065 if (dev->data->sriov.active) 3066 return -ENOTSUP; 3067 3068 ret = eth_ixgbe_dev_uninit(dev); 3069 if (ret) 3070 return ret; 3071 3072 ret = eth_ixgbe_dev_init(dev, NULL); 3073 3074 return ret; 3075 } 3076 3077 static void 3078 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 3079 struct ixgbe_hw_stats *hw_stats, 3080 struct ixgbe_macsec_stats *macsec_stats, 3081 uint64_t *total_missed_rx, uint64_t *total_qbrc, 3082 uint64_t *total_qprc, uint64_t *total_qprdc) 3083 { 3084 uint32_t bprc, lxon, lxoff, total; 3085 uint32_t delta_gprc = 0; 3086 unsigned i; 3087 /* Workaround for RX byte count not including CRC bytes when CRC 3088 * strip is enabled. CRC bytes are removed from counters when crc_strip 3089 * is disabled. 
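* In other words, with RXCRCSTRP set the hardware byte counters already
* exclude the 4-byte CRC, so the per-queue and good-octet adjustments below
* subtract RTE_ETHER_CRC_LEN per packet only when crc_strip == 0.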
3090 */ 3091 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 3092 IXGBE_HLREG0_RXCRCSTRP); 3093 3094 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 3095 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 3096 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 3097 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 3098 3099 for (i = 0; i < 8; i++) { 3100 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 3101 3102 /* global total per queue */ 3103 hw_stats->mpc[i] += mp; 3104 /* Running comprehensive total for stats display */ 3105 *total_missed_rx += hw_stats->mpc[i]; 3106 if (hw->mac.type == ixgbe_mac_82598EB) { 3107 hw_stats->rnbc[i] += 3108 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3109 hw_stats->pxonrxc[i] += 3110 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 3111 hw_stats->pxoffrxc[i] += 3112 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 3113 } else { 3114 hw_stats->pxonrxc[i] += 3115 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 3116 hw_stats->pxoffrxc[i] += 3117 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 3118 hw_stats->pxon2offc[i] += 3119 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 3120 } 3121 hw_stats->pxontxc[i] += 3122 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 3123 hw_stats->pxofftxc[i] += 3124 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 3125 } 3126 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3127 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3128 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3129 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3130 3131 delta_gprc += delta_qprc; 3132 3133 hw_stats->qprc[i] += delta_qprc; 3134 hw_stats->qptc[i] += delta_qptc; 3135 3136 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 3137 hw_stats->qbrc[i] += 3138 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 3139 if (crc_strip == 0) 3140 hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; 3141 3142 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 3143 hw_stats->qbtc[i] += 3144 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 3145 3146 hw_stats->qprdc[i] += delta_qprdc; 3147 *total_qprdc += hw_stats->qprdc[i]; 3148 3149 *total_qprc += hw_stats->qprc[i]; 3150 *total_qbrc += hw_stats->qbrc[i]; 3151 } 3152 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 3153 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 3154 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3155 3156 /* 3157 * An errata states that gprc actually counts good + missed packets: 3158 * Workaround to set gprc to summated queue packet receives 3159 */ 3160 hw_stats->gprc = *total_qprc; 3161 3162 if (hw->mac.type != ixgbe_mac_82598EB) { 3163 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 3164 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 3165 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 3166 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 3167 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 3168 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 3169 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 3170 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 3171 } else { 3172 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3173 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3174 /* 82598 only has a counter in the high register */ 3175 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3176 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3177 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3178 } 3179 uint64_t old_tpr = hw_stats->tpr; 3180 3181 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 
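/*
 * old_tpr snapshots the running total-packets-received counter so that, a
 * few lines below, RTE_ETHER_CRC_LEN can be subtracted from the
 * total-octets counter (tor) once for each packet received in this
 * interval.
 */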
3182 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3183 3184 if (crc_strip == 0) 3185 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; 3186 3187 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 3188 hw_stats->gptc += delta_gptc; 3189 hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; 3190 hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 3191 3192 /* 3193 * Workaround: mprc hardware is incorrectly counting 3194 * broadcasts, so for now we subtract those. 3195 */ 3196 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3197 hw_stats->bprc += bprc; 3198 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3199 if (hw->mac.type == ixgbe_mac_82598EB) 3200 hw_stats->mprc -= bprc; 3201 3202 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3203 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3204 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3205 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3206 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3207 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3208 3209 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3210 hw_stats->lxontxc += lxon; 3211 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3212 hw_stats->lxofftxc += lxoff; 3213 total = lxon + lxoff; 3214 3215 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3216 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3217 hw_stats->gptc -= total; 3218 hw_stats->mptc -= total; 3219 hw_stats->ptc64 -= total; 3220 hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; 3221 3222 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3223 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3224 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3225 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3226 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3227 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3228 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3229 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3230 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3231 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3232 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3233 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3234 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3235 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3236 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3237 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3238 /* Only read FCOE on 82599 */ 3239 if (hw->mac.type != ixgbe_mac_82598EB) { 3240 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3241 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3242 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3243 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3244 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3245 } 3246 3247 /* Flow Director Stats registers */ 3248 if (hw->mac.type != ixgbe_mac_82598EB) { 3249 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 3250 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 3251 hw_stats->fdirustat_add += IXGBE_READ_REG(hw, 3252 IXGBE_FDIRUSTAT) & 0xFFFF; 3253 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, 3254 IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; 3255 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, 3256 IXGBE_FDIRFSTAT) & 0xFFFF; 3257 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, 3258 IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; 3259 } 3260 /* MACsec Stats registers */ 3261 macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); 3262 macsec_stats->out_pkts_encrypted += 
3263 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); 3264 macsec_stats->out_pkts_protected += 3265 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); 3266 macsec_stats->out_octets_encrypted += 3267 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); 3268 macsec_stats->out_octets_protected += 3269 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); 3270 macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); 3271 macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); 3272 macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); 3273 macsec_stats->in_pkts_unknownsci += 3274 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); 3275 macsec_stats->in_octets_decrypted += 3276 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); 3277 macsec_stats->in_octets_validated += 3278 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); 3279 macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); 3280 macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); 3281 macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); 3282 for (i = 0; i < 2; i++) { 3283 macsec_stats->in_pkts_ok += 3284 IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); 3285 macsec_stats->in_pkts_invalid += 3286 IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); 3287 macsec_stats->in_pkts_notvalid += 3288 IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); 3289 } 3290 macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); 3291 macsec_stats->in_pkts_notusingsa += 3292 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); 3293 } 3294 3295 /* 3296 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 3297 */ 3298 static int 3299 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3300 { 3301 struct ixgbe_hw *hw = 3302 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3303 struct ixgbe_hw_stats *hw_stats = 3304 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3305 struct ixgbe_macsec_stats *macsec_stats = 3306 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3307 dev->data->dev_private); 3308 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3309 unsigned i; 3310 3311 total_missed_rx = 0; 3312 total_qbrc = 0; 3313 total_qprc = 0; 3314 total_qprdc = 0; 3315 3316 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3317 &total_qbrc, &total_qprc, &total_qprdc); 3318 3319 if (stats == NULL) 3320 return -EINVAL; 3321 3322 /* Fill out the rte_eth_stats statistics structure */ 3323 stats->ipackets = total_qprc; 3324 stats->ibytes = total_qbrc; 3325 stats->opackets = hw_stats->gptc; 3326 stats->obytes = hw_stats->gotc; 3327 3328 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3329 stats->q_ipackets[i] = hw_stats->qprc[i]; 3330 stats->q_opackets[i] = hw_stats->qptc[i]; 3331 stats->q_ibytes[i] = hw_stats->qbrc[i]; 3332 stats->q_obytes[i] = hw_stats->qbtc[i]; 3333 stats->q_errors[i] = hw_stats->qprdc[i]; 3334 } 3335 3336 /* Rx Errors */ 3337 stats->imissed = total_missed_rx; 3338 stats->ierrors = hw_stats->crcerrs + 3339 hw_stats->mspdc + 3340 hw_stats->rlec + 3341 hw_stats->ruc + 3342 hw_stats->roc + 3343 hw_stats->illerrc + 3344 hw_stats->errbc + 3345 hw_stats->rfc + 3346 hw_stats->fccrc + 3347 hw_stats->fclast; 3348 3349 /* 3350 * 82599 errata, UDP frames with a 0 checksum can be marked as checksum 3351 * errors. 
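 * For this reason the XEC count is only added to ierrors on non-82599 MACs below.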
3352 */ 3353 if (hw->mac.type != ixgbe_mac_82599EB) 3354 stats->ierrors += hw_stats->xec; 3355 3356 /* Tx Errors */ 3357 stats->oerrors = 0; 3358 return 0; 3359 } 3360 3361 static int 3362 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 3363 { 3364 struct ixgbe_hw_stats *stats = 3365 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3366 3367 /* HW registers are cleared on read */ 3368 ixgbe_dev_stats_get(dev, NULL); 3369 3370 /* Reset software totals */ 3371 memset(stats, 0, sizeof(*stats)); 3372 3373 return 0; 3374 } 3375 3376 /* This function calculates the number of xstats based on the current config */ 3377 static unsigned 3378 ixgbe_xstats_calc_num(void) { 3379 return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + 3380 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 3381 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 3382 } 3383 3384 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3385 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) 3386 { 3387 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 3388 unsigned stat, i, count; 3389 3390 if (xstats_names != NULL) { 3391 count = 0; 3392 3393 /* Note: limit >= cnt_stats checked upstream 3394 * in rte_eth_xstats_names() 3395 */ 3396 3397 /* Extended stats from ixgbe_hw_stats */ 3398 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3399 strlcpy(xstats_names[count].name, 3400 rte_ixgbe_stats_strings[i].name, 3401 sizeof(xstats_names[count].name)); 3402 count++; 3403 } 3404 3405 /* MACsec Stats */ 3406 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3407 strlcpy(xstats_names[count].name, 3408 rte_ixgbe_macsec_strings[i].name, 3409 sizeof(xstats_names[count].name)); 3410 count++; 3411 } 3412 3413 /* RX Priority Stats */ 3414 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3415 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3416 snprintf(xstats_names[count].name, 3417 sizeof(xstats_names[count].name), 3418 "rx_priority%u_%s", i, 3419 rte_ixgbe_rxq_strings[stat].name); 3420 count++; 3421 } 3422 } 3423 3424 /* TX Priority Stats */ 3425 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3426 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3427 snprintf(xstats_names[count].name, 3428 sizeof(xstats_names[count].name), 3429 "tx_priority%u_%s", i, 3430 rte_ixgbe_txq_strings[stat].name); 3431 count++; 3432 } 3433 } 3434 } 3435 return cnt_stats; 3436 } 3437 3438 static int ixgbe_dev_xstats_get_names_by_id( 3439 struct rte_eth_dev *dev, 3440 struct rte_eth_xstat_name *xstats_names, 3441 const uint64_t *ids, 3442 unsigned int limit) 3443 { 3444 if (!ids) { 3445 const unsigned int cnt_stats = ixgbe_xstats_calc_num(); 3446 unsigned int stat, i, count; 3447 3448 if (xstats_names != NULL) { 3449 count = 0; 3450 3451 /* Note: limit >= cnt_stats checked upstream 3452 * in rte_eth_xstats_names() 3453 */ 3454 3455 /* Extended stats from ixgbe_hw_stats */ 3456 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3457 strlcpy(xstats_names[count].name, 3458 rte_ixgbe_stats_strings[i].name, 3459 sizeof(xstats_names[count].name)); 3460 count++; 3461 } 3462 3463 /* MACsec Stats */ 3464 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3465 strlcpy(xstats_names[count].name, 3466 rte_ixgbe_macsec_strings[i].name, 3467 sizeof(xstats_names[count].name)); 3468 count++; 3469 } 3470 3471 /* RX Priority Stats */ 3472 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3473 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3474 snprintf(xstats_names[count].name, 3475 sizeof(xstats_names[count].name), 3476 
"rx_priority%u_%s", i, 3477 rte_ixgbe_rxq_strings[stat].name); 3478 count++; 3479 } 3480 } 3481 3482 /* TX Priority Stats */ 3483 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3484 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3485 snprintf(xstats_names[count].name, 3486 sizeof(xstats_names[count].name), 3487 "tx_priority%u_%s", i, 3488 rte_ixgbe_txq_strings[stat].name); 3489 count++; 3490 } 3491 } 3492 } 3493 return cnt_stats; 3494 } 3495 3496 uint16_t i; 3497 uint16_t size = ixgbe_xstats_calc_num(); 3498 struct rte_eth_xstat_name xstats_names_copy[size]; 3499 3500 ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL, 3501 size); 3502 3503 for (i = 0; i < limit; i++) { 3504 if (ids[i] >= size) { 3505 PMD_INIT_LOG(ERR, "id value isn't valid"); 3506 return -1; 3507 } 3508 strcpy(xstats_names[i].name, 3509 xstats_names_copy[ids[i]].name); 3510 } 3511 return limit; 3512 } 3513 3514 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3515 struct rte_eth_xstat_name *xstats_names, unsigned limit) 3516 { 3517 unsigned i; 3518 3519 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 3520 return -ENOMEM; 3521 3522 if (xstats_names != NULL) 3523 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 3524 strlcpy(xstats_names[i].name, 3525 rte_ixgbevf_stats_strings[i].name, 3526 sizeof(xstats_names[i].name)); 3527 return IXGBEVF_NB_XSTATS; 3528 } 3529 3530 static int 3531 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3532 unsigned n) 3533 { 3534 struct ixgbe_hw *hw = 3535 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3536 struct ixgbe_hw_stats *hw_stats = 3537 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3538 struct ixgbe_macsec_stats *macsec_stats = 3539 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3540 dev->data->dev_private); 3541 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3542 unsigned i, stat, count = 0; 3543 3544 count = ixgbe_xstats_calc_num(); 3545 3546 if (n < count) 3547 return count; 3548 3549 total_missed_rx = 0; 3550 total_qbrc = 0; 3551 total_qprc = 0; 3552 total_qprdc = 0; 3553 3554 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3555 &total_qbrc, &total_qprc, &total_qprdc); 3556 3557 /* If this is a reset xstats is NULL, and we have cleared the 3558 * registers by reading them. 
3559 */ 3560 if (!xstats) 3561 return 0; 3562 3563 /* Extended stats from ixgbe_hw_stats */ 3564 count = 0; 3565 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3566 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3567 rte_ixgbe_stats_strings[i].offset); 3568 xstats[count].id = count; 3569 count++; 3570 } 3571 3572 /* MACsec Stats */ 3573 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3574 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + 3575 rte_ixgbe_macsec_strings[i].offset); 3576 xstats[count].id = count; 3577 count++; 3578 } 3579 3580 /* RX Priority Stats */ 3581 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3582 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3583 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3584 rte_ixgbe_rxq_strings[stat].offset + 3585 (sizeof(uint64_t) * i)); 3586 xstats[count].id = count; 3587 count++; 3588 } 3589 } 3590 3591 /* TX Priority Stats */ 3592 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3593 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3594 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3595 rte_ixgbe_txq_strings[stat].offset + 3596 (sizeof(uint64_t) * i)); 3597 xstats[count].id = count; 3598 count++; 3599 } 3600 } 3601 return count; 3602 } 3603 3604 static int 3605 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 3606 uint64_t *values, unsigned int n) 3607 { 3608 if (!ids) { 3609 struct ixgbe_hw *hw = 3610 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3611 struct ixgbe_hw_stats *hw_stats = 3612 IXGBE_DEV_PRIVATE_TO_STATS( 3613 dev->data->dev_private); 3614 struct ixgbe_macsec_stats *macsec_stats = 3615 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3616 dev->data->dev_private); 3617 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3618 unsigned int i, stat, count = 0; 3619 3620 count = ixgbe_xstats_calc_num(); 3621 3622 if (!ids && n < count) 3623 return count; 3624 3625 total_missed_rx = 0; 3626 total_qbrc = 0; 3627 total_qprc = 0; 3628 total_qprdc = 0; 3629 3630 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, 3631 &total_missed_rx, &total_qbrc, &total_qprc, 3632 &total_qprdc); 3633 3634 /* If this is a reset xstats is NULL, and we have cleared the 3635 * registers by reading them. 
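 * With a NULL values pointer the call therefore only latches (and thereby clears) the hardware counters and returns 0.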
3636 */ 3637 if (!ids && !values) 3638 return 0; 3639 3640 /* Extended stats from ixgbe_hw_stats */ 3641 count = 0; 3642 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3643 values[count] = *(uint64_t *)(((char *)hw_stats) + 3644 rte_ixgbe_stats_strings[i].offset); 3645 count++; 3646 } 3647 3648 /* MACsec Stats */ 3649 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3650 values[count] = *(uint64_t *)(((char *)macsec_stats) + 3651 rte_ixgbe_macsec_strings[i].offset); 3652 count++; 3653 } 3654 3655 /* RX Priority Stats */ 3656 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3657 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3658 values[count] = 3659 *(uint64_t *)(((char *)hw_stats) + 3660 rte_ixgbe_rxq_strings[stat].offset + 3661 (sizeof(uint64_t) * i)); 3662 count++; 3663 } 3664 } 3665 3666 /* TX Priority Stats */ 3667 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3668 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3669 values[count] = 3670 *(uint64_t *)(((char *)hw_stats) + 3671 rte_ixgbe_txq_strings[stat].offset + 3672 (sizeof(uint64_t) * i)); 3673 count++; 3674 } 3675 } 3676 return count; 3677 } 3678 3679 uint16_t i; 3680 uint16_t size = ixgbe_xstats_calc_num(); 3681 uint64_t values_copy[size]; 3682 3683 ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); 3684 3685 for (i = 0; i < n; i++) { 3686 if (ids[i] >= size) { 3687 PMD_INIT_LOG(ERR, "id value isn't valid"); 3688 return -1; 3689 } 3690 values[i] = values_copy[ids[i]]; 3691 } 3692 return n; 3693 } 3694 3695 static int 3696 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) 3697 { 3698 struct ixgbe_hw_stats *stats = 3699 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3700 struct ixgbe_macsec_stats *macsec_stats = 3701 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3702 dev->data->dev_private); 3703 3704 unsigned count = ixgbe_xstats_calc_num(); 3705 3706 /* HW registers are cleared on read */ 3707 ixgbe_dev_xstats_get(dev, NULL, count); 3708 3709 /* Reset software totals */ 3710 memset(stats, 0, sizeof(*stats)); 3711 memset(macsec_stats, 0, sizeof(*macsec_stats)); 3712 3713 return 0; 3714 } 3715 3716 static void 3717 ixgbevf_update_stats(struct rte_eth_dev *dev) 3718 { 3719 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3720 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3721 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3722 3723 /* Good Rx packets, including VF loopback */ 3724 UPDATE_VF_STAT(IXGBE_VFGPRC, 3725 hw_stats->last_vfgprc, hw_stats->vfgprc); 3726 3727 /* Good Rx octets, including VF loopback */ 3728 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 3729 hw_stats->last_vfgorc, hw_stats->vfgorc); 3730 3731 /* Good Tx packets, including VF loopback */ 3732 UPDATE_VF_STAT(IXGBE_VFGPTC, 3733 hw_stats->last_vfgptc, hw_stats->vfgptc); 3734 3735 /* Good Tx octets, including VF loopback */ 3736 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 3737 hw_stats->last_vfgotc, hw_stats->vfgotc); 3738 3739 /* Rx Multicast packets */ 3740 UPDATE_VF_STAT(IXGBE_VFMPRC, 3741 hw_stats->last_vfmprc, hw_stats->vfmprc); 3742 } 3743 3744 static int 3745 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3746 unsigned n) 3747 { 3748 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3749 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3750 unsigned i; 3751 3752 if (n < IXGBEVF_NB_XSTATS) 3753 return IXGBEVF_NB_XSTATS; 3754 3755 ixgbevf_update_stats(dev); 3756 3757 if (!xstats) 3758 return 0; 3759 3760 /* Extended stats */ 3761 for (i = 0; i <
IXGBEVF_NB_XSTATS; i++) { 3762 xstats[i].id = i; 3763 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 3764 rte_ixgbevf_stats_strings[i].offset); 3765 } 3766 3767 return IXGBEVF_NB_XSTATS; 3768 } 3769 3770 static int 3771 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3772 { 3773 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3774 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3775 3776 ixgbevf_update_stats(dev); 3777 3778 if (stats == NULL) 3779 return -EINVAL; 3780 3781 stats->ipackets = hw_stats->vfgprc; 3782 stats->ibytes = hw_stats->vfgorc; 3783 stats->opackets = hw_stats->vfgptc; 3784 stats->obytes = hw_stats->vfgotc; 3785 return 0; 3786 } 3787 3788 static int 3789 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 3790 { 3791 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3792 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3793 3794 /* Sync HW register to the last stats */ 3795 ixgbevf_dev_stats_get(dev, NULL); 3796 3797 /* reset HW current stats*/ 3798 hw_stats->vfgprc = 0; 3799 hw_stats->vfgorc = 0; 3800 hw_stats->vfgptc = 0; 3801 hw_stats->vfgotc = 0; 3802 hw_stats->vfmprc = 0; 3803 3804 return 0; 3805 } 3806 3807 static int 3808 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 3809 { 3810 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3811 u16 eeprom_verh, eeprom_verl; 3812 u32 etrack_id; 3813 int ret; 3814 3815 ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); 3816 ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); 3817 3818 etrack_id = (eeprom_verh << 16) | eeprom_verl; 3819 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); 3820 if (ret < 0) 3821 return -EINVAL; 3822 3823 ret += 1; /* add the size of '\0' */ 3824 if (fw_size < (size_t)ret) 3825 return ret; 3826 else 3827 return 0; 3828 } 3829 3830 static int 3831 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3832 { 3833 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3834 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3835 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3836 3837 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3838 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3839 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3840 /* 3841 * When DCB/VT is off, maximum number of queues changes, 3842 * except for 82598EB, which remains constant. 
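 * (when the Tx multi-queue mode is ETH_MQ_TX_NONE, only IXGBE_NONE_MODE_TX_NB_QUEUES Tx queues are reported on non-82598EB MACs).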
3843 */ 3844 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 3845 hw->mac.type != ixgbe_mac_82598EB) 3846 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3847 } 3848 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3849 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3850 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3851 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3852 dev_info->max_vfs = pci_dev->max_vfs; 3853 if (hw->mac.type == ixgbe_mac_82598EB) 3854 dev_info->max_vmdq_pools = ETH_16_POOLS; 3855 else 3856 dev_info->max_vmdq_pools = ETH_64_POOLS; 3857 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3858 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3859 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3860 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3861 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3862 dev_info->rx_queue_offload_capa); 3863 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3864 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3865 3866 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3867 .rx_thresh = { 3868 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3869 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3870 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3871 }, 3872 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3873 .rx_drop_en = 0, 3874 .offloads = 0, 3875 }; 3876 3877 dev_info->default_txconf = (struct rte_eth_txconf) { 3878 .tx_thresh = { 3879 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3880 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3881 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3882 }, 3883 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3884 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3885 .offloads = 0, 3886 }; 3887 3888 dev_info->rx_desc_lim = rx_desc_lim; 3889 dev_info->tx_desc_lim = tx_desc_lim; 3890 3891 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3892 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3893 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3894 3895 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; 3896 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3897 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 3898 dev_info->speed_capa = ETH_LINK_SPEED_10M | 3899 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; 3900 3901 if (hw->mac.type == ixgbe_mac_X540 || 3902 hw->mac.type == ixgbe_mac_X540_vf || 3903 hw->mac.type == ixgbe_mac_X550 || 3904 hw->mac.type == ixgbe_mac_X550_vf) { 3905 dev_info->speed_capa |= ETH_LINK_SPEED_100M; 3906 } 3907 if (hw->mac.type == ixgbe_mac_X550) { 3908 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; 3909 dev_info->speed_capa |= ETH_LINK_SPEED_5G; 3910 } 3911 3912 /* Driver-preferred Rx/Tx parameters */ 3913 dev_info->default_rxportconf.burst_size = 32; 3914 dev_info->default_txportconf.burst_size = 32; 3915 dev_info->default_rxportconf.nb_queues = 1; 3916 dev_info->default_txportconf.nb_queues = 1; 3917 dev_info->default_rxportconf.ring_size = 256; 3918 dev_info->default_txportconf.ring_size = 256; 3919 3920 return 0; 3921 } 3922 3923 static const uint32_t * 3924 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 3925 { 3926 static const uint32_t ptypes[] = { 3927 /* For non-vec functions, 3928 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 3929 * for vec functions, 3930 * refers to _recv_raw_pkts_vec(). 
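 * The table below is only advertised when one of those Rx burst functions is in use; otherwise NULL is returned.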
3931 */ 3932 RTE_PTYPE_L2_ETHER, 3933 RTE_PTYPE_L3_IPV4, 3934 RTE_PTYPE_L3_IPV4_EXT, 3935 RTE_PTYPE_L3_IPV6, 3936 RTE_PTYPE_L3_IPV6_EXT, 3937 RTE_PTYPE_L4_SCTP, 3938 RTE_PTYPE_L4_TCP, 3939 RTE_PTYPE_L4_UDP, 3940 RTE_PTYPE_TUNNEL_IP, 3941 RTE_PTYPE_INNER_L3_IPV6, 3942 RTE_PTYPE_INNER_L3_IPV6_EXT, 3943 RTE_PTYPE_INNER_L4_TCP, 3944 RTE_PTYPE_INNER_L4_UDP, 3945 RTE_PTYPE_UNKNOWN 3946 }; 3947 3948 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 3949 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 3950 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 3951 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) 3952 return ptypes; 3953 3954 #if defined(RTE_ARCH_X86) || defined(__ARM_NEON) 3955 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || 3956 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) 3957 return ptypes; 3958 #endif 3959 return NULL; 3960 } 3961 3962 static int 3963 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 3964 struct rte_eth_dev_info *dev_info) 3965 { 3966 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3967 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3968 3969 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3970 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3971 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 3972 dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ 3973 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3974 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3975 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3976 dev_info->max_vfs = pci_dev->max_vfs; 3977 if (hw->mac.type == ixgbe_mac_82598EB) 3978 dev_info->max_vmdq_pools = ETH_16_POOLS; 3979 else 3980 dev_info->max_vmdq_pools = ETH_64_POOLS; 3981 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3982 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3983 dev_info->rx_queue_offload_capa); 3984 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3985 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3986 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3987 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3988 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3989 3990 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3991 .rx_thresh = { 3992 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3993 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3994 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3995 }, 3996 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3997 .rx_drop_en = 0, 3998 .offloads = 0, 3999 }; 4000 4001 dev_info->default_txconf = (struct rte_eth_txconf) { 4002 .tx_thresh = { 4003 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 4004 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 4005 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 4006 }, 4007 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 4008 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 4009 .offloads = 0, 4010 }; 4011 4012 dev_info->rx_desc_lim = rx_desc_lim; 4013 dev_info->tx_desc_lim = tx_desc_lim; 4014 4015 return 0; 4016 } 4017 4018 static int 4019 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 4020 bool *link_up, int wait_to_complete) 4021 { 4022 struct ixgbe_adapter *adapter = container_of(hw, 4023 struct ixgbe_adapter, hw); 4024 struct ixgbe_mbx_info *mbx = &hw->mbx; 4025 struct ixgbe_mac_info *mac = &hw->mac; 4026 uint32_t links_reg, in_msg; 4027 int ret_val = 0; 4028 4029 /* If we were hit with a reset drop the link */ 4030 if (!mbx->ops.check_for_rst(hw, 0) || 
!mbx->timeout) 4031 mac->get_link_status = true; 4032 4033 if (!mac->get_link_status) 4034 goto out; 4035 4036 /* if link status is down there is no point in checking whether the PF is up */ 4037 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4038 if (!(links_reg & IXGBE_LINKS_UP)) 4039 goto out; 4040 4041 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs 4042 * before the link status is correct 4043 */ 4044 if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { 4045 int i; 4046 4047 for (i = 0; i < 5; i++) { 4048 rte_delay_us(100); 4049 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4050 4051 if (!(links_reg & IXGBE_LINKS_UP)) 4052 goto out; 4053 } 4054 } 4055 4056 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 4057 case IXGBE_LINKS_SPEED_10G_82599: 4058 *speed = IXGBE_LINK_SPEED_10GB_FULL; 4059 if (hw->mac.type >= ixgbe_mac_X550) { 4060 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4061 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 4062 } 4063 break; 4064 case IXGBE_LINKS_SPEED_1G_82599: 4065 *speed = IXGBE_LINK_SPEED_1GB_FULL; 4066 break; 4067 case IXGBE_LINKS_SPEED_100_82599: 4068 *speed = IXGBE_LINK_SPEED_100_FULL; 4069 if (hw->mac.type == ixgbe_mac_X550) { 4070 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4071 *speed = IXGBE_LINK_SPEED_5GB_FULL; 4072 } 4073 break; 4074 case IXGBE_LINKS_SPEED_10_X550EM_A: 4075 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4076 /* this speed encoding is reserved on MACs older than X550 */ 4077 if (hw->mac.type >= ixgbe_mac_X550) 4078 *speed = IXGBE_LINK_SPEED_10_FULL; 4079 break; 4080 default: 4081 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4082 } 4083 4084 if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) { 4085 if (*speed == IXGBE_LINK_SPEED_UNKNOWN) 4086 mac->get_link_status = true; 4087 else 4088 mac->get_link_status = false; 4089 4090 goto out; 4091 } 4092 4093 /* if the read failed it could just be a mailbox collision; it is best to wait 4094 * until we are called again rather than report an error 4095 */ 4096 if (mbx->ops.read(hw, &in_msg, 1, 0)) 4097 goto out; 4098 4099 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { 4100 /* msg is not CTS; if it is a NACK we must have lost CTS status */ 4101 if (in_msg & IXGBE_VT_MSGTYPE_NACK) 4102 mac->get_link_status = false; 4103 goto out; 4104 } 4105 4106 /* the PF is talking; if we timed out in the past we need to reinit */ 4107 if (!mbx->timeout) { 4108 ret_val = -1; 4109 goto out; 4110 } 4111 4112 /* if we passed all the tests above then the link is up and we no 4113 * longer need to check for link 4114 */ 4115 mac->get_link_status = false; 4116 4117 out: 4118 *link_up = !mac->get_link_status; 4119 return ret_val; 4120 } 4121 4122 /* 4123 * If @timeout_ms is 0, the function does not return until link setup completes. 4124 * It returns 1 on completion and 0 on timeout. 4125 */ 4126 static int 4127 ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms) 4128 { 4129 #define WARNING_TIMEOUT 9000 /* 9s in total */ 4130 struct ixgbe_adapter *ad = dev->data->dev_private; 4131 uint32_t timeout = timeout_ms ?
timeout_ms : WARNING_TIMEOUT; 4132 4133 while (rte_atomic32_read(&ad->link_thread_running)) { 4134 msec_delay(1); 4135 timeout--; 4136 4137 if (timeout_ms) { 4138 if (!timeout) 4139 return 0; 4140 } else if (!timeout) { 4141 /* keep waiting until the link setup completes */ 4142 timeout = WARNING_TIMEOUT; 4143 PMD_DRV_LOG(ERR, "IXGBE link thread is taking too long to complete!"); 4144 } 4145 } 4146 4147 return 1; 4148 } 4149 4150 static void * 4151 ixgbe_dev_setup_link_thread_handler(void *param) 4152 { 4153 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4154 struct ixgbe_adapter *ad = dev->data->dev_private; 4155 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4156 struct ixgbe_interrupt *intr = 4157 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4158 u32 speed; 4159 bool autoneg = false; 4160 4161 pthread_detach(pthread_self()); 4162 speed = hw->phy.autoneg_advertised; 4163 if (!speed) 4164 ixgbe_get_link_capabilities(hw, &speed, &autoneg); 4165 4166 ixgbe_setup_link(hw, speed, true); 4167 4168 intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 4169 rte_atomic32_clear(&ad->link_thread_running); 4170 return NULL; 4171 } 4172 4173 /* 4174 * In a FreeBSD environment the nic_uio drivers do not support interrupts, 4175 * so rte_intr_callback_register() will fail to register interrupts and 4176 * the link status cannot be changed from down to up by an interrupt 4177 * callback. We therefore need to wait for the controller to acquire link 4178 * when ports start. 4179 * It returns 0 on link up. 4180 */ 4181 static int 4182 ixgbe_wait_for_link_up(struct ixgbe_hw *hw) 4183 { 4184 #ifdef RTE_EXEC_ENV_FREEBSD 4185 int err, i; 4186 bool link_up = false; 4187 uint32_t speed = 0; 4188 const int nb_iter = 25; 4189 4190 for (i = 0; i < nb_iter; i++) { 4191 err = ixgbe_check_link(hw, &speed, &link_up, 0); 4192 if (err) 4193 return err; 4194 if (link_up) 4195 return 0; 4196 msec_delay(200); 4197 } 4198 4199 return 0; 4200 #else 4201 RTE_SET_USED(hw); 4202 return 0; 4203 #endif 4204 } 4205 4206 /* return 0 means link status changed, -1 means not changed */ 4207 int 4208 ixgbe_dev_link_update_share(struct rte_eth_dev *dev, 4209 int wait_to_complete, int vf) 4210 { 4211 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4212 struct ixgbe_adapter *ad = dev->data->dev_private; 4213 struct rte_eth_link link; 4214 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4215 struct ixgbe_interrupt *intr = 4216 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4217 bool link_up; 4218 int diag; 4219 int wait = 1; 4220 u32 esdp_reg; 4221 4222 memset(&link, 0, sizeof(link)); 4223 link.link_status = ETH_LINK_DOWN; 4224 link.link_speed = ETH_SPEED_NUM_NONE; 4225 link.link_duplex = ETH_LINK_HALF_DUPLEX; 4226 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 4227 ETH_LINK_SPEED_FIXED); 4228 4229 hw->mac.get_link_status = true; 4230 4231 if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) 4232 return rte_eth_linkstatus_set(dev, &link); 4233 4234 /* no need to wait to complete if the LSC interrupt is enabled or no wait was requested */ 4235 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 4236 wait = 0; 4237 4238 /* BSD has no interrupt mechanism, so force NIC status synchronization.
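 * (i.e. always poll the link with wait semantics on FreeBSD).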
*/ 4239 #ifdef RTE_EXEC_ENV_FREEBSD 4240 wait = 1; 4241 #endif 4242 4243 if (vf) 4244 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); 4245 else 4246 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); 4247 4248 if (diag != 0) { 4249 link.link_speed = ETH_SPEED_NUM_100M; 4250 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4251 return rte_eth_linkstatus_set(dev, &link); 4252 } 4253 4254 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4255 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 4256 if ((esdp_reg & IXGBE_ESDP_SDP3)) 4257 link_up = 0; 4258 } 4259 4260 if (link_up == 0) { 4261 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4262 ixgbe_dev_wait_setup_link_complete(dev, 0); 4263 if (rte_atomic32_test_and_set(&ad->link_thread_running)) { 4264 /* To avoid race condition between threads, set 4265 * the IXGBE_FLAG_NEED_LINK_CONFIG flag only 4266 * when there is no link thread running. 4267 */ 4268 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 4269 if (rte_ctrl_thread_create(&ad->link_thread_tid, 4270 "ixgbe-link-handler", 4271 NULL, 4272 ixgbe_dev_setup_link_thread_handler, 4273 dev) < 0) { 4274 PMD_DRV_LOG(ERR, 4275 "Create link thread failed!"); 4276 rte_atomic32_clear(&ad->link_thread_running); 4277 } 4278 } else { 4279 PMD_DRV_LOG(ERR, 4280 "Other link thread is running now!"); 4281 } 4282 } 4283 return rte_eth_linkstatus_set(dev, &link); 4284 } 4285 4286 link.link_status = ETH_LINK_UP; 4287 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4288 4289 switch (link_speed) { 4290 default: 4291 case IXGBE_LINK_SPEED_UNKNOWN: 4292 link.link_speed = ETH_SPEED_NUM_UNKNOWN; 4293 break; 4294 4295 case IXGBE_LINK_SPEED_10_FULL: 4296 link.link_speed = ETH_SPEED_NUM_10M; 4297 break; 4298 4299 case IXGBE_LINK_SPEED_100_FULL: 4300 link.link_speed = ETH_SPEED_NUM_100M; 4301 break; 4302 4303 case IXGBE_LINK_SPEED_1GB_FULL: 4304 link.link_speed = ETH_SPEED_NUM_1G; 4305 break; 4306 4307 case IXGBE_LINK_SPEED_2_5GB_FULL: 4308 link.link_speed = ETH_SPEED_NUM_2_5G; 4309 break; 4310 4311 case IXGBE_LINK_SPEED_5GB_FULL: 4312 link.link_speed = ETH_SPEED_NUM_5G; 4313 break; 4314 4315 case IXGBE_LINK_SPEED_10GB_FULL: 4316 link.link_speed = ETH_SPEED_NUM_10G; 4317 break; 4318 } 4319 4320 return rte_eth_linkstatus_set(dev, &link); 4321 } 4322 4323 static int 4324 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4325 { 4326 return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); 4327 } 4328 4329 static int 4330 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4331 { 4332 return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); 4333 } 4334 4335 static int 4336 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 4337 { 4338 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4339 uint32_t fctrl; 4340 4341 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4342 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4343 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4344 4345 return 0; 4346 } 4347 4348 static int 4349 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 4350 { 4351 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4352 uint32_t fctrl; 4353 4354 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4355 fctrl &= (~IXGBE_FCTRL_UPE); 4356 if (dev->data->all_multicast == 1) 4357 fctrl |= IXGBE_FCTRL_MPE; 4358 else 4359 fctrl &= (~IXGBE_FCTRL_MPE); 4360 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4361 4362 return 0; 4363 } 4364 4365 static int 4366 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 4367 { 4368 struct ixgbe_hw *hw = 
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4369 uint32_t fctrl; 4370 4371 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4372 fctrl |= IXGBE_FCTRL_MPE; 4373 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4374 4375 return 0; 4376 } 4377 4378 static int 4379 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 4380 { 4381 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4382 uint32_t fctrl; 4383 4384 if (dev->data->promiscuous == 1) 4385 return 0; /* must remain in all_multicast mode */ 4386 4387 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4388 fctrl &= (~IXGBE_FCTRL_MPE); 4389 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4390 4391 return 0; 4392 } 4393 4394 /** 4395 * It clears the interrupt causes and enables the interrupt. 4396 * It will be called only once during NIC initialization. 4397 * 4398 * @param dev 4399 * Pointer to struct rte_eth_dev. 4400 * @param on 4401 * Enable or Disable. 4402 * 4403 * @return 4404 * - On success, zero. 4405 * - On failure, a negative value. 4406 */ 4407 static int 4408 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 4409 { 4410 struct ixgbe_interrupt *intr = 4411 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4412 4413 ixgbe_dev_link_status_print(dev); 4414 if (on) 4415 intr->mask |= IXGBE_EICR_LSC; 4416 else 4417 intr->mask &= ~IXGBE_EICR_LSC; 4418 4419 return 0; 4420 } 4421 4422 /** 4423 * It clears the interrupt causes and enables the interrupt. 4424 * It will be called only once during NIC initialization. 4425 * 4426 * @param dev 4427 * Pointer to struct rte_eth_dev. 4428 * 4429 * @return 4430 * - On success, zero. 4431 * - On failure, a negative value. 4432 */ 4433 static int 4434 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) 4435 { 4436 struct ixgbe_interrupt *intr = 4437 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4438 4439 intr->mask |= IXGBE_EICR_RTX_QUEUE; 4440 4441 return 0; 4442 } 4443 4444 /** 4445 * It clears the interrupt causes and enables the interrupt. 4446 * It will be called only once during NIC initialization. 4447 * 4448 * @param dev 4449 * Pointer to struct rte_eth_dev. 4450 * 4451 * @return 4452 * - On success, zero. 4453 * - On failure, a negative value. 4454 */ 4455 static int 4456 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) 4457 { 4458 struct ixgbe_interrupt *intr = 4459 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4460 4461 intr->mask |= IXGBE_EICR_LINKSEC; 4462 4463 return 0; 4464 } 4465 4466 /* 4467 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. 4468 * 4469 * @param dev 4470 * Pointer to struct rte_eth_dev. 4471 * 4472 * @return 4473 * - On success, zero. 4474 * - On failure, a negative value.
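 * Note: reading EICR clears the reported causes, so they are cached in intr->flags for the subsequent interrupt action.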
4475 */ 4476 static int 4477 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 4478 { 4479 uint32_t eicr; 4480 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4481 struct ixgbe_interrupt *intr = 4482 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4483 4484 /* clear all cause mask */ 4485 ixgbe_disable_intr(hw); 4486 4487 /* read-on-clear nic registers here */ 4488 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4489 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 4490 4491 intr->flags = 0; 4492 4493 /* set flag for async link update */ 4494 if (eicr & IXGBE_EICR_LSC) 4495 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4496 4497 if (eicr & IXGBE_EICR_MAILBOX) 4498 intr->flags |= IXGBE_FLAG_MAILBOX; 4499 4500 if (eicr & IXGBE_EICR_LINKSEC) 4501 intr->flags |= IXGBE_FLAG_MACSEC; 4502 4503 if (hw->mac.type == ixgbe_mac_X550EM_x && 4504 hw->phy.type == ixgbe_phy_x550em_ext_t && 4505 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) 4506 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; 4507 4508 return 0; 4509 } 4510 4511 /** 4512 * It reads and then prints the current link status. 4513 * 4514 * @param dev 4515 * Pointer to struct rte_eth_dev. 4516 * 4517 * @return 4518 * void 4519 * 4520 */ 4521 static void 4522 ixgbe_dev_link_status_print(struct rte_eth_dev *dev) 4523 { 4524 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4525 struct rte_eth_link link; 4526 4527 rte_eth_linkstatus_get(dev, &link); 4528 4529 if (link.link_status) { 4530 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 4531 (int)(dev->data->port_id), 4532 (unsigned)link.link_speed, 4533 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 4534 "full-duplex" : "half-duplex"); 4535 } else { 4536 PMD_INIT_LOG(INFO, " Port %d: Link Down", 4537 (int)(dev->data->port_id)); 4538 } 4539 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 4540 pci_dev->addr.domain, 4541 pci_dev->addr.bus, 4542 pci_dev->addr.devid, 4543 pci_dev->addr.function); 4544 } 4545 4546 /* 4547 * It executes link_update after an interrupt is known to have occurred. 4548 * 4549 * @param dev 4550 * Pointer to struct rte_eth_dev. 4551 * 4552 * @return 4553 * - On success, zero. 4554 * - On failure, a negative value.
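 * The LSC event callback is not fired here; an alarm is armed so the delayed handler can report it once the link has stabilized.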
4555 */ 4556 static int 4557 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) 4558 { 4559 struct ixgbe_interrupt *intr = 4560 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4561 int64_t timeout; 4562 struct ixgbe_hw *hw = 4563 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4564 4565 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); 4566 4567 if (intr->flags & IXGBE_FLAG_MAILBOX) { 4568 ixgbe_pf_mbx_process(dev); 4569 intr->flags &= ~IXGBE_FLAG_MAILBOX; 4570 } 4571 4572 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4573 ixgbe_handle_lasi(hw); 4574 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4575 } 4576 4577 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4578 struct rte_eth_link link; 4579 4580 /* get the link status before the update, to predict the transition */ 4581 rte_eth_linkstatus_get(dev, &link); 4582 4583 ixgbe_dev_link_update(dev, 0); 4584 4585 /* link is likely to come up */ 4586 if (!link.link_status) 4587 /* handle it 1 sec later, to wait for it to become stable */ 4588 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; 4589 /* link is likely to go down */ 4590 else 4591 /* handle it 4 sec later, to wait for it to become stable */ 4592 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; 4593 4594 ixgbe_dev_link_status_print(dev); 4595 if (rte_eal_alarm_set(timeout * 1000, 4596 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) 4597 PMD_DRV_LOG(ERR, "Error setting alarm"); 4598 else { 4599 /* remember original mask */ 4600 intr->mask_original = intr->mask; 4601 /* only disable lsc interrupt */ 4602 intr->mask &= ~IXGBE_EIMS_LSC; 4603 } 4604 } 4605 4606 PMD_DRV_LOG(DEBUG, "enable intr immediately"); 4607 ixgbe_enable_intr(dev); 4608 4609 return 0; 4610 } 4611 4612 /** 4613 * Interrupt handler which shall be registered as an alarm callback for 4614 * delayed handling of a specific interrupt, to wait for a stable NIC state. 4615 * As the interrupt state is not stable for ixgbe right after the link goes 4616 * down, it needs to wait 4 seconds for the status to stabilize. 4617 * 4618 * @param handle 4619 * Pointer to interrupt handle. 4620 * @param param 4621 * The address of parameter (struct rte_eth_dev *) registered before.
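 * Besides processing the deferred causes, it restores the interrupt mask saved by ixgbe_dev_interrupt_action().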
4622 * 4623 * @return 4624 * void 4625 */ 4626 static void 4627 ixgbe_dev_interrupt_delayed_handler(void *param) 4628 { 4629 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4630 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4631 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4632 struct ixgbe_interrupt *intr = 4633 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4634 struct ixgbe_hw *hw = 4635 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4636 uint32_t eicr; 4637 4638 ixgbe_disable_intr(hw); 4639 4640 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4641 if (eicr & IXGBE_EICR_MAILBOX) 4642 ixgbe_pf_mbx_process(dev); 4643 4644 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4645 ixgbe_handle_lasi(hw); 4646 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4647 } 4648 4649 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4650 ixgbe_dev_link_update(dev, 0); 4651 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 4652 ixgbe_dev_link_status_print(dev); 4653 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 4654 } 4655 4656 if (intr->flags & IXGBE_FLAG_MACSEC) { 4657 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, NULL); 4658 intr->flags &= ~IXGBE_FLAG_MACSEC; 4659 } 4660 4661 /* restore original mask */ 4662 intr->mask = intr->mask_original; 4663 intr->mask_original = 0; 4664 4665 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 4666 ixgbe_enable_intr(dev); 4667 rte_intr_ack(intr_handle); 4668 } 4669 4670 /** 4671 * Interrupt handler triggered by the NIC for handling a 4672 * specific interrupt. 4673 * 4674 * @param handle 4675 * Pointer to interrupt handle. 4676 * @param param 4677 * The address of parameter (struct rte_eth_dev *) registered before. 4678 * 4679 * @return 4680 * void 4681 */ 4682 static void 4683 ixgbe_dev_interrupt_handler(void *param) 4684 { 4685 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4686 4687 ixgbe_dev_interrupt_get_status(dev); 4688 ixgbe_dev_interrupt_action(dev); 4689 } 4690 4691 static int 4692 ixgbe_dev_led_on(struct rte_eth_dev *dev) 4693 { 4694 struct ixgbe_hw *hw; 4695 4696 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4697 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4698 } 4699 4700 static int 4701 ixgbe_dev_led_off(struct rte_eth_dev *dev) 4702 { 4703 struct ixgbe_hw *hw; 4704 4705 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4706 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4707 } 4708 4709 static int 4710 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4711 { 4712 struct ixgbe_hw *hw; 4713 uint32_t mflcn_reg; 4714 uint32_t fccfg_reg; 4715 int rx_pause; 4716 int tx_pause; 4717 4718 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4719 4720 fc_conf->pause_time = hw->fc.pause_time; 4721 fc_conf->high_water = hw->fc.high_water[0]; 4722 fc_conf->low_water = hw->fc.low_water[0]; 4723 fc_conf->send_xon = hw->fc.send_xon; 4724 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 4725 4726 /* 4727 * Return rx_pause status according to the actual setting of 4728 * the MFLCN register. 4729 */ 4730 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4731 if (mflcn_reg & IXGBE_MFLCN_PMCF) 4732 fc_conf->mac_ctrl_frame_fwd = 1; 4733 else 4734 fc_conf->mac_ctrl_frame_fwd = 0; 4735 4736 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) 4737 rx_pause = 1; 4738 else 4739 rx_pause = 0; 4740 4741 /* 4742 * Return tx_pause status according to the actual setting of 4743 * the FCCFG register.
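 * Either 802.3x link-level or priority flow control being enabled counts as Tx pause here.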
4744 */ 4745 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4746 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 4747 tx_pause = 1; 4748 else 4749 tx_pause = 0; 4750 4751 if (rx_pause && tx_pause) 4752 fc_conf->mode = RTE_FC_FULL; 4753 else if (rx_pause) 4754 fc_conf->mode = RTE_FC_RX_PAUSE; 4755 else if (tx_pause) 4756 fc_conf->mode = RTE_FC_TX_PAUSE; 4757 else 4758 fc_conf->mode = RTE_FC_NONE; 4759 4760 return 0; 4761 } 4762 4763 static int 4764 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4765 { 4766 struct ixgbe_hw *hw; 4767 struct ixgbe_adapter *adapter = dev->data->dev_private; 4768 int err; 4769 uint32_t rx_buf_size; 4770 uint32_t max_high_water; 4771 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4772 ixgbe_fc_none, 4773 ixgbe_fc_rx_pause, 4774 ixgbe_fc_tx_pause, 4775 ixgbe_fc_full 4776 }; 4777 4778 PMD_INIT_FUNC_TRACE(); 4779 4780 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4781 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 4782 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4783 4784 /* 4785 * At least reserve one Ethernet frame for watermark 4786 * high_water/low_water in kilo bytes for ixgbe 4787 */ 4788 max_high_water = (rx_buf_size - 4789 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4790 if ((fc_conf->high_water > max_high_water) || 4791 (fc_conf->high_water < fc_conf->low_water)) { 4792 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4793 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4794 return -EINVAL; 4795 } 4796 4797 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 4798 hw->fc.pause_time = fc_conf->pause_time; 4799 hw->fc.high_water[0] = fc_conf->high_water; 4800 hw->fc.low_water[0] = fc_conf->low_water; 4801 hw->fc.send_xon = fc_conf->send_xon; 4802 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 4803 adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 4804 4805 err = ixgbe_flow_ctrl_enable(dev, hw); 4806 if (err < 0) { 4807 PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err); 4808 return -EIO; 4809 } 4810 return err; 4811 } 4812 4813 /** 4814 * ixgbe_pfc_enable_generic - Enable flow control 4815 * @hw: pointer to hardware structure 4816 * @tc_num: traffic class number 4817 * Enable flow control according to the current settings. 
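 * Returns 0 on success or an IXGBE_ERR_* code when the watermarks or the requested mode are invalid.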
4818 */ 4819 static int 4820 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) 4821 { 4822 int ret_val = 0; 4823 uint32_t mflcn_reg, fccfg_reg; 4824 uint32_t reg; 4825 uint32_t fcrtl, fcrth; 4826 uint8_t i; 4827 uint8_t nb_rx_en; 4828 4829 /* Validate the water mark configuration */ 4830 if (!hw->fc.pause_time) { 4831 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4832 goto out; 4833 } 4834 4835 /* Low water mark of zero causes XOFF floods */ 4836 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 4837 /* High/Low water can not be 0 */ 4838 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { 4839 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4840 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4841 goto out; 4842 } 4843 4844 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { 4845 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4846 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4847 goto out; 4848 } 4849 } 4850 /* Negotiate the fc mode to use */ 4851 ixgbe_fc_autoneg(hw); 4852 4853 /* Disable any previous flow control settings */ 4854 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4855 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); 4856 4857 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4858 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 4859 4860 switch (hw->fc.current_mode) { 4861 case ixgbe_fc_none: 4862 /* 4863 * If the count of enabled RX Priority Flow control >1, 4864 * and the TX pause can not be disabled 4865 */ 4866 nb_rx_en = 0; 4867 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4868 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4869 if (reg & IXGBE_FCRTH_FCEN) 4870 nb_rx_en++; 4871 } 4872 if (nb_rx_en > 1) 4873 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4874 break; 4875 case ixgbe_fc_rx_pause: 4876 /* 4877 * Rx Flow control is enabled and Tx Flow control is 4878 * disabled by software override. Since there really 4879 * isn't a way to advertise that we are capable of RX 4880 * Pause ONLY, we will advertise that we support both 4881 * symmetric and asymmetric Rx PAUSE. Later, we will 4882 * disable the adapter's ability to send PAUSE frames. 4883 */ 4884 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4885 /* 4886 * If the count of enabled RX Priority Flow control >1, 4887 * and the TX pause can not be disabled 4888 */ 4889 nb_rx_en = 0; 4890 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4891 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4892 if (reg & IXGBE_FCRTH_FCEN) 4893 nb_rx_en++; 4894 } 4895 if (nb_rx_en > 1) 4896 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4897 break; 4898 case ixgbe_fc_tx_pause: 4899 /* 4900 * Tx Flow control is enabled, and Rx Flow control is 4901 * disabled by software override. 4902 */ 4903 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4904 break; 4905 case ixgbe_fc_full: 4906 /* Flow control (both Rx and Tx) is enabled by SW override. */ 4907 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4908 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4909 break; 4910 default: 4911 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); 4912 ret_val = IXGBE_ERR_CONFIG; 4913 goto out; 4914 } 4915 4916 /* Set 802.3x based flow control settings. */ 4917 mflcn_reg |= IXGBE_MFLCN_DPF; 4918 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 4919 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 4920 4921 /* Set up and enable Rx high/low water mark thresholds, enable XON. 
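 * The watermarks are programmed in KB (shifted left by 10) into FCRTL/FCRTH; when Tx pause is not in use the high threshold is instead derived from the packet-buffer size to avoid Tx hangs.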
*/ 4922 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 4923 hw->fc.high_water[tc_num]) { 4924 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 4925 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 4926 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 4927 } else { 4928 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 4929 /* 4930 * In order to prevent Tx hangs when the internal Tx 4931 * switch is enabled we must set the high water mark 4932 * to the maximum FCRTH value. This allows the Tx 4933 * switch to function even under heavy Rx workloads. 4934 */ 4935 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 4936 } 4937 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 4938 4939 /* Configure pause time (2 TCs per register) */ 4940 reg = hw->fc.pause_time * 0x00010001; 4941 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 4942 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 4943 4944 /* Configure flow control refresh threshold value */ 4945 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 4946 4947 out: 4948 return ret_val; 4949 } 4950 4951 static int 4952 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 4953 { 4954 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4955 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 4956 4957 if (hw->mac.type != ixgbe_mac_82598EB) { 4958 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 4959 } 4960 return ret_val; 4961 } 4962 4963 static int 4964 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 4965 { 4966 int err; 4967 uint32_t rx_buf_size; 4968 uint32_t max_high_water; 4969 uint8_t tc_num; 4970 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 4971 struct ixgbe_hw *hw = 4972 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4973 struct ixgbe_dcb_config *dcb_config = 4974 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 4975 4976 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4977 ixgbe_fc_none, 4978 ixgbe_fc_rx_pause, 4979 ixgbe_fc_tx_pause, 4980 ixgbe_fc_full 4981 }; 4982 4983 PMD_INIT_FUNC_TRACE(); 4984 4985 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 4986 tc_num = map[pfc_conf->priority]; 4987 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 4988 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4989 /* 4990 * At least reserve one Ethernet frame for watermark 4991 * high_water/low_water in kilo bytes for ixgbe 4992 */ 4993 max_high_water = (rx_buf_size - 4994 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4995 if ((pfc_conf->fc.high_water > max_high_water) || 4996 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 4997 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4998 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4999 return -EINVAL; 5000 } 5001 5002 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 5003 hw->fc.pause_time = pfc_conf->fc.pause_time; 5004 hw->fc.send_xon = pfc_conf->fc.send_xon; 5005 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 5006 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 5007 5008 err = ixgbe_dcb_pfc_enable(dev, tc_num); 5009 5010 /* Not negotiated is not an error case */ 5011 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 5012 return 0; 5013 5014 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 5015 return -EIO; 5016 } 5017 5018 static int 5019 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 5020 struct rte_eth_rss_reta_entry64 *reta_conf, 5021 uint16_t 
reta_size) 5022 { 5023 uint16_t i, sp_reta_size; 5024 uint8_t j, mask; 5025 uint32_t reta, r; 5026 uint16_t idx, shift; 5027 struct ixgbe_adapter *adapter = dev->data->dev_private; 5028 struct rte_eth_dev_data *dev_data = dev->data; 5029 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5030 uint32_t reta_reg; 5031 5032 PMD_INIT_FUNC_TRACE(); 5033 5034 if (!dev_data->dev_started) { 5035 PMD_DRV_LOG(ERR, 5036 "port %d must be started before rss reta update", 5037 dev_data->port_id); 5038 return -EIO; 5039 } 5040 5041 if (!ixgbe_rss_update_sp(hw->mac.type)) { 5042 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 5043 "NIC."); 5044 return -ENOTSUP; 5045 } 5046 5047 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5048 if (reta_size != sp_reta_size) { 5049 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table " 5050 "(%d) doesn't match the number supported by hardware " 5051 "(%d)", reta_size, sp_reta_size); 5052 return -EINVAL; 5053 } 5054 5055 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5056 idx = i / RTE_RETA_GROUP_SIZE; 5057 shift = i % RTE_RETA_GROUP_SIZE; 5058 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5059 IXGBE_4_BIT_MASK); 5060 if (!mask) 5061 continue; 5062 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5063 if (mask == IXGBE_4_BIT_MASK) 5064 r = 0; 5065 else 5066 r = IXGBE_READ_REG(hw, reta_reg); 5067 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5068 if (mask & (0x1 << j)) 5069 reta |= reta_conf[idx].reta[shift + j] << 5070 (CHAR_BIT * j); 5071 else 5072 reta |= r & (IXGBE_8_BIT_MASK << 5073 (CHAR_BIT * j)); 5074 } 5075 IXGBE_WRITE_REG(hw, reta_reg, reta); 5076 } 5077 adapter->rss_reta_updated = 1; 5078 5079 return 0; 5080 } 5081 5082 static int 5083 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 5084 struct rte_eth_rss_reta_entry64 *reta_conf, 5085 uint16_t reta_size) 5086 { 5087 uint16_t i, sp_reta_size; 5088 uint8_t j, mask; 5089 uint32_t reta; 5090 uint16_t idx, shift; 5091 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5092 uint32_t reta_reg; 5093 5094 PMD_INIT_FUNC_TRACE(); 5095 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5096 if (reta_size != sp_reta_size) { 5097 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table " 5098 "(%d) doesn't match the number supported by hardware " 5099 "(%d)", reta_size, sp_reta_size); 5100 return -EINVAL; 5101 } 5102 5103 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5104 idx = i / RTE_RETA_GROUP_SIZE; 5105 shift = i % RTE_RETA_GROUP_SIZE; 5106 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5107 IXGBE_4_BIT_MASK); 5108 if (!mask) 5109 continue; 5110 5111 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5112 reta = IXGBE_READ_REG(hw, reta_reg); 5113 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5114 if (mask & (0x1 << j)) 5115 reta_conf[idx].reta[shift + j] = 5116 ((reta >> (CHAR_BIT * j)) & 5117 IXGBE_8_BIT_MASK); 5118 } 5119 } 5120 5121 return 0; 5122 } 5123 5124 static int 5125 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 5126 uint32_t index, uint32_t pool) 5127 { 5128 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5129 uint32_t enable_addr = 1; 5130 5131 return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, 5132 pool, enable_addr); 5133 } 5134 5135 static void 5136 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 5137 { 5138 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5139 5140 ixgbe_clear_rar(hw, index); 5141 } 5142 5143 static int 5144
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 5145 { 5146 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5147 5148 ixgbe_remove_rar(dev, 0); 5149 ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); 5150 5151 return 0; 5152 } 5153 5154 static bool 5155 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 5156 { 5157 if (strcmp(dev->device->driver->name, drv->driver.name)) 5158 return false; 5159 5160 return true; 5161 } 5162 5163 bool 5164 is_ixgbe_supported(struct rte_eth_dev *dev) 5165 { 5166 return is_device_supported(dev, &rte_ixgbe_pmd); 5167 } 5168 5169 static int 5170 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 5171 { 5172 uint32_t hlreg0; 5173 uint32_t maxfrs; 5174 struct ixgbe_hw *hw; 5175 struct rte_eth_dev_info dev_info; 5176 uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; 5177 struct rte_eth_dev_data *dev_data = dev->data; 5178 int ret; 5179 5180 ret = ixgbe_dev_info_get(dev, &dev_info); 5181 if (ret != 0) 5182 return ret; 5183 5184 /* check that mtu is within the allowed range */ 5185 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 5186 return -EINVAL; 5187 5188 /* If device is started, refuse mtu that requires the support of 5189 * scattered packets when this feature has not been enabled before. 5190 */ 5191 if (dev_data->dev_started && !dev_data->scattered_rx && 5192 (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > 5193 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 5194 PMD_INIT_LOG(ERR, "Stop port first."); 5195 return -EINVAL; 5196 } 5197 5198 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5199 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 5200 5201 /* switch to jumbo mode if needed */ 5202 if (frame_size > IXGBE_ETH_MAX_LEN) { 5203 dev->data->dev_conf.rxmode.offloads |= 5204 DEV_RX_OFFLOAD_JUMBO_FRAME; 5205 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 5206 } else { 5207 dev->data->dev_conf.rxmode.offloads &= 5208 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 5209 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 5210 } 5211 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 5212 5213 /* update max frame size */ 5214 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 5215 5216 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 5217 maxfrs &= 0x0000FFFF; 5218 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); 5219 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 5220 5221 return 0; 5222 } 5223 5224 /* 5225 * Virtual Function operations 5226 */ 5227 static void 5228 ixgbevf_intr_disable(struct rte_eth_dev *dev) 5229 { 5230 struct ixgbe_interrupt *intr = 5231 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5232 struct ixgbe_hw *hw = 5233 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5234 5235 PMD_INIT_FUNC_TRACE(); 5236 5237 /* Clear interrupt mask to stop from interrupts being generated */ 5238 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 5239 5240 IXGBE_WRITE_FLUSH(hw); 5241 5242 /* Clear mask value. 
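
/*
 * Illustrative sketch (not part of the driver): the MAXFRS update done by
 * ixgbe_dev_mtu_set() above.  The maximum frame size lives in the upper 16
 * bits of MAXFRS, so the register is read-modified-written with the low half
 * preserved.  'overhead' is an assumption standing in for IXGBE_ETH_OVERHEAD
 * (Ethernet header plus CRC).
 */
static uint32_t
example_maxfrs_for_mtu(uint32_t old_maxfrs, uint16_t mtu)
{
	const uint32_t overhead = 14 + 4;	/* assumed L2 header + CRC */
	uint32_t frame_size = mtu + overhead;	/* e.g. 1500 -> 1518 */
	uint32_t maxfrs = old_maxfrs & 0x0000FFFF;	/* keep the low half */

	maxfrs |= frame_size << 16;		/* MAXFRS[31:16] = max frame */
	return maxfrs;
}
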
*/ 5243 intr->mask = 0; 5244 } 5245 5246 static void 5247 ixgbevf_intr_enable(struct rte_eth_dev *dev) 5248 { 5249 struct ixgbe_interrupt *intr = 5250 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5251 struct ixgbe_hw *hw = 5252 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5253 5254 PMD_INIT_FUNC_TRACE(); 5255 5256 /* VF enable interrupt autoclean */ 5257 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 5258 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 5259 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 5260 5261 IXGBE_WRITE_FLUSH(hw); 5262 5263 /* Save IXGBE_VTEIMS value to mask. */ 5264 intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; 5265 } 5266 5267 static int 5268 ixgbevf_dev_configure(struct rte_eth_dev *dev) 5269 { 5270 struct rte_eth_conf *conf = &dev->data->dev_conf; 5271 struct ixgbe_adapter *adapter = dev->data->dev_private; 5272 5273 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 5274 dev->data->port_id); 5275 5276 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 5277 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 5278 5279 /* 5280 * VF has no ability to enable/disable HW CRC 5281 * Keep the persistent behavior the same as Host PF 5282 */ 5283 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 5284 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { 5285 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 5286 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; 5287 } 5288 #else 5289 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { 5290 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 5291 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; 5292 } 5293 #endif 5294 5295 /* 5296 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 5297 * allocation or vector Rx preconditions we will reset it. 5298 */ 5299 adapter->rx_bulk_alloc_allowed = true; 5300 adapter->rx_vec_allowed = true; 5301 5302 return 0; 5303 } 5304 5305 static int 5306 ixgbevf_dev_start(struct rte_eth_dev *dev) 5307 { 5308 struct ixgbe_hw *hw = 5309 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5310 uint32_t intr_vector = 0; 5311 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5312 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5313 5314 int err, mask = 0; 5315 5316 PMD_INIT_FUNC_TRACE(); 5317 5318 /* Stop the link setup handler before resetting the HW. */ 5319 ixgbe_dev_wait_setup_link_complete(dev, 0); 5320 5321 err = hw->mac.ops.reset_hw(hw); 5322 5323 /** 5324 * In this case, reuses the MAC address assigned by VF 5325 * initialization. 5326 */ 5327 if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) { 5328 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); 5329 return err; 5330 } 5331 5332 hw->mac.get_link_status = true; 5333 5334 /* negotiate mailbox API version to use with the PF. 
 */
	ixgbevf_negotiate_api(hw);

	ixgbevf_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ixgbevf_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
		ixgbe_dev_clear_queues(dev);
		return err;
	}

	/* Set vfta */
	ixgbevf_set_vfta_all(dev, 1);

	/* Set HW strip */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	err = ixgbevf_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
		ixgbe_dev_clear_queues(dev);
		return err;
	}

	ixgbevf_dev_rxtx_start(dev);

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		/* According to the datasheet, only vectors 0/1/2 can be used;
		 * for now only one vector is used for the Rx queues.
		 */
		intr_vector = 1;
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			ixgbe_dev_clear_queues(dev);
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			ixgbe_dev_clear_queues(dev);
			return -ENOMEM;
		}
	}
	ixgbevf_configure_msix(dev);

	/* When a VF port is bound to VFIO-PCI, only the miscellaneous
	 * interrupt is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
	 * If the VFIO interrupt mapping set up in eth_ixgbevf_dev_init()
	 * is not cleared, the following rte_intr_enable() will fail when it
	 * tries to map Rx queue interrupts to other VFIO vectors.
	 * So clear the uio/vfio intr/eventfd first to avoid failure.
	 */
	rte_intr_disable(intr_handle);

	rte_intr_enable(intr_handle);

	/* Re-enable interrupt for VF */
	ixgbevf_intr_enable(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
5405 */ 5406 ixgbevf_dev_link_update(dev, 0); 5407 5408 hw->adapter_stopped = false; 5409 5410 return 0; 5411 } 5412 5413 static int 5414 ixgbevf_dev_stop(struct rte_eth_dev *dev) 5415 { 5416 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5417 struct ixgbe_adapter *adapter = dev->data->dev_private; 5418 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5419 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5420 5421 if (hw->adapter_stopped) 5422 return 0; 5423 5424 PMD_INIT_FUNC_TRACE(); 5425 5426 ixgbe_dev_wait_setup_link_complete(dev, 0); 5427 5428 ixgbevf_intr_disable(dev); 5429 5430 dev->data->dev_started = 0; 5431 hw->adapter_stopped = 1; 5432 ixgbe_stop_adapter(hw); 5433 5434 /* 5435 * Clear what we set, but we still keep shadow_vfta to 5436 * restore after device starts 5437 */ 5438 ixgbevf_set_vfta_all(dev, 0); 5439 5440 /* Clear stored conf */ 5441 dev->data->scattered_rx = 0; 5442 5443 ixgbe_dev_clear_queues(dev); 5444 5445 /* Clean datapath event and queue/vec mapping */ 5446 rte_intr_efd_disable(intr_handle); 5447 if (intr_handle->intr_vec != NULL) { 5448 rte_free(intr_handle->intr_vec); 5449 intr_handle->intr_vec = NULL; 5450 } 5451 5452 adapter->rss_reta_updated = 0; 5453 5454 return 0; 5455 } 5456 5457 static int 5458 ixgbevf_dev_close(struct rte_eth_dev *dev) 5459 { 5460 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5461 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5462 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5463 int ret; 5464 5465 PMD_INIT_FUNC_TRACE(); 5466 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5467 return 0; 5468 5469 ixgbe_reset_hw(hw); 5470 5471 ret = ixgbevf_dev_stop(dev); 5472 5473 ixgbe_dev_free_queues(dev); 5474 5475 /** 5476 * Remove the VF MAC address ro ensure 5477 * that the VF traffic goes to the PF 5478 * after stop, close and detach of the VF 5479 **/ 5480 ixgbevf_remove_mac_addr(dev, 0); 5481 5482 rte_intr_disable(intr_handle); 5483 rte_intr_callback_unregister(intr_handle, 5484 ixgbevf_dev_interrupt_handler, dev); 5485 5486 return ret; 5487 } 5488 5489 /* 5490 * Reset VF device 5491 */ 5492 static int 5493 ixgbevf_dev_reset(struct rte_eth_dev *dev) 5494 { 5495 int ret; 5496 5497 ret = eth_ixgbevf_dev_uninit(dev); 5498 if (ret) 5499 return ret; 5500 5501 ret = eth_ixgbevf_dev_init(dev); 5502 5503 return ret; 5504 } 5505 5506 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 5507 { 5508 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5509 struct ixgbe_vfta *shadow_vfta = 5510 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5511 int i = 0, j = 0, vfta = 0, mask = 1; 5512 5513 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 5514 vfta = shadow_vfta->vfta[i]; 5515 if (vfta) { 5516 mask = 1; 5517 for (j = 0; j < 32; j++) { 5518 if (vfta & mask) 5519 ixgbe_set_vfta(hw, (i<<5)+j, 0, 5520 on, false); 5521 mask <<= 1; 5522 } 5523 } 5524 } 5525 5526 } 5527 5528 static int 5529 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 5530 { 5531 struct ixgbe_hw *hw = 5532 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5533 struct ixgbe_vfta *shadow_vfta = 5534 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5535 uint32_t vid_idx = 0; 5536 uint32_t vid_bit = 0; 5537 int ret = 0; 5538 5539 PMD_INIT_FUNC_TRACE(); 5540 5541 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 5542 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 5543 if (ret) { 5544 PMD_INIT_LOG(ERR, "Unable to set VF 
vlan"); 5545 return ret; 5546 } 5547 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 5548 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 5549 5550 /* Save what we set and retore it after device reset */ 5551 if (on) 5552 shadow_vfta->vfta[vid_idx] |= vid_bit; 5553 else 5554 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 5555 5556 return 0; 5557 } 5558 5559 static void 5560 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 5561 { 5562 struct ixgbe_hw *hw = 5563 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5564 uint32_t ctrl; 5565 5566 PMD_INIT_FUNC_TRACE(); 5567 5568 if (queue >= hw->mac.max_rx_queues) 5569 return; 5570 5571 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 5572 if (on) 5573 ctrl |= IXGBE_RXDCTL_VME; 5574 else 5575 ctrl &= ~IXGBE_RXDCTL_VME; 5576 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 5577 5578 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 5579 } 5580 5581 static int 5582 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 5583 { 5584 struct ixgbe_rx_queue *rxq; 5585 uint16_t i; 5586 int on = 0; 5587 5588 /* VF function only support hw strip feature, others are not support */ 5589 if (mask & ETH_VLAN_STRIP_MASK) { 5590 for (i = 0; i < dev->data->nb_rx_queues; i++) { 5591 rxq = dev->data->rx_queues[i]; 5592 on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 5593 ixgbevf_vlan_strip_queue_set(dev, i, on); 5594 } 5595 } 5596 5597 return 0; 5598 } 5599 5600 static int 5601 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 5602 { 5603 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 5604 5605 ixgbevf_vlan_offload_config(dev, mask); 5606 5607 return 0; 5608 } 5609 5610 int 5611 ixgbe_vt_check(struct ixgbe_hw *hw) 5612 { 5613 uint32_t reg_val; 5614 5615 /* if Virtualization Technology is enabled */ 5616 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 5617 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 5618 PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); 5619 return -1; 5620 } 5621 5622 return 0; 5623 } 5624 5625 static uint32_t 5626 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) 5627 { 5628 uint32_t vector = 0; 5629 5630 switch (hw->mac.mc_filter_type) { 5631 case 0: /* use bits [47:36] of the address */ 5632 vector = ((uc_addr->addr_bytes[4] >> 4) | 5633 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 5634 break; 5635 case 1: /* use bits [46:35] of the address */ 5636 vector = ((uc_addr->addr_bytes[4] >> 3) | 5637 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 5638 break; 5639 case 2: /* use bits [45:34] of the address */ 5640 vector = ((uc_addr->addr_bytes[4] >> 2) | 5641 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 5642 break; 5643 case 3: /* use bits [43:32] of the address */ 5644 vector = ((uc_addr->addr_bytes[4]) | 5645 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 5646 break; 5647 default: /* Invalid mc_filter_type */ 5648 break; 5649 } 5650 5651 /* vector can only be 12-bits or boundary will be exceeded */ 5652 vector &= 0xFFF; 5653 return vector; 5654 } 5655 5656 static int 5657 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, 5658 struct rte_ether_addr *mac_addr, uint8_t on) 5659 { 5660 uint32_t vector; 5661 uint32_t uta_idx; 5662 uint32_t reg_val; 5663 uint32_t uta_shift; 5664 uint32_t rc; 5665 const uint32_t ixgbe_uta_idx_mask = 0x7F; 5666 const uint32_t ixgbe_uta_bit_shift = 5; 5667 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 5668 const uint32_t bit1 = 0x1; 5669 5670 struct ixgbe_hw *hw = 5671 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5672 struct ixgbe_uta_info 
*uta_info = 5673 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5674 5675 /* The UTA table only exists on 82599 hardware and newer */ 5676 if (hw->mac.type < ixgbe_mac_82599EB) 5677 return -ENOTSUP; 5678 5679 vector = ixgbe_uta_vector(hw, mac_addr); 5680 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 5681 uta_shift = vector & ixgbe_uta_bit_mask; 5682 5683 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 5684 if (rc == on) 5685 return 0; 5686 5687 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 5688 if (on) { 5689 uta_info->uta_in_use++; 5690 reg_val |= (bit1 << uta_shift); 5691 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 5692 } else { 5693 uta_info->uta_in_use--; 5694 reg_val &= ~(bit1 << uta_shift); 5695 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 5696 } 5697 5698 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 5699 5700 if (uta_info->uta_in_use > 0) 5701 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 5702 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 5703 else 5704 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 5705 5706 return 0; 5707 } 5708 5709 static int 5710 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 5711 { 5712 int i; 5713 struct ixgbe_hw *hw = 5714 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5715 struct ixgbe_uta_info *uta_info = 5716 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5717 5718 /* The UTA table only exists on 82599 hardware and newer */ 5719 if (hw->mac.type < ixgbe_mac_82599EB) 5720 return -ENOTSUP; 5721 5722 if (on) { 5723 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5724 uta_info->uta_shadow[i] = ~0; 5725 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 5726 } 5727 } else { 5728 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5729 uta_info->uta_shadow[i] = 0; 5730 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 5731 } 5732 } 5733 return 0; 5734 5735 } 5736 5737 uint32_t 5738 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 5739 { 5740 uint32_t new_val = orig_val; 5741 5742 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) 5743 new_val |= IXGBE_VMOLR_AUPE; 5744 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) 5745 new_val |= IXGBE_VMOLR_ROMPE; 5746 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) 5747 new_val |= IXGBE_VMOLR_ROPE; 5748 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) 5749 new_val |= IXGBE_VMOLR_BAM; 5750 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) 5751 new_val |= IXGBE_VMOLR_MPE; 5752 5753 return new_val; 5754 } 5755 5756 #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ 5757 #define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ 5758 #define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ 5759 #define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. 
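
/*
 * Illustrative sketch (not part of the driver): how ixgbe_uc_hash_table_set()
 * above turns a unicast MAC address into a UTA table position, assuming
 * mc_filter_type 0 (address bits [47:36]) as in case 0 of ixgbe_uta_vector().
 * The resulting 12-bit vector selects one bit out of 128 32-bit UTA registers.
 */
static void
example_uta_position(const uint8_t mac[6], uint32_t *uta_idx, uint32_t *uta_bit)
{
	uint32_t vector;

	/* filter type 0: combine the top nibble of byte 4 with byte 5 */
	vector = (mac[4] >> 4) | ((uint16_t)mac[5] << 4);
	vector &= 0xFFF;			/* vector is only 12 bits wide */

	*uta_idx = (vector >> 5) & 0x7F;	/* UTA register index, 0..127 */
	*uta_bit = vector & 0x1F;		/* bit within that register */
}
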
*/ 5760 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ 5761 ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ 5762 ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) 5763 5764 static int 5765 ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 5766 struct rte_eth_mirror_conf *mirror_conf, 5767 uint8_t rule_id, uint8_t on) 5768 { 5769 uint32_t mr_ctl, vlvf; 5770 uint32_t mp_lsb = 0; 5771 uint32_t mv_msb = 0; 5772 uint32_t mv_lsb = 0; 5773 uint32_t mp_msb = 0; 5774 uint8_t i = 0; 5775 int reg_index = 0; 5776 uint64_t vlan_mask = 0; 5777 5778 const uint8_t pool_mask_offset = 32; 5779 const uint8_t vlan_mask_offset = 32; 5780 const uint8_t dst_pool_offset = 8; 5781 const uint8_t rule_mr_offset = 4; 5782 const uint8_t mirror_rule_mask = 0x0F; 5783 5784 struct ixgbe_mirror_info *mr_info = 5785 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 5786 struct ixgbe_hw *hw = 5787 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5788 uint8_t mirror_type = 0; 5789 5790 if (ixgbe_vt_check(hw) < 0) 5791 return -ENOTSUP; 5792 5793 if (rule_id >= IXGBE_MAX_MIRROR_RULES) 5794 return -EINVAL; 5795 5796 if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { 5797 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", 5798 mirror_conf->rule_type); 5799 return -EINVAL; 5800 } 5801 5802 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { 5803 mirror_type |= IXGBE_MRCTL_VLME; 5804 /* Check if vlan id is valid and find conresponding VLAN ID 5805 * index in VLVF 5806 */ 5807 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { 5808 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { 5809 /* search vlan id related pool vlan filter 5810 * index 5811 */ 5812 reg_index = ixgbe_find_vlvf_slot( 5813 hw, 5814 mirror_conf->vlan.vlan_id[i], 5815 false); 5816 if (reg_index < 0) 5817 return -EINVAL; 5818 vlvf = IXGBE_READ_REG(hw, 5819 IXGBE_VLVF(reg_index)); 5820 if ((vlvf & IXGBE_VLVF_VIEN) && 5821 ((vlvf & IXGBE_VLVF_VLANID_MASK) == 5822 mirror_conf->vlan.vlan_id[i])) 5823 vlan_mask |= (1ULL << reg_index); 5824 else 5825 return -EINVAL; 5826 } 5827 } 5828 5829 if (on) { 5830 mv_lsb = vlan_mask & 0xFFFFFFFF; 5831 mv_msb = vlan_mask >> vlan_mask_offset; 5832 5833 mr_info->mr_conf[rule_id].vlan.vlan_mask = 5834 mirror_conf->vlan.vlan_mask; 5835 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { 5836 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) 5837 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 5838 mirror_conf->vlan.vlan_id[i]; 5839 } 5840 } else { 5841 mv_lsb = 0; 5842 mv_msb = 0; 5843 mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; 5844 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) 5845 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; 5846 } 5847 } 5848 5849 /** 5850 * if enable pool mirror, write related pool mask register,if disable 5851 * pool mirror, clear PFMRVM register 5852 */ 5853 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { 5854 mirror_type |= IXGBE_MRCTL_VPME; 5855 if (on) { 5856 mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; 5857 mp_msb = mirror_conf->pool_mask >> pool_mask_offset; 5858 mr_info->mr_conf[rule_id].pool_mask = 5859 mirror_conf->pool_mask; 5860 5861 } else { 5862 mp_lsb = 0; 5863 mp_msb = 0; 5864 mr_info->mr_conf[rule_id].pool_mask = 0; 5865 } 5866 } 5867 if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) 5868 mirror_type |= IXGBE_MRCTL_UPME; 5869 if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) 5870 mirror_type |= IXGBE_MRCTL_DPME; 5871 5872 /* read mirror control register and recalculate it */ 5873 mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); 5874 5875 if (on) { 5876 
mr_ctl |= mirror_type; 5877 mr_ctl &= mirror_rule_mask; 5878 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; 5879 } else { 5880 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); 5881 } 5882 5883 mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; 5884 mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; 5885 5886 /* write mirrror control register */ 5887 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 5888 5889 /* write pool mirrror control register */ 5890 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { 5891 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); 5892 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), 5893 mp_msb); 5894 } 5895 /* write VLAN mirrror control register */ 5896 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { 5897 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); 5898 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), 5899 mv_msb); 5900 } 5901 5902 return 0; 5903 } 5904 5905 static int 5906 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) 5907 { 5908 int mr_ctl = 0; 5909 uint32_t lsb_val = 0; 5910 uint32_t msb_val = 0; 5911 const uint8_t rule_mr_offset = 4; 5912 5913 struct ixgbe_hw *hw = 5914 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5915 struct ixgbe_mirror_info *mr_info = 5916 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 5917 5918 if (ixgbe_vt_check(hw) < 0) 5919 return -ENOTSUP; 5920 5921 if (rule_id >= IXGBE_MAX_MIRROR_RULES) 5922 return -EINVAL; 5923 5924 memset(&mr_info->mr_conf[rule_id], 0, 5925 sizeof(struct rte_eth_mirror_conf)); 5926 5927 /* clear PFVMCTL register */ 5928 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 5929 5930 /* clear pool mask register */ 5931 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); 5932 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); 5933 5934 /* clear vlan mask register */ 5935 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); 5936 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); 5937 5938 return 0; 5939 } 5940 5941 static int 5942 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5943 { 5944 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5945 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5946 struct ixgbe_interrupt *intr = 5947 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5948 struct ixgbe_hw *hw = 5949 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5950 uint32_t vec = IXGBE_MISC_VEC_ID; 5951 5952 if (rte_intr_allow_others(intr_handle)) 5953 vec = IXGBE_RX_VEC_START; 5954 intr->mask |= (1 << vec); 5955 RTE_SET_USED(queue_id); 5956 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5957 5958 rte_intr_ack(intr_handle); 5959 5960 return 0; 5961 } 5962 5963 static int 5964 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5965 { 5966 struct ixgbe_interrupt *intr = 5967 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5968 struct ixgbe_hw *hw = 5969 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5970 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5971 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5972 uint32_t vec = IXGBE_MISC_VEC_ID; 5973 5974 if (rte_intr_allow_others(intr_handle)) 5975 vec = IXGBE_RX_VEC_START; 5976 intr->mask &= ~(1 << vec); 5977 RTE_SET_USED(queue_id); 5978 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5979 5980 return 0; 5981 } 5982 5983 static int 5984 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5985 { 5986 struct 
rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5987 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5988 uint32_t mask; 5989 struct ixgbe_hw *hw = 5990 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5991 struct ixgbe_interrupt *intr = 5992 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5993 5994 if (queue_id < 16) { 5995 ixgbe_disable_intr(hw); 5996 intr->mask |= (1 << queue_id); 5997 ixgbe_enable_intr(dev); 5998 } else if (queue_id < 32) { 5999 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 6000 mask &= (1 << queue_id); 6001 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 6002 } else if (queue_id < 64) { 6003 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 6004 mask &= (1 << (queue_id - 32)); 6005 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 6006 } 6007 rte_intr_ack(intr_handle); 6008 6009 return 0; 6010 } 6011 6012 static int 6013 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 6014 { 6015 uint32_t mask; 6016 struct ixgbe_hw *hw = 6017 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6018 struct ixgbe_interrupt *intr = 6019 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 6020 6021 if (queue_id < 16) { 6022 ixgbe_disable_intr(hw); 6023 intr->mask &= ~(1 << queue_id); 6024 ixgbe_enable_intr(dev); 6025 } else if (queue_id < 32) { 6026 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 6027 mask &= ~(1 << queue_id); 6028 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 6029 } else if (queue_id < 64) { 6030 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 6031 mask &= ~(1 << (queue_id - 32)); 6032 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 6033 } 6034 6035 return 0; 6036 } 6037 6038 static void 6039 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 6040 uint8_t queue, uint8_t msix_vector) 6041 { 6042 uint32_t tmp, idx; 6043 6044 if (direction == -1) { 6045 /* other causes */ 6046 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 6047 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 6048 tmp &= ~0xFF; 6049 tmp |= msix_vector; 6050 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 6051 } else { 6052 /* rx or tx cause */ 6053 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 6054 idx = ((16 * (queue & 1)) + (8 * direction)); 6055 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 6056 tmp &= ~(0xFF << idx); 6057 tmp |= (msix_vector << idx); 6058 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 6059 } 6060 } 6061 6062 /** 6063 * set the IVAR registers, mapping interrupt causes to vectors 6064 * @param hw 6065 * pointer to ixgbe_hw struct 6066 * @direction 6067 * 0 for Rx, 1 for Tx, -1 for other causes 6068 * @queue 6069 * queue to map the corresponding interrupt to 6070 * @msix_vector 6071 * the vector to map to the corresponding queue 6072 */ 6073 static void 6074 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 6075 uint8_t queue, uint8_t msix_vector) 6076 { 6077 uint32_t tmp, idx; 6078 6079 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 6080 if (hw->mac.type == ixgbe_mac_82598EB) { 6081 if (direction == -1) 6082 direction = 0; 6083 idx = (((direction * 64) + queue) >> 2) & 0x1F; 6084 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 6085 tmp &= ~(0xFF << (8 * (queue & 0x3))); 6086 tmp |= (msix_vector << (8 * (queue & 0x3))); 6087 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 6088 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 6089 (hw->mac.type == ixgbe_mac_X540) || 6090 (hw->mac.type == ixgbe_mac_X550) || 6091 (hw->mac.type == ixgbe_mac_X550EM_x)) { 6092 if (direction == -1) { 6093 /* other causes */ 6094 idx = ((queue & 1) * 8); 6095 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 6096 tmp 
&= ~(0xFF << idx); 6097 tmp |= (msix_vector << idx); 6098 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 6099 } else { 6100 /* rx or tx causes */ 6101 idx = ((16 * (queue & 1)) + (8 * direction)); 6102 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 6103 tmp &= ~(0xFF << idx); 6104 tmp |= (msix_vector << idx); 6105 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 6106 } 6107 } 6108 } 6109 6110 static void 6111 ixgbevf_configure_msix(struct rte_eth_dev *dev) 6112 { 6113 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6114 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 6115 struct ixgbe_hw *hw = 6116 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6117 uint32_t q_idx; 6118 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 6119 uint32_t base = IXGBE_MISC_VEC_ID; 6120 6121 /* Configure VF other cause ivar */ 6122 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 6123 6124 /* won't configure msix register if no mapping is done 6125 * between intr vector and event fd. 6126 */ 6127 if (!rte_intr_dp_is_en(intr_handle)) 6128 return; 6129 6130 if (rte_intr_allow_others(intr_handle)) { 6131 base = IXGBE_RX_VEC_START; 6132 vector_idx = IXGBE_RX_VEC_START; 6133 } 6134 6135 /* Configure all RX queues of VF */ 6136 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 6137 /* Force all queue use vector 0, 6138 * as IXGBE_VF_MAXMSIVECOTR = 1 6139 */ 6140 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 6141 intr_handle->intr_vec[q_idx] = vector_idx; 6142 if (vector_idx < base + intr_handle->nb_efd - 1) 6143 vector_idx++; 6144 } 6145 6146 /* As RX queue setting above show, all queues use the vector 0. 6147 * Set only the ITR value of IXGBE_MISC_VEC_ID. 6148 */ 6149 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), 6150 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6151 | IXGBE_EITR_CNT_WDIS); 6152 } 6153 6154 /** 6155 * Sets up the hardware to properly generate MSI-X interrupts 6156 * @hw 6157 * board private structure 6158 */ 6159 static void 6160 ixgbe_configure_msix(struct rte_eth_dev *dev) 6161 { 6162 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6163 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 6164 struct ixgbe_hw *hw = 6165 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6166 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 6167 uint32_t vec = IXGBE_MISC_VEC_ID; 6168 uint32_t mask; 6169 uint32_t gpie; 6170 6171 /* won't configure msix register if no mapping is done 6172 * between intr vector and event fd 6173 * but if misx has been enabled already, need to configure 6174 * auto clean, auto mask and throttling. 6175 */ 6176 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 6177 if (!rte_intr_dp_is_en(intr_handle) && 6178 !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) 6179 return; 6180 6181 if (rte_intr_allow_others(intr_handle)) 6182 vec = base = IXGBE_RX_VEC_START; 6183 6184 /* setup GPIE for MSI-x mode */ 6185 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 6186 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 6187 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 6188 /* auto clearing and auto setting corresponding bits in EIMS 6189 * when MSI-X interrupt is triggered 6190 */ 6191 if (hw->mac.type == ixgbe_mac_82598EB) { 6192 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 6193 } else { 6194 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 6195 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 6196 } 6197 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 6198 6199 /* Populate the IVAR table and set the ITR values to the 6200 * corresponding register. 
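
/*
 * Illustrative sketch (not part of the driver): the IVAR addressing used by
 * ixgbe_set_ivar_map() above on 82599-class MACs.  Each 32-bit IVAR register
 * carries four 8-bit cause entries: Rx and Tx for an even queue in the low
 * half, Rx and Tx for the following odd queue in the high half.
 */
static void
example_ivar_position(uint8_t queue, uint8_t direction /* 0 = Rx, 1 = Tx */,
		      uint32_t *ivar_index, uint32_t *bit_offset)
{
	*ivar_index = queue >> 1;			    /* IVAR(queue / 2) */
	*bit_offset = (16 * (queue & 1)) + (8 * direction); /* byte lane * 8 */
}

/* e.g. Rx queue 5 -> IVAR(2), bits [23:16]; Tx queue 5 -> IVAR(2), bits [31:24] */
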
6201 */ 6202 if (rte_intr_dp_is_en(intr_handle)) { 6203 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 6204 queue_id++) { 6205 /* by default, 1:1 mapping */ 6206 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 6207 intr_handle->intr_vec[queue_id] = vec; 6208 if (vec < base + intr_handle->nb_efd - 1) 6209 vec++; 6210 } 6211 6212 switch (hw->mac.type) { 6213 case ixgbe_mac_82598EB: 6214 ixgbe_set_ivar_map(hw, -1, 6215 IXGBE_IVAR_OTHER_CAUSES_INDEX, 6216 IXGBE_MISC_VEC_ID); 6217 break; 6218 case ixgbe_mac_82599EB: 6219 case ixgbe_mac_X540: 6220 case ixgbe_mac_X550: 6221 case ixgbe_mac_X550EM_x: 6222 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 6223 break; 6224 default: 6225 break; 6226 } 6227 } 6228 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 6229 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6230 | IXGBE_EITR_CNT_WDIS); 6231 6232 /* set up to autoclear timer, and the vectors */ 6233 mask = IXGBE_EIMS_ENABLE_MASK; 6234 mask &= ~(IXGBE_EIMS_OTHER | 6235 IXGBE_EIMS_MAILBOX | 6236 IXGBE_EIMS_LSC); 6237 6238 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 6239 } 6240 6241 int 6242 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 6243 uint16_t queue_idx, uint16_t tx_rate) 6244 { 6245 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6246 struct rte_eth_rxmode *rxmode; 6247 uint32_t rf_dec, rf_int; 6248 uint32_t bcnrc_val; 6249 uint16_t link_speed = dev->data->dev_link.link_speed; 6250 6251 if (queue_idx >= hw->mac.max_tx_queues) 6252 return -EINVAL; 6253 6254 if (tx_rate != 0) { 6255 /* Calculate the rate factor values to set */ 6256 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 6257 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 6258 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 6259 6260 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 6261 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 6262 IXGBE_RTTBCNRC_RF_INT_MASK_M); 6263 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 6264 } else { 6265 bcnrc_val = 0; 6266 } 6267 6268 rxmode = &dev->data->dev_conf.rxmode; 6269 /* 6270 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 6271 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 6272 * set as 0x4. 6273 */ 6274 if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && 6275 (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) 6276 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 6277 IXGBE_MMW_SIZE_JUMBO_FRAME); 6278 else 6279 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 6280 IXGBE_MMW_SIZE_DEFAULT); 6281 6282 /* Set RTTBCNRC of queue X */ 6283 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 6284 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 6285 IXGBE_WRITE_FLUSH(hw); 6286 6287 return 0; 6288 } 6289 6290 static int 6291 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 6292 __rte_unused uint32_t index, 6293 __rte_unused uint32_t pool) 6294 { 6295 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6296 int diag; 6297 6298 /* 6299 * On a 82599 VF, adding again the same MAC addr is not an idempotent 6300 * operation. Trap this case to avoid exhausting the [very limited] 6301 * set of PF resources used to store VF MAC addresses. 
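
/*
 * Illustrative sketch (not part of the driver): the rate-factor arithmetic in
 * ixgbe_set_queue_rate_limit() above.  The hardware takes link_speed/tx_rate
 * as a fixed-point ratio split into an integer and a fractional part.  The
 * shift value below (14) is an assumption standing in for
 * IXGBE_RTTBCNRC_RF_INT_SHIFT; only the arithmetic shape matters here.
 */
static void
example_rate_factor(uint32_t link_speed_mbps, uint32_t tx_rate_mbps,
		    uint32_t *rf_int, uint32_t *rf_dec)
{
	const uint32_t frac_shift = 14;	/* assumed RF_INT field position */

	*rf_int = link_speed_mbps / tx_rate_mbps;
	*rf_dec = ((link_speed_mbps % tx_rate_mbps) << frac_shift) /
		  tx_rate_mbps;
}

/*
 * e.g. limiting a queue to 3000 Mbps on a 10000 Mbps link gives rf_int = 3 and
 * rf_dec = (1000 << 14) / 3000 = 5461, i.e. a ratio of roughly 3.333.
 */
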
6302 */ 6303 if (memcmp(hw->mac.perm_addr, mac_addr, 6304 sizeof(struct rte_ether_addr)) == 0) 6305 return -1; 6306 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6307 if (diag != 0) 6308 PMD_DRV_LOG(ERR, "Unable to add MAC address " 6309 RTE_ETHER_ADDR_PRT_FMT " - diag=%d", 6310 RTE_ETHER_ADDR_BYTES(mac_addr), diag); 6311 return diag; 6312 } 6313 6314 static void 6315 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 6316 { 6317 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6318 struct rte_ether_addr *perm_addr = 6319 (struct rte_ether_addr *)hw->mac.perm_addr; 6320 struct rte_ether_addr *mac_addr; 6321 uint32_t i; 6322 int diag; 6323 6324 /* 6325 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 6326 * not support the deletion of a given MAC address. 6327 * Instead, it imposes to delete all MAC addresses, then to add again 6328 * all MAC addresses with the exception of the one to be deleted. 6329 */ 6330 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 6331 6332 /* 6333 * Add again all MAC addresses, with the exception of the deleted one 6334 * and of the permanent MAC address. 6335 */ 6336 for (i = 0, mac_addr = dev->data->mac_addrs; 6337 i < hw->mac.num_rar_entries; i++, mac_addr++) { 6338 /* Skip the deleted MAC address */ 6339 if (i == index) 6340 continue; 6341 /* Skip NULL MAC addresses */ 6342 if (rte_is_zero_ether_addr(mac_addr)) 6343 continue; 6344 /* Skip the permanent MAC address */ 6345 if (memcmp(perm_addr, mac_addr, 6346 sizeof(struct rte_ether_addr)) == 0) 6347 continue; 6348 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6349 if (diag != 0) 6350 PMD_DRV_LOG(ERR, 6351 "Adding again MAC address " 6352 RTE_ETHER_ADDR_PRT_FMT " failed " 6353 "diag=%d", RTE_ETHER_ADDR_BYTES(mac_addr), 6354 diag); 6355 } 6356 } 6357 6358 static int 6359 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 6360 struct rte_ether_addr *addr) 6361 { 6362 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6363 6364 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 6365 6366 return 0; 6367 } 6368 6369 int 6370 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 6371 struct rte_eth_syn_filter *filter, 6372 bool add) 6373 { 6374 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6375 struct ixgbe_filter_info *filter_info = 6376 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6377 uint32_t syn_info; 6378 uint32_t synqf; 6379 6380 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6381 return -EINVAL; 6382 6383 syn_info = filter_info->syn_info; 6384 6385 if (add) { 6386 if (syn_info & IXGBE_SYN_FILTER_ENABLE) 6387 return -EINVAL; 6388 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 6389 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 6390 6391 if (filter->hig_pri) 6392 synqf |= IXGBE_SYN_FILTER_SYNQFP; 6393 else 6394 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 6395 } else { 6396 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6397 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) 6398 return -ENOENT; 6399 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 6400 } 6401 6402 filter_info->syn_info = synqf; 6403 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 6404 IXGBE_WRITE_FLUSH(hw); 6405 return 0; 6406 } 6407 6408 6409 static inline enum ixgbe_5tuple_protocol 6410 convert_protocol_type(uint8_t protocol_value) 6411 { 6412 if (protocol_value == IPPROTO_TCP) 6413 return IXGBE_FILTER_PROTOCOL_TCP; 6414 else if (protocol_value == IPPROTO_UDP) 6415 return IXGBE_FILTER_PROTOCOL_UDP; 6416 else if 
(protocol_value == IPPROTO_SCTP) 6417 return IXGBE_FILTER_PROTOCOL_SCTP; 6418 else 6419 return IXGBE_FILTER_PROTOCOL_NONE; 6420 } 6421 6422 /* inject a 5-tuple filter to HW */ 6423 static inline void 6424 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, 6425 struct ixgbe_5tuple_filter *filter) 6426 { 6427 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6428 int i; 6429 uint32_t ftqf, sdpqf; 6430 uint32_t l34timir = 0; 6431 uint8_t mask = 0xff; 6432 6433 i = filter->index; 6434 6435 sdpqf = (uint32_t)(filter->filter_info.dst_port << 6436 IXGBE_SDPQF_DSTPORT_SHIFT); 6437 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 6438 6439 ftqf = (uint32_t)(filter->filter_info.proto & 6440 IXGBE_FTQF_PROTOCOL_MASK); 6441 ftqf |= (uint32_t)((filter->filter_info.priority & 6442 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 6443 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ 6444 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 6445 if (filter->filter_info.dst_ip_mask == 0) 6446 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 6447 if (filter->filter_info.src_port_mask == 0) 6448 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 6449 if (filter->filter_info.dst_port_mask == 0) 6450 mask &= IXGBE_FTQF_DEST_PORT_MASK; 6451 if (filter->filter_info.proto_mask == 0) 6452 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 6453 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 6454 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 6455 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 6456 6457 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 6458 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 6459 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 6460 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 6461 6462 l34timir |= IXGBE_L34T_IMIR_RESERVE; 6463 l34timir |= (uint32_t)(filter->queue << 6464 IXGBE_L34T_IMIR_QUEUE_SHIFT); 6465 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 6466 } 6467 6468 /* 6469 * add a 5tuple filter 6470 * 6471 * @param 6472 * dev: Pointer to struct rte_eth_dev. 6473 * index: the index the filter allocates. 6474 * filter: ponter to the filter that will be added. 6475 * rx_queue: the queue id the filter assigned to. 6476 * 6477 * @return 6478 * - On success, zero. 6479 * - On failure, a negative value. 6480 */ 6481 static int 6482 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 6483 struct ixgbe_5tuple_filter *filter) 6484 { 6485 struct ixgbe_filter_info *filter_info = 6486 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6487 int i, idx, shift; 6488 6489 /* 6490 * look for an unused 5tuple filter index, 6491 * and insert the filter to list. 6492 */ 6493 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 6494 idx = i / (sizeof(uint32_t) * NBBY); 6495 shift = i % (sizeof(uint32_t) * NBBY); 6496 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 6497 filter_info->fivetuple_mask[idx] |= 1 << shift; 6498 filter->index = i; 6499 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 6500 filter, 6501 entries); 6502 break; 6503 } 6504 } 6505 if (i >= IXGBE_MAX_FTQF_FILTERS) { 6506 PMD_DRV_LOG(ERR, "5tuple filters are full."); 6507 return -ENOSYS; 6508 } 6509 6510 ixgbe_inject_5tuple_filter(dev, filter); 6511 6512 return 0; 6513 } 6514 6515 /* 6516 * remove a 5tuple filter 6517 * 6518 * @param 6519 * dev: Pointer to struct rte_eth_dev. 6520 * filter: the pointer of the filter will be removed. 
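
/*
 * Illustrative sketch (not part of the driver): the free-slot search that
 * ixgbe_add_5tuple_filter() above performs over its fivetuple_mask bitmap.
 * 'bitmap' and 'nfilters' are parameters of the example, not driver names.
 */
static int
example_alloc_filter_slot(uint32_t *bitmap, int nfilters)
{
	int i, idx, shift;

	for (i = 0; i < nfilters; i++) {
		idx = i / 32;			/* which 32-bit word */
		shift = i % 32;			/* which bit in that word */
		if (!(bitmap[idx] & (1u << shift))) {
			bitmap[idx] |= 1u << shift;	/* claim the slot */
			return i;			/* filter index */
		}
	}
	return -1;	/* table full; the driver reports this as an error */
}
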
6521 */ 6522 static void 6523 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 6524 struct ixgbe_5tuple_filter *filter) 6525 { 6526 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6527 struct ixgbe_filter_info *filter_info = 6528 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6529 uint16_t index = filter->index; 6530 6531 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 6532 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 6533 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 6534 rte_free(filter); 6535 6536 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 6537 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 6538 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 6539 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 6540 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 6541 } 6542 6543 static int 6544 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 6545 { 6546 struct ixgbe_hw *hw; 6547 uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; 6548 struct rte_eth_dev_data *dev_data = dev->data; 6549 6550 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6551 6552 if (mtu < RTE_ETHER_MIN_MTU || 6553 max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) 6554 return -EINVAL; 6555 6556 /* If device is started, refuse mtu that requires the support of 6557 * scattered packets when this feature has not been enabled before. 6558 */ 6559 if (dev_data->dev_started && !dev_data->scattered_rx && 6560 (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > 6561 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 6562 PMD_INIT_LOG(ERR, "Stop port first."); 6563 return -EINVAL; 6564 } 6565 6566 /* 6567 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 6568 * request of the version 2.0 of the mailbox API. 6569 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 6570 * of the mailbox API. 
6571 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers 6572 * prior to 3.11.33 which contains the following change: 6573 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 6574 */ 6575 if (ixgbevf_rlpml_set_vf(hw, max_frame)) 6576 return -EINVAL; 6577 6578 /* update max frame size */ 6579 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; 6580 return 0; 6581 } 6582 6583 static inline struct ixgbe_5tuple_filter * 6584 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 6585 struct ixgbe_5tuple_filter_info *key) 6586 { 6587 struct ixgbe_5tuple_filter *it; 6588 6589 TAILQ_FOREACH(it, filter_list, entries) { 6590 if (memcmp(key, &it->filter_info, 6591 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 6592 return it; 6593 } 6594 } 6595 return NULL; 6596 } 6597 6598 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ 6599 static inline int 6600 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 6601 struct ixgbe_5tuple_filter_info *filter_info) 6602 { 6603 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 6604 filter->priority > IXGBE_5TUPLE_MAX_PRI || 6605 filter->priority < IXGBE_5TUPLE_MIN_PRI) 6606 return -EINVAL; 6607 6608 switch (filter->dst_ip_mask) { 6609 case UINT32_MAX: 6610 filter_info->dst_ip_mask = 0; 6611 filter_info->dst_ip = filter->dst_ip; 6612 break; 6613 case 0: 6614 filter_info->dst_ip_mask = 1; 6615 break; 6616 default: 6617 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 6618 return -EINVAL; 6619 } 6620 6621 switch (filter->src_ip_mask) { 6622 case UINT32_MAX: 6623 filter_info->src_ip_mask = 0; 6624 filter_info->src_ip = filter->src_ip; 6625 break; 6626 case 0: 6627 filter_info->src_ip_mask = 1; 6628 break; 6629 default: 6630 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 6631 return -EINVAL; 6632 } 6633 6634 switch (filter->dst_port_mask) { 6635 case UINT16_MAX: 6636 filter_info->dst_port_mask = 0; 6637 filter_info->dst_port = filter->dst_port; 6638 break; 6639 case 0: 6640 filter_info->dst_port_mask = 1; 6641 break; 6642 default: 6643 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 6644 return -EINVAL; 6645 } 6646 6647 switch (filter->src_port_mask) { 6648 case UINT16_MAX: 6649 filter_info->src_port_mask = 0; 6650 filter_info->src_port = filter->src_port; 6651 break; 6652 case 0: 6653 filter_info->src_port_mask = 1; 6654 break; 6655 default: 6656 PMD_DRV_LOG(ERR, "invalid src_port mask."); 6657 return -EINVAL; 6658 } 6659 6660 switch (filter->proto_mask) { 6661 case UINT8_MAX: 6662 filter_info->proto_mask = 0; 6663 filter_info->proto = 6664 convert_protocol_type(filter->proto); 6665 break; 6666 case 0: 6667 filter_info->proto_mask = 1; 6668 break; 6669 default: 6670 PMD_DRV_LOG(ERR, "invalid protocol mask."); 6671 return -EINVAL; 6672 } 6673 6674 filter_info->priority = (uint8_t)filter->priority; 6675 return 0; 6676 } 6677 6678 /* 6679 * add or delete a ntuple filter 6680 * 6681 * @param 6682 * dev: Pointer to struct rte_eth_dev. 6683 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6684 * add: if true, add filter, if false, remove filter 6685 * 6686 * @return 6687 * - On success, zero. 6688 * - On failure, a negative value. 
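
/*
 * Illustrative sketch (not part of the driver): the mask convention that
 * ntuple_filter_to_5tuple() above translates.  In the rte_eth_ntuple_filter
 * API a field mask of all-ones means "match exactly" and 0 means "ignore";
 * the hardware-facing ixgbe_5tuple_filter_info inverts this, where a mask
 * value of 0 means "compare" and 1 means "don't care".
 */
static int
example_translate_ip_mask(uint32_t api_mask, uint32_t api_value,
			  uint8_t *hw_mask, uint32_t *hw_value)
{
	if (api_mask == 0xFFFFFFFFu) {
		*hw_mask = 0;		/* 0 means compare this field */
		*hw_value = api_value;
		return 0;
	}
	if (api_mask == 0) {
		*hw_mask = 1;		/* 1 means ignore this field */
		return 0;
	}
	return -1;	/* partial masks are rejected, as in the driver */
}
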
6689 */ 6690 int 6691 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 6692 struct rte_eth_ntuple_filter *ntuple_filter, 6693 bool add) 6694 { 6695 struct ixgbe_filter_info *filter_info = 6696 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6697 struct ixgbe_5tuple_filter_info filter_5tuple; 6698 struct ixgbe_5tuple_filter *filter; 6699 int ret; 6700 6701 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 6702 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 6703 return -EINVAL; 6704 } 6705 6706 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 6707 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 6708 if (ret < 0) 6709 return ret; 6710 6711 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 6712 &filter_5tuple); 6713 if (filter != NULL && add) { 6714 PMD_DRV_LOG(ERR, "filter exists."); 6715 return -EEXIST; 6716 } 6717 if (filter == NULL && !add) { 6718 PMD_DRV_LOG(ERR, "filter doesn't exist."); 6719 return -ENOENT; 6720 } 6721 6722 if (add) { 6723 filter = rte_zmalloc("ixgbe_5tuple_filter", 6724 sizeof(struct ixgbe_5tuple_filter), 0); 6725 if (filter == NULL) 6726 return -ENOMEM; 6727 rte_memcpy(&filter->filter_info, 6728 &filter_5tuple, 6729 sizeof(struct ixgbe_5tuple_filter_info)); 6730 filter->queue = ntuple_filter->queue; 6731 ret = ixgbe_add_5tuple_filter(dev, filter); 6732 if (ret < 0) { 6733 rte_free(filter); 6734 return ret; 6735 } 6736 } else 6737 ixgbe_remove_5tuple_filter(dev, filter); 6738 6739 return 0; 6740 } 6741 6742 int 6743 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 6744 struct rte_eth_ethertype_filter *filter, 6745 bool add) 6746 { 6747 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6748 struct ixgbe_filter_info *filter_info = 6749 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6750 uint32_t etqf = 0; 6751 uint32_t etqs = 0; 6752 int ret; 6753 struct ixgbe_ethertype_filter ethertype_filter; 6754 6755 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6756 return -EINVAL; 6757 6758 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 6759 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 6760 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 6761 " ethertype filter.", filter->ether_type); 6762 return -EINVAL; 6763 } 6764 6765 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 6766 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 6767 return -EINVAL; 6768 } 6769 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 6770 PMD_DRV_LOG(ERR, "drop option is unsupported."); 6771 return -EINVAL; 6772 } 6773 6774 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 6775 if (ret >= 0 && add) { 6776 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 6777 filter->ether_type); 6778 return -EEXIST; 6779 } 6780 if (ret < 0 && !add) { 6781 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 6782 filter->ether_type); 6783 return -ENOENT; 6784 } 6785 6786 if (add) { 6787 etqf = IXGBE_ETQF_FILTER_EN; 6788 etqf |= (uint32_t)filter->ether_type; 6789 etqs |= (uint32_t)((filter->queue << 6790 IXGBE_ETQS_RX_QUEUE_SHIFT) & 6791 IXGBE_ETQS_RX_QUEUE); 6792 etqs |= IXGBE_ETQS_QUEUE_EN; 6793 6794 ethertype_filter.ethertype = filter->ether_type; 6795 ethertype_filter.etqf = etqf; 6796 ethertype_filter.etqs = etqs; 6797 ethertype_filter.conf = FALSE; 6798 ret = ixgbe_ethertype_filter_insert(filter_info, 6799 ðertype_filter); 6800 if (ret < 0) { 6801 PMD_DRV_LOG(ERR, "ethertype filters are full."); 6802 return -ENOSPC; 6803 } 6804 } else { 6805 ret = ixgbe_ethertype_filter_remove(filter_info, 
(uint8_t)ret); 6806 if (ret < 0) 6807 return -ENOSYS; 6808 } 6809 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 6810 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 6811 IXGBE_WRITE_FLUSH(hw); 6812 6813 return 0; 6814 } 6815 6816 static int 6817 ixgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev, 6818 const struct rte_flow_ops **ops) 6819 { 6820 *ops = &ixgbe_flow_ops; 6821 return 0; 6822 } 6823 6824 static u8 * 6825 ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw, 6826 u8 **mc_addr_ptr, u32 *vmdq) 6827 { 6828 u8 *mc_addr; 6829 6830 *vmdq = 0; 6831 mc_addr = *mc_addr_ptr; 6832 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 6833 return mc_addr; 6834 } 6835 6836 static int 6837 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 6838 struct rte_ether_addr *mc_addr_set, 6839 uint32_t nb_mc_addr) 6840 { 6841 struct ixgbe_hw *hw; 6842 u8 *mc_addr_list; 6843 6844 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6845 mc_addr_list = (u8 *)mc_addr_set; 6846 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 6847 ixgbe_dev_addr_list_itr, TRUE); 6848 } 6849 6850 static uint64_t 6851 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 6852 { 6853 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6854 uint64_t systime_cycles; 6855 6856 switch (hw->mac.type) { 6857 case ixgbe_mac_X550: 6858 case ixgbe_mac_X550EM_x: 6859 case ixgbe_mac_X550EM_a: 6860 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 6861 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6862 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6863 * NSEC_PER_SEC; 6864 break; 6865 default: 6866 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6867 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6868 << 32; 6869 } 6870 6871 return systime_cycles; 6872 } 6873 6874 static uint64_t 6875 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6876 { 6877 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6878 uint64_t rx_tstamp_cycles; 6879 6880 switch (hw->mac.type) { 6881 case ixgbe_mac_X550: 6882 case ixgbe_mac_X550EM_x: 6883 case ixgbe_mac_X550EM_a: 6884 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6885 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6886 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6887 * NSEC_PER_SEC; 6888 break; 6889 default: 6890 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6891 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6892 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6893 << 32; 6894 } 6895 6896 return rx_tstamp_cycles; 6897 } 6898 6899 static uint64_t 6900 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6901 { 6902 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6903 uint64_t tx_tstamp_cycles; 6904 6905 switch (hw->mac.type) { 6906 case ixgbe_mac_X550: 6907 case ixgbe_mac_X550EM_x: 6908 case ixgbe_mac_X550EM_a: 6909 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6910 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6911 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6912 * NSEC_PER_SEC; 6913 break; 6914 default: 6915 /* TXSTMPL stores ns and TXSTMPH stores seconds. 
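
/*
 * Illustrative sketch (not part of the driver): the two ways the cycle count
 * is assembled in ixgbe_read_systime_cyclecounter() above.  On the X550
 * family the low register already holds nanoseconds and the high register
 * holds seconds; on older MACs the two registers simply form a 64-bit
 * cycle counter.
 */
static uint64_t
example_compose_systime(uint32_t lo, uint32_t hi, int is_x550_family)
{
	if (is_x550_family)
		/* seconds * 1e9 + nanoseconds */
		return (uint64_t)hi * 1000000000ULL + lo;

	/* plain 64-bit counter: high word in the upper 32 bits */
	return ((uint64_t)hi << 32) | lo;
}
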
*/ 6916 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6917 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6918 << 32; 6919 } 6920 6921 return tx_tstamp_cycles; 6922 } 6923 6924 static void 6925 ixgbe_start_timecounters(struct rte_eth_dev *dev) 6926 { 6927 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6928 struct ixgbe_adapter *adapter = dev->data->dev_private; 6929 struct rte_eth_link link; 6930 uint32_t incval = 0; 6931 uint32_t shift = 0; 6932 6933 /* Get current link speed. */ 6934 ixgbe_dev_link_update(dev, 1); 6935 rte_eth_linkstatus_get(dev, &link); 6936 6937 switch (link.link_speed) { 6938 case ETH_SPEED_NUM_100M: 6939 incval = IXGBE_INCVAL_100; 6940 shift = IXGBE_INCVAL_SHIFT_100; 6941 break; 6942 case ETH_SPEED_NUM_1G: 6943 incval = IXGBE_INCVAL_1GB; 6944 shift = IXGBE_INCVAL_SHIFT_1GB; 6945 break; 6946 case ETH_SPEED_NUM_10G: 6947 default: 6948 incval = IXGBE_INCVAL_10GB; 6949 shift = IXGBE_INCVAL_SHIFT_10GB; 6950 break; 6951 } 6952 6953 switch (hw->mac.type) { 6954 case ixgbe_mac_X550: 6955 case ixgbe_mac_X550EM_x: 6956 case ixgbe_mac_X550EM_a: 6957 /* Independent of link speed. */ 6958 incval = 1; 6959 /* Cycles read will be interpreted as ns. */ 6960 shift = 0; 6961 /* Fall-through */ 6962 case ixgbe_mac_X540: 6963 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 6964 break; 6965 case ixgbe_mac_82599EB: 6966 incval >>= IXGBE_INCVAL_SHIFT_82599; 6967 shift -= IXGBE_INCVAL_SHIFT_82599; 6968 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 6969 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 6970 break; 6971 default: 6972 /* Not supported. */ 6973 return; 6974 } 6975 6976 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 6977 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6978 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6979 6980 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6981 adapter->systime_tc.cc_shift = shift; 6982 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 6983 6984 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6985 adapter->rx_tstamp_tc.cc_shift = shift; 6986 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6987 6988 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6989 adapter->tx_tstamp_tc.cc_shift = shift; 6990 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6991 } 6992 6993 static int 6994 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 6995 { 6996 struct ixgbe_adapter *adapter = dev->data->dev_private; 6997 6998 adapter->systime_tc.nsec += delta; 6999 adapter->rx_tstamp_tc.nsec += delta; 7000 adapter->tx_tstamp_tc.nsec += delta; 7001 7002 return 0; 7003 } 7004 7005 static int 7006 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 7007 { 7008 uint64_t ns; 7009 struct ixgbe_adapter *adapter = dev->data->dev_private; 7010 7011 ns = rte_timespec_to_ns(ts); 7012 /* Set the timecounters to a new value. 
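
/*
 * Illustrative sketch (not part of the driver): the 82599 TIMINCA value that
 * ixgbe_start_timecounters() above programs for a 10G link, using the
 * IXGBE_INCVAL_* constants defined at the top of this file.
 */
static uint32_t
example_timinca_82599_10g(void)
{
	uint32_t incval = 0x66666666;	/* IXGBE_INCVAL_10GB */

	incval >>= 7;			/* IXGBE_INCVAL_SHIFT_82599 */
	/* incval is now 0x00CCCCCC; the increment period goes in bit 24 */
	return (1u << 24) | incval;	/* IXGBE_INCPER_SHIFT_82599 */
}

/* The timecounter shift is reduced by the same 7 bits (28 - 7 = 21). */
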
 */
	adapter->systime_tc.nsec = ns;
	adapter->rx_tstamp_tc.nsec = ns;
	adapter->tx_tstamp_tc.nsec = ns;

	return 0;
}

static int
ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
	uint64_t ns, systime_cycles;
	struct ixgbe_adapter *adapter = dev->data->dev_private;

	systime_cycles = ixgbe_read_systime_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
	*ts = rte_ns_to_timespec(ns);

	return 0;
}

static int
ixgbe_timesync_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;
	uint32_t tsauxc;

	/* Stop the timesync system time. */
	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
	/* Reset the timesync system time value. */
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);

	/* Enable system time for platforms where it isn't on by default. */
	tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
	tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
	IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);

	ixgbe_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
			(RTE_ETHER_TYPE_1588 |
			 IXGBE_ETQF_FILTER_EN |
			 IXGBE_ETQF_1588));

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

static int
ixgbe_timesync_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);

	/* Stop incrementing the System Time registers.
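	 * (Note added for clarity: writing 0 to TIMINCA freezes SYSTIM; a
	 * later ixgbe_timesync_enable() resets the counters and reprograms
	 * the increment via ixgbe_start_timecounters().)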
*/ 7094 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); 7095 7096 return 0; 7097 } 7098 7099 static int 7100 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 7101 struct timespec *timestamp, 7102 uint32_t flags __rte_unused) 7103 { 7104 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7105 struct ixgbe_adapter *adapter = dev->data->dev_private; 7106 uint32_t tsync_rxctl; 7107 uint64_t rx_tstamp_cycles; 7108 uint64_t ns; 7109 7110 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 7111 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) 7112 return -EINVAL; 7113 7114 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); 7115 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 7116 *timestamp = rte_ns_to_timespec(ns); 7117 7118 return 0; 7119 } 7120 7121 static int 7122 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 7123 struct timespec *timestamp) 7124 { 7125 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7126 struct ixgbe_adapter *adapter = dev->data->dev_private; 7127 uint32_t tsync_txctl; 7128 uint64_t tx_tstamp_cycles; 7129 uint64_t ns; 7130 7131 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 7132 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) 7133 return -EINVAL; 7134 7135 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); 7136 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 7137 *timestamp = rte_ns_to_timespec(ns); 7138 7139 return 0; 7140 } 7141 7142 static int 7143 ixgbe_get_reg_length(struct rte_eth_dev *dev) 7144 { 7145 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7146 int count = 0; 7147 int g_ind = 0; 7148 const struct reg_info *reg_group; 7149 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 7150 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 7151 7152 while ((reg_group = reg_set[g_ind++])) 7153 count += ixgbe_regs_group_count(reg_group); 7154 7155 return count; 7156 } 7157 7158 static int 7159 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 7160 { 7161 int count = 0; 7162 int g_ind = 0; 7163 const struct reg_info *reg_group; 7164 7165 while ((reg_group = ixgbevf_regs[g_ind++])) 7166 count += ixgbe_regs_group_count(reg_group); 7167 7168 return count; 7169 } 7170 7171 static int 7172 ixgbe_get_regs(struct rte_eth_dev *dev, 7173 struct rte_dev_reg_info *regs) 7174 { 7175 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7176 uint32_t *data = regs->data; 7177 int g_ind = 0; 7178 int count = 0; 7179 const struct reg_info *reg_group; 7180 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
7181 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 7182 7183 if (data == NULL) { 7184 regs->length = ixgbe_get_reg_length(dev); 7185 regs->width = sizeof(uint32_t); 7186 return 0; 7187 } 7188 7189 /* Support only full register dump */ 7190 if ((regs->length == 0) || 7191 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 7192 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7193 hw->device_id; 7194 while ((reg_group = reg_set[g_ind++])) 7195 count += ixgbe_read_regs_group(dev, &data[count], 7196 reg_group); 7197 return 0; 7198 } 7199 7200 return -ENOTSUP; 7201 } 7202 7203 static int 7204 ixgbevf_get_regs(struct rte_eth_dev *dev, 7205 struct rte_dev_reg_info *regs) 7206 { 7207 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7208 uint32_t *data = regs->data; 7209 int g_ind = 0; 7210 int count = 0; 7211 const struct reg_info *reg_group; 7212 7213 if (data == NULL) { 7214 regs->length = ixgbevf_get_reg_length(dev); 7215 regs->width = sizeof(uint32_t); 7216 return 0; 7217 } 7218 7219 /* Support only full register dump */ 7220 if ((regs->length == 0) || 7221 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 7222 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7223 hw->device_id; 7224 while ((reg_group = ixgbevf_regs[g_ind++])) 7225 count += ixgbe_read_regs_group(dev, &data[count], 7226 reg_group); 7227 return 0; 7228 } 7229 7230 return -ENOTSUP; 7231 } 7232 7233 static int 7234 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 7235 { 7236 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7237 7238 /* Return unit is byte count */ 7239 return hw->eeprom.word_size * 2; 7240 } 7241 7242 static int 7243 ixgbe_get_eeprom(struct rte_eth_dev *dev, 7244 struct rte_dev_eeprom_info *in_eeprom) 7245 { 7246 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7247 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7248 uint16_t *data = in_eeprom->data; 7249 int first, length; 7250 7251 first = in_eeprom->offset >> 1; 7252 length = in_eeprom->length >> 1; 7253 if ((first > hw->eeprom.word_size) || 7254 ((first + length) > hw->eeprom.word_size)) 7255 return -EINVAL; 7256 7257 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7258 7259 return eeprom->ops.read_buffer(hw, first, length, data); 7260 } 7261 7262 static int 7263 ixgbe_set_eeprom(struct rte_eth_dev *dev, 7264 struct rte_dev_eeprom_info *in_eeprom) 7265 { 7266 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7267 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7268 uint16_t *data = in_eeprom->data; 7269 int first, length; 7270 7271 first = in_eeprom->offset >> 1; 7272 length = in_eeprom->length >> 1; 7273 if ((first > hw->eeprom.word_size) || 7274 ((first + length) > hw->eeprom.word_size)) 7275 return -EINVAL; 7276 7277 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7278 7279 return eeprom->ops.write_buffer(hw, first, length, data); 7280 } 7281 7282 static int 7283 ixgbe_get_module_info(struct rte_eth_dev *dev, 7284 struct rte_eth_dev_module_info *modinfo) 7285 { 7286 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7287 uint32_t status; 7288 uint8_t sff8472_rev, addr_mode; 7289 bool page_swap = false; 7290 7291 /* Check whether we support SFF-8472 or not */ 7292 status = hw->phy.ops.read_i2c_eeprom(hw, 7293 IXGBE_SFF_SFF_8472_COMP, 7294 &sff8472_rev); 7295 if (status != 0) 7296 return -EIO; 7297 7298 /* addressing mode is not supported */ 7299 status = hw->phy.ops.read_i2c_eeprom(hw, 7300 
IXGBE_SFF_SFF_8472_SWAP, 7301 &addr_mode); 7302 if (status != 0) 7303 return -EIO; 7304 7305 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 7306 PMD_DRV_LOG(ERR, 7307 "Address change required to access page 0xA2, " 7308 "but not supported. Please report the module " 7309 "type to the driver maintainers."); 7310 page_swap = true; 7311 } 7312 7313 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 7314 /* We have a SFP, but it does not support SFF-8472 */ 7315 modinfo->type = RTE_ETH_MODULE_SFF_8079; 7316 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 7317 } else { 7318 /* We have a SFP which supports a revision of SFF-8472. */ 7319 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7320 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7321 } 7322 7323 return 0; 7324 } 7325 7326 static int 7327 ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 7328 struct rte_dev_eeprom_info *info) 7329 { 7330 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7331 uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; 7332 uint8_t databyte = 0xFF; 7333 uint8_t *data = info->data; 7334 uint32_t i = 0; 7335 7336 for (i = info->offset; i < info->offset + info->length; i++) { 7337 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 7338 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 7339 else 7340 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 7341 7342 if (status != 0) 7343 return -EIO; 7344 7345 data[i - info->offset] = databyte; 7346 } 7347 7348 return 0; 7349 } 7350 7351 uint16_t 7352 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 7353 switch (mac_type) { 7354 case ixgbe_mac_X550: 7355 case ixgbe_mac_X550EM_x: 7356 case ixgbe_mac_X550EM_a: 7357 return ETH_RSS_RETA_SIZE_512; 7358 case ixgbe_mac_X550_vf: 7359 case ixgbe_mac_X550EM_x_vf: 7360 case ixgbe_mac_X550EM_a_vf: 7361 return ETH_RSS_RETA_SIZE_64; 7362 case ixgbe_mac_X540_vf: 7363 case ixgbe_mac_82599_vf: 7364 return 0; 7365 default: 7366 return ETH_RSS_RETA_SIZE_128; 7367 } 7368 } 7369 7370 uint32_t 7371 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 7372 switch (mac_type) { 7373 case ixgbe_mac_X550: 7374 case ixgbe_mac_X550EM_x: 7375 case ixgbe_mac_X550EM_a: 7376 if (reta_idx < ETH_RSS_RETA_SIZE_128) 7377 return IXGBE_RETA(reta_idx >> 2); 7378 else 7379 return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); 7380 case ixgbe_mac_X550_vf: 7381 case ixgbe_mac_X550EM_x_vf: 7382 case ixgbe_mac_X550EM_a_vf: 7383 return IXGBE_VFRETA(reta_idx >> 2); 7384 default: 7385 return IXGBE_RETA(reta_idx >> 2); 7386 } 7387 } 7388 7389 uint32_t 7390 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 7391 switch (mac_type) { 7392 case ixgbe_mac_X550_vf: 7393 case ixgbe_mac_X550EM_x_vf: 7394 case ixgbe_mac_X550EM_a_vf: 7395 return IXGBE_VFMRQC; 7396 default: 7397 return IXGBE_MRQC; 7398 } 7399 } 7400 7401 uint32_t 7402 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 7403 switch (mac_type) { 7404 case ixgbe_mac_X550_vf: 7405 case ixgbe_mac_X550EM_x_vf: 7406 case ixgbe_mac_X550EM_a_vf: 7407 return IXGBE_VFRSSRK(i); 7408 default: 7409 return IXGBE_RSSRK(i); 7410 } 7411 } 7412 7413 bool 7414 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 7415 switch (mac_type) { 7416 case ixgbe_mac_82599_vf: 7417 case ixgbe_mac_X540_vf: 7418 return 0; 7419 default: 7420 return 1; 7421 } 7422 } 7423 7424 static int 7425 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 7426 struct rte_eth_dcb_info *dcb_info) 7427 { 7428 struct ixgbe_dcb_config *dcb_config = 7429 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 7430 struct 
ixgbe_dcb_tc_config *tc; 7431 struct rte_eth_dcb_tc_queue_mapping *tc_queue; 7432 uint8_t nb_tcs; 7433 uint8_t i, j; 7434 7435 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) 7436 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 7437 else 7438 dcb_info->nb_tcs = 1; 7439 7440 tc_queue = &dcb_info->tc_queue; 7441 nb_tcs = dcb_info->nb_tcs; 7442 7443 if (dcb_config->vt_mode) { /* vt is enabled*/ 7444 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 7445 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 7446 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 7447 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 7448 if (RTE_ETH_DEV_SRIOV(dev).active > 0) { 7449 for (j = 0; j < nb_tcs; j++) { 7450 tc_queue->tc_rxq[0][j].base = j; 7451 tc_queue->tc_rxq[0][j].nb_queue = 1; 7452 tc_queue->tc_txq[0][j].base = j; 7453 tc_queue->tc_txq[0][j].nb_queue = 1; 7454 } 7455 } else { 7456 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 7457 for (j = 0; j < nb_tcs; j++) { 7458 tc_queue->tc_rxq[i][j].base = 7459 i * nb_tcs + j; 7460 tc_queue->tc_rxq[i][j].nb_queue = 1; 7461 tc_queue->tc_txq[i][j].base = 7462 i * nb_tcs + j; 7463 tc_queue->tc_txq[i][j].nb_queue = 1; 7464 } 7465 } 7466 } 7467 } else { /* vt is disabled*/ 7468 struct rte_eth_dcb_rx_conf *rx_conf = 7469 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 7470 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 7471 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 7472 if (dcb_info->nb_tcs == ETH_4_TCS) { 7473 for (i = 0; i < dcb_info->nb_tcs; i++) { 7474 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 7475 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7476 } 7477 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7478 dcb_info->tc_queue.tc_txq[0][1].base = 64; 7479 dcb_info->tc_queue.tc_txq[0][2].base = 96; 7480 dcb_info->tc_queue.tc_txq[0][3].base = 112; 7481 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 7482 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7483 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7484 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7485 } else if (dcb_info->nb_tcs == ETH_8_TCS) { 7486 for (i = 0; i < dcb_info->nb_tcs; i++) { 7487 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 7488 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7489 } 7490 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7491 dcb_info->tc_queue.tc_txq[0][1].base = 32; 7492 dcb_info->tc_queue.tc_txq[0][2].base = 64; 7493 dcb_info->tc_queue.tc_txq[0][3].base = 80; 7494 dcb_info->tc_queue.tc_txq[0][4].base = 96; 7495 dcb_info->tc_queue.tc_txq[0][5].base = 104; 7496 dcb_info->tc_queue.tc_txq[0][6].base = 112; 7497 dcb_info->tc_queue.tc_txq[0][7].base = 120; 7498 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 7499 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7500 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7501 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7502 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 7503 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 7504 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 7505 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 7506 } 7507 } 7508 for (i = 0; i < dcb_info->nb_tcs; i++) { 7509 tc = &dcb_config->tc_config[i]; 7510 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; 7511 } 7512 return 0; 7513 } 7514 7515 /* Update e-tag ether type */ 7516 static int 7517 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, 7518 uint16_t ether_type) 7519 { 7520 uint32_t etag_etype; 7521 7522 if (hw->mac.type != ixgbe_mac_X550 && 7523 hw->mac.type != ixgbe_mac_X550EM_x && 7524 hw->mac.type != ixgbe_mac_X550EM_a) { 7525 return -ENOTSUP; 7526 } 
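	/*
	 * Note added for clarity: this is a read-modify-write of the low
	 * 16 bits of the E-tag EtherType register only; the VALID bit set by
	 * ixgbe_e_tag_enable() is preserved.
	 */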
7527 7528 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7529 etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; 7530 etag_etype |= ether_type; 7531 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7532 IXGBE_WRITE_FLUSH(hw); 7533 7534 return 0; 7535 } 7536 7537 /* Enable e-tag tunnel */ 7538 static int 7539 ixgbe_e_tag_enable(struct ixgbe_hw *hw) 7540 { 7541 uint32_t etag_etype; 7542 7543 if (hw->mac.type != ixgbe_mac_X550 && 7544 hw->mac.type != ixgbe_mac_X550EM_x && 7545 hw->mac.type != ixgbe_mac_X550EM_a) { 7546 return -ENOTSUP; 7547 } 7548 7549 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7550 etag_etype |= IXGBE_ETAG_ETYPE_VALID; 7551 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7552 IXGBE_WRITE_FLUSH(hw); 7553 7554 return 0; 7555 } 7556 7557 static int 7558 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, 7559 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7560 { 7561 int ret = 0; 7562 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7563 uint32_t i, rar_entries; 7564 uint32_t rar_low, rar_high; 7565 7566 if (hw->mac.type != ixgbe_mac_X550 && 7567 hw->mac.type != ixgbe_mac_X550EM_x && 7568 hw->mac.type != ixgbe_mac_X550EM_a) { 7569 return -ENOTSUP; 7570 } 7571 7572 rar_entries = ixgbe_get_num_rx_addrs(hw); 7573 7574 for (i = 1; i < rar_entries; i++) { 7575 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7576 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 7577 if ((rar_high & IXGBE_RAH_AV) && 7578 (rar_high & IXGBE_RAH_ADTYPE) && 7579 ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == 7580 l2_tunnel->tunnel_id)) { 7581 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 7582 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 7583 7584 ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); 7585 7586 return ret; 7587 } 7588 } 7589 7590 return ret; 7591 } 7592 7593 static int 7594 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, 7595 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7596 { 7597 int ret = 0; 7598 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7599 uint32_t i, rar_entries; 7600 uint32_t rar_low, rar_high; 7601 7602 if (hw->mac.type != ixgbe_mac_X550 && 7603 hw->mac.type != ixgbe_mac_X550EM_x && 7604 hw->mac.type != ixgbe_mac_X550EM_a) { 7605 return -ENOTSUP; 7606 } 7607 7608 /* One entry for one tunnel. Try to remove potential existing entry. */ 7609 ixgbe_e_tag_filter_del(dev, l2_tunnel); 7610 7611 rar_entries = ixgbe_get_num_rx_addrs(hw); 7612 7613 for (i = 1; i < rar_entries; i++) { 7614 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7615 if (rar_high & IXGBE_RAH_AV) { 7616 continue; 7617 } else { 7618 ixgbe_set_vmdq(hw, i, l2_tunnel->pool); 7619 rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; 7620 rar_low = l2_tunnel->tunnel_id; 7621 7622 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); 7623 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); 7624 7625 return ret; 7626 } 7627 } 7628 7629 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
7630 " Please remove a rule before adding a new one."); 7631 return -EINVAL; 7632 } 7633 7634 static inline struct ixgbe_l2_tn_filter * 7635 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, 7636 struct ixgbe_l2_tn_key *key) 7637 { 7638 int ret; 7639 7640 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 7641 if (ret < 0) 7642 return NULL; 7643 7644 return l2_tn_info->hash_map[ret]; 7645 } 7646 7647 static inline int 7648 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7649 struct ixgbe_l2_tn_filter *l2_tn_filter) 7650 { 7651 int ret; 7652 7653 ret = rte_hash_add_key(l2_tn_info->hash_handle, 7654 &l2_tn_filter->key); 7655 7656 if (ret < 0) { 7657 PMD_DRV_LOG(ERR, 7658 "Failed to insert L2 tunnel filter" 7659 " to hash table %d!", 7660 ret); 7661 return ret; 7662 } 7663 7664 l2_tn_info->hash_map[ret] = l2_tn_filter; 7665 7666 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7667 7668 return 0; 7669 } 7670 7671 static inline int 7672 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7673 struct ixgbe_l2_tn_key *key) 7674 { 7675 int ret; 7676 struct ixgbe_l2_tn_filter *l2_tn_filter; 7677 7678 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 7679 7680 if (ret < 0) { 7681 PMD_DRV_LOG(ERR, 7682 "No such L2 tunnel filter to delete %d!", 7683 ret); 7684 return ret; 7685 } 7686 7687 l2_tn_filter = l2_tn_info->hash_map[ret]; 7688 l2_tn_info->hash_map[ret] = NULL; 7689 7690 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7691 rte_free(l2_tn_filter); 7692 7693 return 0; 7694 } 7695 7696 /* Add l2 tunnel filter */ 7697 int 7698 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 7699 struct ixgbe_l2_tunnel_conf *l2_tunnel, 7700 bool restore) 7701 { 7702 int ret; 7703 struct ixgbe_l2_tn_info *l2_tn_info = 7704 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7705 struct ixgbe_l2_tn_key key; 7706 struct ixgbe_l2_tn_filter *node; 7707 7708 if (!restore) { 7709 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7710 key.tn_id = l2_tunnel->tunnel_id; 7711 7712 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); 7713 7714 if (node) { 7715 PMD_DRV_LOG(ERR, 7716 "The L2 tunnel filter already exists!"); 7717 return -EINVAL; 7718 } 7719 7720 node = rte_zmalloc("ixgbe_l2_tn", 7721 sizeof(struct ixgbe_l2_tn_filter), 7722 0); 7723 if (!node) 7724 return -ENOMEM; 7725 7726 rte_memcpy(&node->key, 7727 &key, 7728 sizeof(struct ixgbe_l2_tn_key)); 7729 node->pool = l2_tunnel->pool; 7730 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); 7731 if (ret < 0) { 7732 rte_free(node); 7733 return ret; 7734 } 7735 } 7736 7737 switch (l2_tunnel->l2_tunnel_type) { 7738 case RTE_L2_TUNNEL_TYPE_E_TAG: 7739 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 7740 break; 7741 default: 7742 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7743 ret = -EINVAL; 7744 break; 7745 } 7746 7747 if ((!restore) && (ret < 0)) 7748 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7749 7750 return ret; 7751 } 7752 7753 /* Delete l2 tunnel filter */ 7754 int 7755 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 7756 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7757 { 7758 int ret; 7759 struct ixgbe_l2_tn_info *l2_tn_info = 7760 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7761 struct ixgbe_l2_tn_key key; 7762 7763 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7764 key.tn_id = l2_tunnel->tunnel_id; 7765 ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7766 if (ret < 0) 7767 return ret; 7768 7769 switch (l2_tunnel->l2_tunnel_type) { 7770 case 
RTE_L2_TUNNEL_TYPE_E_TAG: 7771 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); 7772 break; 7773 default: 7774 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7775 ret = -EINVAL; 7776 break; 7777 } 7778 7779 return ret; 7780 } 7781 7782 static int 7783 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 7784 { 7785 int ret = 0; 7786 uint32_t ctrl; 7787 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7788 7789 if (hw->mac.type != ixgbe_mac_X550 && 7790 hw->mac.type != ixgbe_mac_X550EM_x && 7791 hw->mac.type != ixgbe_mac_X550EM_a) { 7792 return -ENOTSUP; 7793 } 7794 7795 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 7796 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 7797 if (en) 7798 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; 7799 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 7800 7801 return ret; 7802 } 7803 7804 static int 7805 ixgbe_update_vxlan_port(struct ixgbe_hw *hw, 7806 uint16_t port) 7807 { 7808 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); 7809 IXGBE_WRITE_FLUSH(hw); 7810 7811 return 0; 7812 } 7813 7814 /* There's only one register for VxLAN UDP port. 7815 * So, we cannot add several ports. Will update it. 7816 */ 7817 static int 7818 ixgbe_add_vxlan_port(struct ixgbe_hw *hw, 7819 uint16_t port) 7820 { 7821 if (port == 0) { 7822 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 7823 return -EINVAL; 7824 } 7825 7826 return ixgbe_update_vxlan_port(hw, port); 7827 } 7828 7829 /* We cannot delete the VxLAN port. For there's a register for VxLAN 7830 * UDP port, it must have a value. 7831 * So, will reset it to the original value 0. 7832 */ 7833 static int 7834 ixgbe_del_vxlan_port(struct ixgbe_hw *hw, 7835 uint16_t port) 7836 { 7837 uint16_t cur_port; 7838 7839 cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); 7840 7841 if (cur_port != port) { 7842 PMD_DRV_LOG(ERR, "Port %u does not exist.", port); 7843 return -EINVAL; 7844 } 7845 7846 return ixgbe_update_vxlan_port(hw, 0); 7847 } 7848 7849 /* Add UDP tunneling port */ 7850 static int 7851 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 7852 struct rte_eth_udp_tunnel *udp_tunnel) 7853 { 7854 int ret = 0; 7855 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7856 7857 if (hw->mac.type != ixgbe_mac_X550 && 7858 hw->mac.type != ixgbe_mac_X550EM_x && 7859 hw->mac.type != ixgbe_mac_X550EM_a) { 7860 return -ENOTSUP; 7861 } 7862 7863 if (udp_tunnel == NULL) 7864 return -EINVAL; 7865 7866 switch (udp_tunnel->prot_type) { 7867 case RTE_TUNNEL_TYPE_VXLAN: 7868 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); 7869 break; 7870 7871 case RTE_TUNNEL_TYPE_GENEVE: 7872 case RTE_TUNNEL_TYPE_TEREDO: 7873 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7874 ret = -EINVAL; 7875 break; 7876 7877 default: 7878 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7879 ret = -EINVAL; 7880 break; 7881 } 7882 7883 return ret; 7884 } 7885 7886 /* Remove UDP tunneling port */ 7887 static int 7888 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 7889 struct rte_eth_udp_tunnel *udp_tunnel) 7890 { 7891 int ret = 0; 7892 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7893 7894 if (hw->mac.type != ixgbe_mac_X550 && 7895 hw->mac.type != ixgbe_mac_X550EM_x && 7896 hw->mac.type != ixgbe_mac_X550EM_a) { 7897 return -ENOTSUP; 7898 } 7899 7900 if (udp_tunnel == NULL) 7901 return -EINVAL; 7902 7903 switch (udp_tunnel->prot_type) { 7904 case RTE_TUNNEL_TYPE_VXLAN: 7905 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); 7906 break; 7907 case RTE_TUNNEL_TYPE_GENEVE: 7908 case RTE_TUNNEL_TYPE_TEREDO: 
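		/*
		 * Note added for clarity: as in the add path above, the device
		 * has a single VXLANCTRL register, so only one VxLAN UDP port
		 * can be offloaded; GENEVE and Teredo are rejected here.
		 */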
7909 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7910 ret = -EINVAL; 7911 break; 7912 default: 7913 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7914 ret = -EINVAL; 7915 break; 7916 } 7917 7918 return ret; 7919 } 7920 7921 static int 7922 ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) 7923 { 7924 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7925 int ret; 7926 7927 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) { 7928 case IXGBE_SUCCESS: 7929 ret = 0; 7930 break; 7931 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7932 ret = -ENOTSUP; 7933 break; 7934 default: 7935 ret = -EAGAIN; 7936 break; 7937 } 7938 7939 return ret; 7940 } 7941 7942 static int 7943 ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) 7944 { 7945 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7946 int ret; 7947 7948 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) { 7949 case IXGBE_SUCCESS: 7950 ret = 0; 7951 break; 7952 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7953 ret = -ENOTSUP; 7954 break; 7955 default: 7956 ret = -EAGAIN; 7957 break; 7958 } 7959 7960 return ret; 7961 } 7962 7963 static int 7964 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) 7965 { 7966 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7967 int ret; 7968 int mode = IXGBEVF_XCAST_MODE_ALLMULTI; 7969 7970 switch (hw->mac.ops.update_xcast_mode(hw, mode)) { 7971 case IXGBE_SUCCESS: 7972 ret = 0; 7973 break; 7974 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7975 ret = -ENOTSUP; 7976 break; 7977 default: 7978 ret = -EAGAIN; 7979 break; 7980 } 7981 7982 return ret; 7983 } 7984 7985 static int 7986 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) 7987 { 7988 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7989 int ret; 7990 7991 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { 7992 case IXGBE_SUCCESS: 7993 ret = 0; 7994 break; 7995 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7996 ret = -ENOTSUP; 7997 break; 7998 default: 7999 ret = -EAGAIN; 8000 break; 8001 } 8002 8003 return ret; 8004 } 8005 8006 static void ixgbevf_mbx_process(struct rte_eth_dev *dev) 8007 { 8008 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8009 u32 in_msg = 0; 8010 8011 /* peek the message first */ 8012 in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); 8013 8014 /* PF reset VF event */ 8015 if (in_msg == IXGBE_PF_CONTROL_MSG) { 8016 /* dummy mbx read to ack pf */ 8017 if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) 8018 return; 8019 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 8020 NULL); 8021 } 8022 } 8023 8024 static int 8025 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) 8026 { 8027 uint32_t eicr; 8028 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8029 struct ixgbe_interrupt *intr = 8030 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 8031 ixgbevf_intr_disable(dev); 8032 8033 /* read-on-clear nic registers here */ 8034 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); 8035 intr->flags = 0; 8036 8037 /* only one misc vector supported - mailbox */ 8038 eicr &= IXGBE_VTEICR_MASK; 8039 if (eicr == IXGBE_MISC_VEC_ID) 8040 intr->flags |= IXGBE_FLAG_MAILBOX; 8041 8042 return 0; 8043 } 8044 8045 static int 8046 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) 8047 { 8048 struct ixgbe_interrupt *intr = 8049 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 8050 8051 if (intr->flags & IXGBE_FLAG_MAILBOX) { 8052 ixgbevf_mbx_process(dev); 8053 intr->flags &= 
~IXGBE_FLAG_MAILBOX; 8054 } 8055 8056 ixgbevf_intr_enable(dev); 8057 8058 return 0; 8059 } 8060 8061 static void 8062 ixgbevf_dev_interrupt_handler(void *param) 8063 { 8064 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 8065 8066 ixgbevf_dev_interrupt_get_status(dev); 8067 ixgbevf_dev_interrupt_action(dev); 8068 } 8069 8070 /** 8071 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path 8072 * @hw: pointer to hardware structure 8073 * 8074 * Stops the transmit data path and waits for the HW to internally empty 8075 * the Tx security block 8076 **/ 8077 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) 8078 { 8079 #define IXGBE_MAX_SECTX_POLL 40 8080 8081 int i; 8082 int sectxreg; 8083 8084 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8085 sectxreg |= IXGBE_SECTXCTRL_TX_DIS; 8086 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 8087 for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { 8088 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); 8089 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) 8090 break; 8091 /* Use interrupt-safe sleep just in case */ 8092 usec_delay(1000); 8093 } 8094 8095 /* For informational purposes only */ 8096 if (i >= IXGBE_MAX_SECTX_POLL) 8097 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " 8098 "path fully disabled. Continuing with init."); 8099 8100 return IXGBE_SUCCESS; 8101 } 8102 8103 /** 8104 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path 8105 * @hw: pointer to hardware structure 8106 * 8107 * Enables the transmit data path. 8108 **/ 8109 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) 8110 { 8111 uint32_t sectxreg; 8112 8113 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8114 sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; 8115 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 8116 IXGBE_WRITE_FLUSH(hw); 8117 8118 return IXGBE_SUCCESS; 8119 } 8120 8121 /* restore n-tuple filter */ 8122 static inline void 8123 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) 8124 { 8125 struct ixgbe_filter_info *filter_info = 8126 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8127 struct ixgbe_5tuple_filter *node; 8128 8129 TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { 8130 ixgbe_inject_5tuple_filter(dev, node); 8131 } 8132 } 8133 8134 /* restore ethernet type filter */ 8135 static inline void 8136 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) 8137 { 8138 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8139 struct ixgbe_filter_info *filter_info = 8140 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8141 int i; 8142 8143 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8144 if (filter_info->ethertype_mask & (1 << i)) { 8145 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 8146 filter_info->ethertype_filters[i].etqf); 8147 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 8148 filter_info->ethertype_filters[i].etqs); 8149 IXGBE_WRITE_FLUSH(hw); 8150 } 8151 } 8152 } 8153 8154 /* restore SYN filter */ 8155 static inline void 8156 ixgbe_syn_filter_restore(struct rte_eth_dev *dev) 8157 { 8158 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8159 struct ixgbe_filter_info *filter_info = 8160 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8161 uint32_t synqf; 8162 8163 synqf = filter_info->syn_info; 8164 8165 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 8166 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 8167 IXGBE_WRITE_FLUSH(hw); 8168 } 8169 } 8170 8171 /* restore L2 tunnel filter */ 8172 static inline void 8173 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) 8174 { 8175 
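	/*
	 * Note added for clarity: re-program every E-tag rule kept in the
	 * software list into hardware via ixgbe_dev_l2_tunnel_filter_add()
	 * with restore == TRUE; called from ixgbe_filter_restore() below.
	 */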
struct ixgbe_l2_tn_info *l2_tn_info = 8176 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8177 struct ixgbe_l2_tn_filter *node; 8178 struct ixgbe_l2_tunnel_conf l2_tn_conf; 8179 8180 TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { 8181 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; 8182 l2_tn_conf.tunnel_id = node->key.tn_id; 8183 l2_tn_conf.pool = node->pool; 8184 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); 8185 } 8186 } 8187 8188 /* restore rss filter */ 8189 static inline void 8190 ixgbe_rss_filter_restore(struct rte_eth_dev *dev) 8191 { 8192 struct ixgbe_filter_info *filter_info = 8193 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8194 8195 if (filter_info->rss_info.conf.queue_num) 8196 ixgbe_config_rss_filter(dev, 8197 &filter_info->rss_info, TRUE); 8198 } 8199 8200 static int 8201 ixgbe_filter_restore(struct rte_eth_dev *dev) 8202 { 8203 ixgbe_ntuple_filter_restore(dev); 8204 ixgbe_ethertype_filter_restore(dev); 8205 ixgbe_syn_filter_restore(dev); 8206 ixgbe_fdir_filter_restore(dev); 8207 ixgbe_l2_tn_filter_restore(dev); 8208 ixgbe_rss_filter_restore(dev); 8209 8210 return 0; 8211 } 8212 8213 static void 8214 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) 8215 { 8216 struct ixgbe_l2_tn_info *l2_tn_info = 8217 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8218 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8219 8220 if (l2_tn_info->e_tag_en) 8221 (void)ixgbe_e_tag_enable(hw); 8222 8223 if (l2_tn_info->e_tag_fwd_en) 8224 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); 8225 8226 (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); 8227 } 8228 8229 /* remove all the n-tuple filters */ 8230 void 8231 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) 8232 { 8233 struct ixgbe_filter_info *filter_info = 8234 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8235 struct ixgbe_5tuple_filter *p_5tuple; 8236 8237 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) 8238 ixgbe_remove_5tuple_filter(dev, p_5tuple); 8239 } 8240 8241 /* remove all the ether type filters */ 8242 void 8243 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) 8244 { 8245 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8246 struct ixgbe_filter_info *filter_info = 8247 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8248 int i; 8249 8250 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8251 if (filter_info->ethertype_mask & (1 << i) && 8252 !filter_info->ethertype_filters[i].conf) { 8253 (void)ixgbe_ethertype_filter_remove(filter_info, 8254 (uint8_t)i); 8255 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); 8256 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); 8257 IXGBE_WRITE_FLUSH(hw); 8258 } 8259 } 8260 } 8261 8262 /* remove the SYN filter */ 8263 void 8264 ixgbe_clear_syn_filter(struct rte_eth_dev *dev) 8265 { 8266 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8267 struct ixgbe_filter_info *filter_info = 8268 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8269 8270 if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { 8271 filter_info->syn_info = 0; 8272 8273 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); 8274 IXGBE_WRITE_FLUSH(hw); 8275 } 8276 } 8277 8278 /* remove all the L2 tunnel filters */ 8279 int 8280 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) 8281 { 8282 struct ixgbe_l2_tn_info *l2_tn_info = 8283 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8284 struct ixgbe_l2_tn_filter *l2_tn_filter; 8285 struct ixgbe_l2_tunnel_conf 
l2_tn_conf; 8286 int ret = 0; 8287 8288 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 8289 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; 8290 l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; 8291 l2_tn_conf.pool = l2_tn_filter->pool; 8292 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); 8293 if (ret < 0) 8294 return ret; 8295 } 8296 8297 return 0; 8298 } 8299 8300 void 8301 ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev, 8302 struct ixgbe_macsec_setting *macsec_setting) 8303 { 8304 struct ixgbe_macsec_setting *macsec = 8305 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 8306 8307 macsec->offload_en = macsec_setting->offload_en; 8308 macsec->encrypt_en = macsec_setting->encrypt_en; 8309 macsec->replayprotect_en = macsec_setting->replayprotect_en; 8310 } 8311 8312 void 8313 ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev) 8314 { 8315 struct ixgbe_macsec_setting *macsec = 8316 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 8317 8318 macsec->offload_en = 0; 8319 macsec->encrypt_en = 0; 8320 macsec->replayprotect_en = 0; 8321 } 8322 8323 void 8324 ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev, 8325 struct ixgbe_macsec_setting *macsec_setting) 8326 { 8327 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8328 uint32_t ctrl; 8329 uint8_t en = macsec_setting->encrypt_en; 8330 uint8_t rp = macsec_setting->replayprotect_en; 8331 8332 /** 8333 * Workaround: 8334 * As no ixgbe_disable_sec_rx_path equivalent is 8335 * implemented for tx in the base code, and we are 8336 * not allowed to modify the base code in DPDK, so 8337 * just call the hand-written one directly for now. 8338 * The hardware support has been checked by 8339 * ixgbe_disable_sec_rx_path(). 8340 */ 8341 ixgbe_disable_sec_tx_path_generic(hw); 8342 8343 /* Enable Ethernet CRC (required by MACsec offload) */ 8344 ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 8345 ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; 8346 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl); 8347 8348 /* Enable the TX and RX crypto engines */ 8349 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8350 ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS; 8351 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); 8352 8353 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 8354 ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS; 8355 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); 8356 8357 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); 8358 ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK; 8359 ctrl |= 0x3; 8360 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl); 8361 8362 /* Enable SA lookup */ 8363 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); 8364 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; 8365 ctrl |= en ? 
IXGBE_LSECTXCTRL_AUTH_ENCRYPT : 8366 IXGBE_LSECTXCTRL_AUTH; 8367 ctrl |= IXGBE_LSECTXCTRL_AISCI; 8368 ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK; 8369 ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK; 8370 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); 8371 8372 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); 8373 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; 8374 ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT; 8375 ctrl &= ~IXGBE_LSECRXCTRL_PLSH; 8376 if (rp) 8377 ctrl |= IXGBE_LSECRXCTRL_RP; 8378 else 8379 ctrl &= ~IXGBE_LSECRXCTRL_RP; 8380 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); 8381 8382 /* Start the data paths */ 8383 ixgbe_enable_sec_rx_path(hw); 8384 /** 8385 * Workaround: 8386 * As no ixgbe_enable_sec_rx_path equivalent is 8387 * implemented for tx in the base code, and we are 8388 * not allowed to modify the base code in DPDK, so 8389 * just call the hand-written one directly for now. 8390 */ 8391 ixgbe_enable_sec_tx_path_generic(hw); 8392 } 8393 8394 void 8395 ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev) 8396 { 8397 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8398 uint32_t ctrl; 8399 8400 /** 8401 * Workaround: 8402 * As no ixgbe_disable_sec_rx_path equivalent is 8403 * implemented for tx in the base code, and we are 8404 * not allowed to modify the base code in DPDK, so 8405 * just call the hand-written one directly for now. 8406 * The hardware support has been checked by 8407 * ixgbe_disable_sec_rx_path(). 8408 */ 8409 ixgbe_disable_sec_tx_path_generic(hw); 8410 8411 /* Disable the TX and RX crypto engines */ 8412 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8413 ctrl |= IXGBE_SECTXCTRL_SECTX_DIS; 8414 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); 8415 8416 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 8417 ctrl |= IXGBE_SECRXCTRL_SECRX_DIS; 8418 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); 8419 8420 /* Disable SA lookup */ 8421 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); 8422 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; 8423 ctrl |= IXGBE_LSECTXCTRL_DISABLE; 8424 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); 8425 8426 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); 8427 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; 8428 ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT; 8429 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); 8430 8431 /* Start the data paths */ 8432 ixgbe_enable_sec_rx_path(hw); 8433 /** 8434 * Workaround: 8435 * As no ixgbe_enable_sec_rx_path equivalent is 8436 * implemented for tx in the base code, and we are 8437 * not allowed to modify the base code in DPDK, so 8438 * just call the hand-written one directly for now. 8439 */ 8440 ixgbe_enable_sec_tx_path_generic(hw); 8441 } 8442 8443 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd); 8444 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map); 8445 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci"); 8446 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd); 8447 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map); 8448 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci"); 8449 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf, 8450 IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>"); 8451 8452 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_init, init, NOTICE); 8453 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_driver, driver, NOTICE); 8454 8455 #ifdef RTE_ETHDEV_DEBUG_RX 8456 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_rx, rx, DEBUG); 8457 #endif 8458 #ifdef RTE_ETHDEV_DEBUG_TX 8459 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_tx, tx, DEBUG); 8460 #endif 8461
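/*
 * Usage sketch (illustrative only, not part of the driver): the pflink_fullchk
 * devarg registered above is passed per VF device on the EAL command line,
 * for example:
 *
 *     dpdk-testpmd -a 0000:01:10.0,pflink_fullchk=1 -- -i
 *
 * The PCI address is a placeholder. When the option is set to 1, the VF link
 * update performs the full PF link check rather than the faster default
 * path (0).
 */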